summaryrefslogtreecommitdiff
path: root/storage/bdb
diff options
context:
space:
mode:
author: unknown <brian@zim.(none)> 2005-04-26 18:19:54 -0700
committer: unknown <brian@zim.(none)> 2005-04-26 18:19:54 -0700
commit: 0aa208edb178bab2b1911ce36f89277d4cf24b47 (patch)
tree: df9016f3d70b4657f89dcddca2ec4e188fc7fbdf /storage/bdb
parent: 26e2ebb727e9e8b6961464d7869a84689ea36e8c (diff)
download: mariadb-git-0aa208edb178bab2b1911ce36f89277d4cf24b47.tar.gz
Changes to create storage directory for storage engines.
storage/heap/.cvsignore: mvdir storage/heap/ChangeLog: mvdir storage/heap/Makefile.am: mvdir storage/heap/_check.c: mvdir storage/heap/_rectest.c: mvdir storage/heap/heapdef.h: mvdir storage/heap/hp_block.c: mvdir storage/heap/hp_clear.c: mvdir storage/heap/hp_close.c: mvdir storage/heap/hp_create.c: mvdir storage/heap/hp_delete.c: mvdir storage/heap/hp_extra.c: mvdir storage/heap/hp_hash.c: mvdir storage/heap/hp_info.c: mvdir storage/heap/hp_open.c: mvdir storage/heap/hp_panic.c: mvdir storage/heap/hp_rename.c: mvdir storage/heap/hp_rfirst.c: mvdir storage/heap/hp_rkey.c: mvdir storage/heap/hp_rlast.c: mvdir storage/heap/hp_rnext.c: mvdir storage/heap/hp_rprev.c: mvdir storage/heap/hp_rrnd.c: mvdir storage/heap/hp_rsame.c: mvdir storage/heap/hp_scan.c: mvdir storage/heap/hp_static.c: mvdir storage/heap/hp_test1.c: mvdir storage/heap/hp_test2.c: mvdir storage/heap/hp_update.c: mvdir storage/heap/hp_write.c: mvdir storage/heap/make-ccc: mvdir storage/myisam/.cvsignore: mvdir storage/myisam/ChangeLog: mvdir storage/myisam/Makefile.am: mvdir storage/myisam/NEWS: mvdir storage/myisam/TODO: mvdir storage/myisam/ft_boolean_search.c: mvdir storage/myisam/ft_eval.c: mvdir storage/myisam/ft_eval.h: mvdir storage/myisam/ft_nlq_search.c: mvdir storage/myisam/ft_parser.c: mvdir storage/myisam/ft_static.c: mvdir storage/myisam/ft_stem.c: mvdir storage/myisam/ft_stopwords.c: mvdir storage/myisam/ft_test1.c: mvdir storage/myisam/ft_test1.h: mvdir storage/myisam/ft_update.c: mvdir storage/myisam/ftdefs.h: mvdir storage/myisam/fulltext.h: mvdir storage/myisam/make-ccc: mvdir storage/myisam/mi_cache.c: mvdir storage/myisam/mi_changed.c: mvdir storage/myisam/mi_check.c: mvdir storage/myisam/mi_checksum.c: mvdir storage/myisam/mi_close.c: mvdir storage/myisam/mi_create.c: mvdir storage/myisam/mi_dbug.c: mvdir storage/myisam/mi_delete.c: mvdir storage/myisam/mi_delete_all.c: mvdir storage/myisam/mi_delete_table.c: mvdir storage/myisam/mi_dynrec.c: mvdir storage/myisam/mi_extra.c: mvdir 
storage/myisam/mi_info.c: mvdir storage/myisam/mi_key.c: mvdir storage/myisam/mi_keycache.c: mvdir storage/myisam/mi_locking.c: mvdir storage/myisam/mi_log.c: mvdir storage/myisam/mi_open.c: mvdir storage/myisam/mi_packrec.c: mvdir storage/myisam/mi_page.c: mvdir storage/myisam/mi_panic.c: mvdir storage/myisam/mi_preload.c: mvdir storage/myisam/mi_range.c: mvdir storage/myisam/mi_rename.c: mvdir storage/myisam/mi_rfirst.c: mvdir storage/myisam/mi_rkey.c: mvdir storage/myisam/mi_rlast.c: mvdir storage/myisam/mi_rnext.c: mvdir storage/myisam/mi_rnext_same.c: mvdir storage/myisam/mi_rprev.c: mvdir storage/myisam/mi_rrnd.c: mvdir storage/myisam/mi_rsame.c: mvdir storage/myisam/ftbench/Ecompare.pl: mvdir storage/myisam/ftbench/Ecreate.pl: mvdir storage/myisam/ftbench/Ereport.pl: mvdir storage/myisam/ftbench/README: mvdir storage/myisam/ftbench/ft-test-run.sh: mvdir storage/myisam/mi_rsamepos.c: mvdir storage/myisam/mi_scan.c: mvdir storage/myisam/mi_search.c: mvdir storage/myisam/mi_static.c: mvdir storage/myisam/mi_statrec.c: mvdir storage/myisam/mi_test1.c: mvdir storage/myisam/mi_test2.c: mvdir storage/myisam/mi_test3.c: mvdir storage/myisam/mi_test_all.res: mvdir storage/myisam/mi_test_all.sh: mvdir storage/myisam/mi_unique.c: mvdir storage/myisam/mi_update.c: mvdir storage/myisam/mi_write.c: mvdir storage/myisam/myisam_ftdump.c: mvdir storage/myisam/myisamchk.c: mvdir storage/myisam/myisamdef.h: mvdir storage/myisam/myisamlog.c: mvdir storage/myisam/myisampack.c: mvdir storage/myisam/rt_index.c: mvdir storage/myisam/rt_index.h: mvdir storage/myisam/rt_key.c: mvdir storage/myisam/rt_key.h: mvdir storage/myisam/rt_mbr.c: mvdir storage/myisam/rt_mbr.h: mvdir storage/myisam/rt_split.c: mvdir storage/myisam/rt_test.c: mvdir storage/myisam/sort.c: mvdir storage/myisam/sp_defs.h: mvdir storage/myisam/sp_key.c: mvdir storage/myisam/sp_test.c: mvdir storage/myisam/test_pack: mvdir storage/myisammrg/.cvsignore: mvdir storage/myisammrg/Makefile.am: mvdir 
storage/myisammrg/make-ccc: mvdir storage/myisammrg/myrg_close.c: mvdir storage/myisammrg/myrg_create.c: mvdir storage/myisammrg/myrg_def.h: mvdir storage/myisammrg/myrg_delete.c: mvdir storage/myisammrg/myrg_extra.c: mvdir storage/myisammrg/myrg_info.c: mvdir storage/myisammrg/myrg_locking.c: mvdir storage/myisammrg/myrg_open.c: mvdir storage/myisammrg/myrg_panic.c: mvdir storage/myisammrg/myrg_queue.c: mvdir storage/myisammrg/myrg_range.c: mvdir storage/myisammrg/myrg_rfirst.c: mvdir storage/myisammrg/myrg_rkey.c: mvdir storage/myisammrg/myrg_rlast.c: mvdir storage/myisammrg/myrg_rnext.c: mvdir storage/myisammrg/myrg_rnext_same.c: mvdir storage/myisammrg/myrg_rprev.c: mvdir storage/myisammrg/myrg_rrnd.c: mvdir storage/myisammrg/myrg_rsame.c: mvdir storage/myisammrg/myrg_static.c: mvdir storage/myisammrg/myrg_update.c: mvdir storage/myisammrg/myrg_write.c: mvdir storage/innobase/Makefile.am: mvdir storage/innobase/btr/Makefile.am: mvdir storage/innobase/btr/btr0btr.c: mvdir storage/innobase/btr/btr0cur.c: mvdir storage/innobase/btr/btr0pcur.c: mvdir storage/innobase/btr/btr0sea.c: mvdir storage/innobase/btr/makefilewin: mvdir storage/innobase/buf/Makefile.am: mvdir storage/innobase/buf/buf0buf.c: mvdir storage/innobase/buf/buf0flu.c: mvdir storage/innobase/buf/buf0lru.c: mvdir storage/innobase/buf/buf0rea.c: mvdir storage/innobase/buf/makefilewin: mvdir storage/innobase/configure.in: mvdir storage/innobase/data/Makefile.am: mvdir storage/innobase/data/data0data.c: mvdir storage/innobase/data/data0type.c: mvdir storage/innobase/data/makefilewin: mvdir storage/innobase/db/db0err.h: mvdir storage/innobase/dict/Makefile.am: mvdir storage/innobase/dict/dict0boot.c: mvdir storage/innobase/dict/dict0crea.c: mvdir storage/innobase/dict/dict0dict.c: mvdir storage/innobase/dict/dict0load.c: mvdir storage/innobase/makefilewin: mvdir storage/innobase/my_cnf: mvdir storage/innobase/dict/dict0mem.c: mvdir storage/innobase/dict/makefilewin: mvdir 
storage/innobase/dyn/Makefile.am: mvdir storage/innobase/dyn/dyn0dyn.c: mvdir storage/innobase/dyn/makefilewin: mvdir storage/innobase/eval/Makefile.am: mvdir storage/innobase/eval/eval0eval.c: mvdir storage/innobase/eval/eval0proc.c: mvdir storage/innobase/eval/makefilewin: mvdir storage/innobase/fil/Makefile.am: mvdir storage/innobase/fil/fil0fil.c: mvdir storage/innobase/fil/makefilewin: mvdir storage/innobase/fsp/Makefile.am: mvdir storage/innobase/fsp/fsp0fsp.c: mvdir storage/innobase/fsp/makefilewin: mvdir storage/innobase/fut/Makefile.am: mvdir storage/innobase/fut/fut0fut.c: mvdir storage/innobase/fut/fut0lst.c: mvdir storage/innobase/fut/makefilewin: mvdir storage/innobase/ha/Makefile.am: mvdir storage/innobase/ha/ha0ha.c: mvdir storage/innobase/ha/hash0hash.c: mvdir storage/innobase/ha/makefilewin: mvdir storage/innobase/ibuf/Makefile.am: mvdir storage/innobase/ibuf/ibuf0ibuf.c: mvdir storage/innobase/ibuf/makefilewin: mvdir storage/innobase/include/Makefile.am: mvdir storage/innobase/include/Makefile.i: mvdir storage/innobase/include/btr0btr.h: mvdir storage/innobase/include/btr0btr.ic: mvdir storage/innobase/include/btr0cur.h: mvdir storage/innobase/include/btr0cur.ic: mvdir storage/innobase/include/btr0pcur.h: mvdir storage/innobase/include/btr0pcur.ic: mvdir storage/innobase/include/btr0sea.h: mvdir storage/innobase/include/btr0sea.ic: mvdir storage/innobase/include/btr0types.h: mvdir storage/innobase/include/buf0buf.h: mvdir storage/innobase/include/buf0buf.ic: mvdir storage/innobase/include/buf0flu.h: mvdir storage/innobase/include/buf0flu.ic: mvdir storage/innobase/include/buf0lru.h: mvdir storage/innobase/include/buf0lru.ic: mvdir storage/innobase/include/buf0rea.h: mvdir storage/innobase/include/buf0types.h: mvdir storage/innobase/include/data0data.h: mvdir storage/innobase/include/data0data.ic: mvdir storage/innobase/include/data0type.h: mvdir storage/innobase/include/data0type.ic: mvdir storage/innobase/include/data0types.h: mvdir 
storage/innobase/include/db0err.h: mvdir storage/innobase/include/dict0boot.h: mvdir storage/innobase/include/dict0boot.ic: mvdir storage/innobase/include/dict0crea.h: mvdir storage/innobase/include/dict0crea.ic: mvdir storage/innobase/include/dict0dict.h: mvdir storage/innobase/include/dict0dict.ic: mvdir storage/innobase/include/dict0load.h: mvdir storage/innobase/include/dict0load.ic: mvdir storage/innobase/include/dict0mem.h: mvdir storage/innobase/include/dict0mem.ic: mvdir storage/innobase/include/dict0types.h: mvdir storage/innobase/include/dyn0dyn.h: mvdir storage/innobase/include/dyn0dyn.ic: mvdir storage/innobase/include/eval0eval.h: mvdir storage/innobase/include/eval0eval.ic: mvdir storage/innobase/include/eval0proc.h: mvdir storage/innobase/include/eval0proc.ic: mvdir storage/innobase/include/fil0fil.h: mvdir storage/innobase/include/fsp0fsp.h: mvdir storage/innobase/include/fsp0fsp.ic: mvdir storage/innobase/include/fut0fut.h: mvdir storage/innobase/include/fut0fut.ic: mvdir storage/innobase/include/fut0lst.h: mvdir storage/innobase/include/fut0lst.ic: mvdir storage/innobase/include/ha0ha.h: mvdir storage/innobase/include/ha0ha.ic: mvdir storage/innobase/include/hash0hash.h: mvdir storage/innobase/include/hash0hash.ic: mvdir storage/innobase/include/ibuf0ibuf.h: mvdir storage/innobase/include/ibuf0ibuf.ic: mvdir storage/innobase/include/ibuf0types.h: mvdir storage/innobase/include/lock0lock.h: mvdir storage/innobase/include/lock0lock.ic: mvdir storage/innobase/include/lock0types.h: mvdir storage/innobase/include/log0log.h: mvdir storage/innobase/include/log0log.ic: mvdir storage/innobase/include/log0recv.h: mvdir storage/innobase/include/log0recv.ic: mvdir storage/innobase/include/mach0data.h: mvdir storage/innobase/include/mach0data.ic: mvdir storage/innobase/include/makefilewin.i: mvdir storage/innobase/include/mem0dbg.h: mvdir storage/innobase/include/mem0dbg.ic: mvdir storage/innobase/include/mem0mem.h: mvdir storage/innobase/include/mem0mem.ic: 
mvdir storage/innobase/include/mem0pool.h: mvdir storage/innobase/include/mem0pool.ic: mvdir storage/innobase/include/mtr0log.h: mvdir storage/innobase/include/mtr0log.ic: mvdir storage/innobase/include/mtr0mtr.h: mvdir storage/innobase/include/mtr0mtr.ic: mvdir storage/innobase/include/mtr0types.h: mvdir storage/innobase/include/os0file.h: mvdir storage/innobase/include/os0proc.h: mvdir storage/innobase/include/os0proc.ic: mvdir storage/innobase/include/os0sync.h: mvdir storage/innobase/include/os0sync.ic: mvdir storage/innobase/include/os0thread.h: mvdir storage/innobase/include/os0thread.ic: mvdir storage/innobase/include/page0cur.h: mvdir storage/innobase/include/page0cur.ic: mvdir storage/innobase/include/page0page.h: mvdir storage/innobase/include/page0page.ic: mvdir storage/innobase/include/page0types.h: mvdir storage/innobase/include/pars0grm.h: mvdir storage/innobase/include/pars0opt.h: mvdir storage/innobase/include/pars0opt.ic: mvdir storage/innobase/include/pars0pars.h: mvdir storage/innobase/include/pars0pars.ic: mvdir storage/innobase/include/pars0sym.h: mvdir storage/innobase/include/pars0sym.ic: mvdir storage/innobase/include/pars0types.h: mvdir storage/innobase/include/que0que.h: mvdir storage/innobase/include/que0que.ic: mvdir storage/innobase/include/que0types.h: mvdir storage/innobase/include/read0read.h: mvdir storage/innobase/include/read0read.ic: mvdir storage/innobase/include/read0types.h: mvdir storage/innobase/include/rem0cmp.h: mvdir storage/innobase/include/rem0cmp.ic: mvdir storage/innobase/include/rem0rec.h: mvdir storage/innobase/include/rem0rec.ic: mvdir storage/innobase/include/rem0types.h: mvdir storage/innobase/include/row0ins.h: mvdir storage/innobase/include/row0ins.ic: mvdir storage/innobase/include/row0mysql.h: mvdir storage/innobase/include/row0mysql.ic: mvdir storage/innobase/include/row0purge.h: mvdir storage/innobase/include/row0purge.ic: mvdir storage/innobase/include/row0row.h: mvdir storage/innobase/include/row0row.ic: 
mvdir storage/innobase/include/row0sel.h: mvdir storage/innobase/include/row0sel.ic: mvdir storage/innobase/include/row0types.h: mvdir storage/innobase/include/row0uins.h: mvdir storage/innobase/include/row0uins.ic: mvdir storage/innobase/include/row0umod.h: mvdir storage/innobase/include/row0umod.ic: mvdir storage/innobase/include/row0undo.h: mvdir storage/innobase/include/row0undo.ic: mvdir storage/innobase/include/row0upd.h: mvdir storage/innobase/include/row0upd.ic: mvdir storage/innobase/include/row0vers.h: mvdir storage/innobase/include/row0vers.ic: mvdir storage/innobase/include/srv0que.h: mvdir storage/innobase/include/srv0srv.h: mvdir storage/innobase/include/srv0srv.ic: mvdir storage/innobase/include/srv0start.h: mvdir storage/innobase/include/sync0arr.h: mvdir storage/innobase/include/sync0arr.ic: mvdir storage/innobase/include/sync0rw.h: mvdir storage/innobase/include/sync0rw.ic: mvdir storage/innobase/include/sync0sync.h: mvdir storage/innobase/include/sync0sync.ic: mvdir storage/innobase/include/sync0types.h: mvdir storage/innobase/include/thr0loc.h: mvdir storage/innobase/include/thr0loc.ic: mvdir storage/innobase/include/trx0purge.h: mvdir storage/innobase/include/trx0purge.ic: mvdir storage/innobase/include/trx0rec.h: mvdir storage/innobase/include/trx0rec.ic: mvdir storage/innobase/include/trx0roll.h: mvdir storage/innobase/include/trx0roll.ic: mvdir storage/innobase/include/trx0rseg.h: mvdir storage/innobase/include/trx0rseg.ic: mvdir storage/innobase/include/trx0sys.h: mvdir storage/innobase/include/trx0sys.ic: mvdir storage/innobase/include/trx0trx.h: mvdir storage/innobase/include/trx0trx.ic: mvdir storage/innobase/include/trx0types.h: mvdir storage/innobase/include/trx0undo.h: mvdir storage/innobase/include/trx0undo.ic: mvdir storage/innobase/include/trx0xa.h: mvdir storage/innobase/include/univ.i: mvdir storage/innobase/include/usr0sess.h: mvdir storage/innobase/include/usr0sess.ic: mvdir storage/innobase/include/usr0types.h: mvdir 
storage/innobase/include/ut0byte.h: mvdir storage/innobase/include/ut0byte.ic: mvdir storage/innobase/include/ut0dbg.h: mvdir storage/innobase/include/ut0lst.h: mvdir storage/innobase/include/ut0mem.h: mvdir storage/innobase/include/ut0mem.ic: mvdir storage/innobase/include/ut0rnd.h: mvdir storage/innobase/include/ut0rnd.ic: mvdir storage/innobase/include/ut0sort.h: mvdir storage/innobase/include/ut0ut.h: mvdir storage/innobase/include/ut0ut.ic: mvdir storage/innobase/lock/Makefile.am: mvdir storage/innobase/lock/lock0lock.c: mvdir storage/innobase/lock/makefilewin: mvdir storage/innobase/log/Makefile.am: mvdir storage/innobase/log/log0log.c: mvdir storage/innobase/log/log0recv.c: mvdir storage/innobase/log/makefilewin: mvdir storage/innobase/mach/Makefile.am: mvdir storage/innobase/mach/mach0data.c: mvdir storage/innobase/mach/makefilewin: mvdir storage/innobase/mem/Makefile.am: mvdir storage/innobase/mem/makefilewin: mvdir storage/innobase/mem/mem0dbg.c: mvdir storage/innobase/mem/mem0mem.c: mvdir storage/innobase/mem/mem0pool.c: mvdir storage/innobase/mtr/Makefile.am: mvdir storage/innobase/mtr/makefilewin: mvdir storage/innobase/mtr/mtr0log.c: mvdir storage/innobase/mtr/mtr0mtr.c: mvdir storage/innobase/os/Makefile.am: mvdir storage/innobase/os/makefilewin: mvdir storage/innobase/os/os0file.c: mvdir storage/innobase/os/os0proc.c: mvdir storage/innobase/os/os0sync.c: mvdir storage/innobase/os/os0thread.c: mvdir storage/innobase/page/Makefile.am: mvdir storage/innobase/page/makefilewin: mvdir storage/innobase/page/page0cur.c: mvdir storage/innobase/page/page0page.c: mvdir storage/innobase/pars/Makefile.am: mvdir storage/innobase/pars/lexyy.c: mvdir storage/innobase/pars/makefilewin: mvdir storage/innobase/pars/pars0grm.c: mvdir storage/innobase/pars/pars0grm.h: mvdir storage/innobase/pars/pars0grm.y: mvdir storage/innobase/pars/pars0lex.l: mvdir storage/innobase/pars/pars0opt.c: mvdir storage/innobase/pars/pars0pars.c: mvdir storage/innobase/pars/pars0sym.c: 
mvdir storage/innobase/que/Makefile.am: mvdir storage/innobase/que/makefilewin: mvdir storage/innobase/que/que0que.c: mvdir storage/innobase/read/Makefile.am: mvdir storage/innobase/read/makefilewin: mvdir storage/innobase/read/read0read.c: mvdir storage/innobase/rem/Makefile.am: mvdir storage/innobase/rem/makefilewin: mvdir storage/innobase/rem/rem0cmp.c: mvdir storage/innobase/rem/rem0rec.c: mvdir storage/innobase/row/Makefile.am: mvdir storage/innobase/row/makefilewin: mvdir storage/innobase/row/row0ins.c: mvdir storage/innobase/row/row0mysql.c: mvdir storage/innobase/row/row0purge.c: mvdir storage/innobase/row/row0row.c: mvdir storage/innobase/row/row0sel.c: mvdir storage/innobase/row/row0uins.c: mvdir storage/innobase/row/row0umod.c: mvdir storage/innobase/row/row0undo.c: mvdir storage/innobase/row/row0upd.c: mvdir storage/innobase/row/row0vers.c: mvdir storage/innobase/srv/Makefile.am: mvdir storage/innobase/srv/makefilewin: mvdir storage/innobase/srv/srv0que.c: mvdir storage/innobase/srv/srv0srv.c: mvdir storage/innobase/srv/srv0start.c: mvdir storage/innobase/sync/Makefile.am: mvdir storage/innobase/sync/makefilewin: mvdir storage/innobase/sync/sync0arr.c: mvdir storage/innobase/sync/sync0rw.c: mvdir storage/innobase/sync/sync0sync.c: mvdir storage/innobase/thr/Makefile.am: mvdir storage/innobase/thr/makefilewin: mvdir storage/innobase/thr/thr0loc.c: mvdir storage/innobase/trx/Makefile.am: mvdir storage/innobase/trx/makefilewin: mvdir storage/innobase/trx/trx0purge.c: mvdir storage/innobase/trx/trx0rec.c: mvdir storage/innobase/trx/trx0roll.c: mvdir storage/innobase/trx/trx0rseg.c: mvdir storage/innobase/trx/trx0sys.c: mvdir storage/innobase/trx/trx0trx.c: mvdir storage/innobase/trx/trx0undo.c: mvdir storage/innobase/usr/Makefile.am: mvdir storage/innobase/usr/makefilewin: mvdir storage/innobase/usr/usr0sess.c: mvdir storage/innobase/ut/Makefile.am: mvdir storage/innobase/ut/makefilewin: mvdir storage/innobase/ut/ut0byte.c: mvdir 
storage/innobase/ut/ut0dbg.c: mvdir storage/innobase/ut/ut0mem.c: mvdir storage/innobase/ut/ut0rnd.c: mvdir storage/innobase/ut/ut0ut.c: mvdir storage/ndb/Makefile.am: mvdir storage/ndb/bin/.empty: mvdir storage/ndb/bin/check-regression.sh: mvdir storage/ndb/bin/makeTestPrograms_html.sh: mvdir storage/ndb/config/common.mk.am: mvdir storage/ndb/config/make-win-dsw.sh: mvdir storage/ndb/config/type_kernel.mk.am: mvdir storage/ndb/config/type_mgmapiclient.mk.am: mvdir storage/ndb/config/type_ndbapi.mk.am: mvdir storage/ndb/config/type_ndbapiclient.mk.am: mvdir storage/ndb/config/type_ndbapitest.mk.am: mvdir storage/ndb/config/type_ndbapitools.mk.am: mvdir storage/ndb/config/type_util.mk.am: mvdir storage/ndb/config/win-includes: mvdir storage/ndb/config/win-lib.am: mvdir storage/ndb/config/win-libraries: mvdir storage/ndb/config/win-name: mvdir storage/ndb/config/win-prg.am: mvdir storage/ndb/config/win-sources: mvdir storage/ndb/demos/1-node/1-api-3/Ndb.cfg: mvdir storage/ndb/demos/1-node/1-db-2/Ndb.cfg: mvdir storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg: mvdir storage/ndb/demos/1-node/1-mgm-1/template_config.ini: mvdir storage/ndb/demos/2-node/2-api-4/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-api-5/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-api-6/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-api-7/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-db-2/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-db-3/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-mgm-1/template_config.ini: mvdir storage/ndb/demos/config-templates/config_template-1-REP.ini: mvdir storage/ndb/demos/config-templates/config_template-4.ini: mvdir storage/ndb/demos/config-templates/config_template-install.ini: mvdir storage/ndb/demos/run_demo1-PS-SS_common.sh: mvdir storage/ndb/demos/run_demo1-PS.sh: mvdir storage/ndb/demos/run_demo1-SS.sh: mvdir storage/ndb/demos/run_demo1.sh: mvdir storage/ndb/demos/run_demo2.sh: mvdir storage/ndb/docs/Makefile.am: mvdir storage/ndb/docs/README: 
mvdir storage/ndb/docs/doxygen/Doxyfile.mgmapi: mvdir storage/ndb/docs/doxygen/Doxyfile.ndbapi: mvdir storage/ndb/docs/doxygen/Doxyfile.ndb: mvdir storage/ndb/docs/doxygen/Doxyfile.odbc: mvdir storage/ndb/docs/doxygen/Doxyfile.test: mvdir storage/ndb/docs/doxygen/header.mgmapi.tex: mvdir storage/ndb/docs/doxygen/header.ndbapi.tex: mvdir storage/ndb/docs/doxygen/postdoxy.pl: mvdir storage/ndb/docs/doxygen/predoxy.pl: mvdir storage/ndb/docs/wl2077.txt: mvdir storage/ndb/home/bin/Linuxmkisofs: mvdir storage/ndb/home/bin/Solarismkisofs: mvdir storage/ndb/home/bin/cvs2cl.pl: mvdir storage/ndb/home/bin/fix-cvs-root: mvdir storage/ndb/home/bin/import-from-bk.sh: mvdir storage/ndb/home/bin/ndb_deploy: mvdir storage/ndb/home/bin/ndbdoxy.pl: mvdir storage/ndb/home/bin/ngcalc: mvdir storage/ndb/home/bin/parseConfigFile.awk: mvdir storage/ndb/home/bin/setup-test.sh: mvdir storage/ndb/home/bin/signallog2html.lib/signallog2list.awk: mvdir storage/ndb/home/bin/signallog2html.lib/uniq_blocks.awk: mvdir storage/ndb/home/bin/signallog2html.sh: mvdir storage/ndb/home/bin/stripcr: mvdir storage/ndb/home/lib/funcs.sh: mvdir storage/ndb/include/Makefile.am: mvdir storage/ndb/include/debugger/DebuggerNames.hpp: mvdir storage/ndb/include/debugger/EventLogger.hpp: mvdir storage/ndb/include/debugger/GrepError.hpp: mvdir storage/ndb/include/debugger/SignalLoggerManager.hpp: mvdir storage/ndb/include/editline/editline.h: mvdir storage/ndb/include/kernel/AttributeDescriptor.hpp: mvdir storage/ndb/include/kernel/AttributeHeader.hpp: mvdir storage/ndb/include/kernel/AttributeList.hpp: mvdir storage/ndb/include/kernel/BlockNumbers.h: mvdir storage/ndb/include/kernel/GlobalSignalNumbers.h: mvdir storage/ndb/include/kernel/GrepEvent.hpp: mvdir storage/ndb/include/kernel/Interpreter.hpp: mvdir storage/ndb/include/kernel/LogLevel.hpp: mvdir storage/ndb/include/kernel/NodeBitmask.hpp: mvdir storage/ndb/include/kernel/NodeInfo.hpp: mvdir storage/ndb/include/kernel/NodeState.hpp: mvdir 
storage/ndb/include/kernel/RefConvert.hpp: mvdir storage/ndb/include/kernel/kernel_config_parameters.h: mvdir storage/ndb/include/kernel/kernel_types.h: mvdir storage/ndb/include/kernel/ndb_limits.h: mvdir storage/ndb/include/kernel/signaldata/AbortAll.hpp: mvdir storage/ndb/include/kernel/signaldata/AccFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/AccLock.hpp: mvdir storage/ndb/include/kernel/signaldata/AccScan.hpp: mvdir storage/ndb/include/kernel/signaldata/AccSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/AlterIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/AlterTab.hpp: mvdir storage/ndb/include/kernel/signaldata/AlterTable.hpp: mvdir storage/ndb/include/kernel/signaldata/AlterTrig.hpp: mvdir storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/ApiVersion.hpp: mvdir storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/AttrInfo.hpp: mvdir storage/ndb/include/kernel/trigger_definitions.h: mvdir storage/ndb/include/ndb_constants.h: mvdir storage/ndb/include/ndb_global.h.in: mvdir storage/ndb/include/ndb_init.h: mvdir storage/ndb/include/ndb_net.h: mvdir storage/ndb/include/ndb_types.h.in: mvdir storage/ndb/include/ndb_version.h.in: mvdir storage/ndb/include/kernel/signaldata/BackupContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/BackupImpl.hpp: mvdir storage/ndb/include/kernel/signaldata/BackupSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/BlockCommitOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/BuildIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/CheckNodeGroups.hpp: mvdir storage/ndb/include/kernel/signaldata/CloseComReqConf.hpp: mvdir storage/ndb/include/kernel/signaldata/CmInit.hpp: mvdir storage/ndb/include/kernel/signaldata/CmRegSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp: mvdir storage/ndb/include/kernel/signaldata/CntrMasterConf.hpp: mvdir 
storage/ndb/include/kernel/signaldata/CntrMasterReq.hpp: mvdir storage/ndb/include/kernel/signaldata/CntrStart.hpp: mvdir storage/ndb/include/kernel/signaldata/ConfigParamId.hpp: mvdir storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp: mvdir storage/ndb/include/kernel/signaldata/CopyActive.hpp: mvdir storage/ndb/include/kernel/signaldata/CopyFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/CopyGCIReq.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateEvnt.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateTab.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateTable.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateTrig.hpp: mvdir storage/ndb/include/kernel/signaldata/DiAddTab.hpp: mvdir storage/ndb/include/kernel/signaldata/DiGetNodes.hpp: mvdir storage/ndb/include/kernel/signaldata/DictSchemaInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/DictSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/DictStart.hpp: mvdir storage/ndb/include/kernel/signaldata/DictTabInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/DihAddFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/DihContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/DihSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/DihStartTab.hpp: mvdir storage/ndb/include/kernel/signaldata/DihSwitchReplica.hpp: mvdir storage/ndb/include/kernel/signaldata/DisconnectRep.hpp: mvdir storage/ndb/include/kernel/signaldata/DropIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/DropTab.hpp: mvdir storage/ndb/include/kernel/signaldata/DropTabFile.hpp: mvdir storage/ndb/include/kernel/signaldata/DropTable.hpp: mvdir storage/ndb/include/kernel/signaldata/DropTrig.hpp: mvdir storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp: mvdir 
storage/ndb/include/kernel/signaldata/EmptyLcp.hpp: mvdir storage/ndb/include/kernel/signaldata/EndTo.hpp: mvdir storage/ndb/include/kernel/signaldata/EventReport.hpp: mvdir storage/ndb/include/kernel/signaldata/EventSubscribeReq.hpp: mvdir storage/ndb/include/kernel/signaldata/ExecFragReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FailRep.hpp: mvdir storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/FsAppendReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FsCloseReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FsConf.hpp: mvdir storage/ndb/include/kernel/signaldata/FsOpenReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FsReadWriteReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FsRef.hpp: mvdir storage/ndb/include/kernel/signaldata/FsRemoveReq.hpp: mvdir storage/ndb/include/kernel/signaldata/GCPSave.hpp: mvdir storage/ndb/include/kernel/signaldata/GetTabInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/GetTableId.hpp: mvdir storage/ndb/include/kernel/signaldata/GrepImpl.hpp: mvdir storage/ndb/include/kernel/signaldata/HotSpareRep.hpp: mvdir storage/ndb/include/kernel/signaldata/IndxAttrInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/IndxKeyInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp: mvdir storage/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp: mvdir storage/ndb/include/kernel/signaldata/KeyInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/LCP.hpp: mvdir storage/ndb/include/kernel/signaldata/ListTables.hpp: mvdir storage/ndb/include/kernel/signaldata/LqhFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/LqhKey.hpp: mvdir storage/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/LqhTransConf.hpp: mvdir storage/ndb/include/kernel/signaldata/ManagementServer.hpp: mvdir storage/ndb/include/kernel/signaldata/MasterGCP.hpp: mvdir storage/ndb/include/kernel/signaldata/MasterLCP.hpp: mvdir 
storage/ndb/include/kernel/signaldata/NFCompleteRep.hpp: mvdir storage/ndb/include/kernel/signaldata/NdbSttor.hpp: mvdir storage/ndb/include/kernel/signaldata/NdbfsContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/NextScan.hpp: mvdir storage/ndb/include/kernel/signaldata/NodeFailRep.hpp: mvdir storage/ndb/include/kernel/signaldata/NodeStateSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/PackedSignal.hpp: mvdir storage/ndb/include/kernel/signaldata/PrepDropTab.hpp: mvdir storage/ndb/include/kernel/signaldata/PrepFailReqRef.hpp: mvdir storage/ndb/include/kernel/signaldata/ReadConfig.hpp: mvdir storage/ndb/include/kernel/signaldata/ReadNodesConf.hpp: mvdir storage/ndb/include/kernel/signaldata/RelTabMem.hpp: mvdir storage/ndb/include/kernel/signaldata/RepImpl.hpp: mvdir storage/ndb/include/kernel/signaldata/ResumeReq.hpp: mvdir storage/ndb/include/kernel/signaldata/ScanFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/ScanTab.hpp: mvdir storage/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/SetVarReq.hpp: mvdir storage/ndb/include/kernel/signaldata/SignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/SignalDataPrint.hpp: mvdir storage/ndb/include/kernel/signaldata/SignalDroppedRep.hpp: mvdir storage/ndb/include/kernel/signaldata/SrFragidConf.hpp: mvdir storage/ndb/include/kernel/signaldata/StartFragReq.hpp: mvdir storage/ndb/include/kernel/signaldata/StartInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/StartMe.hpp: mvdir storage/ndb/include/kernel/signaldata/StartOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/StartPerm.hpp: mvdir storage/ndb/include/kernel/signaldata/StartRec.hpp: mvdir storage/ndb/include/kernel/signaldata/StartTo.hpp: mvdir storage/ndb/include/kernel/signaldata/StopMe.hpp: mvdir storage/ndb/include/kernel/signaldata/StopPerm.hpp: mvdir storage/ndb/include/kernel/signaldata/StopReq.hpp: mvdir storage/ndb/include/kernel/signaldata/SumaImpl.hpp: mvdir 
storage/ndb/include/kernel/signaldata/SystemError.hpp: mvdir storage/ndb/include/kernel/signaldata/TamperOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/TcCommit.hpp: mvdir storage/ndb/include/kernel/signaldata/TcContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/TcHbRep.hpp: mvdir storage/ndb/include/kernel/signaldata/TcIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/TcKeyConf.hpp: mvdir storage/ndb/include/kernel/signaldata/TcKeyFailConf.hpp: mvdir storage/ndb/include/kernel/signaldata/TcKeyRef.hpp: mvdir storage/ndb/include/kernel/signaldata/TcKeyReq.hpp: mvdir storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp: mvdir storage/ndb/include/kernel/signaldata/TcSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/TestOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/TransIdAI.hpp: mvdir storage/ndb/include/kernel/signaldata/TrigAttrInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/TupCommit.hpp: mvdir storage/ndb/include/kernel/signaldata/TupFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/TupKey.hpp: mvdir storage/ndb/include/kernel/signaldata/TupSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/TuxBound.hpp: mvdir storage/ndb/include/kernel/signaldata/TuxContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/TuxMaint.hpp: mvdir storage/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/UpdateTo.hpp: mvdir storage/ndb/include/kernel/signaldata/UpgradeStartup.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilDelete.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilExecute.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilLock.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilPrepare.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilRelease.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilSequence.hpp: mvdir storage/ndb/include/kernel/signaldata/WaitGCP.hpp: mvdir storage/ndb/include/logger/ConsoleLogHandler.hpp: mvdir 
storage/ndb/include/logger/FileLogHandler.hpp: mvdir storage/ndb/include/logger/LogHandler.hpp: mvdir storage/ndb/include/logger/Logger.hpp: mvdir storage/ndb/include/logger/SysLogHandler.hpp: mvdir storage/ndb/include/mgmapi/mgmapi.h: mvdir storage/ndb/include/mgmapi/mgmapi_config_parameters.h: mvdir storage/ndb/include/mgmapi/mgmapi_config_parameters_debug.h: mvdir storage/ndb/include/mgmapi/mgmapi_debug.h: mvdir storage/ndb/include/mgmapi/ndb_logevent.h: mvdir storage/ndb/include/mgmcommon/ConfigRetriever.hpp: mvdir storage/ndb/include/mgmcommon/IPCConfig.hpp: mvdir storage/ndb/include/mgmcommon/MgmtErrorReporter.hpp: mvdir storage/ndb/include/ndbapi/Ndb.hpp: mvdir storage/ndb/include/ndbapi/NdbApi.hpp: mvdir storage/ndb/include/ndbapi/NdbBlob.hpp: mvdir storage/ndb/include/ndbapi/NdbDictionary.hpp: mvdir storage/ndb/include/ndbapi/NdbError.hpp: mvdir storage/ndb/include/ndbapi/NdbEventOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbIndexOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbPool.hpp: mvdir storage/ndb/include/ndbapi/NdbRecAttr.hpp: mvdir storage/ndb/include/ndbapi/NdbReceiver.hpp: mvdir storage/ndb/include/ndbapi/NdbScanFilter.hpp: mvdir storage/ndb/include/ndbapi/NdbScanOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbTransaction.hpp: mvdir storage/ndb/include/ndbapi/ndb_cluster_connection.hpp: mvdir storage/ndb/include/ndbapi/ndb_opt_defaults.h: mvdir storage/ndb/include/ndbapi/ndbapi_limits.h: mvdir storage/ndb/include/ndbapi/ndberror.h: mvdir storage/ndb/include/newtonapi/dba.h: mvdir storage/ndb/include/newtonapi/defs/pcn_types.h: mvdir storage/ndb/include/portlib/NdbCondition.h: mvdir storage/ndb/include/portlib/NdbConfig.h: mvdir storage/ndb/include/portlib/NdbDaemon.h: mvdir storage/ndb/include/portlib/NdbEnv.h: mvdir storage/ndb/include/portlib/NdbHost.h: mvdir storage/ndb/include/portlib/NdbMain.h: mvdir 
storage/ndb/include/portlib/NdbMem.h: mvdir storage/ndb/include/portlib/NdbMutex.h: mvdir storage/ndb/include/portlib/NdbSleep.h: mvdir storage/ndb/include/portlib/NdbTCP.h: mvdir storage/ndb/include/portlib/NdbThread.h: mvdir storage/ndb/include/portlib/NdbTick.h: mvdir storage/ndb/include/portlib/PortDefs.h: mvdir storage/ndb/include/portlib/prefetch.h: mvdir storage/ndb/include/transporter/TransporterCallback.hpp: mvdir storage/ndb/include/transporter/TransporterDefinitions.hpp: mvdir storage/ndb/include/transporter/TransporterRegistry.hpp: mvdir storage/ndb/include/util/Base64.hpp: mvdir storage/ndb/include/util/BaseString.hpp: mvdir storage/ndb/include/util/Bitmask.hpp: mvdir storage/ndb/include/util/ConfigValues.hpp: mvdir storage/ndb/include/util/File.hpp: mvdir storage/ndb/include/util/InputStream.hpp: mvdir storage/ndb/include/util/NdbAutoPtr.hpp: mvdir storage/ndb/include/util/NdbOut.hpp: mvdir storage/ndb/include/util/NdbSqlUtil.hpp: mvdir storage/ndb/include/util/OutputStream.hpp: mvdir storage/ndb/include/util/Parser.hpp: mvdir storage/ndb/include/util/Properties.hpp: mvdir storage/ndb/include/util/SimpleProperties.hpp: mvdir storage/ndb/include/util/SocketAuthenticator.hpp: mvdir storage/ndb/include/util/SocketClient.hpp: mvdir storage/ndb/include/util/SocketServer.hpp: mvdir storage/ndb/include/util/UtilBuffer.hpp: mvdir storage/ndb/include/util/Vector.hpp: mvdir storage/ndb/include/util/basestring_vsnprintf.h: mvdir storage/ndb/include/util/md5_hash.hpp: mvdir storage/ndb/include/util/ndb_opts.h: mvdir storage/ndb/include/util/random.h: mvdir storage/ndb/include/util/socket_io.h: mvdir storage/ndb/include/util/uucode.h: mvdir storage/ndb/include/util/version.h: mvdir storage/ndb/lib/.empty: mvdir storage/ndb/ndbapi-examples/Makefile: mvdir storage/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile: mvdir storage/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_async_example/Makefile: mvdir 
storage/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_async_example/readme.txt: mvdir storage/ndb/ndbapi-examples/ndbapi_async_example1/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_event_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_retries_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_scan_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt: mvdir storage/ndb/ndbapi-examples/ndbapi_simple_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp: mvdir storage/ndb/src/Makefile.am: mvdir storage/ndb/src/common/Makefile.am: mvdir storage/ndb/src/common/debugger/BlockNames.cpp: mvdir storage/ndb/src/common/debugger/DebuggerNames.cpp: mvdir storage/ndb/src/common/debugger/EventLogger.cpp: mvdir storage/ndb/src/common/debugger/GrepError.cpp: mvdir storage/ndb/src/common/debugger/Makefile.am: mvdir storage/ndb/src/common/debugger/SignalLoggerManager.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AccLock.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AlterIndx.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AlterTab.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AlterTable.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AlterTrig.cpp: mvdir storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp: mvdir storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp: mvdir 
storage/ndb/src/common/debugger/signaldata/CntrStart.cpp: mvdir storage/ndb/src/common/debugger/signaldata/ContinueB.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CopyGCI.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CreateEvnt.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CreateIndx.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CreateTrig.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DihContinueB.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DisconnectRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DropIndx.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DropTab.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DropTrig.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FailRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsAppendReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsCloseReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsConf.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsOpenReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsRef.cpp: mvdir storage/ndb/src/common/debugger/signaldata/GCPSave.cpp: mvdir storage/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp: mvdir storage/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp: mvdir storage/ndb/src/common/debugger/signaldata/LCP.cpp: mvdir storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp: mvdir storage/ndb/src/common/debugger/signaldata/LqhKey.cpp: mvdir storage/ndb/src/common/debugger/signaldata/LqhTrans.cpp: mvdir storage/ndb/src/common/debugger/signaldata/Makefile.am: mvdir storage/ndb/src/common/debugger/signaldata/MasterLCP.cpp: mvdir 
storage/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/NdbSttor.cpp: mvdir storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp: mvdir storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp: mvdir storage/ndb/src/common/debugger/signaldata/PrepDropTab.cpp: mvdir storage/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp: mvdir storage/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp: mvdir storage/ndb/src/common/debugger/signaldata/ScanFrag.cpp: mvdir storage/ndb/src/common/debugger/signaldata/ScanTab.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SignalNames.cpp: mvdir storage/ndb/src/common/debugger/signaldata/StartRec.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SystemError.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcIndx.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcKeyRef.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcRollbackRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TupCommit.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TupKey.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TuxMaint.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilDelete.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilExecute.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilLock.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilPrepare.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilSequence.cpp: mvdir storage/ndb/src/common/debugger/signaldata/print.awk: mvdir 
storage/ndb/src/common/logger/ConsoleLogHandler.cpp: mvdir storage/ndb/src/common/logger/FileLogHandler.cpp: mvdir storage/ndb/src/common/logger/LogHandler.cpp: mvdir storage/ndb/src/common/logger/LogHandlerList.cpp: mvdir storage/ndb/src/common/logger/LogHandlerList.hpp: mvdir storage/ndb/src/common/logger/Logger.cpp: mvdir storage/ndb/src/common/logger/Makefile.am: mvdir storage/ndb/src/common/logger/SysLogHandler.cpp: mvdir storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp: mvdir storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp: mvdir storage/ndb/src/common/logger/listtest/Makefile: mvdir storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp: mvdir storage/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp: mvdir storage/ndb/src/common/logger/loggertest/Makefile: mvdir storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp: mvdir storage/ndb/src/common/mgmcommon/IPCConfig.cpp: mvdir storage/ndb/src/common/mgmcommon/Makefile.am: mvdir storage/ndb/src/common/mgmcommon/printConfig/Makefile: mvdir storage/ndb/src/common/mgmcommon/printConfig/printConfig.cpp: mvdir storage/ndb/src/common/portlib/Makefile.am: mvdir storage/ndb/src/common/portlib/NdbCondition.c: mvdir storage/ndb/src/common/portlib/NdbConfig.c: mvdir storage/ndb/src/common/portlib/NdbDaemon.c: mvdir storage/ndb/src/common/portlib/NdbEnv.c: mvdir storage/ndb/src/common/portlib/NdbHost.c: mvdir storage/ndb/src/common/portlib/NdbMem.c: mvdir storage/ndb/src/common/portlib/NdbMutex.c: mvdir storage/ndb/src/common/portlib/NdbPortLibTest.cpp: mvdir storage/ndb/src/common/portlib/NdbSleep.c: mvdir storage/ndb/src/common/portlib/NdbTCP.cpp: mvdir storage/ndb/src/common/portlib/NdbThread.c: mvdir storage/ndb/src/common/portlib/NdbTick.c: mvdir storage/ndb/src/common/portlib/gcc.cpp: mvdir storage/ndb/src/common/portlib/memtest.c: mvdir storage/ndb/src/common/portlib/mmslist.cpp: mvdir storage/ndb/src/common/portlib/mmstest.cpp: mvdir 
storage/ndb/src/common/portlib/munmaptest.cpp: mvdir storage/ndb/src/common/portlib/old_dirs/memtest/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/memtest/munmaptest/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/ose/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbCondition.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbConditionOSE.h: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbEnv.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbHost.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbMem.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbMem_SoftOse.cpp: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbMutex.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbOut.cpp: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbSleep.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbTCP.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbThread.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbTick.c: mvdir storage/ndb/src/common/portlib/old_dirs/test/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/win32/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbCondition.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbDaemon.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbEnv.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbHost.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbMem.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbMutex.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbSleep.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbTCP.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbThread.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbTick.c: mvdir storage/ndb/src/common/portlib/win32/NdbCondition.c: mvdir storage/ndb/src/common/portlib/win32/NdbDaemon.c: mvdir storage/ndb/src/common/portlib/win32/NdbEnv.c: mvdir storage/ndb/src/common/portlib/win32/NdbHost.c: mvdir 
storage/ndb/src/common/portlib/win32/NdbMem.c: mvdir storage/ndb/src/common/portlib/win32/NdbMutex.c: mvdir storage/ndb/src/common/portlib/win32/NdbSleep.c: mvdir storage/ndb/src/common/portlib/win32/NdbTCP.c: mvdir storage/ndb/src/common/portlib/win32/NdbThread.c: mvdir storage/ndb/src/common/portlib/win32/NdbTick.c: mvdir storage/ndb/src/common/transporter/Makefile.am: mvdir storage/ndb/src/common/transporter/OSE_Receiver.cpp: mvdir storage/ndb/src/common/transporter/OSE_Receiver.hpp: mvdir storage/ndb/src/common/transporter/OSE_Signals.hpp: mvdir storage/ndb/src/common/transporter/OSE_Transporter.cpp: mvdir storage/ndb/src/common/transporter/OSE_Transporter.hpp: mvdir storage/ndb/src/common/transporter/Packer.cpp: mvdir storage/ndb/src/common/transporter/Packer.hpp: mvdir storage/ndb/src/common/transporter/SCI_Transporter.cpp: mvdir storage/ndb/src/common/transporter/SCI_Transporter.hpp: mvdir storage/ndb/src/common/transporter/SHM_Buffer.hpp: mvdir storage/ndb/src/common/transporter/SHM_Transporter.cpp: mvdir storage/ndb/src/common/transporter/SHM_Transporter.hpp: mvdir storage/ndb/src/common/transporter/SHM_Transporter.unix.cpp: mvdir storage/ndb/src/common/transporter/SHM_Transporter.win32.cpp: mvdir storage/ndb/src/common/transporter/SendBuffer.cpp: mvdir storage/ndb/src/common/transporter/SendBuffer.hpp: mvdir storage/ndb/src/common/transporter/TCP_Transporter.cpp: mvdir storage/ndb/src/common/transporter/TCP_Transporter.hpp: mvdir storage/ndb/src/common/transporter/Transporter.cpp: mvdir storage/ndb/src/common/transporter/Transporter.hpp: mvdir storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp: mvdir storage/ndb/src/common/transporter/TransporterRegistry.cpp: mvdir storage/ndb/src/common/transporter/basictest/Makefile: mvdir storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp: mvdir storage/ndb/src/common/transporter/buddy.cpp: mvdir storage/ndb/src/common/transporter/buddy.hpp: mvdir 
storage/ndb/src/common/transporter/failoverSCI/Makefile: mvdir storage/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp: mvdir storage/ndb/src/common/transporter/perftest/Makefile: mvdir storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp: mvdir storage/ndb/src/common/transporter/priotest/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioOSE/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioSCI/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp: mvdir storage/ndb/src/common/transporter/priotest/prioSHM/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp: mvdir storage/ndb/src/common/transporter/priotest/prioTCP/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp: mvdir storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp: mvdir storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp: mvdir storage/ndb/src/common/util/Base64.cpp: mvdir storage/ndb/src/common/util/BaseString.cpp: mvdir storage/ndb/src/common/util/Bitmask.cpp: mvdir storage/ndb/src/common/util/ConfigValues.cpp: mvdir storage/ndb/src/common/util/File.cpp: mvdir storage/ndb/src/common/util/InputStream.cpp: mvdir storage/ndb/src/common/util/Makefile.am: mvdir storage/ndb/src/common/util/NdbErrHnd.cpp: mvdir storage/ndb/src/common/util/NdbOut.cpp: mvdir storage/ndb/src/common/util/NdbSqlUtil.cpp: mvdir storage/ndb/src/common/util/OutputStream.cpp: mvdir storage/ndb/src/common/util/Parser.cpp: mvdir storage/ndb/src/common/util/Properties.cpp: mvdir storage/ndb/src/common/util/SimpleProperties.cpp: mvdir storage/ndb/src/common/util/SocketAuthenticator.cpp: mvdir storage/ndb/src/common/util/SocketClient.cpp: mvdir storage/ndb/src/common/util/SocketServer.cpp: mvdir storage/ndb/src/common/util/basestring_vsnprintf.c: mvdir storage/ndb/src/common/util/filetest/FileUnitTest.cpp: mvdir storage/ndb/src/common/util/filetest/FileUnitTest.hpp: mvdir 
storage/ndb/src/common/util/filetest/Makefile: mvdir storage/ndb/src/common/util/getarg.cat3: mvdir storage/ndb/src/common/util/md5_hash.cpp: mvdir storage/ndb/src/common/util/ndb_init.c: mvdir storage/ndb/src/common/util/new.cpp: mvdir storage/ndb/src/common/util/random.c: mvdir storage/ndb/src/common/util/socket_io.cpp: mvdir storage/ndb/src/common/util/strdup.c: mvdir storage/ndb/src/common/util/testConfigValues/Makefile: mvdir storage/ndb/src/common/util/testConfigValues/testConfigValues.cpp: mvdir storage/ndb/src/common/util/uucode.c: mvdir storage/ndb/src/common/util/version.c: mvdir storage/ndb/src/common/util/testProperties/Makefile: mvdir storage/ndb/src/common/util/testProperties/testProperties.cpp: mvdir storage/ndb/src/common/util/testSimpleProperties/Makefile: mvdir storage/ndb/src/common/util/testSimpleProperties/sp_test.cpp: mvdir storage/ndb/src/cw/Makefile.am: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj: mvdir storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO: mvdir storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp: mvdir storage/ndb/src/cw/cpcc-win32/C++/Open.ICO: mvdir storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp: mvdir storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h: mvdir storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp: mvdir storage/ndb/src/cw/cpcc-win32/C++/TreeView.h: mvdir storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp: mvdir storage/ndb/src/cw/cpcc-win32/C++/resource.h: mvdir storage/ndb/src/cw/cpcc-win32/C++/small.ico: mvdir storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp: mvdir storage/ndb/src/cw/cpcc-win32/csharp/App.ico: 
mvdir storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO: mvdir storage/ndb/src/cw/cpcc-win32/csharp/Database.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user: mvdir storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj: mvdir storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb: mvdir storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln: mvdir storage/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/Process.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Computer.cls: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Database.cls: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 110.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 231.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 237.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 241.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 242.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 270.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 271.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 273.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 31.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 337.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 338.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 339.ico: mvdir 
storage/ndb/src/cw/cpcc-win32/vb6/MSSCCPRJ.SCC: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Module1.bas: mvdir storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbp: mvdir storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbw: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Process.cls: mvdir storage/ndb/src/cw/cpcc-win32/vb6/closed folder.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/computer.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmAbout.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmLogin.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmMain.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frx: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase.frx: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase1.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.log: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase3.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmOptions.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmSplash.frx: mvdir storage/ndb/src/cw/cpcc-win32/vb6/networking.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/open folder.ico: mvdir storage/ndb/src/cw/cpcd/APIService.cpp: mvdir storage/ndb/src/cw/cpcd/APIService.hpp: mvdir storage/ndb/src/cw/cpcd/CPCD.cpp: mvdir storage/ndb/src/cw/cpcd/CPCD.hpp: mvdir storage/ndb/src/cw/cpcd/Makefile.am: mvdir storage/ndb/src/cw/cpcd/Monitor.cpp: mvdir storage/ndb/src/cw/cpcd/Process.cpp: mvdir storage/ndb/src/cw/cpcd/common.cpp: mvdir storage/ndb/src/cw/cpcd/common.hpp: mvdir storage/ndb/src/cw/cpcd/main.cpp: mvdir storage/ndb/src/cw/test/socketclient/Makefile: mvdir storage/ndb/src/cw/test/socketclient/socketClientTest.cpp: mvdir storage/ndb/src/cw/util/ClientInterface.cpp: mvdir storage/ndb/src/cw/util/ClientInterface.hpp: mvdir storage/ndb/src/cw/util/Makefile: mvdir storage/ndb/src/cw/util/SocketRegistry.cpp: mvdir storage/ndb/src/cw/util/SocketRegistry.hpp: mvdir 
storage/ndb/src/cw/util/SocketService.cpp: mvdir storage/ndb/src/cw/util/SocketService.hpp: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/scilib.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib: mvdir storage/ndb/src/kernel/Makefile.am: mvdir storage/ndb/src/kernel/SimBlockList.cpp: mvdir storage/ndb/src/kernel/blocks/ERROR_codes.txt: mvdir storage/ndb/src/kernel/blocks/Makefile.am: mvdir storage/ndb/src/kernel/blocks/NodeRestart.new.txt: mvdir storage/ndb/src/kernel/blocks/NodeRestart.txt: mvdir storage/ndb/src/kernel/blocks/Start.txt: mvdir storage/ndb/src/kernel/blocks/SystemRestart.new.txt: mvdir storage/ndb/src/kernel/blocks/SystemRestart.txt: mvdir storage/ndb/src/kernel/blocks/backup/Backup.cpp: mvdir storage/ndb/src/kernel/blocks/backup/Backup.hpp: mvdir storage/ndb/src/kernel/blocks/backup/Backup.txt: mvdir storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp: mvdir storage/ndb/src/kernel/blocks/backup/BackupInit.cpp: mvdir storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp: mvdir storage/ndb/src/kernel/blocks/backup/Makefile.am: mvdir storage/ndb/src/kernel/blocks/backup/read.cpp: mvdir storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: mvdir storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp: mvdir storage/ndb/src/kernel/blocks/cmvmi/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp: mvdir storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp: mvdir storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp: mvdir storage/ndb/src/kernel/blocks/dbacc/Makefile.am: mvdir storage/ndb/src/kernel/blocks/mutexes.hpp: mvdir storage/ndb/src/kernel/blocks/new-block.tar.gz: mvdir storage/ndb/src/kernel/main.cpp: mvdir 
storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp: mvdir storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp: mvdir storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/DropTable.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/Event.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl: mvdir storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp: mvdir storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl: mvdir storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp: mvdir storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp: mvdir storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp: mvdir storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: mvdir storage/ndb/src/kernel/blocks/dbdih/LCP.txt: mvdir storage/ndb/src/kernel/blocks/dbdih/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp: mvdir storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile: mvdir storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp: mvdir storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp: mvdir storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp: mvdir storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: mvdir storage/ndb/src/kernel/blocks/dblqh/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile: mvdir storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp: mvdir storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp: mvdir storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp: mvdir storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp: mvdir storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp: mvdir storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp: mvdir storage/ndb/src/kernel/blocks/dbtc/Makefile.am: mvdir 
storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp: mvdir storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbtup/Notes.txt: mvdir storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbtux/Times.txt: mvdir 
storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html: mvdir storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp: mvdir storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp: mvdir storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt: mvdir storage/ndb/src/kernel/blocks/dbutil/Makefile.am: mvdir storage/ndb/src/kernel/blocks/grep/Grep.cpp: mvdir storage/ndb/src/kernel/blocks/grep/Grep.hpp: mvdir storage/ndb/src/kernel/blocks/grep/GrepInit.cpp: mvdir storage/ndb/src/kernel/blocks/grep/Makefile.am: mvdir storage/ndb/src/kernel/blocks/grep/systab_test/Makefile: mvdir storage/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp: mvdir storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am: mvdir storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp: mvdir storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp: mvdir storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp: mvdir storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Makefile.am: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp: mvdir 
storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp: mvdir storage/ndb/src/kernel/blocks/qmgr/Makefile.am: mvdir storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp: mvdir storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp: mvdir storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp: mvdir storage/ndb/src/kernel/blocks/qmgr/timer.hpp: mvdir storage/ndb/src/kernel/blocks/suma/Makefile.am: mvdir storage/ndb/src/kernel/blocks/suma/Suma.cpp: mvdir storage/ndb/src/kernel/blocks/suma/Suma.hpp: mvdir storage/ndb/src/kernel/blocks/suma/Suma.txt: mvdir storage/ndb/src/kernel/blocks/suma/SumaInit.cpp: mvdir storage/ndb/src/kernel/blocks/trix/Makefile.am: mvdir storage/ndb/src/kernel/blocks/trix/Trix.cpp: mvdir storage/ndb/src/kernel/blocks/trix/Trix.hpp: mvdir storage/ndb/src/kernel/error/Error.hpp: mvdir storage/ndb/src/kernel/error/ErrorHandlingMacros.hpp: mvdir storage/ndb/src/kernel/error/ErrorMessages.cpp: mvdir storage/ndb/src/kernel/error/ErrorMessages.hpp: mvdir storage/ndb/src/kernel/error/ErrorReporter.cpp: mvdir storage/ndb/src/kernel/error/ErrorReporter.hpp: mvdir storage/ndb/src/kernel/error/Makefile.am: mvdir storage/ndb/src/kernel/error/TimeModule.cpp: mvdir storage/ndb/src/kernel/error/TimeModule.hpp: mvdir storage/ndb/src/kernel/vm/Array.hpp: mvdir storage/ndb/src/kernel/vm/ArrayFifoList.hpp: mvdir storage/ndb/src/kernel/vm/ArrayList.hpp: mvdir storage/ndb/src/kernel/vm/ArrayPool.hpp: mvdir storage/ndb/src/kernel/vm/CArray.hpp: mvdir storage/ndb/src/kernel/vm/Callback.hpp: mvdir storage/ndb/src/kernel/vm/ClusterConfiguration.cpp: mvdir storage/ndb/src/kernel/vm/ClusterConfiguration.hpp: mvdir storage/ndb/src/kernel/vm/Configuration.cpp: mvdir storage/ndb/src/kernel/vm/Configuration.hpp: mvdir storage/ndb/src/kernel/vm/DLFifoList.hpp: mvdir storage/ndb/src/kernel/vm/DLHashTable.hpp: mvdir storage/ndb/src/kernel/vm/DLHashTable2.hpp: mvdir storage/ndb/src/kernel/vm/DLList.hpp: mvdir storage/ndb/src/kernel/vm/DataBuffer.hpp: mvdir 
storage/ndb/src/kernel/vm/Emulator.cpp: mvdir storage/ndb/src/kernel/vm/Emulator.hpp: mvdir storage/ndb/src/kernel/vm/FastScheduler.cpp: mvdir storage/ndb/src/kernel/vm/FastScheduler.hpp: mvdir storage/ndb/src/kernel/vm/GlobalData.hpp: mvdir storage/ndb/src/kernel/vm/KeyTable.hpp: mvdir storage/ndb/src/kernel/vm/KeyTable2.hpp: mvdir storage/ndb/src/kernel/vm/LongSignal.hpp: mvdir storage/ndb/src/kernel/vm/Makefile.am: mvdir storage/ndb/src/kernel/vm/MetaData.cpp: mvdir storage/ndb/src/kernel/vm/MetaData.hpp: mvdir storage/ndb/src/kernel/vm/Mutex.cpp: mvdir storage/ndb/src/kernel/vm/Mutex.hpp: mvdir storage/ndb/src/kernel/vm/Prio.hpp: mvdir storage/ndb/src/kernel/vm/RequestTracker.hpp: mvdir storage/ndb/src/kernel/vm/SLList.hpp: mvdir storage/ndb/src/kernel/vm/SafeCounter.cpp: mvdir storage/ndb/src/kernel/vm/SafeCounter.hpp: mvdir storage/ndb/src/kernel/vm/SectionReader.cpp: mvdir storage/ndb/src/kernel/vm/SectionReader.hpp: mvdir storage/ndb/src/kernel/vm/SignalCounter.hpp: mvdir storage/ndb/src/kernel/vm/SimBlockList.hpp: mvdir storage/ndb/src/kernel/vm/SimplePropertiesSection.cpp: mvdir storage/ndb/src/kernel/vm/SimulatedBlock.cpp: mvdir storage/ndb/src/kernel/vm/SimulatedBlock.hpp: mvdir storage/ndb/src/kernel/vm/ThreadConfig.cpp: mvdir storage/ndb/src/kernel/vm/ThreadConfig.hpp: mvdir storage/ndb/src/kernel/vm/TimeQueue.cpp: mvdir storage/ndb/src/kernel/vm/TimeQueue.hpp: mvdir storage/ndb/src/kernel/vm/TransporterCallback.cpp: mvdir storage/ndb/src/kernel/vm/VMSignal.cpp: mvdir storage/ndb/src/kernel/vm/VMSignal.hpp: mvdir storage/ndb/src/kernel/vm/WaitQueue.hpp: mvdir storage/ndb/src/kernel/vm/WatchDog.cpp: mvdir storage/ndb/src/kernel/vm/WatchDog.hpp: mvdir storage/ndb/src/kernel/vm/al_test/Makefile: mvdir storage/ndb/src/kernel/vm/al_test/arrayListTest.cpp: mvdir storage/ndb/src/kernel/vm/al_test/arrayPoolTest.cpp: mvdir storage/ndb/src/kernel/vm/al_test/main.cpp: mvdir storage/ndb/src/kernel/vm/pc.hpp: mvdir storage/ndb/src/kernel/vm/testCopy/Makefile: 
mvdir storage/ndb/src/kernel/vm/testCopy/rr.cpp: mvdir storage/ndb/src/kernel/vm/testCopy/testCopy.cpp: mvdir storage/ndb/src/kernel/vm/testDataBuffer/Makefile: mvdir storage/ndb/src/kernel/vm/testDataBuffer/testDataBuffer.cpp: mvdir storage/ndb/src/kernel/vm/testLongSig/Makefile: mvdir storage/ndb/src/kernel/vm/testLongSig/testLongSig.cpp: mvdir storage/ndb/src/kernel/vm/testSimplePropertiesSection/Makefile: mvdir storage/ndb/src/kernel/vm/testSimplePropertiesSection/test.cpp: mvdir storage/ndb/src/mgmapi/LocalConfig.cpp: mvdir storage/ndb/src/mgmapi/LocalConfig.hpp: mvdir storage/ndb/src/mgmapi/Makefile.am: mvdir storage/ndb/src/mgmapi/mgmapi.cpp: mvdir storage/ndb/src/mgmapi/mgmapi_configuration.cpp: mvdir storage/ndb/src/mgmapi/mgmapi_configuration.hpp: mvdir storage/ndb/src/mgmapi/mgmapi_internal.h: mvdir storage/ndb/src/mgmapi/ndb_logevent.cpp: mvdir storage/ndb/src/mgmapi/ndb_logevent.hpp: mvdir storage/ndb/src/mgmapi/test/Makefile: mvdir storage/ndb/src/mgmapi/test/keso.c: mvdir storage/ndb/src/mgmapi/test/mgmSrvApi.cpp: mvdir storage/ndb/src/mgmclient/CommandInterpreter.cpp: mvdir storage/ndb/src/mgmclient/Makefile.am: mvdir storage/ndb/src/mgmclient/main.cpp: mvdir storage/ndb/src/mgmclient/ndb_mgmclient.hpp: mvdir storage/ndb/src/mgmclient/ndb_mgmclient.h: mvdir storage/ndb/src/mgmclient/test_cpcd/Makefile: mvdir storage/ndb/src/mgmclient/test_cpcd/test_cpcd.cpp: mvdir storage/ndb/src/mgmsrv/Config.cpp: mvdir storage/ndb/src/mgmsrv/Config.hpp: mvdir storage/ndb/src/mgmsrv/ConfigInfo.cpp: mvdir storage/ndb/src/mgmsrv/ConfigInfo.hpp: mvdir storage/ndb/src/mgmsrv/InitConfigFileParser.cpp: mvdir storage/ndb/src/mgmsrv/InitConfigFileParser.hpp: mvdir storage/ndb/src/mgmsrv/Makefile.am: mvdir storage/ndb/src/mgmsrv/MgmtSrvr.cpp: mvdir storage/ndb/src/mgmsrv/MgmtSrvr.hpp: mvdir storage/ndb/src/mgmsrv/MgmtSrvrConfig.cpp: mvdir storage/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp: mvdir storage/ndb/src/mgmsrv/Services.cpp: mvdir 
storage/ndb/src/mgmsrv/Services.hpp: mvdir storage/ndb/src/mgmsrv/SignalQueue.cpp: mvdir storage/ndb/src/mgmsrv/SignalQueue.hpp: mvdir storage/ndb/src/mgmsrv/convertStrToInt.cpp: mvdir storage/ndb/src/mgmsrv/convertStrToInt.hpp: mvdir storage/ndb/src/mgmsrv/main.cpp: mvdir storage/ndb/src/mgmsrv/mkconfig/Makefile: mvdir storage/ndb/src/mgmsrv/mkconfig/mkconfig.cpp: mvdir storage/ndb/src/ndbapi/API.hpp: mvdir storage/ndb/src/ndbapi/ClusterMgr.cpp: mvdir storage/ndb/src/ndbapi/ClusterMgr.hpp: mvdir storage/ndb/src/ndbapi/DictCache.cpp: mvdir storage/ndb/src/ndbapi/DictCache.hpp: mvdir storage/ndb/src/ndbapi/Makefile.am: mvdir storage/ndb/src/ndbapi/Ndb.cpp: mvdir storage/ndb/src/ndbapi/NdbApiSignal.cpp: mvdir storage/ndb/src/ndbapi/NdbApiSignal.hpp: mvdir storage/ndb/src/ndbapi/NdbBlob.cpp: mvdir storage/ndb/src/ndbapi/NdbBlobImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbDictionary.cpp: mvdir storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp: mvdir storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbErrorOut.cpp: mvdir storage/ndb/src/ndbapi/NdbEventOperation.cpp: mvdir storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp: mvdir storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbIndexOperation.cpp: mvdir storage/ndb/src/ndbapi/NdbLinHash.hpp: mvdir storage/ndb/src/ndbapi/NdbOperation.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationDefine.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationExec.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationInt.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationScan.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationSearch.cpp: mvdir storage/ndb/src/ndbapi/NdbPool.cpp: mvdir storage/ndb/src/ndbapi/NdbPoolImpl.cpp: mvdir storage/ndb/src/ndbapi/NdbPoolImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbRecAttr.cpp: mvdir storage/ndb/src/ndbapi/NdbReceiver.cpp: mvdir storage/ndb/src/ndbapi/NdbScanFilter.cpp: mvdir storage/ndb/src/ndbapi/NdbScanOperation.cpp: mvdir 
storage/ndb/src/ndbapi/NdbTransaction.cpp: mvdir storage/ndb/src/ndbapi/NdbTransactionScan.cpp: mvdir storage/ndb/src/ndbapi/NdbUtil.cpp: mvdir storage/ndb/src/ndbapi/NdbUtil.hpp: mvdir storage/ndb/src/ndbapi/NdbWaiter.hpp: mvdir storage/ndb/src/ndbapi/Ndberr.cpp: mvdir storage/ndb/src/ndbapi/Ndbif.cpp: mvdir storage/ndb/src/ndbapi/Ndbinit.cpp: mvdir storage/ndb/src/ndbapi/Ndblist.cpp: mvdir storage/ndb/src/ndbapi/ObjectMap.hpp: mvdir storage/ndb/src/ndbapi/ScanOperation.txt: mvdir storage/ndb/src/ndbapi/TransporterFacade.cpp: mvdir storage/ndb/src/ndbapi/TransporterFacade.hpp: mvdir storage/ndb/src/ndbapi/ndb_cluster_connection.cpp: mvdir storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp: mvdir storage/ndb/src/ndbapi/ndberror.c: mvdir storage/ndb/src/ndbapi/signal-sender/Makefile: mvdir storage/ndb/src/ndbapi/signal-sender/SignalSender.cpp: mvdir storage/ndb/src/ndbapi/signal-sender/SignalSender.hpp: mvdir storage/ndb/src/old_files/client/Makefile: mvdir storage/ndb/src/old_files/client/odbc/Extra.mk: mvdir storage/ndb/src/old_files/client/odbc/Makefile: mvdir storage/ndb/src/old_files/client/odbc/NdbOdbc.cpp: mvdir storage/ndb/src/old_files/client/odbc/NdbOdbc.def: mvdir storage/ndb/src/old_files/client/odbc/codegen/CodeGen.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/CodeGen.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_base.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_base.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.cpp: mvdir 
storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.hpp: mvdir 
storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_insert.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_insert.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_pred.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_pred.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.hpp: mvdir 
storage/ndb/src/old_files/client/odbc/codegen/Code_query.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.cpp: mvdir 
storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_root.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_root.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_select.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_select.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_table.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_table.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Makefile: mvdir storage/ndb/src/old_files/client/odbc/codegen/SimpleGram.ypp: mvdir storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/SimpleScan.lpp: mvdir storage/ndb/src/old_files/client/odbc/common/AttrArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/AttrArea.hpp: mvdir 
storage/ndb/src/old_files/client/odbc/common/CodeTree.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/CodeTree.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/ConnArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/ConnArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/Ctx.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/Ctx.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataField.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataField.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataRow.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataRow.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataType.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataType.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DescArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DescArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DiagArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DiagArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/Makefile: mvdir storage/ndb/src/old_files/client/odbc/common/OdbcData.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/OdbcData.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/ResultArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/ResultArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/Sqlstate.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/Sqlstate.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/StmtArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/StmtArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/StmtInfo.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/StmtInfo.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/common.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/common.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.cpp: mvdir 
storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictSys.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictSys.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictTable.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictTable.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/Makefile: mvdir storage/ndb/src/old_files/client/odbc/docs/class.fig: mvdir storage/ndb/src/old_files/client/odbc/docs/descfield.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/diag.txt: mvdir storage/ndb/src/old_files/client/odbc/docs/getinfo.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/gettypeinfo.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/handleattr.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/main.hpp: mvdir storage/ndb/src/old_files/client/odbc/docs/ndbodbc.html: mvdir storage/ndb/src/old_files/client/odbc/docs/select.fig: mvdir storage/ndb/src/old_files/client/odbc/docs/systables.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/type.txt: mvdir storage/ndb/src/old_files/client/odbc/driver/Func.data: mvdir storage/ndb/src/old_files/client/odbc/driver/Func.pl: mvdir storage/ndb/src/old_files/client/odbc/driver/Makefile: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLAllocConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLAllocEnv.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandle.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandleStd.cpp: mvdir 
storage/ndb/src/old_files/client/odbc/driver/SQLAllocStmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBindCol.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBindParam.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBindParameter.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBrowseConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBulkOperations.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLCancel.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLCloseCursor.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLColAttribute.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLColAttributes.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLColumnPrivileges.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLColumns.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLCopyDesc.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDataSources.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDescribeCol.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDescribeParam.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDisconnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDriverConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDrivers.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLEndTran.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLError.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLExecDirect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLExecute.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLExtendedFetch.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFetch.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFetchScroll.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLForeignKeys.cpp: mvdir 
storage/ndb/src/old_files/client/odbc/driver/SQLFreeConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFreeEnv.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFreeHandle.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFreeStmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectOption.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetCursorName.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetData.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetDescField.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetDescRec.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagField.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagRec.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetEnvAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetFunctions.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetInfo.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtOption.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetTypeInfo.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLMoreResults.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLNativeSql.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLNumParams.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLNumResultCols.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLParamData.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLParamOptions.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLPrepare.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLPrimaryKeys.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLProcedureColumns.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLProcedures.cpp: mvdir 
storage/ndb/src/old_files/client/odbc/driver/SQLPutData.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLRowCount.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectOption.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetCursorName.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetDescField.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetDescRec.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetEnvAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetParam.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetPos.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetScrollOptions.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtOption.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSpecialColumns.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLStatistics.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLTablePrivileges.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLTables.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLTransact.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/driver.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/driver.hpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_comp_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_create_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_create_table.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_delete_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_delete_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_delete_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_drop_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_drop_table.cpp: 
mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_expr_conv.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_expr_func.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_expr_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_insert.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_pred_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_query_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_query_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_query_range.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_query_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_query_sys.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_update_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_update_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_update_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Executor.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Executor.hpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Makefile: mvdir storage/ndb/src/old_files/client/odbc/handles/AttrDbc.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/AttrEnv.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/AttrRoot.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/AttrStmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/DescSpec.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/FuncTab.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleBase.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleBase.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleDbc.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleDbc.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleDesc.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleDesc.hpp: mvdir 
storage/ndb/src/old_files/client/odbc/handles/HandleEnv.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleEnv.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleRoot.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleRoot.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleStmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleStmt.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/InfoTab.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/Makefile: mvdir storage/ndb/src/old_files/client/odbc/handles/PoolNdb.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/PoolNdb.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/handles.hpp: mvdir storage/ndb/src/old_files/ndbbaseclient/Makefile: mvdir storage/ndb/src/old_files/ndbbaseclient/ndbbaseclient_dummy.cpp: mvdir storage/ndb/src/old_files/ndbclient/Makefile: mvdir storage/ndb/src/old_files/ndbclient/ndbclient_dummy.cpp: mvdir storage/ndb/src/old_files/newtonapi/Makefile: mvdir storage/ndb/src/old_files/newtonapi/dba_binding.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_bulkread.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_config.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_dac.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_error.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_init.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_internal.hpp: mvdir storage/ndb/src/old_files/newtonapi/dba_process.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_process.hpp: mvdir storage/ndb/src/old_files/newtonapi/dba_schema.cpp: mvdir storage/ndb/src/old_files/rep/ExtSender.cpp: mvdir storage/ndb/src/old_files/rep/ExtSender.hpp: mvdir storage/ndb/src/old_files/rep/Makefile: mvdir storage/ndb/src/old_files/rep/NodeConnectInfo.hpp: mvdir storage/ndb/src/old_files/rep/README: mvdir storage/ndb/src/old_files/rep/RepApiInterpreter.cpp: mvdir storage/ndb/src/old_files/rep/RepApiInterpreter.hpp: mvdir 
storage/ndb/src/old_files/rep/RepApiService.cpp: mvdir storage/ndb/src/old_files/rep/RepApiService.hpp: mvdir storage/ndb/src/old_files/rep/RepCommandInterpreter.cpp: mvdir storage/ndb/src/old_files/rep/RepCommandInterpreter.hpp: mvdir storage/ndb/src/old_files/rep/RepComponents.cpp: mvdir storage/ndb/src/old_files/rep/RepComponents.hpp: mvdir storage/ndb/src/old_files/rep/RepMain.cpp: mvdir storage/ndb/src/old_files/rep/Requestor.cpp: mvdir storage/ndb/src/old_files/rep/Requestor.hpp: mvdir storage/ndb/src/old_files/rep/RequestorSubscriptions.cpp: mvdir storage/ndb/src/old_files/rep/SignalQueue.cpp: mvdir storage/ndb/src/old_files/rep/SignalQueue.hpp: mvdir storage/ndb/src/old_files/rep/TODO: mvdir storage/ndb/src/old_files/rep/adapters/AppNDB.cpp: mvdir storage/ndb/src/old_files/rep/adapters/AppNDB.hpp: mvdir storage/ndb/src/old_files/rep/adapters/ExtAPI.cpp: mvdir storage/ndb/src/old_files/rep/adapters/ExtAPI.hpp: mvdir storage/ndb/src/old_files/rep/adapters/ExtNDB.cpp: mvdir storage/ndb/src/old_files/rep/adapters/ExtNDB.hpp: mvdir storage/ndb/src/old_files/rep/adapters/Makefile: mvdir storage/ndb/src/old_files/rep/adapters/TableInfoPs.hpp: mvdir storage/ndb/src/old_files/rep/dbug_hack.cpp: mvdir storage/ndb/src/old_files/rep/rep_version.hpp: mvdir storage/ndb/src/old_files/rep/repapi/Makefile: mvdir storage/ndb/src/old_files/rep/repapi/repapi.cpp: mvdir storage/ndb/src/old_files/rep/repapi/repapi.h: mvdir storage/ndb/src/old_files/rep/state/Channel.cpp: mvdir storage/ndb/src/old_files/rep/state/Channel.hpp: mvdir storage/ndb/src/old_files/rep/state/Interval.cpp: mvdir storage/ndb/src/old_files/rep/state/Interval.hpp: mvdir storage/ndb/src/old_files/rep/state/Makefile: mvdir storage/ndb/src/old_files/rep/state/RepState.cpp: mvdir storage/ndb/src/old_files/rep/state/RepState.hpp: mvdir storage/ndb/src/old_files/rep/state/RepStateEvent.cpp: mvdir storage/ndb/src/old_files/rep/state/RepStateRequests.cpp: mvdir 
storage/ndb/src/old_files/rep/state/testInterval/Makefile: mvdir storage/ndb/src/old_files/rep/state/testInterval/testInterval.cpp: mvdir storage/ndb/src/old_files/rep/state/testRepState/Makefile: mvdir storage/ndb/src/old_files/rep/state/testRepState/testRequestor.cpp: mvdir storage/ndb/src/old_files/rep/state/testRepState/testRequestor.hpp: mvdir storage/ndb/src/old_files/rep/storage/GCIBuffer.cpp: mvdir storage/ndb/src/old_files/rep/storage/GCIBuffer.hpp: mvdir storage/ndb/src/old_files/rep/storage/GCIContainer.cpp: mvdir storage/ndb/src/old_files/rep/storage/GCIContainer.hpp: mvdir storage/ndb/src/old_files/rep/storage/GCIContainerPS.cpp: mvdir storage/ndb/src/old_files/rep/storage/GCIContainerPS.hpp: mvdir storage/ndb/src/old_files/rep/storage/GCIPage.cpp: mvdir storage/ndb/src/old_files/rep/storage/GCIPage.hpp: mvdir storage/ndb/src/old_files/rep/storage/LogRecord.hpp: mvdir storage/ndb/src/old_files/rep/storage/Makefile: mvdir storage/ndb/src/old_files/rep/storage/NodeConnectInfo.hpp: mvdir storage/ndb/src/old_files/rep/storage/NodeGroup.cpp: mvdir storage/ndb/src/old_files/rep/storage/NodeGroup.hpp: mvdir storage/ndb/src/old_files/rep/storage/NodeGroupInfo.cpp: mvdir storage/ndb/src/old_files/rep/storage/NodeGroupInfo.hpp: mvdir storage/ndb/src/old_files/rep/transfer/Makefile: mvdir storage/ndb/src/old_files/rep/transfer/TransPS.cpp: mvdir storage/ndb/src/old_files/rep/transfer/TransPS.hpp: mvdir storage/ndb/src/old_files/rep/transfer/TransSS.cpp: mvdir storage/ndb/src/old_files/rep/transfer/TransSS.hpp: mvdir storage/ndb/src/old_files/rep/transfer/TransSSSubscriptions.cpp: mvdir storage/ndb/test/Makefile.am: mvdir storage/ndb/test/include/CpcClient.hpp: mvdir storage/ndb/test/include/HugoAsynchTransactions.hpp: mvdir storage/ndb/test/include/HugoCalculator.hpp: mvdir storage/ndb/test/include/HugoOperations.hpp: mvdir storage/ndb/test/include/HugoTransactions.hpp: mvdir storage/ndb/test/include/NDBT.hpp: mvdir storage/ndb/test/include/NDBT_DataSet.hpp: 
mvdir storage/ndb/test/include/NDBT_DataSetTransaction.hpp: mvdir storage/ndb/test/include/NDBT_Error.hpp: mvdir storage/ndb/test/include/NDBT_Output.hpp: mvdir storage/ndb/test/include/NDBT_ResultRow.hpp: mvdir storage/ndb/test/include/NDBT_ReturnCodes.h: mvdir storage/ndb/test/include/NDBT_Stats.hpp: mvdir storage/ndb/test/include/NDBT_Table.hpp: mvdir storage/ndb/test/include/NDBT_Tables.hpp: mvdir storage/ndb/test/include/NDBT_Test.hpp: mvdir storage/ndb/test/include/NdbBackup.hpp: mvdir storage/ndb/test/include/NdbConfig.hpp: mvdir storage/ndb/test/include/NdbGrep.hpp: mvdir storage/ndb/test/include/NdbRestarter.hpp: mvdir storage/ndb/test/include/NdbRestarts.hpp: mvdir storage/ndb/test/include/NdbSchemaCon.hpp: mvdir storage/ndb/test/include/NdbSchemaOp.hpp: mvdir storage/ndb/test/include/NdbTest.hpp: mvdir storage/ndb/test/include/NdbTimer.hpp: mvdir storage/ndb/test/include/TestNdbEventOperation.hpp: mvdir storage/ndb/test/include/UtilTransactions.hpp: mvdir storage/ndb/test/include/getarg.h: mvdir storage/ndb/test/ndbapi/InsertRecs.cpp: mvdir storage/ndb/test/ndbapi/Makefile.am: mvdir storage/ndb/test/ndbapi/ScanFilter.hpp: mvdir storage/ndb/test/ndbapi/ScanFunctions.hpp: mvdir storage/ndb/test/ndbapi/ScanInterpretTest.hpp: mvdir storage/ndb/test/ndbapi/TraceNdbApi.cpp: mvdir storage/ndb/test/ndbapi/VerifyNdbApi.cpp: mvdir storage/ndb/test/ndbapi/acid.cpp: mvdir storage/ndb/test/ndbapi/acid2.cpp: mvdir storage/ndb/test/ndbapi/adoInsertRecs.cpp: mvdir storage/ndb/test/ndbapi/asyncGenerator.cpp: mvdir storage/ndb/test/ndbapi/benchronja.cpp: mvdir storage/ndb/test/ndbapi/bulk_copy.cpp: mvdir storage/ndb/test/ndbapi/cdrserver.cpp: mvdir storage/ndb/test/ndbapi/celloDb.cpp: mvdir storage/ndb/test/ndbapi/create_all_tabs.cpp: mvdir storage/ndb/test/ndbapi/create_tab.cpp: mvdir storage/ndb/test/ndbapi/drop_all_tabs.cpp: mvdir storage/ndb/test/ndbapi/flexAsynch.cpp: mvdir storage/ndb/test/ndbapi/flexBench.cpp: mvdir storage/ndb/test/ndbapi/flexHammer.cpp: mvdir 
storage/ndb/test/ndbapi/flexScan.cpp: mvdir storage/ndb/test/ndbapi/flexTT.cpp: mvdir storage/ndb/test/ndbapi/flexTimedAsynch.cpp: mvdir storage/ndb/test/ndbapi/flex_bench_mysql.cpp: mvdir storage/ndb/test/ndbapi/index.cpp: mvdir storage/ndb/test/ndbapi/index2.cpp: mvdir storage/ndb/test/ndbapi/initronja.cpp: mvdir storage/ndb/test/ndbapi/interpreterInTup.cpp: mvdir storage/ndb/test/ndbapi/mainAsyncGenerator.cpp: mvdir storage/ndb/test/ndbapi/msa.cpp: mvdir storage/ndb/test/ndbapi/ndb_async1.cpp: mvdir storage/ndb/test/ndbapi/ndb_async2.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_populate.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction2.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction3.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction4.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction5.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction6.cpp: mvdir storage/ndb/test/ndbapi/restarter.cpp: mvdir storage/ndb/test/ndbapi/restarter2.cpp: mvdir storage/ndb/test/ndbapi/restarts.cpp: mvdir storage/ndb/test/ndbapi/size.cpp: mvdir storage/ndb/test/ndbapi/slow_select.cpp: mvdir storage/ndb/test/ndbapi/testBackup.cpp: mvdir storage/ndb/test/ndbapi/testBasic.cpp: mvdir storage/ndb/test/ndbapi/testBasicAsynch.cpp: mvdir storage/ndb/test/ndbapi/testBitfield.cpp: mvdir storage/ndb/test/ndbapi/testBlobs.cpp: mvdir storage/ndb/test/ndbapi/testDataBuffers.cpp: mvdir storage/ndb/test/ndbapi/testDeadlock.cpp: mvdir storage/ndb/test/ndbapi/testDict.cpp: mvdir storage/ndb/test/ndbapi/testGrep.cpp: mvdir storage/ndb/test/ndbapi/testGrepVerify.cpp: mvdir storage/ndb/test/ndbapi/testIndex.cpp: mvdir storage/ndb/test/ndbapi/testInterpreter.cpp: mvdir storage/ndb/test/ndbapi/testLcp.cpp: mvdir storage/ndb/test/ndbapi/testMgm.cpp: mvdir storage/ndb/test/ndbapi/testNdbApi.cpp: mvdir storage/ndb/test/ndbapi/testNodeRestart.cpp: mvdir storage/ndb/test/ndbapi/testOIBasic.cpp: mvdir 
storage/ndb/test/ndbapi/testOperations.cpp: mvdir storage/ndb/test/ndbapi/testOrderedIndex.cpp: mvdir storage/ndb/test/ndbapi/testPartitioning.cpp: mvdir storage/ndb/test/ndbapi/testReadPerf.cpp: mvdir storage/ndb/test/ndbapi/testRestartGci.cpp: mvdir storage/ndb/test/ndbapi/bank/Bank.cpp: mvdir storage/ndb/test/ndbapi/bank/Bank.hpp: mvdir storage/ndb/test/ndbapi/bank/BankLoad.cpp: mvdir storage/ndb/test/ndbapi/bank/Makefile.am: mvdir storage/ndb/test/ndbapi/bank/bankCreator.cpp: mvdir storage/ndb/test/ndbapi/bank/bankMakeGL.cpp: mvdir storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp: mvdir storage/ndb/test/ndbapi/bank/bankTimer.cpp: mvdir storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp: mvdir storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp: mvdir storage/ndb/test/ndbapi/bank/testBank.cpp: mvdir storage/ndb/test/ndbapi/bench/asyncGenerator.cpp: mvdir storage/ndb/test/ndbapi/bench/dbGenerator.h: mvdir storage/ndb/test/ndbapi/bench/dbPopulate.cpp: mvdir storage/ndb/test/ndbapi/bench/dbPopulate.h: mvdir storage/ndb/test/ndbapi/bench/macros.h: mvdir storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp: mvdir storage/ndb/test/ndbapi/bench/mainPopulate.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_async1.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_async2.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_error.hpp: mvdir storage/ndb/test/ndbapi/bench/ndb_schema.hpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction2.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction3.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction4.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction5.cpp: mvdir storage/ndb/test/ndbapi/testScan.cpp: mvdir storage/ndb/test/ndbapi/testScanInterpreter.cpp: mvdir storage/ndb/test/ndbapi/testScanPerf.cpp: mvdir storage/ndb/test/ndbapi/testSystemRestart.cpp: mvdir storage/ndb/test/ndbapi/testTimeout.cpp: mvdir storage/ndb/test/ndbapi/testTransactions.cpp: 
mvdir storage/ndb/test/ndbapi/test_event.cpp: mvdir storage/ndb/test/ndbapi/test_event_multi_table.cpp: mvdir storage/ndb/test/ndbapi/userInterface.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction6.cpp: mvdir storage/ndb/test/ndbapi/bench/testData.h: mvdir storage/ndb/test/ndbapi/bench/testDefinitions.h: mvdir storage/ndb/test/ndbapi/bench/userInterface.cpp: mvdir storage/ndb/test/ndbapi/bench/userInterface.h: mvdir storage/ndb/test/ndbapi/old_dirs/acid/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/acid2/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/acid2/TraceNdbApi.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/acid2/VerifyNdbApi.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/basicAsynch/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/bulk_copy/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/create_all_tabs/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/create_tab/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/drop_all_tabs/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flexAsynch/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flexBench/Makefile.am: mvdir storage/ndb/test/ndbapi/old_dirs/flexBench/ndbplot.pl: mvdir storage/ndb/test/ndbapi/old_dirs/flexHammer/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flexHammer/README: mvdir storage/ndb/test/ndbapi/old_dirs/flexScan/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flexScan/README: mvdir storage/ndb/test/ndbapi/old_dirs/flexTT/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flexTimedAsynch/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flex_bench_mysql/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/indexTest/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/indexTest2/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/interpreterInTup/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/generator/Makefile: mvdir 
storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/dbGenerator.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/testData.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/userInterface.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/macros.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/ndb_error.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/bin/.empty: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/ndb_schema.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/testDefinitions.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/lib/.empty: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l-p10.sh: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l.sh: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-p10.sh: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench.sh: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/README: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/mainGenerator.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/testData.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/userInterface.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.linux: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.sparc: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.c: mvdir 
storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/mainPopulate.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/localDbPrepare.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/macros.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/ndb_error.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userHandle.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userInterface.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userHandle.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userInterface.cpp: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userTransaction.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userTransaction.c: mvdir storage/ndb/test/ndbapi/old_dirs/restarter/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/restarter2/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/restarts/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/ronja/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/ronja/benchronja/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/ronja/initronja/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/telco/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/telco/readme: mvdir storage/ndb/test/ndbapi/old_dirs/testBackup/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testBasic/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testBlobs/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testDataBuffers/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testDict/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testGrep/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testIndex/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testInterpreter/Makefile: mvdir 
storage/ndb/test/ndbapi/old_dirs/testMgm/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testNdbApi/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testNodeRestart/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testOIBasic/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testOIBasic/times.txt: mvdir storage/ndb/test/ndbapi/old_dirs/testOperations/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testOrderedIndex/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testRestartGci/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testScan/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testScanInterpreter/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testSystemRestart/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testTimeout/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testTransactions/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/test_event/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/bcd.h: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/script/client_start: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/utv.h: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/vcdrfunc.h: mvdir storage/ndb/test/ndbnet/test.run: mvdir storage/ndb/test/ndbnet/testError.run: mvdir storage/ndb/test/ndbnet/testMNF.run: mvdir storage/ndb/test/ndbnet/testNR.run: mvdir storage/ndb/test/ndbnet/testNR1.run: mvdir storage/ndb/test/ndbnet/testNR4.run: mvdir storage/ndb/test/ndbnet/testSRhang.run: mvdir storage/ndb/test/ndbnet/testTR295.run: mvdir storage/ndb/test/newtonapi/basic_test/Makefile: mvdir storage/ndb/test/newtonapi/basic_test/basic/Makefile: mvdir storage/ndb/test/newtonapi/basic_test/basic/basic.cpp: mvdir storage/ndb/test/newtonapi/basic_test/bulk_read/Makefile: mvdir storage/ndb/test/newtonapi/basic_test/bulk_read/br_test.cpp: mvdir storage/ndb/test/newtonapi/basic_test/common.cpp: mvdir storage/ndb/test/newtonapi/basic_test/common.hpp: mvdir storage/ndb/test/newtonapi/basic_test/ptr_binding/Makefile: 
mvdir storage/ndb/test/newtonapi/basic_test/ptr_binding/ptr_binding_test.cpp: mvdir storage/ndb/test/newtonapi/basic_test/too_basic.cpp: mvdir storage/ndb/test/newtonapi/perf_test/Makefile: mvdir storage/ndb/test/newtonapi/perf_test/perf.cpp: mvdir storage/ndb/test/odbc/SQL99_test/Makefile: mvdir storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp: mvdir storage/ndb/test/odbc/SQL99_test/SQL99_test.h: mvdir storage/ndb/test/odbc/client/Makefile: mvdir storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp: mvdir storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp: mvdir storage/ndb/test/odbc/client/NDBT_SQLConnect.cpp: mvdir storage/ndb/test/odbc/client/NDBT_SQLPrepare.cpp: mvdir storage/ndb/test/odbc/client/SQLAllocEnvTest.cpp: mvdir storage/ndb/test/odbc/client/SQLAllocHandleTest.cpp: mvdir storage/ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp: mvdir storage/ndb/test/odbc/client/SQLBindColTest.cpp: mvdir storage/ndb/test/odbc/client/SQLBindParameterTest.cpp: mvdir storage/ndb/test/odbc/client/SQLCancelTest.cpp: mvdir storage/ndb/test/odbc/client/SQLCloseCursorTest.cpp: mvdir storage/ndb/test/odbc/client/SQLColAttributeTest.cpp: mvdir storage/ndb/test/odbc/client/SQLColAttributeTest1.cpp: mvdir storage/ndb/test/odbc/client/SQLColAttributeTest2.cpp: mvdir storage/ndb/test/odbc/client/SQLColAttributeTest3.cpp: mvdir storage/ndb/test/odbc/client/SQLConnectTest.cpp: mvdir storage/ndb/test/odbc/client/SQLCopyDescTest.cpp: mvdir storage/ndb/test/odbc/client/SQLDescribeColTest.cpp: mvdir storage/ndb/test/odbc/client/SQLDisconnectTest.cpp: mvdir storage/ndb/test/odbc/client/SQLDriverConnectTest.cpp: mvdir storage/ndb/test/odbc/client/SQLEndTranTest.cpp: mvdir storage/ndb/test/odbc/client/SQLErrorTest.cpp: mvdir storage/ndb/test/odbc/client/SQLExecDirectTest.cpp: mvdir storage/ndb/test/odbc/client/SQLExecuteTest.cpp: mvdir storage/ndb/test/odbc/client/SQLFetchScrollTest.cpp: mvdir storage/ndb/test/odbc/client/SQLFetchTest.cpp: mvdir 
storage/ndb/test/odbc/client/SQLFreeHandleTest.cpp: mvdir storage/ndb/test/odbc/client/SQLFreeStmtTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetConnectAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetCursorNameTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDataTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDescFieldTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDescRecTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDiagFieldTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDiagRecSimpleTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDiagRecTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetEnvAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetFunctionsTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetInfoTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetStmtAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetTypeInfoTest.cpp: mvdir storage/ndb/test/odbc/client/SQLMoreResultsTest.cpp: mvdir storage/ndb/test/odbc/client/SQLNumResultColsTest.cpp: mvdir storage/ndb/test/odbc/client/SQLParamDataTest.cpp: mvdir storage/ndb/test/odbc/client/SQLPrepareTest.cpp: mvdir storage/ndb/test/odbc/client/SQLPutDataTest.cpp: mvdir storage/ndb/test/odbc/client/SQLRowCountTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetConnectAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetCursorNameTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetDescFieldTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetDescRecTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetEnvAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetStmtAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLTablesTest.cpp: mvdir storage/ndb/test/odbc/client/SQLTransactTest.cpp: mvdir storage/ndb/test/odbc/client/common.hpp: mvdir storage/ndb/test/odbc/client/main.cpp: mvdir storage/ndb/test/odbc/dm-iodbc/Makefile: mvdir storage/ndb/test/odbc/dm-unixodbc/Makefile: mvdir storage/ndb/test/odbc/driver/Makefile: mvdir storage/ndb/test/odbc/driver/testOdbcDriver.cpp: mvdir 
storage/ndb/test/odbc/test_compiler/Makefile: mvdir storage/ndb/test/odbc/test_compiler/test_compiler.cpp: mvdir storage/ndb/test/run-test/16node-tests.txt: mvdir storage/ndb/test/run-test/Makefile.am: mvdir storage/ndb/test/run-test/README.ATRT: mvdir storage/ndb/test/run-test/README: mvdir storage/ndb/test/run-test/atrt-analyze-result.sh: mvdir storage/ndb/test/run-test/atrt-clear-result.sh: mvdir storage/ndb/test/run-test/atrt-example.tgz: mvdir storage/ndb/test/run-test/atrt-gather-result.sh: mvdir storage/ndb/test/run-test/atrt-mysql-test-run: mvdir storage/ndb/test/run-test/atrt-setup.sh: mvdir storage/ndb/test/run-test/atrt-testBackup: mvdir storage/ndb/test/run-test/basic.txt: mvdir storage/ndb/test/run-test/daily-basic-tests.txt: mvdir storage/ndb/test/run-test/daily-devel-tests.txt: mvdir storage/ndb/test/run-test/example.conf: mvdir storage/ndb/test/run-test/main.cpp: mvdir storage/ndb/test/run-test/make-config.sh: mvdir storage/ndb/test/run-test/make-html-reports.sh: mvdir storage/ndb/test/run-test/make-index.sh: mvdir storage/ndb/test/run-test/ndb-autotest.sh: mvdir storage/ndb/test/run-test/run-test.hpp: mvdir storage/ndb/test/src/CpcClient.cpp: mvdir storage/ndb/test/src/HugoAsynchTransactions.cpp: mvdir storage/ndb/test/src/HugoCalculator.cpp: mvdir storage/ndb/test/src/HugoOperations.cpp: mvdir storage/ndb/test/src/HugoTransactions.cpp: mvdir storage/ndb/test/src/Makefile.am: mvdir storage/ndb/test/src/NDBT_Error.cpp: mvdir storage/ndb/test/src/NDBT_Output.cpp: mvdir storage/ndb/test/src/NDBT_ResultRow.cpp: mvdir storage/ndb/test/src/NDBT_ReturnCodes.cpp: mvdir storage/ndb/test/src/NDBT_Table.cpp: mvdir storage/ndb/test/src/NDBT_Tables.cpp: mvdir storage/ndb/test/src/NDBT_Test.cpp: mvdir storage/ndb/test/src/NdbBackup.cpp: mvdir storage/ndb/test/src/NdbConfig.cpp: mvdir storage/ndb/test/src/NdbGrep.cpp: mvdir storage/ndb/test/src/NdbRestarter.cpp: mvdir storage/ndb/test/src/NdbRestarts.cpp: mvdir storage/ndb/test/src/NdbSchemaCon.cpp: mvdir 
storage/ndb/test/src/NdbSchemaOp.cpp: mvdir storage/ndb/test/src/UtilTransactions.cpp: mvdir storage/ndb/test/src/getarg.c: mvdir storage/ndb/test/tools/Makefile.am: mvdir storage/ndb/test/tools/copy_tab.cpp: mvdir storage/ndb/test/tools/cpcc.cpp: mvdir storage/ndb/test/tools/create_index.cpp: mvdir storage/ndb/test/tools/hugoCalculator.cpp: mvdir storage/ndb/test/tools/hugoFill.cpp: mvdir storage/ndb/test/tools/hugoLoad.cpp: mvdir storage/ndb/test/tools/hugoLockRecords.cpp: mvdir storage/ndb/test/tools/hugoPkDelete.cpp: mvdir storage/ndb/test/tools/hugoPkRead.cpp: mvdir storage/ndb/test/tools/hugoPkReadRecord.cpp: mvdir storage/ndb/test/tools/hugoPkUpdate.cpp: mvdir storage/ndb/test/tools/hugoScanRead.cpp: mvdir storage/ndb/test/tools/hugoScanUpdate.cpp: mvdir storage/ndb/test/tools/old_dirs/hugoCalculator/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoFill/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoLoad/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoLockRecords/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoPkDelete/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoPkRead/Makefile: mvdir storage/ndb/test/tools/restart.cpp: mvdir storage/ndb/test/tools/transproxy.cpp: mvdir storage/ndb/test/tools/verify_index.cpp: mvdir storage/ndb/test/tools/old_dirs/hugoPkReadRecord/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoPkUpdate/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoScanRead/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoScanUpdate/Makefile: mvdir storage/ndb/test/tools/old_dirs/restart/Makefile: mvdir storage/ndb/test/tools/old_dirs/transproxy/Makefile: mvdir storage/ndb/test/tools/old_dirs/verify_index/Makefile: mvdir storage/ndb/test/tools/old_dirs/waiter/waiter.cpp: mvdir storage/ndb/tools/Makefile.am: mvdir storage/ndb/tools/clean-links.sh: mvdir storage/ndb/tools/delete_all.cpp: mvdir storage/ndb/tools/desc.cpp: mvdir storage/ndb/tools/drop_index.cpp: mvdir storage/ndb/tools/drop_tab.cpp: mvdir 
storage/ndb/tools/listTables.cpp: mvdir storage/ndb/tools/make-errors.pl: mvdir storage/ndb/tools/make-links.sh: mvdir storage/ndb/tools/ndb_test_platform.cpp: mvdir storage/ndb/tools/ndbsql.cpp: mvdir storage/ndb/tools/old_dirs/copy_tab/Makefile: mvdir storage/ndb/tools/old_dirs/cpcc/Makefile: mvdir storage/ndb/tools/old_dirs/create_index/Makefile: mvdir storage/ndb/tools/old_dirs/delete_all/Makefile: mvdir storage/ndb/tools/old_dirs/desc/Makefile: mvdir storage/ndb/tools/old_dirs/drop_index/Makefile: mvdir storage/ndb/tools/old_dirs/drop_tab/Makefile: mvdir storage/ndb/tools/old_dirs/list_tables/Makefile: mvdir storage/ndb/tools/old_dirs/ndbnet/Makefile.PL: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/ndbnet.pl: mvdir storage/ndb/tools/old_dirs/ndbnet/ndbnetd.pl: mvdir storage/ndb/tools/old_dirs/ndbnet/ndbrun: mvdir storage/ndb/tools/rgrep: mvdir storage/ndb/tools/select_all.cpp: mvdir storage/ndb/tools/select_count.cpp: mvdir storage/ndb/tools/waiter.cpp: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Base.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Client.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Command.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Config.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Database.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Env.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Node.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeApi.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeDb.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeMgmt.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Server.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerINET.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerUNIX.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Base.pm: mvdir 
storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Database.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Env.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Node.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Base.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Dir.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Event.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/File.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/IO.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Lock.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Log.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Socket.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketINET.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketUNIX.pm: mvdir storage/ndb/tools/old_dirs/ndbsql/Makefile: mvdir storage/ndb/tools/old_dirs/select_all/Makefile: mvdir storage/ndb/tools/old_dirs/select_count/Makefile: mvdir storage/ndb/tools/old_dirs/src/counterviewer/CounterViewer.java: mvdir storage/ndb/tools/restore/Restore.cpp: mvdir storage/ndb/tools/restore/Restore.hpp: mvdir storage/ndb/tools/restore/consumer.cpp: mvdir storage/ndb/tools/restore/consumer.hpp: mvdir storage/ndb/tools/restore/consumer_printer.cpp: mvdir storage/ndb/tools/restore/consumer_printer.hpp: mvdir storage/ndb/tools/restore/consumer_restore.cpp: mvdir storage/ndb/tools/restore/consumer_restore.hpp: mvdir storage/ndb/tools/restore/consumer_restorem.cpp: mvdir storage/ndb/tools/restore/restore_main.cpp: mvdir storage/bdb/LICENSE: mvdir storage/bdb/Makefile.in: mvdir storage/bdb/btree/bt_compare.c: mvdir storage/bdb/btree/bt_conv.c: mvdir storage/bdb/btree/bt_curadj.c: mvdir storage/bdb/btree/bt_cursor.c: mvdir storage/bdb/btree/bt_delete.c: mvdir storage/bdb/btree/bt_method.c: mvdir storage/bdb/btree/bt_open.c: mvdir storage/bdb/btree/bt_put.c: mvdir storage/bdb/btree/bt_rec.c: mvdir 
storage/bdb/btree/bt_reclaim.c: mvdir storage/bdb/btree/bt_recno.c: mvdir storage/bdb/btree/bt_rsearch.c: mvdir storage/bdb/btree/bt_search.c: mvdir storage/bdb/btree/bt_split.c: mvdir storage/bdb/btree/bt_stat.c: mvdir storage/bdb/btree/bt_upgrade.c: mvdir storage/bdb/btree/bt_verify.c: mvdir storage/bdb/btree/btree.src: mvdir storage/bdb/build_unix/.IGNORE_ME: mvdir storage/bdb/build_vxworks/BerkeleyDB.wsp: mvdir storage/bdb/build_vxworks/dbdemo/README: mvdir storage/bdb/build_win32/Berkeley_DB.dsw: mvdir storage/bdb/build_win32/app_dsp.src: mvdir storage/bdb/build_win32/build_all.dsp: mvdir storage/bdb/build_win32/db_java_xa.dsp: mvdir storage/bdb/build_win32/db_java_xaj.mak: mvdir storage/bdb/build_win32/db_lib.dsp: mvdir storage/bdb/build_win32/db_test.src: mvdir storage/bdb/build_win32/dbkill.cpp: mvdir storage/bdb/build_win32/dllmain.c: mvdir storage/bdb/build_win32/dynamic_dsp.src: mvdir storage/bdb/build_win32/java_dsp.src: mvdir storage/bdb/build_win32/libdb_tcl.def: mvdir storage/bdb/build_win32/libdbrc.src: mvdir storage/bdb/build_win32/srcfile_dsp.src: mvdir storage/bdb/build_win32/static_dsp.src: mvdir storage/bdb/build_win32/tcl_dsp.src: mvdir storage/bdb/clib/getcwd.c: mvdir storage/bdb/clib/getopt.c: mvdir storage/bdb/clib/memcmp.c: mvdir storage/bdb/clib/memmove.c: mvdir storage/bdb/clib/raise.c: mvdir storage/bdb/clib/snprintf.c: mvdir storage/bdb/clib/strcasecmp.c: mvdir storage/bdb/clib/strdup.c: mvdir storage/bdb/clib/strerror.c: mvdir storage/bdb/clib/vsnprintf.c: mvdir storage/bdb/common/db_byteorder.c: mvdir storage/bdb/common/db_err.c: mvdir storage/bdb/common/db_getlong.c: mvdir storage/bdb/common/db_idspace.c: mvdir storage/bdb/common/db_log2.c: mvdir storage/bdb/common/util_arg.c: mvdir storage/bdb/common/util_cache.c: mvdir storage/bdb/common/util_log.c: mvdir storage/bdb/common/util_sig.c: mvdir storage/bdb/cxx/cxx_db.cpp: mvdir storage/bdb/cxx/cxx_dbc.cpp: mvdir storage/bdb/cxx/cxx_dbt.cpp: mvdir storage/bdb/cxx/cxx_env.cpp: mvdir 
storage/bdb/cxx/cxx_except.cpp: mvdir storage/bdb/cxx/cxx_lock.cpp: mvdir storage/bdb/cxx/cxx_logc.cpp: mvdir storage/bdb/cxx/cxx_mpool.cpp: mvdir storage/bdb/cxx/cxx_txn.cpp: mvdir storage/bdb/db/crdel.src: mvdir storage/bdb/db/crdel_rec.c: mvdir storage/bdb/db/db.c: mvdir storage/bdb/db/db.src: mvdir storage/bdb/db/db_am.c: mvdir storage/bdb/db/db_cam.c: mvdir storage/bdb/db/db_conv.c: mvdir storage/bdb/db/db_dispatch.c: mvdir storage/bdb/db/db_dup.c: mvdir storage/bdb/db/db_iface.c: mvdir storage/bdb/db/db_join.c: mvdir storage/bdb/db/db_meta.c: mvdir storage/bdb/db/db_method.c: mvdir storage/bdb/db/db_open.c: mvdir storage/bdb/db/db_overflow.c: mvdir storage/bdb/db/db_pr.c: mvdir storage/bdb/db/db_rec.c: mvdir storage/bdb/db/db_reclaim.c: mvdir storage/bdb/db/db_remove.c: mvdir storage/bdb/db/db_rename.c: mvdir storage/bdb/db/db_ret.c: mvdir storage/bdb/db/db_truncate.c: mvdir storage/bdb/db/db_upg.c: mvdir storage/bdb/db/db_upg_opd.c: mvdir storage/bdb/db/db_vrfy.c: mvdir storage/bdb/db/db_vrfyutil.c: mvdir storage/bdb/db185/db185.c: mvdir storage/bdb/db185/db185_int.in: mvdir storage/bdb/db_archive/db_archive.c: mvdir storage/bdb/db_checkpoint/db_checkpoint.c: mvdir storage/bdb/db_deadlock/db_deadlock.c: mvdir storage/bdb/db_dump/db_dump.c: mvdir storage/bdb/db_dump185/db_dump185.c: mvdir storage/bdb/db_load/db_load.c: mvdir storage/bdb/db_printlog/README: mvdir storage/bdb/db_printlog/commit.awk: mvdir storage/bdb/db_printlog/count.awk: mvdir storage/bdb/db_printlog/db_printlog.c: mvdir storage/bdb/db_printlog/dbname.awk: mvdir storage/bdb/db_printlog/fileid.awk: mvdir storage/bdb/db_printlog/logstat.awk: mvdir storage/bdb/db_printlog/pgno.awk: mvdir storage/bdb/db_printlog/range.awk: mvdir storage/bdb/db_printlog/rectype.awk: mvdir storage/bdb/db_printlog/status.awk: mvdir storage/bdb/db_printlog/txn.awk: mvdir storage/bdb/db_recover/db_recover.c: mvdir storage/bdb/db_stat/db_stat.c: mvdir storage/bdb/db_upgrade/db_upgrade.c: mvdir 
storage/bdb/db_verify/db_verify.c: mvdir storage/bdb/dbinc/btree.h: mvdir storage/bdb/dbinc/crypto.h: mvdir storage/bdb/dbinc/cxx_common.h: mvdir storage/bdb/dbinc/cxx_except.h: mvdir storage/bdb/dbinc/cxx_int.h: mvdir storage/bdb/dbinc/db.in: mvdir storage/bdb/dbinc/db_185.in: mvdir storage/bdb/dbinc/db_am.h: mvdir storage/bdb/dbinc/db_cxx.in: mvdir storage/bdb/dbinc/db_dispatch.h: mvdir storage/bdb/dbinc/db_int.in: mvdir storage/bdb/dbinc/db_join.h: mvdir storage/bdb/dbinc/db_page.h: mvdir storage/bdb/dbinc/db_server_int.h: mvdir storage/bdb/dbinc/db_shash.h: mvdir storage/bdb/dbinc/db_swap.h: mvdir storage/bdb/dbinc/db_upgrade.h: mvdir storage/bdb/dbinc/db_verify.h: mvdir storage/bdb/dbinc/debug.h: mvdir storage/bdb/dbinc/fop.h: mvdir storage/bdb/dbinc/globals.h: mvdir storage/bdb/dbinc/hash.h: mvdir storage/bdb/dbinc/hmac.h: mvdir storage/bdb/dbinc/lock.h: mvdir storage/bdb/dbinc/log.h: mvdir storage/bdb/dbinc/mp.h: mvdir storage/bdb/dbinc/mutex.h: mvdir storage/bdb/dbinc/os.h: mvdir storage/bdb/dbinc/qam.h: mvdir storage/bdb/dbinc/queue.h: mvdir storage/bdb/dbinc/region.h: mvdir storage/bdb/dbinc/rep.h: mvdir storage/bdb/dbinc/shqueue.h: mvdir storage/bdb/dbinc/tcl_db.h: mvdir storage/bdb/dbinc/txn.h: mvdir storage/bdb/dbinc/xa.h: mvdir storage/bdb/dbm/dbm.c: mvdir storage/bdb/dbreg/dbreg.c: mvdir storage/bdb/dbreg/dbreg.src: mvdir storage/bdb/dbreg/dbreg_rec.c: mvdir storage/bdb/dbreg/dbreg_util.c: mvdir storage/bdb/dist/Makefile.in: mvdir storage/bdb/dist/RELEASE: mvdir storage/bdb/dist/buildrel: mvdir storage/bdb/dist/config.guess: mvdir storage/bdb/dist/config.sub: mvdir storage/bdb/dist/configure.ac: mvdir storage/bdb/dist/db.ecd.in: mvdir storage/bdb/dist/db.spec.in: mvdir storage/bdb/dist/gen_inc.awk: mvdir storage/bdb/dist/gen_rec.awk: mvdir storage/bdb/dist/gen_rpc.awk: mvdir storage/bdb/dist/install-sh: mvdir storage/bdb/dist/ltmain.sh: mvdir storage/bdb/dist/pubdef.in: mvdir storage/bdb/dist/s_all: mvdir storage/bdb/dist/s_config: mvdir 
storage/bdb/dist/aclocal/config.ac: mvdir storage/bdb/dist/aclocal/cxx.ac: mvdir storage/bdb/dist/aclocal/gcc.ac: mvdir storage/bdb/dist/aclocal/libtool.ac: mvdir storage/bdb/dist/s_crypto: mvdir storage/bdb/dist/s_dir: mvdir storage/bdb/dist/s_include: mvdir storage/bdb/dist/s_javah: mvdir storage/bdb/dist/s_java: mvdir storage/bdb/dist/s_perm: mvdir storage/bdb/dist/s_readme: mvdir storage/bdb/dist/s_recover: mvdir storage/bdb/dist/s_rpc: mvdir storage/bdb/dist/s_symlink: mvdir storage/bdb/dist/s_tags: mvdir storage/bdb/dist/s_test: mvdir storage/bdb/dist/s_vxworks: mvdir storage/bdb/dist/s_win32_dsp: mvdir storage/bdb/dist/s_win32: mvdir storage/bdb/dist/srcfiles.in: mvdir storage/bdb/dist/vx_buildcd: mvdir storage/bdb/dist/vx_config.in: mvdir storage/bdb/dist/win_config.in: mvdir storage/bdb/dist/win_exports.in: mvdir storage/bdb/dist/aclocal/mutex.ac: mvdir storage/bdb/dist/aclocal/options.ac: mvdir storage/bdb/dist/aclocal/programs.ac: mvdir storage/bdb/dist/aclocal/sosuffix.ac: mvdir storage/bdb/dist/aclocal/tcl.ac: mvdir storage/bdb/dist/aclocal/types.ac: mvdir storage/bdb/dist/aclocal_java/ac_check_class.ac: mvdir storage/bdb/dist/aclocal_java/ac_check_classpath.ac: mvdir storage/bdb/dist/aclocal_java/ac_check_junit.ac: mvdir storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac: mvdir storage/bdb/dist/aclocal_java/ac_java_options.ac: mvdir storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_jar.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_java.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_java_works.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_javac.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_javah.ac: mvdir storage/bdb/dist/aclocal_java/ac_try_compile_java.ac: mvdir storage/bdb/dist/aclocal_java/ac_try_run_javac.ac: mvdir storage/bdb/dist/template/rec_ctemp: mvdir 
storage/bdb/dist/vx_2.0/BerkeleyDB.wpj: mvdir storage/bdb/dist/vx_2.0/wpj.in: mvdir storage/bdb/dist/vx_3.1/Makefile.custom: mvdir storage/bdb/dist/vx_3.1/cdf.1: mvdir storage/bdb/dist/vx_3.1/cdf.2: mvdir storage/bdb/dist/vx_3.1/cdf.3: mvdir storage/bdb/dist/vx_3.1/component.cdf: mvdir storage/bdb/dist/vx_3.1/component.wpj: mvdir storage/bdb/dist/vx_3.1/wpj.1: mvdir storage/bdb/dist/vx_3.1/wpj.2: mvdir storage/bdb/dist/vx_3.1/wpj.3: mvdir storage/bdb/dist/vx_3.1/wpj.4: mvdir storage/bdb/dist/vx_3.1/wpj.5: mvdir storage/bdb/dist/vx_setup/CONFIG.in: mvdir storage/bdb/dist/vx_setup/LICENSE.TXT: mvdir storage/bdb/dist/vx_setup/MESSAGES.TCL: mvdir storage/bdb/dist/vx_setup/README.in: mvdir storage/bdb/dist/vx_setup/SETUP.BMP: mvdir storage/bdb/dist/vx_setup/vx_allfile.in: mvdir storage/bdb/dist/vx_setup/vx_demofile.in: mvdir storage/bdb/dist/vx_setup/vx_setup.in: mvdir storage/bdb/env/db_salloc.c: mvdir storage/bdb/env/db_shash.c: mvdir storage/bdb/env/env_file.c: mvdir storage/bdb/env/env_method.c.b: mvdir storage/bdb/env/env_method.c: mvdir storage/bdb/env/env_open.c: mvdir storage/bdb/env/env_recover.c: mvdir storage/bdb/env/env_region.c: mvdir storage/bdb/fileops/fileops.src: mvdir storage/bdb/fileops/fop_basic.c: mvdir storage/bdb/fileops/fop_rec.c: mvdir storage/bdb/fileops/fop_util.c: mvdir storage/bdb/hash/hash.c: mvdir storage/bdb/hash/hash.src: mvdir storage/bdb/hash/hash_conv.c: mvdir storage/bdb/hash/hash_dup.c: mvdir storage/bdb/hash/hash_func.c: mvdir storage/bdb/hash/hash_meta.c: mvdir storage/bdb/hash/hash_method.c: mvdir storage/bdb/hash/hash_open.c: mvdir storage/bdb/hash/hash_page.c: mvdir storage/bdb/hash/hash_rec.c: mvdir storage/bdb/hash/hash_reclaim.c: mvdir storage/bdb/hash/hash_stat.c: mvdir storage/bdb/hash/hash_upgrade.c: mvdir storage/bdb/hash/hash_verify.c: mvdir storage/bdb/hmac/hmac.c: mvdir storage/bdb/hmac/sha1.c: mvdir storage/bdb/hsearch/hsearch.c: mvdir storage/bdb/libdb_java/checkapi.prl: mvdir 
storage/bdb/libdb_java/com_sleepycat_db_Db.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbLock.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_Dbc.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_Dbt.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h: mvdir storage/bdb/libdb_java/java_Db.c: mvdir storage/bdb/libdb_java/java_DbEnv.c: mvdir storage/bdb/libdb_java/java_DbLock.c: mvdir storage/bdb/libdb_java/java_DbLogc.c: mvdir storage/bdb/libdb_java/java_DbLsn.c: mvdir storage/bdb/libdb_java/java_DbTxn.c: mvdir storage/bdb/libdb_java/java_DbUtil.c: mvdir storage/bdb/libdb_java/java_DbXAResource.c: mvdir storage/bdb/libdb_java/java_Dbc.c: mvdir storage/bdb/libdb_java/java_Dbt.c: mvdir storage/bdb/libdb_java/java_info.c: mvdir storage/bdb/libdb_java/java_info.h: mvdir storage/bdb/libdb_java/java_locked.c: mvdir storage/bdb/libdb_java/java_locked.h: mvdir storage/bdb/libdb_java/java_util.c: mvdir storage/bdb/libdb_java/java_util.h: mvdir storage/bdb/lock/Design: mvdir storage/bdb/lock/lock.c: mvdir storage/bdb/lock/lock_deadlock.c: mvdir storage/bdb/lock/lock_method.c: mvdir storage/bdb/lock/lock_region.c: mvdir storage/bdb/lock/lock_stat.c: mvdir storage/bdb/lock/lock_util.c: mvdir storage/bdb/log/log.c: mvdir storage/bdb/log/log_archive.c: mvdir storage/bdb/log/log_compare.c: mvdir storage/bdb/log/log_get.c: mvdir storage/bdb/log/log_method.c: mvdir storage/bdb/log/log_put.c: mvdir storage/bdb/mp/mp_alloc.c: mvdir storage/bdb/mp/mp_bh.c: mvdir storage/bdb/mp/mp_fget.c: mvdir storage/bdb/mp/mp_fopen.c: mvdir storage/bdb/mp/mp_fput.c: mvdir storage/bdb/mp/mp_fset.c: mvdir storage/bdb/mp/mp_method.c: mvdir storage/bdb/mp/mp_region.c: mvdir storage/bdb/mp/mp_register.c: mvdir 
storage/bdb/mp/mp_stat.c: mvdir storage/bdb/mp/mp_sync.c: mvdir storage/bdb/mp/mp_trickle.c: mvdir storage/bdb/mutex/README: mvdir storage/bdb/mutex/mut_fcntl.c: mvdir storage/bdb/mutex/mut_pthread.c: mvdir storage/bdb/mutex/mut_tas.c: mvdir storage/bdb/mutex/mut_win32.c: mvdir storage/bdb/mutex/mutex.c: mvdir storage/bdb/mutex/tm.c: mvdir storage/bdb/mutex/uts4_cc.s: mvdir storage/bdb/os/os_abs.c: mvdir storage/bdb/os/os_alloc.c: mvdir storage/bdb/os/os_clock.c: mvdir storage/bdb/os/os_config.c: mvdir storage/bdb/os/os_dir.c: mvdir storage/bdb/os/os_errno.c: mvdir storage/bdb/os/os_fid.c: mvdir storage/bdb/os/os_fsync.c: mvdir storage/bdb/os/os_handle.c: mvdir storage/bdb/os/os_id.c: mvdir storage/bdb/os/os_map.c: mvdir storage/bdb/os/os_method.c: mvdir storage/bdb/os/os_oflags.c: mvdir storage/bdb/os/os_open.c: mvdir storage/bdb/os/os_region.c: mvdir storage/bdb/os/os_rename.c: mvdir storage/bdb/os/os_root.c: mvdir storage/bdb/os/os_rpath.c: mvdir storage/bdb/os/os_rw.c: mvdir storage/bdb/os/os_seek.c: mvdir storage/bdb/os/os_sleep.c: mvdir storage/bdb/os/os_spin.c: mvdir storage/bdb/os/os_stat.c: mvdir storage/bdb/os/os_tmpdir.c: mvdir storage/bdb/os/os_unlink.c: mvdir storage/bdb/os_vxworks/os_vx_abs.c: mvdir storage/bdb/os_vxworks/os_vx_config.c: mvdir storage/bdb/os_vxworks/os_vx_map.c: mvdir storage/bdb/os_win32/os_abs.c: mvdir storage/bdb/os_win32/os_clock.c: mvdir storage/bdb/os_win32/os_config.c: mvdir storage/bdb/os_win32/os_dir.c: mvdir storage/bdb/os_win32/os_errno.c: mvdir storage/bdb/os_win32/os_fid.c: mvdir storage/bdb/os_win32/os_fsync.c: mvdir storage/bdb/os_win32/os_handle.c: mvdir storage/bdb/os_win32/os_map.c: mvdir storage/bdb/os_win32/os_open.c: mvdir storage/bdb/os_win32/os_rename.c: mvdir storage/bdb/os_win32/os_rw.c: mvdir storage/bdb/os_win32/os_seek.c: mvdir storage/bdb/os_win32/os_sleep.c: mvdir storage/bdb/os_win32/os_spin.c: mvdir storage/bdb/os_win32/os_stat.c: mvdir storage/bdb/os_win32/os_type.c: mvdir 
storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs: mvdir storage/bdb/perl/BerkeleyDB/Changes: mvdir storage/bdb/perl/BerkeleyDB/MANIFEST: mvdir storage/bdb/perl/BerkeleyDB/Makefile.PL: mvdir storage/bdb/perl/BerkeleyDB/README: mvdir storage/bdb/perl/BerkeleyDB/Todo: mvdir storage/bdb/perl/BerkeleyDB/config.in: mvdir storage/bdb/perl/BerkeleyDB/constants.h: mvdir storage/bdb/perl/BerkeleyDB/constants.xs: mvdir storage/bdb/perl/BerkeleyDB/dbinfo: mvdir storage/bdb/perl/BerkeleyDB/mkconsts: mvdir storage/bdb/perl/BerkeleyDB/mkpod: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm: mvdir storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl: mvdir storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl: mvdir storage/bdb/perl/BerkeleyDB/hints/solaris.pl: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_01: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_02: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_03: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_04: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_05: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004: mvdir storage/bdb/perl/BerkeleyDB/patches/5.005_01: mvdir storage/bdb/perl/BerkeleyDB/patches/5.005_02: mvdir storage/bdb/perl/BerkeleyDB/patches/5.005_03: mvdir storage/bdb/perl/BerkeleyDB/patches/5.005: mvdir storage/bdb/perl/BerkeleyDB/patches/5.6.0: mvdir storage/bdb/perl/BerkeleyDB/ppport.h: mvdir storage/bdb/perl/BerkeleyDB/scan: mvdir storage/bdb/perl/BerkeleyDB/t/btree.t: mvdir storage/bdb/perl/BerkeleyDB/t/destroy.t: mvdir storage/bdb/perl/BerkeleyDB/t/env.t: mvdir storage/bdb/perl/BerkeleyDB/t/examples.t.T: mvdir storage/bdb/perl/BerkeleyDB/t/examples.t: mvdir storage/bdb/perl/BerkeleyDB/t/examples3.t.T: mvdir storage/bdb/perl/BerkeleyDB/t/examples3.t: mvdir storage/bdb/perl/BerkeleyDB/t/filter.t: mvdir 
storage/bdb/perl/BerkeleyDB/t/hash.t: mvdir storage/bdb/perl/BerkeleyDB/t/join.t: mvdir storage/bdb/perl/BerkeleyDB/t/mldbm.t: mvdir storage/bdb/perl/BerkeleyDB/t/queue.t: mvdir storage/bdb/perl/BerkeleyDB/t/recno.t: mvdir storage/bdb/perl/BerkeleyDB/t/strict.t: mvdir storage/bdb/perl/BerkeleyDB/t/subdb.t: mvdir storage/bdb/perl/BerkeleyDB/t/txn.t: mvdir storage/bdb/perl/BerkeleyDB/typemap: mvdir storage/bdb/perl/BerkeleyDB/t/unknown.t: mvdir storage/bdb/perl/BerkeleyDB/t/util.pm: mvdir storage/bdb/perl/DB_File/Changes: mvdir storage/bdb/perl/DB_File/DB_File.pm: mvdir storage/bdb/perl/DB_File/DB_File.xs: mvdir storage/bdb/perl/DB_File/DB_File_BS: mvdir storage/bdb/perl/DB_File/MANIFEST: mvdir storage/bdb/perl/DB_File/Makefile.PL: mvdir storage/bdb/perl/DB_File/README: mvdir storage/bdb/perl/DB_File/config.in: mvdir storage/bdb/perl/DB_File/dbinfo: mvdir storage/bdb/perl/DB_File/fallback.h: mvdir storage/bdb/perl/DB_File/fallback.xs: mvdir storage/bdb/perl/DB_File/hints/dynixptx.pl: mvdir storage/bdb/perl/DB_File/hints/sco.pl: mvdir storage/bdb/perl/DB_File/patches/5.004_01: mvdir storage/bdb/perl/DB_File/patches/5.004_02: mvdir storage/bdb/perl/DB_File/patches/5.004_03: mvdir storage/bdb/perl/DB_File/patches/5.004_04: mvdir storage/bdb/perl/DB_File/patches/5.004_05: mvdir storage/bdb/perl/DB_File/patches/5.004: mvdir storage/bdb/perl/DB_File/patches/5.005_01: mvdir storage/bdb/perl/DB_File/patches/5.005_02: mvdir storage/bdb/perl/DB_File/patches/5.005_03: mvdir storage/bdb/perl/DB_File/patches/5.005: mvdir storage/bdb/perl/DB_File/patches/5.6.0: mvdir storage/bdb/perl/DB_File/ppport.h: mvdir storage/bdb/perl/DB_File/t/db-btree.t: mvdir storage/bdb/perl/DB_File/t/db-hash.t: mvdir storage/bdb/perl/DB_File/t/db-recno.t: mvdir storage/bdb/perl/DB_File/typemap: mvdir storage/bdb/perl/DB_File/version.c: mvdir storage/bdb/qam/qam.c: mvdir storage/bdb/qam/qam.src: mvdir storage/bdb/qam/qam_conv.c: mvdir storage/bdb/qam/qam_files.c: mvdir storage/bdb/qam/qam_method.c: mvdir 
storage/bdb/qam/qam_open.c: mvdir storage/bdb/qam/qam_rec.c: mvdir storage/bdb/qam/qam_stat.c: mvdir storage/bdb/qam/qam_upgrade.c: mvdir storage/bdb/qam/qam_verify.c: mvdir storage/bdb/rep/rep_method.c: mvdir storage/bdb/rep/rep_record.c: mvdir storage/bdb/rep/rep_region.c: mvdir storage/bdb/rep/rep_util.c: mvdir storage/bdb/rpc_client/client.c: mvdir storage/bdb/rpc_client/gen_client_ret.c: mvdir storage/bdb/rpc_server/c/db_server_proc.c.in: mvdir storage/bdb/rpc_server/c/db_server_util.c: mvdir storage/bdb/rpc_server/clsrv.html: mvdir storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp: mvdir storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp: mvdir storage/bdb/rpc_server/java/DbDispatcher.java: mvdir storage/bdb/rpc_server/java/DbServer.java: mvdir storage/bdb/rpc_server/java/FreeList.java: mvdir storage/bdb/rpc_server/java/LocalIterator.java: mvdir storage/bdb/rpc_server/java/README: mvdir storage/bdb/rpc_server/java/RpcDb.java: mvdir storage/bdb/rpc_server/java/RpcDbEnv.java: mvdir storage/bdb/rpc_server/java/RpcDbTxn.java: mvdir storage/bdb/rpc_server/java/RpcDbc.java: mvdir storage/bdb/rpc_server/java/Timer.java: mvdir storage/bdb/rpc_server/java/jrpcgen.jar: mvdir storage/bdb/rpc_server/java/oncrpc.jar: mvdir storage/bdb/rpc_server/rpc.src: mvdir storage/bdb/rpc_server/java/gen/DbServerStub.java: mvdir storage/bdb/rpc_server/java/gen/__db_associate_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_associate_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_close_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_close_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_create_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_create_reply.java: mvdir 
storage/bdb/rpc_server/java/gen/__db_cursor_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_cursor_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_del_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_del_reply.java: mvdir storage/bdb/rpc_server/java/s_jrpcgen: mvdir storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_flags_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_flags_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_get_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_get_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_join_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_join_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_key_range_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_key_range_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_lorder_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_lorder_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_open_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_open_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_pget_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_pget_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_put_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_put_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java: mvdir 
storage/bdb/rpc_server/java/gen/__db_re_len_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_len_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_remove_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_remove_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_rename_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_rename_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_stat_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_stat_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_sync_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_sync_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_truncate_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_truncate_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_close_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_close_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_count_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_count_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_del_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_del_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_get_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_get_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_put_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_put_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_close_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_close_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_create_msg.java: mvdir 
storage/bdb/rpc_server/java/gen/__env_create_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_flags_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_flags_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_open_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_open_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_remove_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_remove_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_abort_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_abort_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_begin_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_begin_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_commit_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_commit_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_discard_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_discard_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_recover_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_recover_reply.java: mvdir storage/bdb/rpc_server/java/gen/db_server.java: mvdir storage/bdb/tcl/tcl_compat.c: mvdir storage/bdb/tcl/tcl_db.c: mvdir storage/bdb/tcl/tcl_db_pkg.c: mvdir storage/bdb/tcl/docs/db.html: mvdir storage/bdb/tcl/docs/env.html: mvdir storage/bdb/tcl/docs/historic.html: mvdir storage/bdb/tcl/docs/index.html: mvdir storage/bdb/tcl/docs/library.html: mvdir storage/bdb/tcl/docs/lock.html: mvdir storage/bdb/tcl/docs/log.html: mvdir 
storage/bdb/tcl/docs/mpool.html: mvdir storage/bdb/tcl/docs/rep.html: mvdir storage/bdb/tcl/docs/test.html: mvdir storage/bdb/tcl/docs/txn.html: mvdir storage/bdb/tcl/tcl_dbcursor.c: mvdir storage/bdb/tcl/tcl_env.c: mvdir storage/bdb/tcl/tcl_internal.c: mvdir storage/bdb/tcl/tcl_lock.c: mvdir storage/bdb/tcl/tcl_log.c: mvdir storage/bdb/tcl/tcl_mp.c: mvdir storage/bdb/tcl/tcl_rep.c: mvdir storage/bdb/tcl/tcl_txn.c: mvdir storage/bdb/tcl/tcl_util.c: mvdir storage/bdb/test/archive.tcl: mvdir storage/bdb/test/bigfile001.tcl: mvdir storage/bdb/test/bigfile002.tcl: mvdir storage/bdb/test/byteorder.tcl: mvdir storage/bdb/test/conscript.tcl: mvdir storage/bdb/test/dbm.tcl: mvdir storage/bdb/test/dbscript.tcl: mvdir storage/bdb/test/ddoyscript.tcl: mvdir storage/bdb/test/ddscript.tcl: mvdir storage/bdb/test/dead001.tcl: mvdir storage/bdb/test/dead002.tcl: mvdir storage/bdb/test/dead003.tcl: mvdir storage/bdb/test/dead004.tcl: mvdir storage/bdb/test/dead005.tcl: mvdir storage/bdb/test/dead006.tcl: mvdir storage/bdb/test/dead007.tcl: mvdir storage/bdb/test/env001.tcl: mvdir storage/bdb/test/env002.tcl: mvdir storage/bdb/test/env003.tcl: mvdir storage/bdb/test/env004.tcl: mvdir storage/bdb/test/env005.tcl: mvdir storage/bdb/test/env006.tcl: mvdir storage/bdb/test/env007.tcl: mvdir storage/bdb/test/env008.tcl: mvdir storage/bdb/test/env009.tcl: mvdir storage/bdb/test/env010.tcl: mvdir storage/bdb/test/env011.tcl: mvdir storage/bdb/test/hsearch.tcl: mvdir storage/bdb/test/join.tcl: mvdir storage/bdb/test/lock001.tcl: mvdir storage/bdb/test/lock002.tcl: mvdir storage/bdb/test/lock003.tcl: mvdir storage/bdb/test/lock004.tcl: mvdir storage/bdb/test/lock005.tcl: mvdir storage/bdb/test/lockscript.tcl: mvdir storage/bdb/test/log001.tcl: mvdir storage/bdb/test/log002.tcl: mvdir storage/bdb/test/log003.tcl: mvdir storage/bdb/test/log004.tcl: mvdir storage/bdb/test/log005.tcl: mvdir storage/bdb/test/logtrack.tcl: mvdir storage/bdb/test/mdbscript.tcl: mvdir storage/bdb/test/memp001.tcl: 
mvdir storage/bdb/test/memp002.tcl: mvdir storage/bdb/test/memp003.tcl: mvdir storage/bdb/test/mpoolscript.tcl: mvdir storage/bdb/test/mutex001.tcl: mvdir storage/bdb/test/mutex002.tcl: mvdir storage/bdb/test/mutex003.tcl: mvdir storage/bdb/test/mutexscript.tcl: mvdir storage/bdb/test/ndbm.tcl: mvdir storage/bdb/test/parallel.tcl: mvdir storage/bdb/test/recd001.tcl: mvdir storage/bdb/test/recd002.tcl: mvdir storage/bdb/test/recd003.tcl: mvdir storage/bdb/test/recd004.tcl: mvdir storage/bdb/test/recd005.tcl: mvdir storage/bdb/test/recd006.tcl: mvdir storage/bdb/test/recd007.tcl: mvdir storage/bdb/test/recd008.tcl: mvdir storage/bdb/test/recd009.tcl: mvdir storage/bdb/test/recd010.tcl: mvdir storage/bdb/test/recd011.tcl: mvdir storage/bdb/test/recd012.tcl: mvdir storage/bdb/test/recd013.tcl: mvdir storage/bdb/test/recd014.tcl: mvdir storage/bdb/test/recd015.tcl: mvdir storage/bdb/test/recd016.tcl: mvdir storage/bdb/test/recd017.tcl: mvdir storage/bdb/test/recd018.tcl: mvdir storage/bdb/test/recd019.tcl: mvdir storage/bdb/test/recd020.tcl: mvdir storage/bdb/test/recd15scr.tcl: mvdir storage/bdb/test/recdscript.tcl: mvdir storage/bdb/test/rep001.tcl: mvdir storage/bdb/test/rep002.tcl: mvdir storage/bdb/test/rep003.tcl: mvdir storage/bdb/test/rep004.tcl: mvdir storage/bdb/test/rep005.tcl: mvdir storage/bdb/test/reputils.tcl: mvdir storage/bdb/test/rpc001.tcl: mvdir storage/bdb/test/rpc002.tcl: mvdir storage/bdb/test/rpc003.tcl: mvdir storage/bdb/test/rpc004.tcl: mvdir storage/bdb/test/rpc005.tcl: mvdir storage/bdb/test/rsrc001.tcl: mvdir storage/bdb/test/rsrc002.tcl: mvdir storage/bdb/test/rsrc003.tcl: mvdir storage/bdb/test/rsrc004.tcl: mvdir storage/bdb/test/sdb001.tcl: mvdir storage/bdb/test/sdb002.tcl: mvdir storage/bdb/test/sdb003.tcl: mvdir storage/bdb/test/sdb004.tcl: mvdir storage/bdb/test/sdb005.tcl: mvdir storage/bdb/test/sdb006.tcl: mvdir storage/bdb/test/sdb007.tcl: mvdir storage/bdb/test/sdb008.tcl: mvdir storage/bdb/test/sdb009.tcl: mvdir 
storage/bdb/test/sdb010.tcl: mvdir storage/bdb/test/sdb011.tcl: mvdir storage/bdb/test/sdb012.tcl: mvdir storage/bdb/test/sdbscript.tcl: mvdir storage/bdb/test/sdbtest001.tcl: mvdir storage/bdb/test/sdbtest002.tcl: mvdir storage/bdb/test/sdbutils.tcl: mvdir storage/bdb/test/sec001.tcl: mvdir storage/bdb/test/sec002.tcl: mvdir storage/bdb/test/shelltest.tcl: mvdir storage/bdb/test/si001.tcl: mvdir storage/bdb/test/si002.tcl: mvdir storage/bdb/test/si003.tcl: mvdir storage/bdb/test/si004.tcl: mvdir storage/bdb/test/si005.tcl: mvdir storage/bdb/test/si006.tcl: mvdir storage/bdb/test/sindex.tcl: mvdir storage/bdb/test/sysscript.tcl: mvdir storage/bdb/test/test.tcl: mvdir storage/bdb/test/test001.tcl: mvdir storage/bdb/test/test002.tcl: mvdir storage/bdb/test/test003.tcl: mvdir storage/bdb/test/test004.tcl: mvdir storage/bdb/test/test005.tcl: mvdir storage/bdb/test/test006.tcl: mvdir storage/bdb/test/test007.tcl: mvdir storage/bdb/test/test008.tcl: mvdir storage/bdb/test/test009.tcl: mvdir storage/bdb/test/test010.tcl: mvdir storage/bdb/test/test011.tcl: mvdir storage/bdb/test/test012.tcl: mvdir storage/bdb/test/test013.tcl: mvdir storage/bdb/test/test014.tcl: mvdir storage/bdb/test/test015.tcl: mvdir storage/bdb/test/test016.tcl: mvdir storage/bdb/test/test017.tcl: mvdir storage/bdb/test/test018.tcl: mvdir storage/bdb/test/test019.tcl: mvdir storage/bdb/test/test020.tcl: mvdir storage/bdb/test/test021.tcl: mvdir storage/bdb/test/test022.tcl: mvdir storage/bdb/test/test023.tcl: mvdir storage/bdb/test/test024.tcl: mvdir storage/bdb/test/test025.tcl: mvdir storage/bdb/test/test026.tcl: mvdir storage/bdb/test/test027.tcl: mvdir storage/bdb/test/test028.tcl: mvdir storage/bdb/test/test029.tcl: mvdir storage/bdb/test/test030.tcl: mvdir storage/bdb/test/test031.tcl: mvdir storage/bdb/test/test032.tcl: mvdir storage/bdb/test/test033.tcl: mvdir storage/bdb/test/test034.tcl: mvdir storage/bdb/test/test035.tcl: mvdir storage/bdb/test/test036.tcl: mvdir 
storage/bdb/test/test037.tcl: mvdir storage/bdb/test/test038.tcl: mvdir storage/bdb/test/test039.tcl: mvdir storage/bdb/test/test040.tcl: mvdir storage/bdb/test/test041.tcl: mvdir storage/bdb/test/test042.tcl: mvdir storage/bdb/test/test043.tcl: mvdir storage/bdb/test/test044.tcl: mvdir storage/bdb/test/test045.tcl: mvdir storage/bdb/test/test046.tcl: mvdir storage/bdb/test/test047.tcl: mvdir storage/bdb/test/test048.tcl: mvdir storage/bdb/test/test049.tcl: mvdir storage/bdb/test/test050.tcl: mvdir storage/bdb/test/test051.tcl: mvdir storage/bdb/test/test052.tcl: mvdir storage/bdb/test/test053.tcl: mvdir storage/bdb/test/test054.tcl: mvdir storage/bdb/test/test055.tcl: mvdir storage/bdb/test/test056.tcl: mvdir storage/bdb/test/test057.tcl: mvdir storage/bdb/test/test058.tcl: mvdir storage/bdb/test/test059.tcl: mvdir storage/bdb/test/test060.tcl: mvdir storage/bdb/test/test061.tcl: mvdir storage/bdb/test/test062.tcl: mvdir storage/bdb/test/test063.tcl: mvdir storage/bdb/test/test064.tcl: mvdir storage/bdb/test/test065.tcl: mvdir storage/bdb/test/test066.tcl: mvdir storage/bdb/test/test067.tcl: mvdir storage/bdb/test/test068.tcl: mvdir storage/bdb/test/test069.tcl: mvdir storage/bdb/test/test070.tcl: mvdir storage/bdb/test/test071.tcl: mvdir storage/bdb/test/test072.tcl: mvdir storage/bdb/test/test073.tcl: mvdir storage/bdb/test/test074.tcl: mvdir storage/bdb/test/test075.tcl: mvdir storage/bdb/test/test076.tcl: mvdir storage/bdb/test/test077.tcl: mvdir storage/bdb/test/test078.tcl: mvdir storage/bdb/test/test079.tcl: mvdir storage/bdb/test/test080.tcl: mvdir storage/bdb/test/test081.tcl: mvdir storage/bdb/test/test082.tcl: mvdir storage/bdb/test/test083.tcl: mvdir storage/bdb/test/test084.tcl: mvdir storage/bdb/test/test085.tcl: mvdir storage/bdb/test/test086.tcl: mvdir storage/bdb/test/test087.tcl: mvdir storage/bdb/test/test088.tcl: mvdir storage/bdb/test/test089.tcl: mvdir storage/bdb/test/test090.tcl: mvdir storage/bdb/test/test091.tcl: mvdir 
storage/bdb/test/test092.tcl: mvdir storage/bdb/test/test093.tcl: mvdir storage/bdb/test/test094.tcl: mvdir storage/bdb/test/test095.tcl: mvdir storage/bdb/test/test096.tcl: mvdir storage/bdb/test/test097.tcl: mvdir storage/bdb/test/test098.tcl: mvdir storage/bdb/test/test099.tcl: mvdir storage/bdb/test/test100.tcl: mvdir storage/bdb/test/test101.tcl: mvdir storage/bdb/test/testparams.tcl: mvdir storage/bdb/test/testutils.tcl: mvdir storage/bdb/test/txn001.tcl: mvdir storage/bdb/test/txn002.tcl: mvdir storage/bdb/test/txn003.tcl: mvdir storage/bdb/test/txn004.tcl: mvdir storage/bdb/test/txn005.tcl: mvdir storage/bdb/test/txn006.tcl: mvdir storage/bdb/test/txn007.tcl: mvdir storage/bdb/test/txn008.tcl: mvdir storage/bdb/test/txn009.tcl: mvdir storage/bdb/test/txnscript.tcl: mvdir storage/bdb/test/update.tcl: mvdir storage/bdb/test/scr001/chk.code: mvdir storage/bdb/test/scr002/chk.def: mvdir storage/bdb/test/scr003/chk.define: mvdir storage/bdb/test/scr004/chk.javafiles: mvdir storage/bdb/test/scr005/chk.nl: mvdir storage/bdb/test/scr006/chk.offt: mvdir storage/bdb/test/scr007/chk.proto: mvdir storage/bdb/test/scr008/chk.pubdef: mvdir storage/bdb/test/scr009/chk.srcfiles: mvdir storage/bdb/test/scr010/chk.str: mvdir storage/bdb/test/scr010/spell.ok: mvdir storage/bdb/test/scr011/chk.tags: mvdir storage/bdb/test/scr012/chk.vx_code: mvdir storage/bdb/test/scr013/chk.stats: mvdir storage/bdb/test/scr014/chk.err: mvdir storage/bdb/test/scr015/README: mvdir storage/bdb/test/scr015/TestConstruct01.cpp: mvdir storage/bdb/test/scr015/TestConstruct01.testerr: mvdir storage/bdb/test/scr015/TestConstruct01.testout: mvdir storage/bdb/test/scr015/TestExceptInclude.cpp: mvdir storage/bdb/test/scr015/TestGetSetMethods.cpp: mvdir storage/bdb/test/scr015/TestKeyRange.cpp: mvdir storage/bdb/test/scr015/TestKeyRange.testin: mvdir storage/bdb/test/scr015/TestKeyRange.testout: mvdir storage/bdb/test/upgrade.tcl: mvdir storage/bdb/test/wordlist: mvdir storage/bdb/test/wrap.tcl: mvdir 
storage/bdb/test/scr015/TestLogc.cpp: mvdir storage/bdb/test/scr015/TestLogc.testout: mvdir storage/bdb/test/scr015/TestSimpleAccess.cpp: mvdir storage/bdb/test/scr015/TestSimpleAccess.testout: mvdir storage/bdb/test/scr015/TestTruncate.cpp: mvdir storage/bdb/test/scr015/TestTruncate.testout: mvdir storage/bdb/test/scr015/chk.cxxtests: mvdir storage/bdb/test/scr015/ignore: mvdir storage/bdb/test/scr015/testall: mvdir storage/bdb/test/scr015/testone: mvdir storage/bdb/test/scr016/CallbackTest.java: mvdir storage/bdb/test/scr016/CallbackTest.testout: mvdir storage/bdb/test/scr016/README: mvdir storage/bdb/test/scr016/TestAppendRecno.java: mvdir storage/bdb/test/scr016/TestAppendRecno.testout: mvdir storage/bdb/test/scr016/TestAssociate.java: mvdir storage/bdb/test/scr016/TestAssociate.testout: mvdir storage/bdb/test/scr016/TestClosedDb.java: mvdir storage/bdb/test/scr016/TestClosedDb.testout: mvdir storage/bdb/test/scr016/TestConstruct01.java: mvdir storage/bdb/test/scr016/TestConstruct01.testerr: mvdir storage/bdb/test/scr016/TestConstruct01.testout: mvdir storage/bdb/test/scr016/TestConstruct02.java: mvdir storage/bdb/test/scr016/TestConstruct02.testout: mvdir storage/bdb/test/scr016/TestDbtFlags.java: mvdir storage/bdb/test/scr016/TestDbtFlags.testerr: mvdir storage/bdb/test/scr016/TestDbtFlags.testout: mvdir storage/bdb/test/scr016/TestGetSetMethods.java: mvdir storage/bdb/test/scr016/TestKeyRange.java: mvdir storage/bdb/test/scr016/TestKeyRange.testout: mvdir storage/bdb/test/scr016/TestLockVec.java: mvdir storage/bdb/test/scr016/TestLockVec.testout: mvdir storage/bdb/test/scr016/TestLogc.java: mvdir storage/bdb/test/scr016/TestLogc.testout: mvdir storage/bdb/test/scr016/TestOpenEmpty.java: mvdir storage/bdb/test/scr016/TestOpenEmpty.testerr: mvdir storage/bdb/test/scr016/TestReplication.java: mvdir storage/bdb/test/scr016/TestRpcServer.java: mvdir storage/bdb/test/scr016/TestSameDbt.java: mvdir storage/bdb/test/scr016/TestSameDbt.testout: mvdir 
storage/bdb/test/scr016/TestSimpleAccess.java: mvdir storage/bdb/test/scr016/TestSimpleAccess.testout: mvdir storage/bdb/test/scr016/TestStat.java: mvdir storage/bdb/test/scr016/TestStat.testout: mvdir storage/bdb/test/scr016/TestTruncate.java: mvdir storage/bdb/test/scr016/TestTruncate.testout: mvdir storage/bdb/test/scr016/TestUtil.java: mvdir storage/bdb/test/scr016/TestXAServlet.java: mvdir storage/bdb/test/scr016/chk.javatests: mvdir storage/bdb/test/scr016/ignore: mvdir storage/bdb/test/scr016/testall: mvdir storage/bdb/test/scr016/testone: mvdir storage/bdb/test/scr017/O.BH: mvdir storage/bdb/test/scr017/O.R: mvdir storage/bdb/test/scr017/chk.db185: mvdir storage/bdb/test/scr017/t.c: mvdir storage/bdb/test/scr018/chk.comma: mvdir storage/bdb/test/scr018/t.c: mvdir storage/bdb/test/scr019/chk.include: mvdir storage/bdb/test/scr020/chk.inc: mvdir storage/bdb/test/scr021/chk.flags: mvdir storage/bdb/test/scr022/chk.rr: mvdir storage/bdb/txn/txn.c: mvdir storage/bdb/txn/txn.src: mvdir storage/bdb/txn/txn_method.c: mvdir storage/bdb/txn/txn_rec.c: mvdir storage/bdb/txn/txn_recover.c: mvdir storage/bdb/txn/txn_region.c: mvdir storage/bdb/txn/txn_stat.c: mvdir storage/bdb/txn/txn_util.c: mvdir storage/bdb/xa/xa.c: mvdir storage/bdb/xa/xa_db.c: mvdir storage/bdb/xa/xa_map.c: mvdir
Diffstat (limited to 'storage/bdb')
-rw-r--r--storage/bdb/LICENSE102
-rw-r--r--storage/bdb/Makefile.in57
-rw-r--r--storage/bdb/btree/bt_compare.c211
-rw-r--r--storage/bdb/btree/bt_conv.c102
-rw-r--r--storage/bdb/btree/bt_curadj.c582
-rw-r--r--storage/bdb/btree/bt_cursor.c2794
-rw-r--r--storage/bdb/btree/bt_delete.c460
-rw-r--r--storage/bdb/btree/bt_method.c388
-rw-r--r--storage/bdb/btree/bt_open.c605
-rw-r--r--storage/bdb/btree/bt_put.c854
-rw-r--r--storage/bdb/btree/bt_rec.c971
-rw-r--r--storage/bdb/btree/bt_reclaim.c86
-rw-r--r--storage/bdb/btree/bt_recno.c1327
-rw-r--r--storage/bdb/btree/bt_rsearch.c442
-rw-r--r--storage/bdb/btree/bt_search.c475
-rw-r--r--storage/bdb/btree/bt_split.c1177
-rw-r--r--storage/bdb/btree/bt_stat.c481
-rw-r--r--storage/bdb/btree/bt_upgrade.c162
-rw-r--r--storage/bdb/btree/bt_verify.c2387
-rw-r--r--storage/bdb/btree/btree.src208
-rw-r--r--storage/bdb/build_unix/.IGNORE_ME3
-rw-r--r--storage/bdb/build_vxworks/BerkeleyDB.wsp29
-rw-r--r--storage/bdb/build_vxworks/dbdemo/README39
-rw-r--r--storage/bdb/build_win32/Berkeley_DB.dsw568
-rw-r--r--storage/bdb/build_win32/app_dsp.src145
-rw-r--r--storage/bdb/build_win32/build_all.dsp96
-rw-r--r--storage/bdb/build_win32/db_java_xa.dsp85
-rw-r--r--storage/bdb/build_win32/db_java_xaj.mak21
-rw-r--r--storage/bdb/build_win32/db_lib.dsp92
-rw-r--r--storage/bdb/build_win32/db_test.src97
-rw-r--r--storage/bdb/build_win32/dbkill.cpp131
-rw-r--r--storage/bdb/build_win32/dllmain.c97
-rw-r--r--storage/bdb/build_win32/dynamic_dsp.src93
-rw-r--r--storage/bdb/build_win32/java_dsp.src129
-rw-r--r--storage/bdb/build_win32/libdb_tcl.def27
-rw-r--r--storage/bdb/build_win32/libdbrc.src33
-rw-r--r--storage/bdb/build_win32/srcfile_dsp.src4
-rw-r--r--storage/bdb/build_win32/static_dsp.src85
-rw-r--r--storage/bdb/build_win32/tcl_dsp.src93
-rw-r--r--storage/bdb/clib/getcwd.c272
-rw-r--r--storage/bdb/clib/getopt.c154
-rw-r--r--storage/bdb/clib/memcmp.c67
-rw-r--r--storage/bdb/clib/memmove.c155
-rw-r--r--storage/bdb/clib/raise.c36
-rw-r--r--storage/bdb/clib/snprintf.c74
-rw-r--r--storage/bdb/clib/strcasecmp.c132
-rw-r--r--storage/bdb/clib/strdup.c67
-rw-r--r--storage/bdb/clib/strerror.c77
-rw-r--r--storage/bdb/clib/vsnprintf.c47
-rw-r--r--storage/bdb/common/db_byteorder.c74
-rw-r--r--storage/bdb/common/db_err.c579
-rw-r--r--storage/bdb/common/db_getlong.c154
-rw-r--r--storage/bdb/common/db_idspace.c93
-rw-r--r--storage/bdb/common/db_log2.c64
-rw-r--r--storage/bdb/common/util_arg.c126
-rw-r--r--storage/bdb/common/util_cache.c92
-rw-r--r--storage/bdb/common/util_log.c64
-rw-r--r--storage/bdb/common/util_sig.c86
-rw-r--r--storage/bdb/cxx/cxx_db.cpp605
-rw-r--r--storage/bdb/cxx/cxx_dbc.cpp115
-rw-r--r--storage/bdb/cxx/cxx_dbt.cpp61
-rw-r--r--storage/bdb/cxx/cxx_env.cpp802
-rw-r--r--storage/bdb/cxx/cxx_except.cpp330
-rw-r--r--storage/bdb/cxx/cxx_lock.cpp45
-rw-r--r--storage/bdb/cxx/cxx_logc.cpp65
-rw-r--r--storage/bdb/cxx/cxx_mpool.cpp110
-rw-r--r--storage/bdb/cxx/cxx_txn.cpp81
-rw-r--r--storage/bdb/db/crdel.src46
-rw-r--r--storage/bdb/db/crdel_rec.c97
-rw-r--r--storage/bdb/db/db.c1308
-rw-r--r--storage/bdb/db/db.src195
-rw-r--r--storage/bdb/db/db_am.c1271
-rw-r--r--storage/bdb/db/db_cam.c2286
-rw-r--r--storage/bdb/db/db_conv.c550
-rw-r--r--storage/bdb/db/db_dispatch.c1404
-rw-r--r--storage/bdb/db/db_dup.c281
-rw-r--r--storage/bdb/db/db_iface.c983
-rw-r--r--storage/bdb/db/db_join.c822
-rw-r--r--storage/bdb/db/db_meta.c452
-rw-r--r--storage/bdb/db/db_method.c691
-rw-r--r--storage/bdb/db/db_open.c703
-rw-r--r--storage/bdb/db/db_overflow.c726
-rw-r--r--storage/bdb/db/db_pr.c1294
-rw-r--r--storage/bdb/db/db_rec.c897
-rw-r--r--storage/bdb/db/db_reclaim.c248
-rw-r--r--storage/bdb/db/db_remove.c318
-rw-r--r--storage/bdb/db/db_rename.c297
-rw-r--r--storage/bdb/db/db_ret.c154
-rw-r--r--storage/bdb/db/db_truncate.c95
-rw-r--r--storage/bdb/db/db_upg.c341
-rw-r--r--storage/bdb/db/db_upg_opd.c352
-rw-r--r--storage/bdb/db/db_vrfy.c2462
-rw-r--r--storage/bdb/db/db_vrfyutil.c872
-rw-r--r--storage/bdb/db185/db185.c594
-rw-r--r--storage/bdb/db185/db185_int.in129
-rw-r--r--storage/bdb/db_archive/db_archive.c180
-rw-r--r--storage/bdb/db_checkpoint/db_checkpoint.c243
-rw-r--r--storage/bdb/db_deadlock/db_deadlock.c234
-rw-r--r--storage/bdb/db_dump/db_dump.c611
-rw-r--r--storage/bdb/db_dump185/db_dump185.c355
-rw-r--r--storage/bdb/db_load/db_load.c1232
-rw-r--r--storage/bdb/db_printlog/README34
-rw-r--r--storage/bdb/db_printlog/commit.awk7
-rw-r--r--storage/bdb/db_printlog/count.awk9
-rw-r--r--storage/bdb/db_printlog/db_printlog.c360
-rw-r--r--storage/bdb/db_printlog/dbname.awk79
-rw-r--r--storage/bdb/db_printlog/fileid.awk37
-rw-r--r--storage/bdb/db_printlog/logstat.awk36
-rw-r--r--storage/bdb/db_printlog/pgno.awk47
-rw-r--r--storage/bdb/db_printlog/range.awk27
-rw-r--r--storage/bdb/db_printlog/rectype.awk27
-rw-r--r--storage/bdb/db_printlog/status.awk46
-rw-r--r--storage/bdb/db_printlog/txn.awk34
-rw-r--r--storage/bdb/db_recover/db_recover.c313
-rw-r--r--storage/bdb/db_stat/db_stat.c1267
-rw-r--r--storage/bdb/db_upgrade/db_upgrade.c190
-rw-r--r--storage/bdb/db_verify/db_verify.c248
-rw-r--r--storage/bdb/dbinc/btree.h320
-rw-r--r--storage/bdb/dbinc/crypto.h78
-rw-r--r--storage/bdb/dbinc/cxx_common.h45
-rw-r--r--storage/bdb/dbinc/cxx_except.h141
-rw-r--r--storage/bdb/dbinc/cxx_int.h81
-rw-r--r--storage/bdb/dbinc/db.in1883
-rw-r--r--storage/bdb/dbinc/db_185.in169
-rw-r--r--storage/bdb/dbinc/db_am.h127
-rw-r--r--storage/bdb/dbinc/db_cxx.in795
-rw-r--r--storage/bdb/dbinc/db_dispatch.h105
-rw-r--r--storage/bdb/dbinc/db_int.in473
-rw-r--r--storage/bdb/dbinc/db_join.h31
-rw-r--r--storage/bdb/dbinc/db_page.h651
-rw-r--r--storage/bdb/dbinc/db_server_int.h148
-rw-r--r--storage/bdb/dbinc/db_shash.h81
-rw-r--r--storage/bdb/dbinc/db_swap.h116
-rw-r--r--storage/bdb/dbinc/db_upgrade.h242
-rw-r--r--storage/bdb/dbinc/db_verify.h205
-rw-r--r--storage/bdb/dbinc/debug.h198
-rw-r--r--storage/bdb/dbinc/fop.h16
-rw-r--r--storage/bdb/dbinc/globals.h83
-rw-r--r--storage/bdb/dbinc/hash.h147
-rw-r--r--storage/bdb/dbinc/hmac.h32
-rw-r--r--storage/bdb/dbinc/lock.h212
-rw-r--r--storage/bdb/dbinc/log.h273
-rw-r--r--storage/bdb/dbinc/mp.h293
-rw-r--r--storage/bdb/dbinc/mutex.h879
-rw-r--r--storage/bdb/dbinc/os.h54
-rw-r--r--storage/bdb/dbinc/qam.h156
-rw-r--r--storage/bdb/dbinc/queue.h319
-rw-r--r--storage/bdb/dbinc/region.h304
-rw-r--r--storage/bdb/dbinc/rep.h184
-rw-r--r--storage/bdb/dbinc/shqueue.h337
-rw-r--r--storage/bdb/dbinc/tcl_db.h261
-rw-r--r--storage/bdb/dbinc/txn.h143
-rw-r--r--storage/bdb/dbinc/xa.h179
-rw-r--r--storage/bdb/dbm/dbm.c519
-rw-r--r--storage/bdb/dbreg/dbreg.c450
-rw-r--r--storage/bdb/dbreg/dbreg.src49
-rw-r--r--storage/bdb/dbreg/dbreg_rec.c362
-rw-r--r--storage/bdb/dbreg/dbreg_util.c797
-rw-r--r--storage/bdb/dist/Makefile.in1397
-rw-r--r--storage/bdb/dist/RELEASE28
-rw-r--r--storage/bdb/dist/aclocal/config.ac51
-rw-r--r--storage/bdb/dist/aclocal/cxx.ac17
-rw-r--r--storage/bdb/dist/aclocal/gcc.ac36
-rw-r--r--storage/bdb/dist/aclocal/libtool.ac3633
-rw-r--r--storage/bdb/dist/aclocal/mutex.ac611
-rw-r--r--storage/bdb/dist/aclocal/options.ac197
-rw-r--r--storage/bdb/dist/aclocal/programs.ac80
-rw-r--r--storage/bdb/dist/aclocal/sosuffix.ac69
-rw-r--r--storage/bdb/dist/aclocal/tcl.ac136
-rw-r--r--storage/bdb/dist/aclocal/types.ac146
-rw-r--r--storage/bdb/dist/aclocal_java/ac_check_class.ac107
-rw-r--r--storage/bdb/dist/aclocal_java/ac_check_classpath.ac23
-rw-r--r--storage/bdb/dist/aclocal_java/ac_check_junit.ac54
-rw-r--r--storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac26
-rw-r--r--storage/bdb/dist/aclocal_java/ac_java_options.ac32
-rw-r--r--storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac112
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_jar.ac36
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_java.ac77
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_java_works.ac97
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_javac.ac43
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac35
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac37
-rw-r--r--storage/bdb/dist/aclocal_java/ac_prog_javah.ac26
-rw-r--r--storage/bdb/dist/aclocal_java/ac_try_compile_java.ac39
-rw-r--r--storage/bdb/dist/aclocal_java/ac_try_run_javac.ac40
-rw-r--r--storage/bdb/dist/buildrel109
-rwxr-xr-xstorage/bdb/dist/config.guess1354
-rwxr-xr-xstorage/bdb/dist/config.sub1460
-rw-r--r--storage/bdb/dist/configure.ac611
-rw-r--r--storage/bdb/dist/db.ecd.in64
-rw-r--r--storage/bdb/dist/db.spec.in52
-rw-r--r--storage/bdb/dist/gen_inc.awk73
-rw-r--r--storage/bdb/dist/gen_rec.awk844
-rw-r--r--storage/bdb/dist/gen_rpc.awk1214
-rwxr-xr-xstorage/bdb/dist/install-sh251
-rw-r--r--storage/bdb/dist/ltmain.sh4999
-rw-r--r--storage/bdb/dist/pubdef.in350
-rw-r--r--storage/bdb/dist/s_all23
-rwxr-xr-xstorage/bdb/dist/s_config45
-rw-r--r--storage/bdb/dist/s_crypto59
-rw-r--r--storage/bdb/dist/s_dir42
-rwxr-xr-xstorage/bdb/dist/s_include160
-rwxr-xr-xstorage/bdb/dist/s_java273
-rwxr-xr-xstorage/bdb/dist/s_javah55
-rwxr-xr-xstorage/bdb/dist/s_perm49
-rwxr-xr-xstorage/bdb/dist/s_readme23
-rwxr-xr-xstorage/bdb/dist/s_recover69
-rw-r--r--storage/bdb/dist/s_rpc134
-rwxr-xr-xstorage/bdb/dist/s_symlink59
-rwxr-xr-xstorage/bdb/dist/s_tags62
-rwxr-xr-xstorage/bdb/dist/s_test92
-rw-r--r--storage/bdb/dist/s_vxworks324
-rwxr-xr-xstorage/bdb/dist/s_win32108
-rw-r--r--storage/bdb/dist/s_win32_dsp109
-rw-r--r--storage/bdb/dist/srcfiles.in332
-rw-r--r--storage/bdb/dist/template/rec_ctemp62
-rw-r--r--storage/bdb/dist/vx_2.0/BerkeleyDB.wpj251
-rw-r--r--storage/bdb/dist/vx_2.0/wpj.in160
-rw-r--r--storage/bdb/dist/vx_3.1/Makefile.custom51
-rw-r--r--storage/bdb/dist/vx_3.1/cdf.112
-rw-r--r--storage/bdb/dist/vx_3.1/cdf.29
-rw-r--r--storage/bdb/dist/vx_3.1/cdf.32
-rw-r--r--storage/bdb/dist/vx_3.1/component.cdf30
-rw-r--r--storage/bdb/dist/vx_3.1/component.wpj475
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.122
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.2130
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.3128
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.4135
-rw-r--r--storage/bdb/dist/vx_3.1/wpj.522
-rwxr-xr-xstorage/bdb/dist/vx_buildcd119
-rw-r--r--storage/bdb/dist/vx_config.in381
-rw-r--r--storage/bdb/dist/vx_setup/CONFIG.in10
-rw-r--r--storage/bdb/dist/vx_setup/LICENSE.TXT3
-rw-r--r--storage/bdb/dist/vx_setup/MESSAGES.TCL651
-rw-r--r--storage/bdb/dist/vx_setup/README.in7
-rw-r--r--storage/bdb/dist/vx_setup/SETUP.BMPbin0 -> 187962 bytes
-rw-r--r--storage/bdb/dist/vx_setup/vx_allfile.in5
-rw-r--r--storage/bdb/dist/vx_setup/vx_demofile.in3
-rw-r--r--storage/bdb/dist/vx_setup/vx_setup.in13
-rw-r--r--storage/bdb/dist/win_config.in439
-rw-r--r--storage/bdb/dist/win_exports.in134
-rw-r--r--storage/bdb/env/db_salloc.c338
-rw-r--r--storage/bdb/env/db_shash.c125
-rw-r--r--storage/bdb/env/env_file.c166
-rw-r--r--storage/bdb/env/env_method.c643
-rw-r--r--storage/bdb/env/env_method.c.b643
-rw-r--r--storage/bdb/env/env_open.c1191
-rw-r--r--storage/bdb/env/env_recover.c790
-rw-r--r--storage/bdb/env/env_region.c1256
-rw-r--r--storage/bdb/fileops/fileops.src111
-rw-r--r--storage/bdb/fileops/fop_basic.c275
-rw-r--r--storage/bdb/fileops/fop_rec.c308
-rw-r--r--storage/bdb/fileops/fop_util.c928
-rw-r--r--storage/bdb/hash/hash.c2062
-rw-r--r--storage/bdb/hash/hash.src266
-rw-r--r--storage/bdb/hash/hash_conv.c116
-rw-r--r--storage/bdb/hash/hash_dup.c891
-rw-r--r--storage/bdb/hash/hash_func.c245
-rw-r--r--storage/bdb/hash/hash_meta.c125
-rw-r--r--storage/bdb/hash/hash_method.c126
-rw-r--r--storage/bdb/hash/hash_open.c558
-rw-r--r--storage/bdb/hash/hash_page.c1862
-rw-r--r--storage/bdb/hash/hash_rec.c1156
-rw-r--r--storage/bdb/hash/hash_reclaim.c111
-rw-r--r--storage/bdb/hash/hash_stat.c372
-rw-r--r--storage/bdb/hash/hash_upgrade.c266
-rw-r--r--storage/bdb/hash/hash_verify.c1079
-rw-r--r--storage/bdb/hmac/hmac.c207
-rw-r--r--storage/bdb/hmac/sha1.c294
-rw-r--r--storage/bdb/hsearch/hsearch.c160
-rw-r--r--storage/bdb/libdb_java/checkapi.prl134
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_Db.h598
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h581
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbLock.h21
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h37
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h29
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h61
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h22
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_Dbc.h77
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_Dbt.h37
-rw-r--r--storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h95
-rw-r--r--storage/bdb/libdb_java/java_Db.c982
-rw-r--r--storage/bdb/libdb_java/java_DbEnv.c1450
-rw-r--r--storage/bdb/libdb_java/java_DbLock.c30
-rw-r--r--storage/bdb/libdb_java/java_DbLogc.c110
-rw-r--r--storage/bdb/libdb_java/java_DbLsn.c43
-rw-r--r--storage/bdb/libdb_java/java_DbTxn.c67
-rw-r--r--storage/bdb/libdb_java/java_DbUtil.c27
-rw-r--r--storage/bdb/libdb_java/java_DbXAResource.c288
-rw-r--r--storage/bdb/libdb_java/java_Dbc.c278
-rw-r--r--storage/bdb/libdb_java/java_Dbt.c59
-rw-r--r--storage/bdb/libdb_java/java_info.c1125
-rw-r--r--storage/bdb/libdb_java/java_info.h221
-rw-r--r--storage/bdb/libdb_java/java_locked.c321
-rw-r--r--storage/bdb/libdb_java/java_locked.h82
-rw-r--r--storage/bdb/libdb_java/java_util.c890
-rw-r--r--storage/bdb/libdb_java/java_util.h441
-rw-r--r--storage/bdb/lock/Design301
-rw-r--r--storage/bdb/lock/lock.c1874
-rw-r--r--storage/bdb/lock/lock_deadlock.c886
-rw-r--r--storage/bdb/lock/lock_method.c275
-rw-r--r--storage/bdb/lock/lock_region.c417
-rw-r--r--storage/bdb/lock/lock_stat.c398
-rw-r--r--storage/bdb/lock/lock_util.c138
-rw-r--r--storage/bdb/log/log.c1084
-rw-r--r--storage/bdb/log/log_archive.c486
-rw-r--r--storage/bdb/log/log_compare.c36
-rw-r--r--storage/bdb/log/log_get.c1058
-rw-r--r--storage/bdb/log/log_method.c188
-rw-r--r--storage/bdb/log/log_put.c1250
-rw-r--r--storage/bdb/mp/mp_alloc.c442
-rw-r--r--storage/bdb/mp/mp_bh.c646
-rw-r--r--storage/bdb/mp/mp_fget.c654
-rw-r--r--storage/bdb/mp/mp_fopen.c1018
-rw-r--r--storage/bdb/mp/mp_fput.c202
-rw-r--r--storage/bdb/mp/mp_fset.c89
-rw-r--r--storage/bdb/mp/mp_method.c156
-rw-r--r--storage/bdb/mp/mp_region.c466
-rw-r--r--storage/bdb/mp/mp_register.c76
-rw-r--r--storage/bdb/mp/mp_stat.c491
-rw-r--r--storage/bdb/mp/mp_sync.c627
-rw-r--r--storage/bdb/mp/mp_trickle.c83
-rw-r--r--storage/bdb/mutex/README108
-rw-r--r--storage/bdb/mutex/mut_fcntl.c184
-rw-r--r--storage/bdb/mutex/mut_pthread.c361
-rw-r--r--storage/bdb/mutex/mut_tas.c199
-rw-r--r--storage/bdb/mutex/mut_win32.c257
-rw-r--r--storage/bdb/mutex/mutex.c395
-rw-r--r--storage/bdb/mutex/tm.c627
-rw-r--r--storage/bdb/mutex/uts4_cc.s27
-rw-r--r--storage/bdb/os/os_abs.c31
-rw-r--r--storage/bdb/os/os_alloc.c458
-rw-r--r--storage/bdb/os/os_clock.c92
-rw-r--r--storage/bdb/os/os_config.c31
-rw-r--r--storage/bdb/os/os_dir.c108
-rw-r--r--storage/bdb/os/os_errno.c64
-rw-r--r--storage/bdb/os/os_fid.c148
-rw-r--r--storage/bdb/os/os_fsync.c89
-rw-r--r--storage/bdb/os/os_handle.c185
-rw-r--r--storage/bdb/os/os_id.c47
-rw-r--r--storage/bdb/os/os_map.c443
-rw-r--r--storage/bdb/os/os_method.c234
-rw-r--r--storage/bdb/os/os_oflags.c118
-rw-r--r--storage/bdb/os/os_open.c257
-rw-r--r--storage/bdb/os/os_region.c115
-rw-r--r--storage/bdb/os/os_rename.c47
-rw-r--r--storage/bdb/os/os_root.c36
-rw-r--r--storage/bdb/os/os_rpath.c69
-rw-r--r--storage/bdb/os/os_rw.c148
-rw-r--r--storage/bdb/os/os_seek.c77
-rw-r--r--storage/bdb/os/os_sleep.c80
-rw-r--r--storage/bdb/os/os_spin.c113
-rw-r--r--storage/bdb/os/os_stat.c119
-rw-r--r--storage/bdb/os/os_tmpdir.c121
-rw-r--r--storage/bdb/os/os_unlink.c109
-rw-r--r--storage/bdb/os_vxworks/os_vx_abs.c45
-rw-r--r--storage/bdb/os_vxworks/os_vx_config.c31
-rw-r--r--storage/bdb/os_vxworks/os_vx_map.c441
-rw-r--r--storage/bdb/os_win32/os_abs.c33
-rw-r--r--storage/bdb/os_win32/os_clock.c37
-rw-r--r--storage/bdb/os_win32/os_config.c29
-rw-r--r--storage/bdb/os_win32/os_dir.c86
-rw-r--r--storage/bdb/os_win32/os_errno.c145
-rw-r--r--storage/bdb/os_win32/os_fid.c143
-rw-r--r--storage/bdb/os_win32/os_fsync.c59
-rw-r--r--storage/bdb/os_win32/os_handle.c126
-rw-r--r--storage/bdb/os_win32/os_map.c338
-rw-r--r--storage/bdb/os_win32/os_open.c217
-rw-r--r--storage/bdb/os_win32/os_rename.c77
-rw-r--r--storage/bdb/os_win32/os_rw.c182
-rw-r--r--storage/bdb/os_win32/os_seek.c88
-rw-r--r--storage/bdb/os_win32/os_sleep.c40
-rw-r--r--storage/bdb/os_win32/os_spin.c59
-rw-r--r--storage/bdb/os_win32/os_stat.c100
-rw-r--r--storage/bdb/os_win32/os_type.c36
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm1506
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod1792
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P1559
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs3643
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm8
-rw-r--r--storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm8
-rw-r--r--storage/bdb/perl/BerkeleyDB/Changes167
-rw-r--r--storage/bdb/perl/BerkeleyDB/MANIFEST56
-rw-r--r--storage/bdb/perl/BerkeleyDB/Makefile.PL123
-rw-r--r--storage/bdb/perl/BerkeleyDB/README484
-rw-r--r--storage/bdb/perl/BerkeleyDB/Todo57
-rw-r--r--storage/bdb/perl/BerkeleyDB/config.in43
-rw-r--r--storage/bdb/perl/BerkeleyDB/constants.h4046
-rw-r--r--storage/bdb/perl/BerkeleyDB/constants.xs87
-rwxr-xr-xstorage/bdb/perl/BerkeleyDB/dbinfo112
-rw-r--r--storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl1
-rw-r--r--storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl1
-rw-r--r--storage/bdb/perl/BerkeleyDB/hints/solaris.pl1
-rw-r--r--storage/bdb/perl/BerkeleyDB/mkconsts770
-rwxr-xr-xstorage/bdb/perl/BerkeleyDB/mkpod146
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.00444
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_01217
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_02217
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_03223
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_04209
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.004_05209
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.005209
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.005_01209
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.005_02264
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.005_03250
-rw-r--r--storage/bdb/perl/BerkeleyDB/patches/5.6.0294
-rw-r--r--storage/bdb/perl/BerkeleyDB/ppport.h329
-rw-r--r--storage/bdb/perl/BerkeleyDB/scan229
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/btree.t931
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/destroy.t105
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/env.t217
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/examples.t401
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/examples.t.T415
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/examples3.t132
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/examples3.t.T136
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/filter.t217
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/hash.t728
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/join.t225
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/mldbm.t161
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/queue.t763
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/recno.t913
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/strict.t174
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/subdb.t243
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/txn.t320
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/unknown.t176
-rw-r--r--storage/bdb/perl/BerkeleyDB/t/util.pm220
-rw-r--r--storage/bdb/perl/BerkeleyDB/typemap275
-rw-r--r--storage/bdb/perl/DB_File/Changes434
-rw-r--r--storage/bdb/perl/DB_File/DB_File.pm2291
-rw-r--r--storage/bdb/perl/DB_File/DB_File.xs1951
-rw-r--r--storage/bdb/perl/DB_File/DB_File_BS6
-rw-r--r--storage/bdb/perl/DB_File/MANIFEST30
-rw-r--r--storage/bdb/perl/DB_File/Makefile.PL330
-rw-r--r--storage/bdb/perl/DB_File/README458
-rw-r--r--storage/bdb/perl/DB_File/config.in97
-rw-r--r--storage/bdb/perl/DB_File/dbinfo112
-rw-r--r--storage/bdb/perl/DB_File/fallback.h455
-rw-r--r--storage/bdb/perl/DB_File/fallback.xs88
-rw-r--r--storage/bdb/perl/DB_File/hints/dynixptx.pl3
-rw-r--r--storage/bdb/perl/DB_File/hints/sco.pl2
-rw-r--r--storage/bdb/perl/DB_File/patches/5.00444
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_01217
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_02217
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_03223
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_04209
-rw-r--r--storage/bdb/perl/DB_File/patches/5.004_05209
-rw-r--r--storage/bdb/perl/DB_File/patches/5.005209
-rw-r--r--storage/bdb/perl/DB_File/patches/5.005_01209
-rw-r--r--storage/bdb/perl/DB_File/patches/5.005_02264
-rw-r--r--storage/bdb/perl/DB_File/patches/5.005_03250
-rw-r--r--storage/bdb/perl/DB_File/patches/5.6.0294
-rw-r--r--storage/bdb/perl/DB_File/ppport.h329
-rw-r--r--storage/bdb/perl/DB_File/t/db-btree.t1489
-rw-r--r--storage/bdb/perl/DB_File/t/db-hash.t981
-rw-r--r--storage/bdb/perl/DB_File/t/db-recno.t1428
-rw-r--r--storage/bdb/perl/DB_File/typemap46
-rw-r--r--storage/bdb/perl/DB_File/version.c82
-rw-r--r--storage/bdb/qam/qam.c1615
-rw-r--r--storage/bdb/qam/qam.src101
-rw-r--r--storage/bdb/qam/qam_conv.c84
-rw-r--r--storage/bdb/qam/qam_files.c642
-rw-r--r--storage/bdb/qam/qam_method.c413
-rw-r--r--storage/bdb/qam/qam_open.c331
-rw-r--r--storage/bdb/qam/qam_rec.c568
-rw-r--r--storage/bdb/qam/qam_stat.c203
-rw-r--r--storage/bdb/qam/qam_upgrade.c108
-rw-r--r--storage/bdb/qam/qam_verify.c200
-rw-r--r--storage/bdb/rep/rep_method.c1144
-rw-r--r--storage/bdb/rep/rep_record.c1513
-rw-r--r--storage/bdb/rep/rep_region.c187
-rw-r--r--storage/bdb/rep/rep_util.c867
-rw-r--r--storage/bdb/rpc_client/client.c464
-rw-r--r--storage/bdb/rpc_client/gen_client_ret.c824
-rw-r--r--storage/bdb/rpc_server/c/db_server_proc.c.in2500
-rw-r--r--storage/bdb/rpc_server/c/db_server_util.c815
-rw-r--r--storage/bdb/rpc_server/clsrv.html453
-rw-r--r--storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp2200
-rw-r--r--storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp746
-rw-r--r--storage/bdb/rpc_server/java/DbDispatcher.java590
-rw-r--r--storage/bdb/rpc_server/java/DbServer.java301
-rw-r--r--storage/bdb/rpc_server/java/FreeList.java102
-rw-r--r--storage/bdb/rpc_server/java/LocalIterator.java23
-rw-r--r--storage/bdb/rpc_server/java/README24
-rw-r--r--storage/bdb/rpc_server/java/RpcDb.java694
-rw-r--r--storage/bdb/rpc_server/java/RpcDbEnv.java269
-rw-r--r--storage/bdb/rpc_server/java/RpcDbTxn.java123
-rw-r--r--storage/bdb/rpc_server/java/RpcDbc.java238
-rw-r--r--storage/bdb/rpc_server/java/Timer.java22
-rw-r--r--storage/bdb/rpc_server/java/gen/DbServerStub.java495
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_associate_msg.java41
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_associate_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_close_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_close_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_create_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_create_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_cursor_msg.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_cursor_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_del_msg.java53
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_del_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_flags_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_flags_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_get_msg.java68
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_get_reply.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_join_msg.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_join_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_key_range_msg.java53
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_key_range_reply.java41
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_lorder_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_lorder_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_open_msg.java50
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_open_reply.java44
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_pget_msg.java83
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_pget_reply.java41
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_put_msg.java68
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_put_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_len_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_len_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_remove_msg.java41
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_remove_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_rename_msg.java44
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_rename_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_stat_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_stat_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_sync_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_sync_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_truncate_msg.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__db_truncate_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_close_msg.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_close_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_count_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_count_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_del_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_del_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_get_msg.java65
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_get_reply.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java80
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java41
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_put_msg.java65
-rw-r--r--storage/bdb/rpc_server/java/gen/__dbc_put_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java41
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_close_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_close_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_create_msg.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_create_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java44
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java47
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_flags_msg.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_flags_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_open_msg.java41
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_open_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_remove_msg.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__env_remove_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_abort_msg.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_abort_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_begin_msg.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_begin_reply.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_commit_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_commit_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_discard_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_discard_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java35
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java32
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_recover_msg.java38
-rw-r--r--storage/bdb/rpc_server/java/gen/__txn_recover_reply.java41
-rw-r--r--storage/bdb/rpc_server/java/gen/db_server.java67
-rw-r--r--storage/bdb/rpc_server/java/jrpcgen.jarbin0 -> 57125 bytes
-rw-r--r--storage/bdb/rpc_server/java/oncrpc.jarbin0 -> 84122 bytes
-rw-r--r--storage/bdb/rpc_server/java/s_jrpcgen3
-rw-r--r--storage/bdb/rpc_server/rpc.src718
-rw-r--r--storage/bdb/tcl/docs/db.html263
-rw-r--r--storage/bdb/tcl/docs/env.html354
-rw-r--r--storage/bdb/tcl/docs/historic.html169
-rw-r--r--storage/bdb/tcl/docs/index.html51
-rw-r--r--storage/bdb/tcl/docs/library.html27
-rw-r--r--storage/bdb/tcl/docs/lock.html207
-rw-r--r--storage/bdb/tcl/docs/log.html124
-rw-r--r--storage/bdb/tcl/docs/mpool.html190
-rw-r--r--storage/bdb/tcl/docs/rep.html51
-rw-r--r--storage/bdb/tcl/docs/test.html150
-rw-r--r--storage/bdb/tcl/docs/txn.html67
-rw-r--r--storage/bdb/tcl/tcl_compat.c746
-rw-r--r--storage/bdb/tcl/tcl_db.c2421
-rw-r--r--storage/bdb/tcl/tcl_db_pkg.c3117
-rw-r--r--storage/bdb/tcl/tcl_dbcursor.c924
-rw-r--r--storage/bdb/tcl/tcl_env.c1310
-rw-r--r--storage/bdb/tcl/tcl_internal.c717
-rw-r--r--storage/bdb/tcl/tcl_lock.c739
-rw-r--r--storage/bdb/tcl/tcl_log.c610
-rw-r--r--storage/bdb/tcl/tcl_mp.c864
-rw-r--r--storage/bdb/tcl/tcl_rep.c405
-rw-r--r--storage/bdb/tcl/tcl_txn.c657
-rw-r--r--storage/bdb/tcl/tcl_util.c381
-rw-r--r--storage/bdb/test/archive.tcl230
-rw-r--r--storage/bdb/test/bigfile001.tcl85
-rw-r--r--storage/bdb/test/bigfile002.tcl45
-rw-r--r--storage/bdb/test/byteorder.tcl34
-rw-r--r--storage/bdb/test/conscript.tcl123
-rw-r--r--storage/bdb/test/dbm.tcl128
-rw-r--r--storage/bdb/test/dbscript.tcl357
-rw-r--r--storage/bdb/test/ddoyscript.tcl172
-rw-r--r--storage/bdb/test/ddscript.tcl44
-rw-r--r--storage/bdb/test/dead001.tcl88
-rw-r--r--storage/bdb/test/dead002.tcl75
-rw-r--r--storage/bdb/test/dead003.tcl98
-rw-r--r--storage/bdb/test/dead004.tcl108
-rw-r--r--storage/bdb/test/dead005.tcl87
-rw-r--r--storage/bdb/test/dead006.tcl16
-rw-r--r--storage/bdb/test/dead007.tcl34
-rw-r--r--storage/bdb/test/env001.tcl154
-rw-r--r--storage/bdb/test/env002.tcl156
-rw-r--r--storage/bdb/test/env003.tcl149
-rw-r--r--storage/bdb/test/env004.tcl103
-rw-r--r--storage/bdb/test/env005.tcl53
-rw-r--r--storage/bdb/test/env006.tcl42
-rw-r--r--storage/bdb/test/env007.tcl223
-rw-r--r--storage/bdb/test/env008.tcl73
-rw-r--r--storage/bdb/test/env009.tcl57
-rw-r--r--storage/bdb/test/env010.tcl49
-rw-r--r--storage/bdb/test/env011.tcl39
-rw-r--r--storage/bdb/test/hsearch.tcl51
-rw-r--r--storage/bdb/test/join.tcl455
-rw-r--r--storage/bdb/test/lock001.tcl122
-rw-r--r--storage/bdb/test/lock002.tcl157
-rw-r--r--storage/bdb/test/lock003.tcl99
-rw-r--r--storage/bdb/test/lock004.tcl29
-rw-r--r--storage/bdb/test/lock005.tcl177
-rw-r--r--storage/bdb/test/lockscript.tcl117
-rw-r--r--storage/bdb/test/log001.tcl120
-rw-r--r--storage/bdb/test/log002.tcl85
-rw-r--r--storage/bdb/test/log003.tcl118
-rw-r--r--storage/bdb/test/log004.tcl46
-rw-r--r--storage/bdb/test/log005.tcl89
-rw-r--r--storage/bdb/test/logtrack.tcl137
-rw-r--r--storage/bdb/test/mdbscript.tcl384
-rw-r--r--storage/bdb/test/memp001.tcl199
-rw-r--r--storage/bdb/test/memp002.tcl62
-rw-r--r--storage/bdb/test/memp003.tcl153
-rw-r--r--storage/bdb/test/mpoolscript.tcl171
-rw-r--r--storage/bdb/test/mutex001.tcl51
-rw-r--r--storage/bdb/test/mutex002.tcl94
-rw-r--r--storage/bdb/test/mutex003.tcl52
-rw-r--r--storage/bdb/test/mutexscript.tcl91
-rw-r--r--storage/bdb/test/ndbm.tcl144
-rw-r--r--storage/bdb/test/parallel.tcl295
-rw-r--r--storage/bdb/test/recd001.tcl242
-rw-r--r--storage/bdb/test/recd002.tcl103
-rw-r--r--storage/bdb/test/recd003.tcl119
-rw-r--r--storage/bdb/test/recd004.tcl95
-rw-r--r--storage/bdb/test/recd005.tcl230
-rw-r--r--storage/bdb/test/recd006.tcl262
-rw-r--r--storage/bdb/test/recd007.tcl886
-rw-r--r--storage/bdb/test/recd008.tcl227
-rw-r--r--storage/bdb/test/recd009.tcl180
-rw-r--r--storage/bdb/test/recd010.tcl257
-rw-r--r--storage/bdb/test/recd011.tcl116
-rw-r--r--storage/bdb/test/recd012.tcl432
-rw-r--r--storage/bdb/test/recd013.tcl287
-rw-r--r--storage/bdb/test/recd014.tcl445
-rw-r--r--storage/bdb/test/recd015.tcl160
-rw-r--r--storage/bdb/test/recd016.tcl183
-rw-r--r--storage/bdb/test/recd017.tcl151
-rw-r--r--storage/bdb/test/recd018.tcl110
-rw-r--r--storage/bdb/test/recd019.tcl121
-rw-r--r--storage/bdb/test/recd020.tcl180
-rw-r--r--storage/bdb/test/recd15scr.tcl74
-rw-r--r--storage/bdb/test/recdscript.tcl37
-rw-r--r--storage/bdb/test/rep001.tcl249
-rw-r--r--storage/bdb/test/rep002.tcl278
-rw-r--r--storage/bdb/test/rep003.tcl221
-rw-r--r--storage/bdb/test/rep004.tcl198
-rw-r--r--storage/bdb/test/rep005.tcl225
-rw-r--r--storage/bdb/test/reputils.tcl659
-rw-r--r--storage/bdb/test/rpc001.tcl449
-rw-r--r--storage/bdb/test/rpc002.tcl143
-rw-r--r--storage/bdb/test/rpc003.tcl166
-rw-r--r--storage/bdb/test/rpc004.tcl76
-rw-r--r--storage/bdb/test/rpc005.tcl137
-rw-r--r--storage/bdb/test/rsrc001.tcl221
-rw-r--r--storage/bdb/test/rsrc002.tcl66
-rw-r--r--storage/bdb/test/rsrc003.tcl173
-rw-r--r--storage/bdb/test/rsrc004.tcl52
-rw-r--r--storage/bdb/test/scr001/chk.code37
-rw-r--r--storage/bdb/test/scr002/chk.def64
-rw-r--r--storage/bdb/test/scr003/chk.define77
-rw-r--r--storage/bdb/test/scr004/chk.javafiles31
-rw-r--r--storage/bdb/test/scr005/chk.nl112
-rw-r--r--storage/bdb/test/scr006/chk.offt36
-rw-r--r--storage/bdb/test/scr007/chk.proto45
-rw-r--r--storage/bdb/test/scr008/chk.pubdef179
-rw-r--r--storage/bdb/test/scr009/chk.srcfiles39
-rw-r--r--storage/bdb/test/scr010/chk.str31
-rw-r--r--storage/bdb/test/scr010/spell.ok825
-rw-r--r--storage/bdb/test/scr011/chk.tags41
-rw-r--r--storage/bdb/test/scr012/chk.vx_code68
-rw-r--r--storage/bdb/test/scr013/chk.stats114
-rw-r--r--storage/bdb/test/scr014/chk.err34
-rw-r--r--storage/bdb/test/scr015/README36
-rw-r--r--storage/bdb/test/scr015/TestConstruct01.cpp330
-rw-r--r--storage/bdb/test/scr015/TestConstruct01.testerr4
-rw-r--r--storage/bdb/test/scr015/TestConstruct01.testout27
-rw-r--r--storage/bdb/test/scr015/TestExceptInclude.cpp27
-rw-r--r--storage/bdb/test/scr015/TestGetSetMethods.cpp91
-rw-r--r--storage/bdb/test/scr015/TestKeyRange.cpp171
-rw-r--r--storage/bdb/test/scr015/TestKeyRange.testin8
-rw-r--r--storage/bdb/test/scr015/TestKeyRange.testout19
-rw-r--r--storage/bdb/test/scr015/TestLogc.cpp101
-rw-r--r--storage/bdb/test/scr015/TestLogc.testout1
-rw-r--r--storage/bdb/test/scr015/TestSimpleAccess.cpp67
-rw-r--r--storage/bdb/test/scr015/TestSimpleAccess.testout3
-rw-r--r--storage/bdb/test/scr015/TestTruncate.cpp84
-rw-r--r--storage/bdb/test/scr015/TestTruncate.testout6
-rw-r--r--storage/bdb/test/scr015/chk.cxxtests71
-rw-r--r--storage/bdb/test/scr015/ignore4
-rw-r--r--storage/bdb/test/scr015/testall32
-rw-r--r--storage/bdb/test/scr015/testone122
-rw-r--r--storage/bdb/test/scr016/CallbackTest.java83
-rw-r--r--storage/bdb/test/scr016/CallbackTest.testout60
-rw-r--r--storage/bdb/test/scr016/README37
-rw-r--r--storage/bdb/test/scr016/TestAppendRecno.java258
-rw-r--r--storage/bdb/test/scr016/TestAppendRecno.testout82
-rw-r--r--storage/bdb/test/scr016/TestAssociate.java333
-rw-r--r--storage/bdb/test/scr016/TestAssociate.testout30
-rw-r--r--storage/bdb/test/scr016/TestClosedDb.java62
-rw-r--r--storage/bdb/test/scr016/TestClosedDb.testout2
-rw-r--r--storage/bdb/test/scr016/TestConstruct01.java474
-rw-r--r--storage/bdb/test/scr016/TestConstruct01.testerr0
-rw-r--r--storage/bdb/test/scr016/TestConstruct01.testout3
-rw-r--r--storage/bdb/test/scr016/TestConstruct02.java326
-rw-r--r--storage/bdb/test/scr016/TestConstruct02.testout3
-rw-r--r--storage/bdb/test/scr016/TestDbtFlags.java241
-rw-r--r--storage/bdb/test/scr016/TestDbtFlags.testerr54
-rw-r--r--storage/bdb/test/scr016/TestDbtFlags.testout78
-rw-r--r--storage/bdb/test/scr016/TestGetSetMethods.java99
-rw-r--r--storage/bdb/test/scr016/TestKeyRange.java203
-rw-r--r--storage/bdb/test/scr016/TestKeyRange.testout27
-rw-r--r--storage/bdb/test/scr016/TestLockVec.java249
-rw-r--r--storage/bdb/test/scr016/TestLockVec.testout8
-rw-r--r--storage/bdb/test/scr016/TestLogc.java100
-rw-r--r--storage/bdb/test/scr016/TestLogc.testout1
-rw-r--r--storage/bdb/test/scr016/TestOpenEmpty.java189
-rw-r--r--storage/bdb/test/scr016/TestOpenEmpty.testerr2
-rw-r--r--storage/bdb/test/scr016/TestReplication.java289
-rw-r--r--storage/bdb/test/scr016/TestRpcServer.java193
-rw-r--r--storage/bdb/test/scr016/TestSameDbt.java56
-rw-r--r--storage/bdb/test/scr016/TestSameDbt.testout2
-rw-r--r--storage/bdb/test/scr016/TestSimpleAccess.java37
-rw-r--r--storage/bdb/test/scr016/TestSimpleAccess.testout3
-rw-r--r--storage/bdb/test/scr016/TestStat.java57
-rw-r--r--storage/bdb/test/scr016/TestStat.testout11
-rw-r--r--storage/bdb/test/scr016/TestTruncate.java87
-rw-r--r--storage/bdb/test/scr016/TestTruncate.testout6
-rw-r--r--storage/bdb/test/scr016/TestUtil.java57
-rw-r--r--storage/bdb/test/scr016/TestXAServlet.java313
-rw-r--r--storage/bdb/test/scr016/chk.javatests79
-rw-r--r--storage/bdb/test/scr016/ignore22
-rw-r--r--storage/bdb/test/scr016/testall32
-rw-r--r--storage/bdb/test/scr016/testone122
-rw-r--r--storage/bdb/test/scr017/O.BH196
-rw-r--r--storage/bdb/test/scr017/O.R196
-rw-r--r--storage/bdb/test/scr017/chk.db18526
-rw-r--r--storage/bdb/test/scr017/t.c188
-rw-r--r--storage/bdb/test/scr018/chk.comma30
-rw-r--r--storage/bdb/test/scr018/t.c46
-rw-r--r--storage/bdb/test/scr019/chk.include40
-rw-r--r--storage/bdb/test/scr020/chk.inc43
-rw-r--r--storage/bdb/test/scr021/chk.flags97
-rw-r--r--storage/bdb/test/scr022/chk.rr22
-rw-r--r--storage/bdb/test/sdb001.tcl156
-rw-r--r--storage/bdb/test/sdb002.tcl221
-rw-r--r--storage/bdb/test/sdb003.tcl179
-rw-r--r--storage/bdb/test/sdb004.tcl241
-rw-r--r--storage/bdb/test/sdb005.tcl146
-rw-r--r--storage/bdb/test/sdb006.tcl169
-rw-r--r--storage/bdb/test/sdb007.tcl132
-rw-r--r--storage/bdb/test/sdb008.tcl121
-rw-r--r--storage/bdb/test/sdb009.tcl108
-rw-r--r--storage/bdb/test/sdb010.tcl166
-rw-r--r--storage/bdb/test/sdb011.tcl143
-rw-r--r--storage/bdb/test/sdb012.tcl428
-rw-r--r--storage/bdb/test/sdbscript.tcl47
-rw-r--r--storage/bdb/test/sdbtest001.tcl150
-rw-r--r--storage/bdb/test/sdbtest002.tcl174
-rw-r--r--storage/bdb/test/sdbutils.tcl197
-rw-r--r--storage/bdb/test/sec001.tcl205
-rw-r--r--storage/bdb/test/sec002.tcl143
-rw-r--r--storage/bdb/test/shelltest.tcl88
-rw-r--r--storage/bdb/test/si001.tcl116
-rw-r--r--storage/bdb/test/si002.tcl167
-rw-r--r--storage/bdb/test/si003.tcl142
-rw-r--r--storage/bdb/test/si004.tcl194
-rw-r--r--storage/bdb/test/si005.tcl179
-rw-r--r--storage/bdb/test/si006.tcl129
-rw-r--r--storage/bdb/test/sindex.tcl259
-rw-r--r--storage/bdb/test/sysscript.tcl282
-rw-r--r--storage/bdb/test/test.tcl1863
-rw-r--r--storage/bdb/test/test001.tcl247
-rw-r--r--storage/bdb/test/test002.tcl161
-rw-r--r--storage/bdb/test/test003.tcl210
-rw-r--r--storage/bdb/test/test004.tcl169
-rw-r--r--storage/bdb/test/test005.tcl19
-rw-r--r--storage/bdb/test/test006.tcl150
-rw-r--r--storage/bdb/test/test007.tcl19
-rw-r--r--storage/bdb/test/test008.tcl200
-rw-r--r--storage/bdb/test/test009.tcl18
-rw-r--r--storage/bdb/test/test010.tcl176
-rw-r--r--storage/bdb/test/test011.tcl470
-rw-r--r--storage/bdb/test/test012.tcl139
-rw-r--r--storage/bdb/test/test013.tcl241
-rw-r--r--storage/bdb/test/test014.tcl253
-rw-r--r--storage/bdb/test/test015.tcl276
-rw-r--r--storage/bdb/test/test016.tcl207
-rw-r--r--storage/bdb/test/test017.tcl306
-rw-r--r--storage/bdb/test/test018.tcl16
-rw-r--r--storage/bdb/test/test019.tcl131
-rw-r--r--storage/bdb/test/test020.tcl137
-rw-r--r--storage/bdb/test/test021.tcl162
-rw-r--r--storage/bdb/test/test022.tcl62
-rw-r--r--storage/bdb/test/test023.tcl221
-rw-r--r--storage/bdb/test/test024.tcl268
-rw-r--r--storage/bdb/test/test025.tcl146
-rw-r--r--storage/bdb/test/test026.tcl155
-rw-r--r--storage/bdb/test/test027.tcl17
-rw-r--r--storage/bdb/test/test028.tcl222
-rw-r--r--storage/bdb/test/test029.tcl245
-rw-r--r--storage/bdb/test/test030.tcl231
-rw-r--r--storage/bdb/test/test031.tcl230
-rw-r--r--storage/bdb/test/test032.tcl231
-rw-r--r--storage/bdb/test/test033.tcl176
-rw-r--r--storage/bdb/test/test034.tcl17
-rw-r--r--storage/bdb/test/test035.tcl16
-rw-r--r--storage/bdb/test/test036.tcl173
-rw-r--r--storage/bdb/test/test037.tcl196
-rw-r--r--storage/bdb/test/test038.tcl227
-rw-r--r--storage/bdb/test/test039.tcl211
-rw-r--r--storage/bdb/test/test040.tcl17
-rw-r--r--storage/bdb/test/test041.tcl17
-rw-r--r--storage/bdb/test/test042.tcl181
-rw-r--r--storage/bdb/test/test043.tcl192
-rw-r--r--storage/bdb/test/test044.tcl250
-rw-r--r--storage/bdb/test/test045.tcl123
-rw-r--r--storage/bdb/test/test046.tcl813
-rw-r--r--storage/bdb/test/test047.tcl258
-rw-r--r--storage/bdb/test/test048.tcl170
-rw-r--r--storage/bdb/test/test049.tcl184
-rw-r--r--storage/bdb/test/test050.tcl221
-rw-r--r--storage/bdb/test/test051.tcl219
-rw-r--r--storage/bdb/test/test052.tcl276
-rw-r--r--storage/bdb/test/test053.tcl225
-rw-r--r--storage/bdb/test/test054.tcl461
-rw-r--r--storage/bdb/test/test055.tcl141
-rw-r--r--storage/bdb/test/test056.tcl169
-rw-r--r--storage/bdb/test/test057.tcl248
-rw-r--r--storage/bdb/test/test058.tcl103
-rw-r--r--storage/bdb/test/test059.tcl150
-rw-r--r--storage/bdb/test/test060.tcl60
-rw-r--r--storage/bdb/test/test061.tcl226
-rw-r--r--storage/bdb/test/test062.tcl153
-rw-r--r--storage/bdb/test/test063.tcl174
-rw-r--r--storage/bdb/test/test064.tcl69
-rw-r--r--storage/bdb/test/test065.tcl199
-rw-r--r--storage/bdb/test/test066.tcl99
-rw-r--r--storage/bdb/test/test067.tcl155
-rw-r--r--storage/bdb/test/test068.tcl226
-rw-r--r--storage/bdb/test/test069.tcl14
-rw-r--r--storage/bdb/test/test070.tcl142
-rw-r--r--storage/bdb/test/test071.tcl16
-rw-r--r--storage/bdb/test/test072.tcl252
-rw-r--r--storage/bdb/test/test073.tcl290
-rw-r--r--storage/bdb/test/test074.tcl271
-rw-r--r--storage/bdb/test/test075.tcl205
-rw-r--r--storage/bdb/test/test076.tcl80
-rw-r--r--storage/bdb/test/test077.tcl93
-rw-r--r--storage/bdb/test/test078.tcl130
-rw-r--r--storage/bdb/test/test079.tcl20
-rw-r--r--storage/bdb/test/test080.tcl126
-rw-r--r--storage/bdb/test/test081.tcl15
-rw-r--r--storage/bdb/test/test082.tcl14
-rw-r--r--storage/bdb/test/test083.tcl162
-rw-r--r--storage/bdb/test/test084.tcl53
-rw-r--r--storage/bdb/test/test085.tcl332
-rw-r--r--storage/bdb/test/test086.tcl166
-rw-r--r--storage/bdb/test/test087.tcl290
-rw-r--r--storage/bdb/test/test088.tcl172
-rw-r--r--storage/bdb/test/test089.tcl180
-rw-r--r--storage/bdb/test/test090.tcl16
-rw-r--r--storage/bdb/test/test091.tcl20
-rw-r--r--storage/bdb/test/test092.tcl241
-rw-r--r--storage/bdb/test/test093.tcl393
-rw-r--r--storage/bdb/test/test094.tcl251
-rw-r--r--storage/bdb/test/test095.tcl296
-rw-r--r--storage/bdb/test/test096.tcl202
-rw-r--r--storage/bdb/test/test097.tcl188
-rw-r--r--storage/bdb/test/test098.tcl91
-rw-r--r--storage/bdb/test/test099.tcl177
-rw-r--r--storage/bdb/test/test100.tcl17
-rw-r--r--storage/bdb/test/test101.tcl17
-rw-r--r--storage/bdb/test/testparams.tcl194
-rw-r--r--storage/bdb/test/testutils.tcl3209
-rw-r--r--storage/bdb/test/txn001.tcl116
-rw-r--r--storage/bdb/test/txn002.tcl91
-rw-r--r--storage/bdb/test/txn003.tcl238
-rw-r--r--storage/bdb/test/txn004.tcl62
-rw-r--r--storage/bdb/test/txn005.tcl75
-rw-r--r--storage/bdb/test/txn006.tcl47
-rw-r--r--storage/bdb/test/txn007.tcl57
-rw-r--r--storage/bdb/test/txn008.tcl32
-rw-r--r--storage/bdb/test/txn009.tcl32
-rw-r--r--storage/bdb/test/txnscript.tcl67
-rw-r--r--storage/bdb/test/update.tcl93
-rw-r--r--storage/bdb/test/upgrade.tcl294
-rw-r--r--storage/bdb/test/wordlist10001
-rw-r--r--storage/bdb/test/wrap.tcl71
-rw-r--r--storage/bdb/txn/txn.c1428
-rw-r--r--storage/bdb/txn/txn.src93
-rw-r--r--storage/bdb/txn/txn_method.c105
-rw-r--r--storage/bdb/txn/txn_rec.c436
-rw-r--r--storage/bdb/txn/txn_recover.c306
-rw-r--r--storage/bdb/txn/txn_region.c374
-rw-r--r--storage/bdb/txn/txn_stat.c102
-rw-r--r--storage/bdb/txn/txn_util.c234
-rw-r--r--storage/bdb/xa/xa.c539
-rw-r--r--storage/bdb/xa/xa_db.c182
-rw-r--r--storage/bdb/xa/xa_map.c167
948 files changed, 268125 insertions, 0 deletions
diff --git a/storage/bdb/LICENSE b/storage/bdb/LICENSE
new file mode 100644
index 00000000000..1cd727bfd98
--- /dev/null
+++ b/storage/bdb/LICENSE
@@ -0,0 +1,102 @@
+/*-
+ * $Id: LICENSE,v 11.9 2002/01/11 15:51:10 bostic Exp $
+ */
+
+The following is the license that applies to this copy of the Berkeley DB
+software. For a license to use the Berkeley DB software under conditions
+other than those described here, or to purchase support for this software,
+please contact Sleepycat Software by email at db@sleepycat.com, or on the
+Web at http://www.sleepycat.com.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+/*
+ * Copyright (c) 1990-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Redistributions in any form must be accompanied by information on
+ * how to obtain complete source code for the DB software and any
+ * accompanying software that uses the DB software. The source code
+ * must either be included in the distribution or be available for no
+ * more than the cost of distribution plus a nominal fee, and must be
+ * freely redistributable under reasonable conditions. For an
+ * executable file, complete source code means the source code for all
+ * modules it contains. It does not include source code for modules or
+ * files that typically accompany the major components of the operating
+ * system on which the executable file runs.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SLEEPYCAT SOFTWARE ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ * NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
diff --git a/storage/bdb/Makefile.in b/storage/bdb/Makefile.in
new file mode 100644
index 00000000000..c83d40ac8b2
--- /dev/null
+++ b/storage/bdb/Makefile.in
@@ -0,0 +1,57 @@
+# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+# Adaptor makefile to translate between what automake expects and what
+# BDB provides (or vice versa).
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+
+# distdir and top_distdir are set by the calling Makefile
+
+bdb_build = build_unix
+files = LICENSE Makefile Makefile.in README
+subdirs = btree build_vxworks build_win32 clib common cxx db dbinc \
+ dbinc_auto db185 db_archive db_checkpoint db_deadlock db_dump \
+ db_dump185 db_load db_printlog db_recover db_stat db_upgrade \
+ db_verify dbm dbreg dist env examples_c examples_cxx fileops hash \
+ hsearch hmac include java libdb_java lock log mp mutex os os_vxworks \
+ os_win32 perl.BerkeleyDB perl.DB_File qam rep rpc_client rpc_server tcl \
+ test txn xa
+
+@SET_MAKE@
+
+all:
+ cd $(bdb_build) && $(MAKE) all
+
+clean:
+ cd $(bdb_build) && $(MAKE) clean
+
+distclean:
+ cd $(bdb_build) && $(MAKE) distclean
+
+# May want to fix this, and MYSQL/configure, to install things
+install dvi check installcheck:
+
+distdir:
+ for s in $(subdirs); do \
+ cp -pr $(srcdir)/$$s $(distdir)/$$s; \
+ done
+ for f in $(files); do \
+ test -f $(distdir)/$$f || cp -p $(srcdir)/$$f $(distdir)/$$f; \
+ done
+ mkdir $(distdir)/$(bdb_build)
+ cp -p $(srcdir)/$(bdb_build)/.IGNORE_ME $(distdir)/$(bdb_build)
diff --git a/storage/bdb/btree/bt_compare.c b/storage/bdb/btree/bt_compare.c
new file mode 100644
index 00000000000..cbe2a1a7170
--- /dev/null
+++ b/storage/bdb/btree/bt_compare.c
@@ -0,0 +1,211 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_compare.c,v 11.17 2002/03/27 04:30:42 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+/*
+ * __bam_cmp --
+ * Compare a key to a given record.
+ *
+ * PUBLIC: int __bam_cmp __P((DB *, const DBT *, PAGE *,
+ * PUBLIC: u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *));
+ */
+int
+__bam_cmp(dbp, dbt, h, indx, func, cmpp)
+ DB *dbp;
+ const DBT *dbt;
+ PAGE *h;
+ u_int32_t indx;
+ int (*func)__P((DB *, const DBT *, const DBT *));
+ int *cmpp;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ DBT pg_dbt;
+
+ /*
+ * Returns:
+ * < 0 if dbt is < page record
+ * = 0 if dbt is = page record
+ * > 0 if dbt is > page record
+ *
+ * !!!
+ * We do not clear the pg_dbt DBT even though it's likely to contain
+ * random bits. That should be okay, because the app's comparison
+ * routine had better not be looking at fields other than data/size.
+ * We don't clear it because we go through this path a lot and it's
+ * expensive.
+ */
+ switch (TYPE(h)) {
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW)
+ bo = (BOVERFLOW *)bk;
+ else {
+ pg_dbt.data = bk->data;
+ pg_dbt.size = bk->len;
+ *cmpp = func(dbp, dbt, &pg_dbt);
+ return (0);
+ }
+ break;
+ case P_IBTREE:
+ /*
+ * The following code guarantees that the left-most key on an
+ * internal page at any place in the tree sorts less than any
+ * user-specified key. The reason is that if we have reached
+ * this internal page, we know the user key must sort greater
+ * than the key we're storing for this page in any internal
+ * pages at levels above us in the tree. It then follows that
+ * any user-specified key cannot sort less than the first page
+ * which we reference, and so there's no reason to call the
+ * comparison routine. While this may save us a comparison
+ * routine call or two, the real reason for this is because
+ * we don't maintain a copy of the smallest key in the tree,
+ * so that we don't have to update all the levels of the tree
+ * should the application store a new smallest key. And, so,
+ * we may not have a key to compare, which makes doing the
+ * comparison difficult and error prone.
+ */
+ if (indx == 0) {
+ *cmpp = 1;
+ return (0);
+ }
+
+ bi = GET_BINTERNAL(dbp, h, indx);
+ if (B_TYPE(bi->type) == B_OVERFLOW)
+ bo = (BOVERFLOW *)(bi->data);
+ else {
+ pg_dbt.data = bi->data;
+ pg_dbt.size = bi->len;
+ *cmpp = func(dbp, dbt, &pg_dbt);
+ return (0);
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+ }
+
+ /*
+ * Overflow.
+ */
+ return (__db_moff(dbp, dbt,
+ bo->pgno, bo->tlen, func == __bam_defcmp ? NULL : func, cmpp));
+}
+
+/*
+ * __bam_defcmp --
+ * Default comparison routine.
+ *
+ * PUBLIC: int __bam_defcmp __P((DB *, const DBT *, const DBT *));
+ */
+int
+__bam_defcmp(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ COMPQUIET(dbp, NULL);
+
+ /*
+ * Returns:
+ * < 0 if a is < b
+ * = 0 if a is = b
+ * > 0 if a is > b
+ *
+ * XXX
+ * If a size_t doesn't fit into a long, or if the difference between
+ * any two characters doesn't fit into an int, this routine can lose.
+ * What we need is a signed integral type that's guaranteed to be at
+ * least as large as a size_t, and there is no such thing.
+ */
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
+/*
+ * __bam_defpfx --
+ * Default prefix routine.
+ *
+ * PUBLIC: size_t __bam_defpfx __P((DB *, const DBT *, const DBT *));
+ */
+size_t
+__bam_defpfx(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ size_t cnt, len;
+ u_int8_t *p1, *p2;
+
+ COMPQUIET(dbp, NULL);
+
+ cnt = 1;
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2, ++cnt)
+ if (*p1 != *p2)
+ return (cnt);
+
+ /*
+ * We know that a->size must be <= b->size, or they wouldn't be
+ * in this order.
+ */
+ return (a->size < b->size ? a->size + 1 : a->size);
+}
diff --git a/storage/bdb/btree/bt_conv.c b/storage/bdb/btree/bt_conv.c
new file mode 100644
index 00000000000..4264b62ffdd
--- /dev/null
+++ b/storage/bdb/btree/bt_conv.c
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_conv.c,v 11.13 2002/08/06 06:11:12 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+
+/*
+ * __bam_pgin --
+ * Convert host-specific page layout from the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __bam_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+ */
+int
+__bam_pgin(dbenv, dummydbp, pg, pp, cookie)
+ DB_ENV *dbenv;
+ DB *dummydbp;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
+ return (0);
+
+ h = pp;
+ return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 1));
+}
+
+/*
+ * __bam_pgout --
+ * Convert host-specific page layout to the host-independent format
+ * stored on disk.
+ *
+ * PUBLIC: int __bam_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+ */
+int
+__bam_pgout(dbenv, dummydbp, pg, pp, cookie)
+ DB_ENV *dbenv;
+ DB *dummydbp;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
+ return (0);
+
+ h = pp;
+ return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 0));
+}
+
+/*
+ * __bam_mswap --
+ * Swap the bytes on the btree metadata page.
+ *
+ * PUBLIC: int __bam_mswap __P((PAGE *));
+ */
+int
+__bam_mswap(pg)
+ PAGE *pg;
+{
+ u_int8_t *p;
+
+ __db_metaswap(pg);
+
+ p = (u_int8_t *)pg + sizeof(DBMETA);
+
+ SWAP32(p); /* maxkey */
+ SWAP32(p); /* minkey */
+ SWAP32(p); /* re_len */
+ SWAP32(p); /* re_pad */
+ SWAP32(p); /* root */
+ p += 92 * sizeof(u_int32_t); /* unused */
+ SWAP32(p); /* crypto_magic */
+
+ return (0);
+}
diff --git a/storage/bdb/btree/bt_curadj.c b/storage/bdb/btree/bt_curadj.c
new file mode 100644
index 00000000000..50d3d422e49
--- /dev/null
+++ b/storage/bdb/btree/bt_curadj.c
@@ -0,0 +1,582 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_curadj.c,v 11.30 2002/07/03 19:03:48 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+static int __bam_opd_cursor __P((DB *, DBC *, db_pgno_t, u_int32_t, u_int32_t));
+
+#ifdef DEBUG
+/*
+ * __bam_cprint --
+ * Display the current internal cursor.
+ *
+ * PUBLIC: void __bam_cprint __P((DBC *));
+ */
+void
+__bam_cprint(dbc)
+    DBC *dbc;
+{
+    BTREE_CURSOR *cp;
+
+    cp = (BTREE_CURSOR *)dbc->internal;
+
+    /* Debug-only dump of the cursor's internal state to stderr. */
+    fprintf(stderr, "\tinternal: ovflsize: %lu", (u_long)cp->ovflsize);
+    /* The recno field is only meaningful for recno cursors. */
+    if (dbc->dbtype == DB_RECNO)
+        fprintf(stderr, " recno: %lu", (u_long)cp->recno);
+    if (F_ISSET(cp, C_DELETED))
+        fprintf(stderr, " (deleted)");
+    fprintf(stderr, "\n");
+}
+#endif
+
+/*
+ * Cursor adjustments are logged if they are for subtransactions. This is
+ * because it's possible for a subtransaction to adjust cursors which will
+ * still be active after the subtransaction aborts, and so which must be
+ * restored to their previous locations. Cursors that can be both affected
+ * by our cursor adjustments and active after our transaction aborts can
+ * only be found in our parent transaction -- cursors in other transactions,
+ * including other child transactions of our parent, must have conflicting
+ * locker IDs, and so cannot be affected by adjustments in this transaction.
+ */
+
+/*
+ * __bam_ca_delete --
+ * Update the cursors when items are deleted and when already deleted
+ * items are overwritten. Return the number of relevant cursors found.
+ *
+ * PUBLIC: int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int));
+ */
+int
+__bam_ca_delete(dbp, pgno, indx, delete)
+    DB *dbp;
+    db_pgno_t pgno;
+    u_int32_t indx;
+    int delete;
+{
+    BTREE_CURSOR *cp;
+    DB *ldbp;
+    DB_ENV *dbenv;
+    DBC *dbc;
+    int count;        /* !!!: Has to contain max number of cursors. */
+
+    dbenv = dbp->dbenv;
+
+    /*
+     * Adjust the cursors.  We have the page write locked, so the
+     * only other cursors that can be pointing at a page are
+     * those in the same thread of control.  Unfortunately, we don't
+     * know that they're using the same DB handle, so traverse
+     * all matching DB handles in the same DB_ENV, then all cursors
+     * on each matching DB handle.
+     *
+     * Each cursor is single-threaded, so we only need to lock the
+     * list of DBs and then the list of cursors in each DB.
+     */
+    MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+    for (count = 0, ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+        ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+        ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+        MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+        for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+            dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+            cp = (BTREE_CURSOR *)dbc->internal;
+            /* Set or clear C_DELETED on every cursor at (pgno, indx). */
+            if (cp->pgno == pgno && cp->indx == indx) {
+                if (delete)
+                    F_SET(cp, C_DELETED);
+                else
+                    F_CLR(cp, C_DELETED);
+                ++count;
+            }
+        }
+        MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+    }
+    MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+    /* Number of cursors that referenced the item. */
+    return (count);
+}
+
+/*
+ * __ram_ca_delete --
+ * Return the number of relevant cursors.
+ *
+ * PUBLIC: int __ram_ca_delete __P((DB *, db_pgno_t));
+ */
+int
+__ram_ca_delete(dbp, root_pgno)
+    DB *dbp;
+    db_pgno_t root_pgno;
+{
+    DB *ldbp;
+    DBC *dbc;
+    DB_ENV *dbenv;
+    int found;
+
+    found = 0;
+    dbenv = dbp->dbenv;
+
+    /*
+     * Review the cursors.  See the comment in __bam_ca_delete().
+     * Stops at the first active cursor whose tree root matches,
+     * so the return value is 0 or 1, not a full count.
+     */
+    MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+    for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+        found == 0 && ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+        ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+        MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+        for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+            found == 0 && dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+            if (dbc->internal->root == root_pgno)
+                found = 1;
+        MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+    }
+    MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+    return (found);
+}
+
+/*
+ * __bam_ca_di --
+ * Adjust the cursors during a delete or insert.
+ *
+ * PUBLIC: int __bam_ca_di __P((DBC *, db_pgno_t, u_int32_t, int));
+ */
+int
+__bam_ca_di(my_dbc, pgno, indx, adjust)
+    DBC *my_dbc;
+    db_pgno_t pgno;
+    u_int32_t indx;
+    int adjust;
+{
+    DB *dbp, *ldbp;
+    DB_ENV *dbenv;
+    DB_LSN lsn;
+    DB_TXN *my_txn;
+    DBC *dbc;
+    DBC_INTERNAL *cp;
+    int found, ret;
+
+    dbp = my_dbc->dbp;
+    dbenv = dbp->dbenv;
+
+    /* Only subtransaction adjustments are logged; see block comment above. */
+    my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+    /*
+     * Adjust the cursors.  See the comment in __bam_ca_delete().
+     */
+    found = 0;
+    MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+    for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+        ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+        ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+        MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+        for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+            dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+            /* Recno cursors track records, not page indices. */
+            if (dbc->dbtype == DB_RECNO)
+                continue;
+            cp = dbc->internal;
+            /* Shift every cursor at or past the insert/delete point. */
+            if (cp->pgno == pgno && cp->indx >= indx) {
+                /* Cursor indices should never be negative. */
+                DB_ASSERT(cp->indx != 0 || adjust > 0);
+
+                cp->indx += adjust;
+                if (my_txn != NULL && dbc->txn != my_txn)
+                    found = 1;
+            }
+        }
+        MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+    }
+    MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+    /* Log the adjustment if a cursor outside our transaction moved. */
+    if (found != 0 && DBC_LOGGING(my_dbc)) {
+        if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
+            &lsn, 0, DB_CA_DI, pgno, 0, 0, adjust, indx, 0)) != 0)
+            return (ret);
+    }
+
+    return (0);
+}
+
+/*
+ * __bam_opd_cursor -- create a new opd cursor.
+ */
+static int
+__bam_opd_cursor(dbp, dbc, first, tpgno, ti)
+    DB *dbp;
+    DBC *dbc;
+    db_pgno_t tpgno;
+    u_int32_t first, ti;
+{
+    BTREE_CURSOR *cp, *orig_cp;
+    DBC *dbc_nopd;
+    int ret;
+
+    orig_cp = (BTREE_CURSOR *)dbc->internal;
+    dbc_nopd = NULL;
+
+    /*
+     * Allocate a new cursor and create the stack.  If duplicates
+     * are sorted, we've just created an off-page duplicate Btree.
+     * If duplicates aren't sorted, we've just created a Recno tree.
+     *
+     * Note that in order to get here at all, there shouldn't be
+     * an old off-page dup cursor--to augment the checking db_c_newopd
+     * will do, assert this.
+     */
+    DB_ASSERT(orig_cp->opd == NULL);
+    /* orig_cp->opd was just asserted NULL; __db_c_newopd rechecks it. */
+    if ((ret = __db_c_newopd(dbc, tpgno, orig_cp->opd, &dbc_nopd)) != 0)
+        return (ret);
+
+    /* Position the new cursor on the target page/index. */
+    cp = (BTREE_CURSOR *)dbc_nopd->internal;
+    cp->pgno = tpgno;
+    cp->indx = ti;
+
+    if (dbp->dup_compare == NULL) {
+        /*
+         * Converting to off-page Recno trees is tricky.  The
+         * record number for the cursor is the index + 1 (to
+         * convert to 1-based record numbers).
+         */
+        cp->recno = ti + 1;
+    }
+
+    /*
+     * Transfer the deleted flag from the top-level cursor to the
+     * created one.
+     */
+    if (F_ISSET(orig_cp, C_DELETED)) {
+        F_SET(cp, C_DELETED);
+        F_CLR(orig_cp, C_DELETED);
+    }
+
+    /* Stack the cursors and reset the initial cursor's index. */
+    orig_cp->opd = dbc_nopd;
+    orig_cp->indx = first;
+    return (0);
+}
+
+/*
+ * __bam_ca_dup --
+ * Adjust the cursors when moving items from a leaf page to a duplicates
+ * page.
+ *
+ * PUBLIC: int __bam_ca_dup __P((DBC *,
+ * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+ */
+int
+__bam_ca_dup(my_dbc, first, fpgno, fi, tpgno, ti)
+    DBC *my_dbc;
+    db_pgno_t fpgno, tpgno;
+    u_int32_t first, fi, ti;
+{
+    BTREE_CURSOR *orig_cp;
+    DB *dbp, *ldbp;
+    DBC *dbc;
+    DB_ENV *dbenv;
+    DB_LSN lsn;
+    DB_TXN *my_txn;
+    int found, ret;
+
+    dbp = my_dbc->dbp;
+    dbenv = dbp->dbenv;
+    /* Only subtransaction adjustments are logged; see block comment above. */
+    my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+    /*
+     * Adjust the cursors.  See the comment in __bam_ca_delete().
+     */
+    found = 0;
+    MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+    for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+        ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+        ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+loop:        MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+        for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+            dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+            /* Find cursors pointing to this record. */
+            orig_cp = (BTREE_CURSOR *)dbc->internal;
+            if (orig_cp->pgno != fpgno || orig_cp->indx != fi)
+                continue;
+
+            /*
+             * Since we rescan the list see if this is already
+             * converted.
+             */
+            if (orig_cp->opd != NULL)
+                continue;
+
+            MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+            if ((ret = __bam_opd_cursor(dbp,
+                dbc, first, tpgno, ti)) != 0) {
+                /*
+                 * BUGFIX: release the DB-list mutex before
+                 * returning.  The original code returned
+                 * here while still holding dblist_mutexp
+                 * (acquired above, released only at the
+                 * bottom of the function), leaking the
+                 * environment-wide mutex on error.
+                 */
+                MUTEX_THREAD_UNLOCK(dbenv,
+                    dbenv->dblist_mutexp);
+                return (ret);
+            }
+            if (my_txn != NULL && dbc->txn != my_txn)
+                found = 1;
+            /* We released the mutex to get a cursor, start over. */
+            goto loop;
+        }
+        MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+    }
+    MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+    /* Log the adjustment if a cursor outside our transaction moved. */
+    if (found != 0 && DBC_LOGGING(my_dbc)) {
+        if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
+            &lsn, 0, DB_CA_DUP, fpgno, tpgno, 0, first, fi, ti)) != 0)
+            return (ret);
+    }
+    return (0);
+}
+
+/*
+ * __bam_ca_undodup --
+ * Adjust the cursors when returning items to a leaf page
+ * from a duplicate page.
+ * Called only during undo processing.
+ *
+ * PUBLIC: int __bam_ca_undodup __P((DB *,
+ * PUBLIC: u_int32_t, db_pgno_t, u_int32_t, u_int32_t));
+ */
+int
+__bam_ca_undodup(dbp, first, fpgno, fi, ti)
+    DB *dbp;
+    db_pgno_t fpgno;
+    u_int32_t first, fi, ti;
+{
+    BTREE_CURSOR *orig_cp;
+    DB *ldbp;
+    DBC *dbc;
+    DB_ENV *dbenv;
+    int ret;
+
+    dbenv = dbp->dbenv;
+
+    /*
+     * Adjust the cursors.  See the comment in __bam_ca_delete().
+     */
+    MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+    for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+        ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+        ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+loop:        MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+        for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+            dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+            orig_cp = (BTREE_CURSOR *)dbc->internal;
+
+            /*
+             * A note on the orig_cp->opd != NULL requirement here:
+             * it's possible that there's a cursor that refers to
+             * the same duplicate set, but which has no opd cursor,
+             * because it refers to a different item and we took
+             * care of it while processing a previous record.
+             */
+            if (orig_cp->pgno != fpgno ||
+                orig_cp->indx != first ||
+                orig_cp->opd == NULL ||
+                ((BTREE_CURSOR *)orig_cp->opd->internal)->indx
+                != ti)
+                continue;
+            MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+            if ((ret = orig_cp->opd->c_close(orig_cp->opd)) != 0) {
+                /*
+                 * BUGFIX: release the DB-list mutex before
+                 * returning.  The original code returned
+                 * here while still holding dblist_mutexp
+                 * (acquired above, released only at the
+                 * bottom of the function), leaking the
+                 * environment-wide mutex on error.
+                 */
+                MUTEX_THREAD_UNLOCK(dbenv,
+                    dbenv->dblist_mutexp);
+                return (ret);
+            }
+            /* The opd cursor is gone; point back at the leaf item. */
+            orig_cp->opd = NULL;
+            orig_cp->indx = fi;
+            /*
+             * We released the mutex to free a cursor,
+             * start over.
+             */
+            goto loop;
+        }
+        MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+    }
+    MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+    return (0);
+}
+
+/*
+ * __bam_ca_rsplit --
+ * Adjust the cursors when doing reverse splits.
+ *
+ * PUBLIC: int __bam_ca_rsplit __P((DBC *, db_pgno_t, db_pgno_t));
+ */
+int
+__bam_ca_rsplit(my_dbc, fpgno, tpgno)
+    DBC* my_dbc;
+    db_pgno_t fpgno, tpgno;
+{
+    DB *dbp, *ldbp;
+    DBC *dbc;
+    DB_ENV *dbenv;
+    DB_LSN lsn;
+    DB_TXN *my_txn;
+    int found, ret;
+
+    dbp = my_dbc->dbp;
+    dbenv = dbp->dbenv;
+    /* Only subtransaction adjustments are logged; see block comment above. */
+    my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+    /*
+     * Adjust the cursors.  See the comment in __bam_ca_delete().
+     */
+    found = 0;
+    MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+    for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+        ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+        ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+        MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+        for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+            dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+            /* Recno cursors track records, not page numbers. */
+            if (dbc->dbtype == DB_RECNO)
+                continue;
+            /* Repoint cursors from the removed page to its parent. */
+            if (dbc->internal->pgno == fpgno) {
+                dbc->internal->pgno = tpgno;
+                if (my_txn != NULL && dbc->txn != my_txn)
+                    found = 1;
+            }
+        }
+        MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+    }
+    MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+    /* Log the adjustment if a cursor outside our transaction moved. */
+    if (found != 0 && DBC_LOGGING(my_dbc)) {
+        if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
+            &lsn, 0, DB_CA_RSPLIT, fpgno, tpgno, 0, 0, 0, 0)) != 0)
+            return (ret);
+    }
+    return (0);
+}
+
+/*
+ * __bam_ca_split --
+ * Adjust the cursors when splitting a page.
+ *
+ * PUBLIC: int __bam_ca_split __P((DBC *,
+ * PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, int));
+ */
+int
+__bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_indx, cleft)
+    DBC *my_dbc;
+    db_pgno_t ppgno, lpgno, rpgno;
+    u_int32_t split_indx;
+    int cleft;
+{
+    DB *dbp, *ldbp;
+    DBC *dbc;
+    DBC_INTERNAL *cp;
+    DB_ENV *dbenv;
+    DB_LSN lsn;
+    DB_TXN *my_txn;
+    int found, ret;
+
+    dbp = my_dbc->dbp;
+    dbenv = dbp->dbenv;
+    /* Only subtransaction adjustments are logged; see block comment above. */
+    my_txn = IS_SUBTRANSACTION(my_dbc->txn) ? my_dbc->txn : NULL;
+
+    /*
+     * Adjust the cursors.  See the comment in __bam_ca_delete().
+     *
+     * If splitting the page that a cursor was on, the cursor has to be
+     * adjusted to point to the same record as before the split.  Most
+     * of the time we don't adjust pointers to the left page, because
+     * we're going to copy its contents back over the original page.  If
+     * the cursor is on the right page, it is decremented by the number of
+     * records split to the left page.
+     */
+    found = 0;
+    MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+    for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+        ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+        ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+        MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+        for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+            dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+            /* Recno cursors track records, not page indices. */
+            if (dbc->dbtype == DB_RECNO)
+                continue;
+            cp = dbc->internal;
+            if (cp->pgno == ppgno) {
+                if (my_txn != NULL && dbc->txn != my_txn)
+                    found = 1;
+                if (cp->indx < split_indx) {
+                    /* cleft: caller created a new left page. */
+                    if (cleft)
+                        cp->pgno = lpgno;
+                } else {
+                    cp->pgno = rpgno;
+                    cp->indx -= split_indx;
+                }
+            }
+        }
+        MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+    }
+    MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+    /* Log the adjustment if a cursor outside our transaction moved. */
+    if (found != 0 && DBC_LOGGING(my_dbc)) {
+        if ((ret = __bam_curadj_log(dbp,
+            my_dbc->txn, &lsn, 0, DB_CA_SPLIT, ppgno, rpgno,
+            cleft ? lpgno : PGNO_INVALID, 0, split_indx, 0)) != 0)
+            return (ret);
+    }
+
+    return (0);
+}
+
+/*
+ * __bam_ca_undosplit --
+ * Adjust the cursors when undoing a split of a page.
+ * If we grew a level we will execute this for both the
+ * left and the right pages.
+ * Called only during undo processing.
+ *
+ * PUBLIC: void __bam_ca_undosplit __P((DB *,
+ * PUBLIC: db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t));
+ */
+void
+__bam_ca_undosplit(dbp, frompgno, topgno, lpgno, split_indx)
+    DB *dbp;
+    db_pgno_t frompgno, topgno, lpgno;
+    u_int32_t split_indx;
+{
+    DB *ldbp;
+    DBC *dbc;
+    DB_ENV *dbenv;
+    DBC_INTERNAL *cp;
+
+    dbenv = dbp->dbenv;
+
+    /*
+     * Adjust the cursors.  See the comment in __bam_ca_delete().
+     *
+     * When backing out a split, we move the cursor back
+     * to the original offset and bump it by the split_indx.
+     */
+    MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+    for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+        ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+        ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+        MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+        for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+            dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+            /* Recno cursors track records, not page indices. */
+            if (dbc->dbtype == DB_RECNO)
+                continue;
+            cp = dbc->internal;
+            if (cp->pgno == topgno) {
+                /* Cursor was on the right page of the split. */
+                cp->pgno = frompgno;
+                cp->indx += split_indx;
+            } else if (cp->pgno == lpgno)
+                /* Cursor was on the new left page. */
+                cp->pgno = frompgno;
+        }
+        MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+    }
+    MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+}
diff --git a/storage/bdb/btree/bt_cursor.c b/storage/bdb/btree/bt_cursor.c
new file mode 100644
index 00000000000..14d90e8873d
--- /dev/null
+++ b/storage/bdb/btree/bt_cursor.c
@@ -0,0 +1,2794 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_cursor.c,v 11.147 2002/08/13 20:46:07 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+
+static int __bam_bulk __P((DBC *, DBT *, u_int32_t));
+static int __bam_c_close __P((DBC *, db_pgno_t, int *));
+static int __bam_c_del __P((DBC *));
+static int __bam_c_destroy __P((DBC *));
+static int __bam_c_first __P((DBC *));
+static int __bam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __bam_c_getstack __P((DBC *));
+static int __bam_c_last __P((DBC *));
+static int __bam_c_next __P((DBC *, int, int));
+static int __bam_c_physdel __P((DBC *));
+static int __bam_c_prev __P((DBC *));
+static int __bam_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __bam_c_search __P((DBC *,
+ db_pgno_t, const DBT *, u_int32_t, int *));
+static int __bam_c_writelock __P((DBC *));
+static int __bam_getboth_finddatum __P((DBC *, DBT *, u_int32_t));
+static int __bam_getbothc __P((DBC *, DBT *));
+static int __bam_get_prev __P((DBC *));
+static int __bam_isopd __P((DBC *, db_pgno_t *));
+
+/*
+ * Acquire a new page/lock.  If we hold a page/lock, discard the page, and
+ * lock-couple the lock.
+ *
+ * !!!
+ * We have to handle both where we have a lock to lock-couple and where we
+ * don't -- we don't duplicate locks when we duplicate cursors if we are
+ * running in a transaction environment as there's no point if locks are
+ * never discarded.  This means that the cursor may or may not hold a lock.
+ * In the case where we are descending the tree we always want to
+ * unlock the held interior page so we use ACQUIRE_COUPLE.
+ */
+#undef ACQUIRE
+#define ACQUIRE(dbc, mode, lpgno, lock, fpgno, pagep, ret) { \
+    DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \
+    if ((pagep) != NULL) { \
+        ret = __mpf->put(__mpf, pagep, 0); \
+        pagep = NULL; \
+    } else \
+        ret = 0; \
+    if ((ret) == 0 && STD_LOCKING(dbc)) \
+        ret = __db_lget(dbc, LCK_COUPLE, lpgno, mode, 0, &(lock));\
+    if ((ret) == 0) \
+        ret = __mpf->get(__mpf, &(fpgno), 0, &(pagep)); \
+}
+
+/*
+ * As ACQUIRE, but uses LCK_COUPLE_ALWAYS so the held lock is always
+ * released, even inside a transaction.
+ */
+#undef ACQUIRE_COUPLE
+#define ACQUIRE_COUPLE(dbc, mode, lpgno, lock, fpgno, pagep, ret) { \
+    DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \
+    if ((pagep) != NULL) { \
+        ret = __mpf->put(__mpf, pagep, 0); \
+        pagep = NULL; \
+    } else \
+        ret = 0; \
+    if ((ret) == 0 && STD_LOCKING(dbc)) \
+        ret = __db_lget(dbc, \
+            LCK_COUPLE_ALWAYS, lpgno, mode, 0, &(lock)); \
+    if ((ret) == 0) \
+        ret = __mpf->get(__mpf, &(fpgno), 0, &(pagep)); \
+}
+
+/* Acquire a new page/lock for a cursor. */
+#undef ACQUIRE_CUR
+#define ACQUIRE_CUR(dbc, mode, p, ret) { \
+    BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+    ACQUIRE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \
+    if ((ret) == 0) { \
+        __cp->pgno = p; \
+        __cp->lock_mode = (mode); \
+    } \
+}
+
+/*
+ * Acquire a new page/lock for a cursor and release the previous.
+ * This is typically used when descending a tree and we do not
+ * want to hold the interior nodes locked.
+ */
+#undef ACQUIRE_CUR_COUPLE
+#define ACQUIRE_CUR_COUPLE(dbc, mode, p, ret) { \
+    BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+    ACQUIRE_COUPLE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \
+    if ((ret) == 0) { \
+        __cp->pgno = p; \
+        __cp->lock_mode = (mode); \
+    } \
+}
+
+/*
+ * Acquire a write lock if we don't already have one.
+ *
+ * !!!
+ * See ACQUIRE macro on why we handle cursors that don't have locks.
+ */
+#undef ACQUIRE_WRITE_LOCK
+#define ACQUIRE_WRITE_LOCK(dbc, ret) { \
+    BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+    ret = 0; \
+    if (STD_LOCKING(dbc) && \
+        __cp->lock_mode != DB_LOCK_WRITE && \
+        ((ret) = __db_lget(dbc, \
+        LOCK_ISSET(__cp->lock) ? LCK_COUPLE : 0, \
+        __cp->pgno, DB_LOCK_WRITE, 0, &__cp->lock)) == 0) \
+        __cp->lock_mode = DB_LOCK_WRITE; \
+}
+
+/*
+ * Discard the current page/lock.  A put failure takes precedence in ret;
+ * a lock-release failure is reported only if the put succeeded.
+ */
+#undef DISCARD
+#define DISCARD(dbc, ldiscard, lock, pagep, ret) { \
+    DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \
+    int __t_ret; \
+    if ((pagep) != NULL) { \
+        ret = __mpf->put(__mpf, pagep, 0); \
+        pagep = NULL; \
+    } else \
+        ret = 0; \
+    if (ldiscard) \
+        __t_ret = __LPUT((dbc), lock); \
+    else \
+        __t_ret = __TLPUT((dbc), lock); \
+    if (__t_ret != 0 && (ret) == 0) \
+        ret = __t_ret; \
+}
+
+/* Discard the current page/lock for a cursor. */
+#undef DISCARD_CUR
+#define DISCARD_CUR(dbc, ret) { \
+    BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
+    DISCARD(dbc, 0, __cp->lock, __cp->page, ret); \
+    if ((ret) == 0) \
+        __cp->lock_mode = DB_LOCK_NG; \
+}
+
+/* If on-page item is a deleted record. */
+#undef IS_DELETED
+#define IS_DELETED(dbp, page, indx) \
+    B_DISSET(GET_BKEYDATA(dbp, page, \
+    (indx) + (TYPE(page) == P_LBTREE ? O_INDX : 0))->type)
+#undef IS_CUR_DELETED
+#define IS_CUR_DELETED(dbc) \
+    IS_DELETED((dbc)->dbp, (dbc)->internal->page, (dbc)->internal->indx)
+
+/*
+ * Test to see if two cursors could point to duplicates of the same key.
+ * In the case of off-page duplicates they are they same, as the cursors
+ * will be in the same off-page duplicate tree.  In the case of on-page
+ * duplicates, the key index offsets must be the same.  For the last test,
+ * as the original cursor may not have a valid page pointer, we use the
+ * current cursor's.
+ */
+#undef IS_DUPLICATE
+#define IS_DUPLICATE(dbc, i1, i2) \
+    (P_INP((dbc)->dbp,((PAGE *)(dbc)->internal->page))[i1] == \
+    P_INP((dbc)->dbp,((PAGE *)(dbc)->internal->page))[i2])
+#undef IS_CUR_DUPLICATE
+#define IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx) \
+    (F_ISSET(dbc, DBC_OPD) || \
+    (orig_pgno == (dbc)->internal->pgno && \
+    IS_DUPLICATE(dbc, (dbc)->internal->indx, orig_indx)))
+
+/*
+ * __bam_c_init --
+ * Initialize the access private portion of a cursor
+ *
+ * PUBLIC: int __bam_c_init __P((DBC *, DBTYPE));
+ */
+int
+__bam_c_init(dbc, dbtype)
+    DBC *dbc;
+    DBTYPE dbtype;
+{
+    DB_ENV *dbenv;
+    int ret;
+
+    dbenv = dbc->dbp->dbenv;
+
+    /* Allocate/initialize the internal structure. */
+    if (dbc->internal == NULL && (ret =
+        __os_malloc(dbenv, sizeof(BTREE_CURSOR), &dbc->internal)) != 0)
+        return (ret);
+
+    /*
+     * Initialize methods.  The generic __db_c_* entry points are
+     * shared; the access-method (c_am_*) slots differ between
+     * btree and recno.
+     */
+    dbc->c_close = __db_c_close;
+    dbc->c_count = __db_c_count;
+    dbc->c_del = __db_c_del;
+    dbc->c_dup = __db_c_dup;
+    dbc->c_get = dbc->c_real_get = __db_c_get;
+    dbc->c_pget = __db_c_pget;
+    dbc->c_put = __db_c_put;
+    if (dbtype == DB_BTREE) {
+        dbc->c_am_bulk = __bam_bulk;
+        dbc->c_am_close = __bam_c_close;
+        dbc->c_am_del = __bam_c_del;
+        dbc->c_am_destroy = __bam_c_destroy;
+        dbc->c_am_get = __bam_c_get;
+        dbc->c_am_put = __bam_c_put;
+        dbc->c_am_writelock = __bam_c_writelock;
+    } else {
+        /* Recno shares bulk/close/destroy/writelock with btree. */
+        dbc->c_am_bulk = __bam_bulk;
+        dbc->c_am_close = __bam_c_close;
+        dbc->c_am_del = __ram_c_del;
+        dbc->c_am_destroy = __bam_c_destroy;
+        dbc->c_am_get = __ram_c_get;
+        dbc->c_am_put = __ram_c_put;
+        dbc->c_am_writelock = __bam_c_writelock;
+    }
+
+    return (0);
+}
+
+/*
+ * __bam_c_refresh
+ * Set things up properly for cursor re-use.
+ *
+ * PUBLIC: int __bam_c_refresh __P((DBC *));
+ */
+int
+__bam_c_refresh(dbc)
+    DBC *dbc;
+{
+    BTREE *t;
+    BTREE_CURSOR *cp;
+    DB *dbp;
+
+    dbp = dbc->dbp;
+    t = dbp->bt_internal;
+    cp = (BTREE_CURSOR *)dbc->internal;
+
+    /*
+     * If our caller set the root page number, it's because the root was
+     * known.  This is always the case for off page dup cursors.  Else,
+     * pull it out of our internal information.
+     */
+    if (cp->root == PGNO_INVALID)
+        cp->root = t->bt_root;
+
+    /* No lock held yet. */
+    LOCK_INIT(cp->lock);
+    cp->lock_mode = DB_LOCK_NG;
+
+    /* Reset the page stack pointers to an empty stack. */
+    cp->sp = cp->csp = cp->stack;
+    cp->esp = cp->stack + sizeof(cp->stack) / sizeof(cp->stack[0]);
+
+    /*
+     * The btree leaf page data structures require that two key/data pairs
+     * (or four items) fit on a page, but other than that there's no fixed
+     * requirement.  The btree off-page duplicates only require two items,
+     * to be exact, but requiring four for them as well seems reasonable.
+     *
+     * Recno uses the btree bt_ovflsize value -- it's close enough.
+     */
+    cp->ovflsize = B_MINKEY_TO_OVFLSIZE(
+        dbp, F_ISSET(dbc, DBC_OPD) ? 2 : t->bt_minkey, dbp->pgsize);
+
+    cp->recno = RECNO_OOB;
+    cp->order = INVALID_ORDER;
+    cp->flags = 0;
+
+    /* Initialize for record numbers. */
+    if (F_ISSET(dbc, DBC_OPD) ||
+        dbc->dbtype == DB_RECNO || F_ISSET(dbp, DB_AM_RECNUM)) {
+        F_SET(cp, C_RECNUM);
+
+        /*
+         * All btrees that support record numbers, optionally standard
+         * recno trees, and all off-page duplicate recno trees have
+         * mutable record numbers.
+         */
+        if ((F_ISSET(dbc, DBC_OPD) && dbc->dbtype == DB_RECNO) ||
+            F_ISSET(dbp, DB_AM_RECNUM | DB_AM_RENUMBER))
+            F_SET(cp, C_RENUMBER);
+    }
+
+    return (0);
+}
+
+/*
+ * __bam_c_close --
+ * Close down the cursor.
+ */
+static int
+__bam_c_close(dbc, root_pgno, rmroot)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ int *rmroot;
+{
+ BTREE_CURSOR *cp, *cp_opd, *cp_c;
+ DB *dbp;
+ DBC *dbc_opd, *dbc_c;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int cdb_lock, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ cp_opd = (dbc_opd = cp->opd) == NULL ?
+ NULL : (BTREE_CURSOR *)dbc_opd->internal;
+ cdb_lock = ret = 0;
+
+ /*
+ * There are 3 ways this function is called:
+ *
+ * 1. Closing a primary cursor: we get called with a pointer to a
+ * primary cursor that has a NULL opd field. This happens when
+ * closing a btree/recno database cursor without an associated
+ * off-page duplicate tree.
+ *
+ * 2. Closing a primary and an off-page duplicate cursor stack: we
+ * get called with a pointer to the primary cursor which has a
+ * non-NULL opd field. This happens when closing a btree cursor
+ * into database with an associated off-page btree/recno duplicate
+ * tree. (It can't be a primary recno database, recno databases
+ * don't support duplicates.)
+ *
+ * 3. Closing an off-page duplicate cursor stack: we get called with
+ * a pointer to the off-page duplicate cursor. This happens when
+ * closing a non-btree database that has an associated off-page
+ * btree/recno duplicate tree or for a btree database when the
+ * opd tree is not empty (root_pgno == PGNO_INVALID).
+ *
+ * If either the primary or off-page duplicate cursor deleted a btree
+ * key/data pair, check to see if the item is still referenced by a
+ * different cursor. If it is, confirm that cursor's delete flag is
+ * set and leave it to that cursor to do the delete.
+ *
+ * NB: The test for == 0 below is correct. Our caller already removed
+ * our cursor argument from the active queue, we won't find it when we
+ * search the queue in __bam_ca_delete().
+ * NB: It can't be true that both the primary and off-page duplicate
+ * cursors have deleted a btree key/data pair. Either the primary
+ * cursor may have deleted an item and there's no off-page duplicate
+ * cursor, or there's an off-page duplicate cursor and it may have
+ * deleted an item.
+ *
+ * Primary recno databases aren't an issue here. Recno keys are either
+ * deleted immediately or never deleted, and do not have to be handled
+ * here.
+ *
+ * Off-page duplicate recno databases are an issue here, cases #2 and
+ * #3 above can both be off-page recno databases. The problem is the
+ * same as the final problem for off-page duplicate btree databases.
+ * If we no longer need the off-page duplicate tree, we want to remove
+ * it. For off-page duplicate btrees, we are done with the tree when
+ * we delete the last item it contains, i.e., there can be no further
+ * references to it when it's empty. For off-page duplicate recnos,
+ * we remove items from the tree as the application calls the remove
+ * function, so we are done with the tree when we close the last cursor
+ * that references it.
+ *
+ * We optionally take the root page number from our caller. If the
+ * primary database is a btree, we can get it ourselves because dbc
+ * is the primary cursor. If the primary database is not a btree,
+ * the problem is that we may be dealing with a stack of pages. The
+ * cursor we're using to do the delete points at the bottom of that
+ * stack and we need the top of the stack.
+ */
+ if (F_ISSET(cp, C_DELETED)) {
+ dbc_c = dbc;
+ switch (dbc->dbtype) {
+ case DB_BTREE: /* Case #1, #3. */
+ if (__bam_ca_delete(dbp, cp->pgno, cp->indx, 1) == 0)
+ goto lock;
+ goto done;
+ case DB_RECNO:
+ if (!F_ISSET(dbc, DBC_OPD)) /* Case #1. */
+ goto done;
+ /* Case #3. */
+ if (__ram_ca_delete(dbp, cp->root) == 0)
+ goto lock;
+ goto done;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__bam_c_close", dbc->dbtype));
+ }
+ }
+
+ if (dbc_opd == NULL)
+ goto done;
+
+ if (F_ISSET(cp_opd, C_DELETED)) { /* Case #2. */
+ /*
+ * We will not have been provided a root page number. Acquire
+ * one from the primary database.
+ */
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &h)) != 0)
+ goto err;
+ root_pgno = GET_BOVERFLOW(dbp, h, cp->indx + O_INDX)->pgno;
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ goto err;
+
+ dbc_c = dbc_opd;
+ switch (dbc_opd->dbtype) {
+ case DB_BTREE:
+ if (__bam_ca_delete(
+ dbp, cp_opd->pgno, cp_opd->indx, 1) == 0)
+ goto lock;
+ goto done;
+ case DB_RECNO:
+ if (__ram_ca_delete(dbp, cp_opd->root) == 0)
+ goto lock;
+ goto done;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__bam_c_close", dbc->dbtype));
+ }
+ }
+ goto done;
+
+lock: cp_c = (BTREE_CURSOR *)dbc_c->internal;
+
+ /*
+ * If this is CDB, upgrade the lock if necessary. While we acquired
+ * the write lock to logically delete the record, we released it when
+ * we returned from that call, and so may not be holding a write lock
+ * at the moment. NB: to get here in CDB we must either be holding a
+ * write lock or be the only cursor that is permitted to acquire write
+ * locks. The reason is that there can never be more than a single CDB
+ * write cursor (that cursor cannot be dup'd), and so that cursor must
+ * be closed and the item therefore deleted before any other cursor
+ * could acquire a reference to this item.
+ *
+ * Note that dbc may be an off-page dup cursor; this is the sole
+ * instance in which an OPD cursor does any locking, but it's necessary
+ * because we may be closed by ourselves without a parent cursor
+ * handy, and we have to do a lock upgrade on behalf of somebody.
+ * If this is the case, the OPD has been given the parent's locking
+ * info in __db_c_get--the OPD is also a WRITEDUP.
+ */
+ if (CDB_LOCKING(dbp->dbenv)) {
+ if (F_ISSET(dbc, DBC_WRITEDUP | DBC_WRITECURSOR)) {
+ if ((ret = dbp->dbenv->lock_get(
+ dbp->dbenv, dbc->locker, DB_LOCK_UPGRADE,
+ &dbc->lock_dbt, DB_LOCK_WRITE, &dbc->mylock)) != 0)
+ goto err;
+ cdb_lock = 1;
+ }
+ if ((ret = mpf->get(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
+ goto err;
+
+ goto delete;
+ }
+
+ /*
+ * The variable dbc_c has been initialized to reference the cursor in
+ * which we're going to do the delete. Initialize the cursor's page
+ * and lock structures as necessary.
+ *
+ * First, we may not need to acquire any locks. If we're in case #3,
+ * that is, the primary database isn't a btree database, our caller
+ * is responsible for acquiring any necessary locks before calling us.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if ((ret = mpf->get(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
+ goto err;
+ goto delete;
+ }
+
+ /*
+ * Otherwise, acquire a write lock. If the cursor that did the initial
+ * logical deletion (and which had a write lock) is not the same as the
+ * cursor doing the physical deletion (which may have only ever had a
+ * read lock on the item), we need to upgrade. The confusion comes as
+ * follows:
+ *
+ * C1 created, acquires item read lock
+ * C2 dup C1, create C2, also has item read lock.
+ * C1 acquire write lock, delete item
+ * C1 close
+ * C2 close, needs a write lock to physically delete item.
+ *
+ * If we're in a TXN, we know that C2 will be able to acquire the write
+ * lock, because no locker other than the one shared by C1 and C2 can
+ * acquire a write lock -- the original write lock C1 acquire was never
+ * discarded.
+ *
+ * If we're not in a TXN, it's nastier. Other cursors might acquire
+ * read locks on the item after C1 closed, discarding its write lock,
+ * and such locks would prevent C2 from acquiring a read lock. That's
+ * OK, though, we'll simply wait until we can acquire a read lock, or
+ * we'll deadlock. (Which better not happen, since we're not in a TXN.)
+ *
+ * Lock the primary database page, regardless of whether we're deleting
+ * an item on a primary database page or an off-page duplicates page.
+ */
+ ACQUIRE(dbc, DB_LOCK_WRITE,
+ cp->pgno, cp_c->lock, cp_c->pgno, cp_c->page, ret);
+ if (ret != 0)
+ goto err;
+
+delete: /*
+ * If the delete occurred in a btree, delete the on-page physical item
+ * referenced by the cursor.
+ */
+ if (dbc_c->dbtype == DB_BTREE && (ret = __bam_c_physdel(dbc_c)) != 0)
+ goto err;
+
+ /*
+ * If we're not working in an off-page duplicate tree, then we're
+ * done.
+ */
+ if (!F_ISSET(dbc_c, DBC_OPD) || root_pgno == PGNO_INVALID)
+ goto done;
+
+ /*
+ * We may have just deleted the last element in the off-page duplicate
+ * tree, and closed the last cursor in the tree. For an off-page btree
+ * there are no other cursors in the tree by definition, if the tree is
+ * empty. For an off-page recno we know we have closed the last cursor
+ * in the tree because the __ram_ca_delete call above returned 0 only
+ * in that case. So, if the off-page duplicate tree is empty at this
+ * point, we want to remove it.
+ */
+ if ((ret = mpf->get(mpf, &root_pgno, 0, &h)) != 0)
+ goto err;
+ if (NUM_ENT(h) == 0) {
+ if ((ret = __db_free(dbc, h)) != 0)
+ goto err;
+ } else {
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ goto err;
+ goto done;
+ }
+
+ /*
+ * When removing the tree, we have to do one of two things. If this is
+ * case #2, that is, the primary tree is a btree, delete the key that's
+ * associated with the tree from the btree leaf page. We know we are
+ * the only reference to it and we already have the correct lock. We
+ * detect this case because the cursor that was passed to us references
+ * an off-page duplicate cursor.
+ *
+ * If this is case #3, that is, the primary tree isn't a btree, pass
+ * the information back to our caller, it's their job to do cleanup on
+ * the primary page.
+ */
+ if (dbc_opd != NULL) {
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ goto err;
+ if ((ret = __bam_c_physdel(dbc)) != 0)
+ goto err;
+ } else
+ *rmroot = 1;
+err:
+done: /*
+ * Discard the page references and locks, and confirm that the stack
+ * has been emptied.
+ */
+ if (dbc_opd != NULL) {
+ DISCARD_CUR(dbc_opd, t_ret);
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+ }
+ DISCARD_CUR(dbc, t_ret);
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Downgrade any CDB lock we acquired. */
+ if (cdb_lock)
+ (void)__lock_downgrade(
+ dbp->dbenv, &dbc->mylock, DB_LOCK_IWRITE, 0);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_destroy --
+ * Close a single cursor -- internal version.
+ */
+static int
+__bam_c_destroy(dbc)
+ DBC *dbc;
+{
+ /* Discard the structures. */
+ __os_free(dbc->dbp->dbenv, dbc->internal);
+
+ return (0);
+}
+
+/*
+ * __bam_c_count --
+ * Return a count of on and off-page duplicates.
+ *
+ * PUBLIC: int __bam_c_count __P((DBC *, db_recno_t *));
+ */
+int
+__bam_c_count(dbc, recnop)
+ DBC *dbc;
+ db_recno_t *recnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ db_indx_t indx, top;
+ db_recno_t recno;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Called with the top-level cursor that may reference an off-page
+ * duplicates page. If it's a set of on-page duplicates, get the
+ * page and count. Otherwise, get the root page of the off-page
+ * duplicate tree, and use the count. We don't have to acquire any
+ * new locks, we have to have a read lock to even get here.
+ */
+ if (cp->opd == NULL) {
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ return (ret);
+
+ /*
+ * Move back to the beginning of the set of duplicates and
+ * then count forward.
+ */
+ for (indx = cp->indx;; indx -= P_INDX)
+ if (indx == 0 ||
+ !IS_DUPLICATE(dbc, indx, indx - P_INDX))
+ break;
+ for (recno = 1, top = NUM_ENT(cp->page) - P_INDX;
+ indx < top; ++recno, indx += P_INDX)
+ if (!IS_DUPLICATE(dbc, indx, indx + P_INDX))
+ break;
+ *recnop = recno;
+ } else {
+ if ((ret =
+ mpf->get(mpf, &cp->opd->internal->root, 0, &cp->page)) != 0)
+ return (ret);
+
+ *recnop = RE_NREC(cp->page);
+ }
+
+ ret = mpf->put(mpf, cp->page, 0);
+ cp->page = NULL;
+
+ return (ret);
+}
+
+/*
+ * __bam_c_del --
+ * Delete using a cursor.
+ */
+static int
+__bam_c_del(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /* If the item was already deleted, return failure. */
+ if (F_ISSET(cp, C_DELETED))
+ return (DB_KEYEMPTY);
+
+ /*
+ * This code is always called with a page lock but no page.
+ */
+ DB_ASSERT(cp->page == NULL);
+
+ /*
+ * We don't physically delete the record until the cursor moves, so
+ * we have to have a long-lived write lock on the page instead of a
+ * a long-lived read lock. Note, we have to have a read lock to even
+ * get here.
+ *
+ * If we're maintaining record numbers, we lock the entire tree, else
+ * we lock the single page.
+ */
+ if (F_ISSET(cp, C_RECNUM)) {
+ if ((ret = __bam_c_getstack(dbc)) != 0)
+ goto err;
+ cp->page = cp->csp->page;
+ } else {
+ ACQUIRE_CUR(dbc, DB_LOCK_WRITE, cp->pgno, ret);
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_cdel_log(dbp, dbc->txn, &LSN(cp->page), 0,
+ PGNO(cp->page), &LSN(cp->page), cp->indx)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(LSN(cp->page));
+
+ /* Set the intent-to-delete flag on the page. */
+ if (TYPE(cp->page) == P_LBTREE)
+ B_DSET(GET_BKEYDATA(dbp, cp->page, cp->indx + O_INDX)->type);
+ else
+ B_DSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type);
+
+ /* Mark the page dirty. */
+ ret = mpf->set(mpf, cp->page, DB_MPOOL_DIRTY);
+
+err: /*
+ * If we've been successful so far and the tree has record numbers,
+ * adjust the record counts. Either way, release acquired page(s).
+ */
+ if (F_ISSET(cp, C_RECNUM)) {
+ if (ret == 0)
+ ret = __bam_adjust(dbc, -1);
+ (void)__bam_stkrel(dbc, 0);
+ } else
+ if (cp->page != NULL &&
+ (t_ret = mpf->put(mpf, cp->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ cp->page = NULL;
+
+ /* Update the cursors last, after all chance of failure is past. */
+ if (ret == 0)
+ (void)__bam_ca_delete(dbp, cp->pgno, cp->indx, 1);
+
+ return (ret);
+}
+
+/*
+ * __bam_c_dup --
+ * Duplicate a btree cursor, such that the new one holds appropriate
+ * locks for the position of the original.
+ *
+ * PUBLIC: int __bam_c_dup __P((DBC *, DBC *));
+ */
+int
+__bam_c_dup(orig_dbc, new_dbc)
+ DBC *orig_dbc, *new_dbc;
+{
+ BTREE_CURSOR *orig, *new;
+ int ret;
+
+ orig = (BTREE_CURSOR *)orig_dbc->internal;
+ new = (BTREE_CURSOR *)new_dbc->internal;
+
+ /*
+ * If we're holding a lock we need to acquire a copy of it, unless
+ * we're in a transaction. We don't need to copy any lock we're
+ * holding inside a transaction because all the locks are retained
+ * until the transaction commits or aborts.
+ */
+ if (LOCK_ISSET(orig->lock) && orig_dbc->txn == NULL) {
+ if ((ret = __db_lget(new_dbc,
+ 0, new->pgno, new->lock_mode, 0, &new->lock)) != 0)
+ return (ret);
+ }
+ new->ovflsize = orig->ovflsize;
+ new->recno = orig->recno;
+ new->flags = orig->flags;
+
+ return (0);
+}
+
+/*
+ * __bam_c_get --
+ * Get using a cursor (btree).
+ */
+static int
+__bam_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ db_pgno_t orig_pgno;
+ db_indx_t orig_indx;
+ int exact, newopd, ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ orig_pgno = cp->pgno;
+ orig_indx = cp->indx;
+
+ newopd = 0;
+ switch (flags) {
+ case DB_CURRENT:
+ /* It's not possible to return a deleted record. */
+ if (F_ISSET(cp, C_DELETED)) {
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ /*
+ * Acquire the current page. We have at least a read-lock
+ * already. The caller may have set DB_RMW asking for a
+ * write lock, but upgrading to a write lock has no better
+ * chance of succeeding now instead of later, so don't try.
+ */
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ goto err;
+ break;
+ case DB_FIRST:
+ newopd = 1;
+ if ((ret = __bam_c_first(dbc)) != 0)
+ goto err;
+ break;
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ /*
+ * There are two ways to get here based on DBcursor->c_get
+ * with the DB_GET_BOTH/DB_GET_BOTH_RANGE flags set:
+ *
+ * 1. Searching a sorted off-page duplicate tree: do a tree
+ * search.
+ *
+ * 2. Searching btree: do a tree search. If it returns a
+ * reference to off-page duplicate tree, return immediately
+ * and let our caller deal with it. If the search doesn't
+ * return a reference to off-page duplicate tree, continue
+ * with an on-page search.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if ((ret = __bam_c_search(
+ dbc, PGNO_INVALID, data, flags, &exact)) != 0)
+ goto err;
+ if (flags == DB_GET_BOTH) {
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ break;
+ }
+
+ /*
+ * We didn't require an exact match, so the search may
+ * may have returned an entry past the end of the page,
+ * or we may be referencing a deleted record. If so,
+ * move to the next entry.
+ */
+ if ((cp->indx == NUM_ENT(cp->page) ||
+ IS_CUR_DELETED(dbc)) &&
+ (ret = __bam_c_next(dbc, 1, 0)) != 0)
+ goto err;
+ } else {
+ if ((ret = __bam_c_search(
+ dbc, PGNO_INVALID, key, flags, &exact)) != 0)
+ return (ret);
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ if (pgnop != NULL && __bam_isopd(dbc, pgnop)) {
+ newopd = 1;
+ break;
+ }
+ if ((ret =
+ __bam_getboth_finddatum(dbc, data, flags)) != 0)
+ goto err;
+ }
+ break;
+ case DB_GET_BOTHC:
+ if ((ret = __bam_getbothc(dbc, data)) != 0)
+ goto err;
+ break;
+ case DB_LAST:
+ newopd = 1;
+ if ((ret = __bam_c_last(dbc)) != 0)
+ goto err;
+ break;
+ case DB_NEXT:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_first(dbc)) != 0)
+ goto err;
+ } else
+ if ((ret = __bam_c_next(dbc, 1, 0)) != 0)
+ goto err;
+ break;
+ case DB_NEXT_DUP:
+ if ((ret = __bam_c_next(dbc, 1, 0)) != 0)
+ goto err;
+ if (!IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx)) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ break;
+ case DB_NEXT_NODUP:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_first(dbc)) != 0)
+ goto err;
+ } else
+ do {
+ if ((ret = __bam_c_next(dbc, 1, 0)) != 0)
+ goto err;
+ } while (IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx));
+ break;
+ case DB_PREV:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_last(dbc)) != 0)
+ goto err;
+ } else
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ goto err;
+ break;
+ case DB_PREV_NODUP:
+ newopd = 1;
+ if (cp->pgno == PGNO_INVALID) {
+ if ((ret = __bam_c_last(dbc)) != 0)
+ goto err;
+ } else
+ do {
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ goto err;
+ } while (IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx));
+ break;
+ case DB_SET:
+ case DB_SET_RECNO:
+ newopd = 1;
+ if ((ret = __bam_c_search(dbc,
+ PGNO_INVALID, key, flags, &exact)) != 0)
+ goto err;
+ break;
+ case DB_SET_RANGE:
+ newopd = 1;
+ if ((ret = __bam_c_search(dbc,
+ PGNO_INVALID, key, flags, &exact)) != 0)
+ goto err;
+
+ /*
+ * As we didn't require an exact match, the search function
+ * may have returned an entry past the end of the page. Or,
+ * we may be referencing a deleted record. If so, move to
+ * the next entry.
+ */
+ if (cp->indx == NUM_ENT(cp->page) || IS_CUR_DELETED(dbc))
+ if ((ret = __bam_c_next(dbc, 0, 0)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_flag(dbp->dbenv, "__bam_c_get", flags);
+ goto err;
+ }
+
+ /*
+ * We may have moved to an off-page duplicate tree. Return that
+ * information to our caller.
+ */
+ if (newopd && pgnop != NULL)
+ (void)__bam_isopd(dbc, pgnop);
+
+ /*
+ * Don't return the key, it was passed to us (this is true even if the
+ * application defines a compare function returning equality for more
+ * than one key value, since in that case which actual value we store
+ * in the database is undefined -- and particularly true in the case of
+ * duplicates where we only store one key value).
+ */
+ if (flags == DB_GET_BOTH ||
+ flags == DB_GET_BOTH_RANGE || flags == DB_SET)
+ F_SET(key, DB_DBT_ISSET);
+
+err: /*
+ * Regardless of whether we were successful or not, if the cursor
+ * moved, clear the delete flag, DBcursor->c_get never references
+ * a deleted key, if it moved at all.
+ */
+ if (F_ISSET(cp, C_DELETED) &&
+ (cp->pgno != orig_pgno || cp->indx != orig_indx))
+ F_CLR(cp, C_DELETED);
+
+ return (ret);
+}
+
+/*
+ * __bam_get_prev --
+ *	Step the cursor to the previous record; if that record references
+ *	an off-page duplicate tree, open an OPD cursor positioned on the
+ *	tree's last duplicate.  Used by the bulk code to back up.
+ */
+static int
+__bam_get_prev(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DBT key, data;
+ db_pgno_t pgno;
+ int ret;
+
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ return (ret);
+
+ if (__bam_isopd(dbc, &pgno)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if ((ret = __db_c_newopd(dbc, pgno, cp->opd, &cp->opd)) != 0)
+ return (ret);
+ /*
+ * NOTE(review): key/data are passed to c_am_get without
+ * initialization; presumably DB_LAST positioning ignores
+ * their contents -- confirm against the OPD get method.
+ */
+ if ((ret = cp->opd->c_am_get(cp->opd,
+ &key, &data, DB_LAST, NULL)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_bulk -- Return bulk data from a btree.
+ */
+static int
+__bam_bulk(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ BTREE_CURSOR *cp;
+ PAGE *pg;
+ db_indx_t *inp, indx, pg_keyoff;
+ int32_t *endp, key_off, *offp, *saveoffp;
+ u_int8_t *dbuf, *dp, *np;
+ u_int32_t key_size, size, space;
+ int adj, is_key, need_pg, next_key, no_dup;
+ int pagesize, rec_key, ret;
+
+ ret = 0;
+ key_off = 0;
+ size = 0;
+ pagesize = dbc->dbp->pgsize;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * dp tracks the beginging of the page in the buffer.
+ * np is the next place to copy things into the buffer.
+ * dbuf always stays at the beging of the buffer.
+ */
+ dbuf = data->data;
+ np = dp = dbuf;
+
+ /* Keep track of space that is left. There is a termination entry */
+ space = data->ulen;
+ space -= sizeof(*offp);
+
+ /* Build the offset/size table from the end up. */
+ endp = (int32_t *)((u_int8_t *)dbuf + data->ulen);
+ endp--;
+ offp = endp;
+
+ key_size = 0;
+
+ /*
+ * Distinguish between BTREE and RECNO.
+ * There are no keys in RECNO. If MULTIPLE_KEY is specified
+ * then we return the record numbers.
+ * is_key indicates that multiple btree keys are returned.
+ * rec_key is set if we are returning record numbers.
+ * next_key is set if we are going after the next key rather than dup.
+ */
+ if (dbc->dbtype == DB_BTREE) {
+ is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1: 0;
+ rec_key = 0;
+ next_key = is_key && LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP;
+ adj = 2;
+ } else {
+ is_key = 0;
+ rec_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1 : 0;
+ next_key = LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP;
+ adj = 1;
+ }
+ no_dup = LF_ISSET(DB_OPFLAGS_MASK) == DB_NEXT_NODUP;
+
+next_pg:
+ indx = cp->indx;
+ pg = cp->page;
+
+ inp = P_INP(dbc->dbp, pg);
+ /* The current page is not yet in the buffer. */
+ need_pg = 1;
+
+ /*
+ * Keep track of the offset of the current key on the page.
+ * If we are returning keys, set it to 0 first so we force
+ * the copy of the key to the buffer.
+ */
+ pg_keyoff = 0;
+ if (is_key == 0)
+ pg_keyoff = inp[indx];
+
+ do {
+ if (IS_DELETED(dbc->dbp, pg, indx)) {
+ if (dbc->dbtype != DB_RECNO)
+ continue;
+
+ cp->recno++;
+ /*
+ * If we are not returning recnos then we
+ * need to fill in every slot so the user
+ * can calculate the record numbers.
+ */
+ if (rec_key != 0)
+ continue;
+
+ space -= 2 * sizeof(*offp);
+ /* Check if space as underflowed. */
+ if (space > data->ulen)
+ goto back_up;
+
+ /* Just mark the empty recno slots. */
+ *offp-- = 0;
+ *offp-- = 0;
+ continue;
+ }
+
+ /*
+ * Check to see if we have a new key.
+ * If so, then see if we need to put the
+ * key on the page. If its already there
+ * then we just point to it.
+ */
+ if (is_key && pg_keyoff != inp[indx]) {
+ bk = GET_BKEYDATA(dbc->dbp, pg, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ size = key_size = bo->tlen;
+ if (key_size > space)
+ goto get_key_space;
+ if ((ret = __bam_bulk_overflow(dbc,
+ bo->tlen, bo->pgno, np)) != 0)
+ return (ret);
+ space -= key_size;
+ key_off = (int32_t)(np - dbuf);
+ np += key_size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+get_key_space:
+ /* Nothing added, then error. */
+ if (offp == endp) {
+ data->size =
+ ALIGN(size +
+ pagesize,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ /*
+ * We need to back up to the
+ * last record put into the
+ * buffer so that it is
+ * CURRENT.
+ */
+ if (indx != 0)
+ indx -= P_INDX;
+ else {
+ if ((ret =
+ __bam_get_prev(
+ dbc)) != 0)
+ return (ret);
+ indx = cp->indx;
+ pg = cp->page;
+ }
+ break;
+ }
+ /*
+ * Move the data part of the page
+ * to the buffer.
+ */
+ memcpy(dp,
+ (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ key_size = bk->len;
+ key_off = (int32_t)(inp[indx] - HOFFSET(pg)
+ + dp - dbuf + SSZA(BKEYDATA, data));
+ pg_keyoff = inp[indx];
+ }
+ }
+
+ /*
+ * Reserve space for the pointers and sizes.
+ * Either key/data pair or just for a data item.
+ */
+ space -= (is_key ? 4 : 2) * sizeof(*offp);
+ if (rec_key)
+ space -= sizeof(*offp);
+
+ /* Check to see if space has underflowed. */
+ if (space > data->ulen)
+ goto back_up;
+
+ /*
+ * Determine if the next record is in the
+ * buffer already or if it needs to be copied in.
+ * If we have an off page dup, then copy as many
+ * as will fit into the buffer.
+ */
+ bk = GET_BKEYDATA(dbc->dbp, pg, indx + adj - 1);
+ if (B_TYPE(bk->type) == B_DUPLICATE) {
+ bo = (BOVERFLOW *)bk;
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+ /*
+ * We pass the offset of the current key.
+ * On return we check to see if offp has
+ * moved to see if any data fit.
+ */
+ saveoffp = offp;
+ if ((ret = __bam_bulk_duplicates(dbc, bo->pgno,
+ dbuf, is_key ? offp + P_INDX : NULL,
+ &offp, &np, &space, no_dup)) != 0) {
+ if (ret == ENOMEM) {
+ size = space;
+ /* If nothing was added, then error. */
+ if (offp == saveoffp) {
+ offp += 2;
+ goto back_up;
+ }
+ goto get_space;
+ }
+ return (ret);
+ }
+ } else if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ size = bo->tlen;
+ if (size > space)
+ goto back_up;
+ if ((ret =
+ __bam_bulk_overflow(dbc,
+ bo->tlen, bo->pgno, np)) != 0)
+ return (ret);
+ space -= size;
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ } else if (rec_key)
+ *offp-- = cp->recno;
+ *offp-- = (int32_t)(np - dbuf);
+ np += size;
+ *offp-- = size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+back_up:
+ /*
+ * Back up the index so that the
+ * last record in the buffer is CURRENT
+ */
+ if (indx >= adj)
+ indx -= adj;
+ else {
+ if ((ret =
+ __bam_get_prev(dbc)) != 0 &&
+ ret != DB_NOTFOUND)
+ return (ret);
+ indx = cp->indx;
+ pg = cp->page;
+ }
+ if (dbc->dbtype == DB_RECNO)
+ cp->recno--;
+get_space:
+ /*
+ * See if we put anything in the
+ * buffer or if we are doing a DBP->get
+ * did we get all of the data.
+ */
+ if (offp >=
+ (is_key ? &endp[-1] : endp) ||
+ F_ISSET(dbc, DBC_TRANSIENT)) {
+ data->size = ALIGN(size +
+ data->ulen - space,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ break;
+ }
+ memcpy(dp, (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ /*
+ * Add the offsets and sizes to the end of the buffer.
+ * First add the key info then the data info.
+ */
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ } else if (rec_key)
+ *offp-- = cp->recno;
+ *offp-- = (int32_t)(inp[indx + adj - 1] - HOFFSET(pg)
+ + dp - dbuf + SSZA(BKEYDATA, data));
+ *offp-- = bk->len;
+ }
+ if (dbc->dbtype == DB_RECNO)
+ cp->recno++;
+ else if (no_dup) {
+ while (indx + adj < NUM_ENT(pg) &&
+ pg_keyoff == inp[indx + adj])
+ indx += adj;
+ }
+ /*
+ * Stop when we either run off the page or we
+ * move to the next key and we are not returning mulitple keys.
+ */
+ } while ((indx += adj) < NUM_ENT(pg) &&
+ (next_key || pg_keyoff == inp[indx]));
+
+ /* If we are off the page then try to the next page. */
+ if (ret == 0 && next_key && indx >= NUM_ENT(pg)) {
+ cp->indx = indx;
+ ret = __bam_c_next(dbc, 0, 1);
+ if (ret == 0)
+ goto next_pg;
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ }
+
+ /*
+ * If we did a DBP->get we must error if we did not return
+ * all the data for the current key because there is
+ * no way to know if we did not get it all, nor any
+ * interface to fetch the balance.
+ */
+
+ if (ret == 0 &&
+ F_ISSET(dbc, DBC_TRANSIENT) && pg_keyoff == inp[indx]) {
+ data->size = (data->ulen - space) + size;
+ return (ENOMEM);
+ }
+ /*
+ * Must leave the index pointing at the last record fetched.
+ * If we are not fetching keys, we may have stepped to the
+ * next key.
+ */
+ if (next_key || pg_keyoff == inp[indx])
+ cp->indx = indx;
+ else
+ cp->indx = indx - P_INDX;
+
+ if (rec_key == 1)
+ *offp = (u_int32_t) RECNO_OOB;
+ else
+ *offp = (u_int32_t) -1;
+ return (0);
+}
+
+/*
+ * __bam_bulk_overflow --
+ * Dump overflow record into the buffer.
+ * The space requirements have already been checked.
+ * PUBLIC: int __bam_bulk_overflow
+ * PUBLIC: __P((DBC *, u_int32_t, db_pgno_t, u_int8_t *));
+ */
+int
+__bam_bulk_overflow(dbc, len, pgno, dp)
+ DBC *dbc;
+ u_int32_t len;
+ db_pgno_t pgno;
+ u_int8_t *dp;
+{
+ DBT dbt;
+
+ memset(&dbt, 0, sizeof(dbt));
+ F_SET(&dbt, DB_DBT_USERMEM);
+ dbt.ulen = len;
+ dbt.data = (void *)dp;
+ return (__db_goff(dbc->dbp, &dbt, len, pgno, NULL, NULL));
+}
+
+/*
+ * __bam_bulk_duplicates --
+ * Put as many off page duplicates as will fit into the buffer.
+ * This routine will adjust the cursor to reflect the position in
+ * the overflow tree.
+ * PUBLIC: int __bam_bulk_duplicates __P((DBC *,
+ * PUBLIC: db_pgno_t, u_int8_t *, int32_t *,
+ * PUBLIC: int32_t **, u_int8_t **, u_int32_t *, int));
+ */
+int
+__bam_bulk_duplicates(dbc, pgno, dbuf, keyoff, offpp, dpp, spacep, no_dup)
+ DBC *dbc;
+ db_pgno_t pgno;
+ u_int8_t *dbuf;
+ int32_t *keyoff, **offpp;
+ u_int8_t **dpp;
+ u_int32_t *spacep;
+ int no_dup;
+{
+ DB *dbp;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ BTREE_CURSOR *cp;
+ DBC *opd;
+ DBT key, data;
+ PAGE *pg;
+ db_indx_t indx, *inp;
+ int32_t *offp;
+ u_int32_t size, space;
+ u_int8_t *dp, *np;
+ int first, need_pg, pagesize, ret, t_ret;
+
+ ret = 0;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ opd = cp->opd;
+
+ if (opd == NULL) {
+ if ((ret = __db_c_newopd(dbc, pgno, NULL, &opd)) != 0)
+ return (ret);
+ cp->opd = opd;
+ if ((ret = opd->c_am_get(opd,
+ &key, &data, DB_FIRST, NULL)) != 0)
+ return (ret);
+ }
+
+ pagesize = opd->dbp->pgsize;
+ cp = (BTREE_CURSOR *)opd->internal;
+ space = *spacep;
+ /* Get current offset slot. */
+ offp = *offpp;
+
+ /*
+ * np is the next place to put data.
+ * dp is the begining of the current page in the buffer.
+ */
+ np = dp = *dpp;
+ first = 1;
+ indx = cp->indx;
+
+ do {
+ /* Fetch the current record. No initial move. */
+ if ((ret = __bam_c_next(opd, 0, 0)) != 0)
+ break;
+ pg = cp->page;
+ indx = cp->indx;
+ inp = P_INP(dbp, pg);
+ /* We need to copy the page to the buffer. */
+ need_pg = 1;
+
+ do {
+ if (IS_DELETED(dbp, pg, indx))
+ goto contin;
+ bk = GET_BKEYDATA(dbp, pg, indx);
+ space -= 2 * sizeof(*offp);
+ /* Allocate space for key if needed. */
+ if (first == 0 && keyoff != NULL)
+ space -= 2 * sizeof(*offp);
+
+ /* Did space underflow? */
+ if (space > *spacep) {
+ ret = ENOMEM;
+ if (first == 1) {
+ space = *spacep + -(int32_t)space;
+ if (need_pg)
+ space += pagesize - HOFFSET(pg);
+ }
+ break;
+ }
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ size = bo->tlen;
+ if (size > space) {
+ ret = ENOMEM;
+ if (first == 1) {
+ space = *spacep + size;
+ }
+ break;
+ }
+ if (first == 0 && keyoff != NULL) {
+ *offp-- = keyoff[0];
+ *offp-- = keyoff[-1];
+ }
+ if ((ret = __bam_bulk_overflow(dbc,
+ bo->tlen, bo->pgno, np)) != 0)
+ return (ret);
+ space -= size;
+ *offp-- = (int32_t)(np - dbuf);
+ np += size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+ ret = ENOMEM;
+ /* Return space required. */
+ if (first == 1) {
+ space = *spacep + size;
+ }
+ break;
+ }
+ memcpy(dp,
+ (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ if (first == 0 && keyoff != NULL) {
+ *offp-- = keyoff[0];
+ *offp-- = keyoff[-1];
+ }
+ size = bk->len;
+ *offp-- = (int32_t)(inp[indx] - HOFFSET(pg)
+ + dp - dbuf + SSZA(BKEYDATA, data));
+ }
+ *offp-- = size;
+ first = 0;
+ if (no_dup)
+ break;
+contin:
+ indx++;
+ if (opd->dbtype == DB_RECNO)
+ cp->recno++;
+ } while (indx < NUM_ENT(pg));
+ if (no_dup)
+ break;
+ cp->indx = indx;
+
+ } while (ret == 0);
+
+ /* Return the updated information. */
+ *spacep = space;
+ *offpp = offp;
+ *dpp = np;
+
+ /*
+ * If we ran out of space back up the pointer.
+ * If we did not return any dups or reached the end, close the opd.
+ */
+ if (ret == ENOMEM) {
+ if (opd->dbtype == DB_RECNO) {
+ if (--cp->recno == 0)
+ goto close_opd;
+ } else if (indx != 0)
+ cp->indx--;
+ else {
+ t_ret = __bam_c_prev(opd);
+ if (t_ret == DB_NOTFOUND)
+ goto close_opd;
+ if (t_ret != 0)
+ ret = t_ret;
+ }
+ } else if (keyoff == NULL && ret == DB_NOTFOUND) {
+ cp->indx--;
+ if (opd->dbtype == DB_RECNO)
+ --cp->recno;
+ } else if (indx == 0 || ret == DB_NOTFOUND) {
+close_opd:
+ opd->c_close(opd);
+ ((BTREE_CURSOR *)dbc->internal)->opd = NULL;
+ }
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ return (ret);
+}
+
+/*
+ * __bam_getbothc --
+ * Search for a matching data item on a join.
+ */
+static int
+__bam_getbothc(dbc, data)
+ DBC *dbc;
+ DBT *data;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ int cmp, exact, ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Acquire the current page. We have at least a read-lock
+ * already. The caller may have set DB_RMW asking for a
+ * write lock, but upgrading to a write lock has no better
+ * chance of succeeding now instead of later, so don't try.
+ */
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+ return (ret);
+
+ /*
+ * An off-page duplicate cursor. Search the remaining duplicates
+ * for one which matches (do a normal btree search, then verify
+ * that the retrieved record is greater than the original one).
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ /*
+ * Check to make sure the desired item comes strictly after
+ * the current position; if it doesn't, return DB_NOTFOUND.
+ */
+ if ((ret = __bam_cmp(dbp, data, cp->page, cp->indx,
+ dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare,
+ &cmp)) != 0)
+ return (ret);
+
+ if (cmp <= 0)
+ return (DB_NOTFOUND);
+
+ /* Discard the current page, we're going to do a full search. */
+ if ((ret = mpf->put(mpf, cp->page, 0)) != 0)
+ return (ret);
+ cp->page = NULL;
+
+ return (__bam_c_search(dbc,
+ PGNO_INVALID, data, DB_GET_BOTH, &exact));
+ }
+
+ /*
+ * We're doing a DBC->c_get(DB_GET_BOTHC) and we're already searching
+ * a set of on-page duplicates (either sorted or unsorted). Continue
+ * a linear search from after the current position.
+ *
+ * (Note that we could have just finished a "set" of one duplicate,
+ * i.e. not a duplicate at all, but the following check will always
+ * return DB_NOTFOUND in this case, which is the desired behavior.)
+ */
+ if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
+ !IS_DUPLICATE(dbc, cp->indx, cp->indx + P_INDX))
+ return (DB_NOTFOUND);
+ cp->indx += P_INDX;
+
+ return (__bam_getboth_finddatum(dbc, data, DB_GET_BOTH));
+}
+
+/*
+ * __bam_getboth_finddatum --
+ * Find a matching on-page data item.
+ */
+static int
+__bam_getboth_finddatum(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ db_indx_t base, lim, top;
+ int cmp, ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Called (sometimes indirectly) from DBC->get to search on-page data
+ * item(s) for a matching value. If the original flag was DB_GET_BOTH
+ * or DB_GET_BOTH_RANGE, the cursor is set to the first undeleted data
+ * item for the key. If the original flag was DB_GET_BOTHC, the cursor
+ * argument is set to the first data item we can potentially return.
+ * In both cases, there may or may not be additional duplicate data
+ * items to search.
+ *
+ * If the duplicates are not sorted, do a linear search.
+ */
+ if (dbp->dup_compare == NULL) {
+ for (;; cp->indx += P_INDX) {
+ if (!IS_CUR_DELETED(dbc) &&
+ (ret = __bam_cmp(dbp, data, cp->page,
+ cp->indx + O_INDX, __bam_defcmp, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0)
+ return (0);
+
+ if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
+ !IS_DUPLICATE(dbc, cp->indx, cp->indx + P_INDX))
+ break;
+ }
+ return (DB_NOTFOUND);
+ }
+
+ /*
+ * If the duplicates are sorted, do a binary search. The reason for
+ * this is that large pages and small key/data pairs result in large
+ * numbers of on-page duplicates before they get pushed off-page.
+ *
+ * Find the top and bottom of the duplicate set. Binary search
+ * requires at least two items, don't loop if there's only one.
+ */
+ for (base = top = cp->indx; top < NUM_ENT(cp->page); top += P_INDX)
+ if (!IS_DUPLICATE(dbc, cp->indx, top))
+ break;
+ if (base == (top - P_INDX)) {
+ if ((ret = __bam_cmp(dbp, data,
+ cp->page, cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ return (cmp == 0 ||
+ (cmp < 0 && flags == DB_GET_BOTH_RANGE) ? 0 : DB_NOTFOUND);
+ }
+
+ for (lim = (top - base) / (db_indx_t)P_INDX; lim != 0; lim >>= 1) {
+ cp->indx = base + ((lim >> 1) * P_INDX);
+ if ((ret = __bam_cmp(dbp, data, cp->page,
+ cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0) {
+ /*
+ * XXX
+ * No duplicate duplicates in sorted duplicate sets,
+ * so there can be only one.
+ */
+ if (!IS_CUR_DELETED(dbc))
+ return (0);
+ break;
+ }
+ if (cmp > 0) {
+ base = cp->indx + P_INDX;
+ --lim;
+ }
+ }
+
+ /* No match found; if we're looking for an exact match, we're done. */
+ if (flags == DB_GET_BOTH)
+ return (DB_NOTFOUND);
+
+ /*
+ * Base is the smallest index greater than the data item, may be zero
+ * or a last + O_INDX index, and may be deleted. Find an undeleted
+ * item.
+ */
+ cp->indx = base;
+ while (cp->indx < top && IS_CUR_DELETED(dbc))
+ cp->indx += P_INDX;
+ return (cp->indx < top ? 0 : DB_NOTFOUND);
+}
+
+/*
+ * __bam_c_put --
+ *	Put using a cursor.
+ *
+ * flags is one of DB_AFTER, DB_BEFORE, DB_CURRENT, DB_KEYFIRST,
+ * DB_KEYLAST or DB_NODUPDATA.  If the matching entry turns out to be an
+ * off-page duplicate tree, its page number is returned through pgnop and
+ * nothing is inserted here -- the caller is expected to descend into
+ * that tree with a new cursor.
+ */
+static int
+__bam_c_put(dbc, key, data, flags, pgnop)
+	DBC *dbc;
+	DBT *key, *data;
+	u_int32_t flags;
+	db_pgno_t *pgnop;
+{
+	BTREE_CURSOR *cp;
+	DB *dbp;
+	DBT dbt;
+	DB_MPOOLFILE *mpf;
+	db_pgno_t root_pgno;
+	u_int32_t iiop;
+	int cmp, exact, ret, stack;
+	void *arg;
+
+	dbp = dbc->dbp;
+	mpf = dbp->mpf;
+	cp = (BTREE_CURSOR *)dbc->internal;
+	root_pgno = cp->root;
+
+	/* We re-enter here (via "goto split") after splitting the tree. */
+split:	ret = stack = 0;
+	switch (flags) {
+	case DB_AFTER:
+	case DB_BEFORE:
+	case DB_CURRENT:
+		iiop = flags;
+
+		/*
+		 * If the Btree has record numbers (and we're not replacing an
+		 * existing record), we need a complete stack so that we can
+		 * adjust the record counts.  The check for flags == DB_CURRENT
+		 * is superfluous but left in for clarity.  (If C_RECNUM is set
+		 * we know that flags must be DB_CURRENT, as DB_AFTER/DB_BEFORE
+		 * are illegal in a Btree unless it's configured for duplicates
+		 * and you cannot configure a Btree for both record renumbering
+		 * and duplicates.)
+		 */
+		if (flags == DB_CURRENT &&
+		    F_ISSET(cp, C_RECNUM) && F_ISSET(cp, C_DELETED)) {
+			if ((ret = __bam_c_getstack(dbc)) != 0)
+				goto err;
+			/*
+			 * Initialize the cursor from the stack.  Don't take
+			 * the page number or page index, they should already
+			 * be set.
+			 */
+			cp->page = cp->csp->page;
+			cp->lock = cp->csp->lock;
+			cp->lock_mode = cp->csp->lock_mode;
+
+			stack = 1;
+			break;
+		}
+
+		/* Acquire the current page with a write lock. */
+		ACQUIRE_WRITE_LOCK(dbc, ret);
+		if (ret != 0)
+			goto err;
+		if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+			goto err;
+		break;
+	case DB_KEYFIRST:
+	case DB_KEYLAST:
+	case DB_NODUPDATA:
+		/*
+		 * Searching off-page, sorted duplicate tree: do a tree search
+		 * for the correct item; __bam_c_search returns the smallest
+		 * slot greater than the key, use it.
+		 *
+		 * See comment below regarding where we can start the search.
+		 */
+		if (F_ISSET(dbc, DBC_OPD)) {
+			if ((ret = __bam_c_search(dbc,
+			    F_ISSET(cp, C_RECNUM) ? cp->root : root_pgno,
+			    data, flags, &exact)) != 0)
+				goto err;
+			stack = 1;
+
+			/* Disallow "sorted" duplicate duplicates. */
+			if (exact) {
+				if (IS_DELETED(dbp, cp->page, cp->indx)) {
+					iiop = DB_CURRENT;
+					break;
+				}
+				ret = __db_duperr(dbp, flags);
+				goto err;
+			}
+			iiop = DB_BEFORE;
+			break;
+		}
+
+		/*
+		 * Searching a btree.
+		 *
+		 * If we've done a split, we can start the search from the
+		 * parent of the split page, which __bam_split returned
+		 * for us in root_pgno, unless we're in a Btree with record
+		 * numbering.  In that case, we'll need the true root page
+		 * in order to adjust the record count.
+		 */
+		if ((ret = __bam_c_search(dbc,
+		    F_ISSET(cp, C_RECNUM) ? cp->root : root_pgno, key,
+		    flags == DB_KEYFIRST || dbp->dup_compare != NULL ?
+		    DB_KEYFIRST : DB_KEYLAST, &exact)) != 0)
+			goto err;
+		stack = 1;
+
+		/*
+		 * If we don't have an exact match, __bam_c_search returned
+		 * the smallest slot greater than the key, use it.
+		 */
+		if (!exact) {
+			iiop = DB_KEYFIRST;
+			break;
+		}
+
+		/*
+		 * If duplicates aren't supported, replace the current item.
+		 * (If implementing the DB->put function, our caller already
+		 * checked the DB_NOOVERWRITE flag.)
+		 */
+		if (!F_ISSET(dbp, DB_AM_DUP)) {
+			iiop = DB_CURRENT;
+			break;
+		}
+
+		/*
+		 * If we find a matching entry, it may be an off-page duplicate
+		 * tree.  Return the page number to our caller, we need a new
+		 * cursor.
+		 */
+		if (pgnop != NULL && __bam_isopd(dbc, pgnop))
+			goto done;
+
+		/* If the duplicates aren't sorted, move to the right slot. */
+		if (dbp->dup_compare == NULL) {
+			if (flags == DB_KEYFIRST)
+				iiop = DB_BEFORE;
+			else
+				for (;; cp->indx += P_INDX)
+					if (cp->indx + P_INDX >=
+					    NUM_ENT(cp->page) ||
+					    !IS_DUPLICATE(dbc, cp->indx,
+					    cp->indx + P_INDX)) {
+						iiop = DB_AFTER;
+						break;
+					}
+			break;
+		}
+
+		/*
+		 * We know that we're looking at the first of a set of sorted
+		 * on-page duplicates.  Walk the list to find the right slot.
+		 */
+		for (;; cp->indx += P_INDX) {
+			if ((ret = __bam_cmp(dbp, data, cp->page,
+			    cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+				goto err;
+			if (cmp < 0) {
+				iiop = DB_BEFORE;
+				break;
+			}
+
+			/* Disallow "sorted" duplicate duplicates. */
+			if (cmp == 0) {
+				if (IS_DELETED(dbp, cp->page, cp->indx)) {
+					iiop = DB_CURRENT;
+					break;
+				}
+				ret = __db_duperr(dbp, flags);
+				goto err;
+			}
+
+			if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
+			    P_INP(dbp, ((PAGE *)cp->page))[cp->indx] !=
+			    P_INP(dbp, ((PAGE *)cp->page))[cp->indx + P_INDX]) {
+				iiop = DB_AFTER;
+				break;
+			}
+		}
+		break;
+	default:
+		ret = __db_unknown_flag(dbp->dbenv, "__bam_c_put", flags);
+		goto err;
+	}
+
+	switch (ret = __bam_iitem(dbc, key, data, iiop, 0)) {
+	case 0:
+		break;
+	case DB_NEEDSPLIT:
+		/*
+		 * To split, we need a key for the page.  Either use the key
+		 * argument or get a copy of the key from the page.
+		 */
+		if (flags == DB_AFTER ||
+		    flags == DB_BEFORE || flags == DB_CURRENT) {
+			memset(&dbt, 0, sizeof(DBT));
+			if ((ret = __db_ret(dbp, cp->page, 0, &dbt,
+			    &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+				goto err;
+			arg = &dbt;
+		} else
+			arg = F_ISSET(dbc, DBC_OPD) ? data : key;
+
+		/*
+		 * Discard any locks and pinned pages (the locks are discarded
+		 * even if we're running with transactions, as they lock pages
+		 * that we're sorry we ever acquired).  If stack is set and the
+		 * cursor entries are valid, they point to the same entries as
+		 * the stack, don't free them twice.
+		 */
+		if (stack)
+			ret = __bam_stkrel(dbc, STK_CLRDBC | STK_NOLOCK);
+		else
+			DISCARD_CUR(dbc, ret);
+		if (ret != 0)
+			goto err;
+
+		/* Split the tree. */
+		if ((ret = __bam_split(dbc, arg, &root_pgno)) != 0)
+			return (ret);
+
+		goto split;
+	default:
+		goto err;
+	}
+
+	/*
+	 * NOTE: the error path falls through into the "done" cleanup below;
+	 * success and failure deliberately share it.
+	 */
+err:
+done:	/*
+	 * Discard any pages pinned in the tree and their locks, except for
+	 * the leaf page.  Note, the leaf page participated in any stack we
+	 * acquired, and so we have to adjust the stack as necessary.  If
+	 * there was only a single page on the stack, we don't have to free
+	 * further stack pages.
+	 */
+	if (stack && BT_STK_POP(cp) != NULL)
+		(void)__bam_stkrel(dbc, 0);
+
+	/*
+	 * Regardless of whether we were successful or not, clear the delete
+	 * flag.  If we're successful, we either moved the cursor or the item
+	 * is no longer deleted.  If we're not successful, then we're just a
+	 * copy, no need to have the flag set.
+	 */
+	F_CLR(cp, C_DELETED);
+
+	return (ret);
+}
+
+/*
+ * __bam_c_rget --
+ *	Return the record number for a cursor.
+ *
+ * A copy of the current key is taken from the cursor's page, the tree is
+ * re-searched by that key to compute the record number, and the record
+ * number is copied into the data DBT.
+ *
+ * PUBLIC: int __bam_c_rget __P((DBC *, DBT *));
+ */
+int
+__bam_c_rget(dbc, data)
+	DBC *dbc;
+	DBT *data;
+{
+	BTREE_CURSOR *cp;
+	DB *dbp;
+	DBT dbt;
+	DB_MPOOLFILE *mpf;
+	db_recno_t recno;
+	int exact, ret;
+
+	dbp = dbc->dbp;
+	mpf = dbp->mpf;
+	cp = (BTREE_CURSOR *)dbc->internal;
+
+	/*
+	 * Get the page with the current item on it.
+	 * Get a copy of the key.
+	 * Release the page, making sure we don't release it twice.
+	 */
+	if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
+		return (ret);
+	memset(&dbt, 0, sizeof(DBT));
+	if ((ret = __db_ret(dbp, cp->page,
+	    cp->indx, &dbt, &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+		goto err;
+	ret = mpf->put(mpf, cp->page, 0);
+	cp->page = NULL;
+	if (ret != 0)
+		return (ret);
+
+	if ((ret = __bam_search(dbc, PGNO_INVALID, &dbt,
+	    F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND,
+	    1, &recno, &exact)) != 0)
+		goto err;
+
+	ret = __db_retcopy(dbp->dbenv, data,
+	    &recno, sizeof(recno), &dbc->rdata->data, &dbc->rdata->ulen);
+
+	/*
+	 * Release the stack.  NOTE(review): this also runs on the error path
+	 * taken before __bam_search acquires a stack; presumably __bam_stkrel
+	 * is harmless on an empty stack -- confirm.
+	 */
+err:	__bam_stkrel(dbc, 0);
+
+	return (ret);
+}
+
+/*
+ * __bam_c_writelock --
+ *	Upgrade the cursor to a write lock.
+ */
+static int
+__bam_c_writelock(dbc)
+	DBC *dbc;
+{
+	int ret;
+
+	/* Already holding a write lock?  Then there's nothing to do. */
+	if (((BTREE_CURSOR *)dbc->internal)->lock_mode == DB_LOCK_WRITE)
+		return (0);
+
+	/*
+	 * When writing to an off-page duplicate tree, the matching page in
+	 * the primary tree has to be locked as well.  The general DBC code
+	 * calls us first with the primary cursor, which is how that lock
+	 * gets acquired.
+	 */
+	ACQUIRE_WRITE_LOCK(dbc, ret);
+	return (ret);
+}
+
+/*
+ * __bam_c_first --
+ *	Return the first record.
+ *
+ * Positions the cursor on the first undeleted record in the tree; when
+ * the leftmost leaf is empty or starts with deleted records, the result
+ * is whatever __bam_c_next returns (DB_NOTFOUND if nothing follows).
+ */
+static int
+__bam_c_first(dbc)
+	DBC *dbc;
+{
+	BTREE_CURSOR *cp;
+	db_pgno_t pgno;
+	int ret;
+
+	cp = (BTREE_CURSOR *)dbc->internal;
+	ret = 0;
+
+	/* Walk down the left-hand side of the tree. */
+	for (pgno = cp->root;;) {
+		ACQUIRE_CUR_COUPLE(dbc, DB_LOCK_READ, pgno, ret);
+		if (ret != 0)
+			return (ret);
+
+		/* If we find a leaf page, we're done. */
+		if (ISLEAF(cp->page))
+			break;
+
+		/* Internal page: descend through the 0th (leftmost) child. */
+		pgno = GET_BINTERNAL(dbc->dbp, cp->page, 0)->pgno;
+	}
+
+	/* If we want a write lock instead of a read lock, get it now. */
+	if (F_ISSET(dbc, DBC_RMW)) {
+		ACQUIRE_WRITE_LOCK(dbc, ret);
+		if (ret != 0)
+			return (ret);
+	}
+
+	cp->indx = 0;
+
+	/* If on an empty page or a deleted record, move to the next one. */
+	if (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))
+		if ((ret = __bam_c_next(dbc, 0, 0)) != 0)
+			return (ret);
+
+	return (0);
+}
+
+/*
+ * __bam_c_last --
+ *	Return the last record.
+ *
+ * Positions the cursor on the last undeleted record in the tree; when
+ * the rightmost leaf is empty or ends with deleted records, the result
+ * is whatever __bam_c_prev returns (DB_NOTFOUND if nothing precedes).
+ */
+static int
+__bam_c_last(dbc)
+	DBC *dbc;
+{
+	BTREE_CURSOR *cp;
+	db_pgno_t pgno;
+	int ret;
+
+	cp = (BTREE_CURSOR *)dbc->internal;
+	ret = 0;
+
+	/* Walk down the right-hand side of the tree. */
+	for (pgno = cp->root;;) {
+		ACQUIRE_CUR_COUPLE(dbc, DB_LOCK_READ, pgno, ret);
+		if (ret != 0)
+			return (ret);
+
+		/* If we find a leaf page, we're done. */
+		if (ISLEAF(cp->page))
+			break;
+
+		/* Internal page: descend through the rightmost child. */
+		pgno = GET_BINTERNAL(dbc->dbp, cp->page,
+		    NUM_ENT(cp->page) - O_INDX)->pgno;
+	}
+
+	/* If we want a write lock instead of a read lock, get it now. */
+	if (F_ISSET(dbc, DBC_RMW)) {
+		ACQUIRE_WRITE_LOCK(dbc, ret);
+		if (ret != 0)
+			return (ret);
+	}
+
+	/*
+	 * Leaf Btree pages pair key/data items (P_INDX apart); other leaf
+	 * types hold single items (O_INDX apart).
+	 */
+	cp->indx = NUM_ENT(cp->page) == 0 ? 0 :
+	    NUM_ENT(cp->page) -
+	    (TYPE(cp->page) == P_LBTREE ? P_INDX : O_INDX);
+
+	/* If on an empty page or a deleted record, move to the previous one. */
+	if (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))
+		if ((ret = __bam_c_prev(dbc)) != 0)
+			return (ret);
+
+	return (0);
+}
+
+/*
+ * __bam_c_next --
+ *	Move to the next record.
+ *
+ * If initial_move is set, the cursor index is advanced one entry before
+ * scanning.  Unless deleted_okay is set, deleted records are skipped.
+ * Returns DB_NOTFOUND when there is no subsequent record.
+ */
+static int
+__bam_c_next(dbc, initial_move, deleted_okay)
+	DBC *dbc;
+	int initial_move, deleted_okay;
+{
+	BTREE_CURSOR *cp;
+	db_indx_t adjust;
+	db_lockmode_t lock_mode;
+	db_pgno_t pgno;
+	int ret;
+
+	cp = (BTREE_CURSOR *)dbc->internal;
+	ret = 0;
+
+	/*
+	 * We're either moving through a page of duplicates or a btree leaf
+	 * page.
+	 *
+	 * !!!
+	 * This code handles empty pages and pages with only deleted entries.
+	 */
+	if (F_ISSET(dbc, DBC_OPD)) {
+		adjust = O_INDX;
+		lock_mode = DB_LOCK_NG;
+	} else {
+		adjust = dbc->dbtype == DB_BTREE ? P_INDX : O_INDX;
+		lock_mode =
+		    F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ;
+	}
+	/* Re-pin the current page if the cursor isn't holding one. */
+	if (cp->page == NULL) {
+		ACQUIRE_CUR(dbc, lock_mode, cp->pgno, ret);
+		if (ret != 0)
+			return (ret);
+	}
+
+	if (initial_move)
+		cp->indx += adjust;
+
+	for (;;) {
+		/*
+		 * If at the end of the page, move to a subsequent page.
+		 *
+		 * !!!
+		 * Check for >= NUM_ENT.  If the original search landed us on
+		 * NUM_ENT, we may have incremented indx before the test.
+		 */
+		if (cp->indx >= NUM_ENT(cp->page)) {
+			if ((pgno
+			    = NEXT_PGNO(cp->page)) == PGNO_INVALID)
+				return (DB_NOTFOUND);
+
+			ACQUIRE_CUR(dbc, lock_mode, pgno, ret);
+			if (ret != 0)
+				return (ret);
+			cp->indx = 0;
+			continue;
+		}
+		if (!deleted_okay && IS_CUR_DELETED(dbc)) {
+			cp->indx += adjust;
+			continue;
+		}
+		break;
+	}
+	return (0);
+}
+
+/*
+ * __bam_c_prev --
+ *	Move to the previous record.
+ *
+ * Deleted records are skipped.  Returns DB_NOTFOUND when there is no
+ * preceding record.
+ */
+static int
+__bam_c_prev(dbc)
+	DBC *dbc;
+{
+	BTREE_CURSOR *cp;
+	db_indx_t adjust;
+	db_lockmode_t lock_mode;
+	db_pgno_t pgno;
+	int ret;
+
+	cp = (BTREE_CURSOR *)dbc->internal;
+	ret = 0;
+
+	/*
+	 * We're either moving through a page of duplicates or a btree leaf
+	 * page.
+	 *
+	 * !!!
+	 * This code handles empty pages and pages with only deleted entries.
+	 */
+	if (F_ISSET(dbc, DBC_OPD)) {
+		adjust = O_INDX;
+		lock_mode = DB_LOCK_NG;
+	} else {
+		adjust = dbc->dbtype == DB_BTREE ? P_INDX : O_INDX;
+		lock_mode =
+		    F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ;
+	}
+	/* Re-pin the current page if the cursor isn't holding one. */
+	if (cp->page == NULL) {
+		ACQUIRE_CUR(dbc, lock_mode, cp->pgno, ret);
+		if (ret != 0)
+			return (ret);
+	}
+
+	for (;;) {
+		/* If at the beginning of the page, move to a previous one. */
+		if (cp->indx == 0) {
+			if ((pgno =
+			    PREV_PGNO(cp->page)) == PGNO_INVALID)
+				return (DB_NOTFOUND);
+
+			ACQUIRE_CUR(dbc, lock_mode, pgno, ret);
+			if (ret != 0)
+				return (ret);
+
+			/* An empty previous page: keep walking backward. */
+			if ((cp->indx = NUM_ENT(cp->page)) == 0)
+				continue;
+		}
+
+		/* Ignore deleted records. */
+		cp->indx -= adjust;
+		if (IS_CUR_DELETED(dbc))
+			continue;
+
+		break;
+	}
+	return (0);
+}
+
+/*
+ * __bam_c_search --
+ *	Move to a specified record.
+ *
+ * root_pgno is the subtree in which to search; key is interpreted
+ * according to flags (a record number for DB_SET_RECNO, a key
+ * otherwise).  *exactp is set non-zero if an exact match was found.
+ * On success the cursor fields are initialized from the top of the
+ * search stack.
+ */
+static int
+__bam_c_search(dbc, root_pgno, key, flags, exactp)
+	DBC *dbc;
+	db_pgno_t root_pgno;
+	const DBT *key;
+	u_int32_t flags;
+	int *exactp;
+{
+	BTREE *t;
+	BTREE_CURSOR *cp;
+	DB *dbp;
+	PAGE *h;
+	db_indx_t indx, *inp;
+	db_pgno_t bt_lpgno;
+	db_recno_t recno;
+	u_int32_t sflags;
+	int cmp, ret;
+
+	dbp = dbc->dbp;
+	cp = (BTREE_CURSOR *)dbc->internal;
+	t = dbp->bt_internal;
+	ret = 0;
+
+	/*
+	 * Find an entry in the database.  Discard any lock we currently hold,
+	 * we're going to search the tree.
+	 */
+	DISCARD_CUR(dbc, ret);
+	if (ret != 0)
+		return (ret);
+
+	switch (flags) {
+	case DB_SET_RECNO:
+		if ((ret = __ram_getno(dbc, key, &recno, 0)) != 0)
+			return (ret);
+		sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND) | S_EXACT;
+		if ((ret = __bam_rsearch(dbc, &recno, sflags, 1, exactp)) != 0)
+			return (ret);
+		break;
+	case DB_SET:
+	case DB_GET_BOTH:
+		sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND) | S_EXACT;
+		goto search;
+	case DB_GET_BOTH_RANGE:
+		sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND);
+		goto search;
+	case DB_SET_RANGE:
+		sflags =
+		    (F_ISSET(dbc, DBC_RMW) ? S_WRITE : S_READ) | S_DUPFIRST;
+		goto search;
+	case DB_KEYFIRST:
+		sflags = S_KEYFIRST;
+		goto fast_search;
+	case DB_KEYLAST:
+	case DB_NODUPDATA:
+		sflags = S_KEYLAST;
+fast_search:	/*
+		 * If the application has a history of inserting into the first
+		 * or last pages of the database, we check those pages first to
+		 * avoid doing a full search.
+		 *
+		 * If the tree has record numbers, we need a complete stack so
+		 * that we can adjust the record counts, so fast_search isn't
+		 * possible.
+		 */
+		if (F_ISSET(cp, C_RECNUM))
+			goto search;
+
+		/*
+		 * !!!
+		 * We do not mutex protect the t->bt_lpgno field, which means
+		 * that it can only be used in an advisory manner.  If we find
+		 * page we can use, great.  If we don't, we don't care, we do
+		 * it the slow way instead.  Regardless, copy it into a local
+		 * variable, otherwise we might acquire a lock for a page and
+		 * then read a different page because it changed underfoot.
+		 */
+		bt_lpgno = t->bt_lpgno;
+
+		/*
+		 * If the tree has no history of insertion, do it the slow way.
+		 */
+		if (bt_lpgno == PGNO_INVALID)
+			goto search;
+
+		/* Lock and retrieve the page on which we last inserted. */
+		h = NULL;
+		ACQUIRE(dbc,
+		    DB_LOCK_WRITE, bt_lpgno, cp->lock, bt_lpgno, h, ret);
+		if (ret != 0)
+			goto fast_miss;
+
+		inp = P_INP(dbp, h);
+		/*
+		 * It's okay if the page type isn't right or it's empty, it
+		 * just means that the world changed.
+		 */
+		if (TYPE(h) != P_LBTREE || NUM_ENT(h) == 0)
+			goto fast_miss;
+
+		/*
+		 * What we do here is test to see if we're at the beginning or
+		 * end of the tree and if the new item sorts before/after the
+		 * first/last page entry.  We don't try and catch inserts into
+		 * the middle of the tree (although we could, as long as there
+		 * were two keys on the page and we saved both the index and
+		 * the page number of the last insert).
+		 */
+		if (h->next_pgno == PGNO_INVALID) {
+			indx = NUM_ENT(h) - P_INDX;
+			if ((ret = __bam_cmp(dbp,
+			    key, h, indx, t->bt_compare, &cmp)) != 0)
+				return (ret);
+
+			if (cmp < 0)
+				goto try_begin;
+			if (cmp > 0) {
+				indx += P_INDX;
+				goto fast_hit;
+			}
+
+			/*
+			 * Found a duplicate.  If doing DB_KEYLAST, we're at
+			 * the correct position, otherwise, move to the first
+			 * of the duplicates.  If we're looking at off-page
+			 * duplicates, duplicate duplicates aren't permitted,
+			 * so we're done.
+			 */
+			if (flags == DB_KEYLAST)
+				goto fast_hit;
+			for (;
+			    indx > 0 && inp[indx - P_INDX] == inp[indx];
+			    indx -= P_INDX)
+				;
+			goto fast_hit;
+		}
+try_begin:	if (h->prev_pgno == PGNO_INVALID) {
+			indx = 0;
+			if ((ret = __bam_cmp(dbp,
+			    key, h, indx, t->bt_compare, &cmp)) != 0)
+				return (ret);
+
+			if (cmp > 0)
+				goto fast_miss;
+			if (cmp < 0)
+				goto fast_hit;
+
+			/*
+			 * Found a duplicate.  If doing DB_KEYFIRST, we're at
+			 * the correct position, otherwise, move to the last
+			 * of the duplicates.  If we're looking at off-page
+			 * duplicates, duplicate duplicates aren't permitted,
+			 * so we're done.
+			 */
+			if (flags == DB_KEYFIRST)
+				goto fast_hit;
+			for (;
+			    indx < (db_indx_t)(NUM_ENT(h) - P_INDX) &&
+			    inp[indx] == inp[indx + P_INDX];
+			    indx += P_INDX)
+				;
+			goto fast_hit;
+		}
+		goto fast_miss;
+
+fast_hit:	/* Set the exact match flag, we may have found a duplicate. */
+		*exactp = cmp == 0;
+
+		/*
+		 * Insert the entry in the stack.  (Our caller is likely to
+		 * call __bam_stkrel() after our return.)
+		 */
+		BT_STK_CLR(cp);
+		BT_STK_ENTER(dbp->dbenv,
+		    cp, h, indx, cp->lock, cp->lock_mode, ret);
+		if (ret != 0)
+			return (ret);
+		break;
+
+fast_miss:	/*
+		 * This was not the right page, so we do not need to retain
+		 * the lock even in the presence of transactions.
+		 */
+		DISCARD(dbc, 1, cp->lock, h, ret);
+		if (ret != 0)
+			return (ret);
+
+search:		if ((ret = __bam_search(dbc, root_pgno,
+		    key, sflags, 1, NULL, exactp)) != 0)
+			return (ret);
+		break;
+	default:
+		return (__db_unknown_flag(dbp->dbenv, "__bam_c_search", flags));
+	}
+
+	/* Initialize the cursor from the stack. */
+	cp->page = cp->csp->page;
+	cp->pgno = cp->csp->page->pgno;
+	cp->indx = cp->csp->indx;
+	cp->lock = cp->csp->lock;
+	cp->lock_mode = cp->csp->lock_mode;
+
+	/*
+	 * If we inserted a key into the first or last slot of the tree,
+	 * remember where it was so we can do it more quickly next time.
+	 * If there are duplicates and we are inserting into the last slot,
+	 * the cursor will point _to_ the last item, not after it, which
+	 * is why we subtract P_INDX below.
+	 */
+	if (TYPE(cp->page) == P_LBTREE &&
+	    (flags == DB_KEYFIRST || flags == DB_KEYLAST))
+		t->bt_lpgno =
+		    (NEXT_PGNO(cp->page) == PGNO_INVALID &&
+		    cp->indx >= NUM_ENT(cp->page) - P_INDX) ||
+		    (PREV_PGNO(cp->page) == PGNO_INVALID &&
+		    cp->indx == 0) ? cp->pgno : PGNO_INVALID;
+	return (0);
+}
+
+/*
+ * __bam_c_physdel --
+ *	Physically remove an item from the page.
+ *
+ * If removing the item empties the page, the function additionally tries
+ * to reverse-split: it re-searches for a write-locked stack covering the
+ * emptied subtree and frees the chain of emptied pages via __bam_dpages.
+ */
+static int
+__bam_c_physdel(dbc)
+	DBC *dbc;
+{
+	BTREE_CURSOR *cp;
+	DB *dbp;
+	DBT key;
+	DB_LOCK lock;
+	DB_MPOOLFILE *mpf;
+	PAGE *h;
+	db_pgno_t pgno;
+	int delete_page, empty_page, exact, level, ret;
+
+	dbp = dbc->dbp;
+	mpf = dbp->mpf;
+	cp = (BTREE_CURSOR *)dbc->internal;
+	delete_page = empty_page = ret = 0;
+
+	/* If the page is going to be emptied, consider deleting it. */
+	delete_page = empty_page =
+	    NUM_ENT(cp->page) == (TYPE(cp->page) == P_LBTREE ? 2 : 1);
+
+	/*
+	 * Check if the application turned off reverse splits.  Applications
+	 * can't turn off reverse splits in off-page duplicate trees, that
+	 * space will never be reused unless the exact same key is specified.
+	 */
+	if (delete_page &&
+	    !F_ISSET(dbc, DBC_OPD) && F_ISSET(dbp, DB_AM_REVSPLITOFF))
+		delete_page = 0;
+
+	/*
+	 * We never delete the last leaf page.  (Not really true -- we delete
+	 * the last leaf page of off-page duplicate trees, but that's handled
+	 * by our caller, not down here.)
+	 */
+	if (delete_page && cp->pgno == cp->root)
+		delete_page = 0;
+
+	/*
+	 * To delete a leaf page other than an empty root page, we need a
+	 * copy of a key from the page.  Use the 0th page index since it's
+	 * the last key the page held.
+	 *
+	 * !!!
+	 * Note that because __bam_c_physdel is always called from a cursor
+	 * close, it should be safe to use the cursor's own "my_rkey" memory
+	 * to temporarily hold this key.  We shouldn't own any returned-data
+	 * memory of interest--if we do, we're in trouble anyway.
+	 */
+	if (delete_page) {
+		memset(&key, 0, sizeof(DBT));
+		if ((ret = __db_ret(dbp, cp->page,
+		    0, &key, &dbc->my_rkey.data, &dbc->my_rkey.ulen)) != 0)
+			return (ret);
+	}
+
+	/*
+	 * Delete the items.  If page isn't empty, we adjust the cursors.
+	 *
+	 * !!!
+	 * The following operations to delete a page may deadlock.  The easy
+	 * scenario is if we're deleting an item because we're closing cursors
+	 * because we've already deadlocked and want to call txn->abort.  If
+	 * we fail due to deadlock, we'll leave a locked, possibly empty page
+	 * in the tree, which won't be empty long because we'll undo the delete
+	 * when we undo the transaction's modifications.
+	 *
+	 * !!!
+	 * Delete the key item first, otherwise the on-page duplicate checks
+	 * in __bam_ditem() won't work!
+	 */
+	if (TYPE(cp->page) == P_LBTREE) {
+		if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+			return (ret);
+		if (!empty_page)
+			if ((ret = __bam_ca_di(dbc,
+			    PGNO(cp->page), cp->indx, -1)) != 0)
+				return (ret);
+	}
+	if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+		return (ret);
+	if (!empty_page)
+		if ((ret = __bam_ca_di(dbc, PGNO(cp->page), cp->indx, -1)) != 0)
+			return (ret);
+
+	/* If we're not going to try and delete the page, we're done. */
+	if (!delete_page)
+		return (0);
+
+	/*
+	 * Call __bam_search to reacquire the empty leaf page, but this time
+	 * get both the leaf page and it's parent, locked.  Jump back up the
+	 * tree, until we have the top pair of pages that we want to delete.
+	 * Once we have the top page that we want to delete locked, lock the
+	 * underlying pages and check to make sure they're still empty.  If
+	 * they are, delete them.
+	 */
+	for (level = LEAFLEVEL;; ++level) {
+		/* Acquire a page and its parent, locked. */
+		if ((ret = __bam_search(dbc, PGNO_INVALID,
+		    &key, S_WRPAIR, level, NULL, &exact)) != 0)
+			return (ret);
+
+		/*
+		 * If we reach the root or the parent page isn't going to be
+		 * empty when we delete one record, stop.
+		 */
+		h = cp->csp[-1].page;
+		if (h->pgno == cp->root || NUM_ENT(h) != 1)
+			break;
+
+		/* Discard the stack, retaining no locks. */
+		(void)__bam_stkrel(dbc, STK_NOLOCK);
+	}
+
+	/*
+	 * Move the stack pointer one after the last entry, we may be about
+	 * to push more items onto the page stack.
+	 */
+	++cp->csp;
+
+	/*
+	 * cp->csp[-2].page is now the parent page, which we may or may not be
+	 * going to delete, and cp->csp[-1].page is the first page we know we
+	 * are going to delete.  Walk down the chain of pages, acquiring pages
+	 * until we've acquired a leaf page.  Generally, this shouldn't happen;
+	 * we should only see a single internal page with one item and a single
+	 * leaf page with no items.  The scenario where we could see something
+	 * else is if reverse splits were turned off for awhile and then turned
+	 * back on.  That could result in all sorts of strangeness, e.g., empty
+	 * pages in the tree, trees that looked like linked lists, and so on.
+	 *
+	 * !!!
+	 * Sheer paranoia: if we find any pages that aren't going to be emptied
+	 * by the delete, someone else added an item while we were walking the
+	 * tree, and we discontinue the delete.  Shouldn't be possible, but we
+	 * check regardless.
+	 */
+	for (h = cp->csp[-1].page;;) {
+		/*
+		 * NOTE(review): in the leaf case both arms break -- reaching
+		 * a leaf always ends the walk, whether or not it's empty.
+		 */
+		if (ISLEAF(h)) {
+			if (NUM_ENT(h) != 0)
+				break;
+			break;
+		} else
+			if (NUM_ENT(h) != 1)
+				break;
+
+		/*
+		 * Get the next page, write lock it and push it onto the stack.
+		 * We know it's index 0, because it can only have one element.
+		 */
+		switch (TYPE(h)) {
+		case P_IBTREE:
+			pgno = GET_BINTERNAL(dbp, h, 0)->pgno;
+			break;
+		case P_IRECNO:
+			pgno = GET_RINTERNAL(dbp, h, 0)->pgno;
+			break;
+		default:
+			return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+		}
+
+		if ((ret =
+		    __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &lock)) != 0)
+			break;
+		if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+			break;
+		BT_STK_PUSH(dbp->dbenv, cp, h, 0, lock, DB_LOCK_WRITE, ret);
+		if (ret != 0)
+			break;
+	}
+
+	/* Adjust the cursor stack to reference the last page on the stack. */
+	BT_STK_POP(cp);
+
+	/*
+	 * If everything worked, delete the stack, otherwise, release the
+	 * stack and page locks without further damage.
+	 */
+	if (ret == 0)
+		ret = __bam_dpages(dbc, cp->sp);
+	else
+		(void)__bam_stkrel(dbc, 0);
+
+	return (ret);
+}
+
+/*
+ * __bam_c_getstack --
+ *	Acquire a full stack for a cursor.
+ *
+ * A key is copied from the cursor's current page and the tree is
+ * re-searched with it to build a complete, write-locked stack.  The
+ * caller must already hold a read lock on the cursor's page.
+ */
+static int
+__bam_c_getstack(dbc)
+	DBC *dbc;
+{
+	BTREE_CURSOR *cp;
+	DB *dbp;
+	DBT dbt;
+	DB_MPOOLFILE *mpf;
+	PAGE *h;
+	int exact, ret, t_ret;
+
+	dbp = dbc->dbp;
+	mpf = dbp->mpf;
+	cp = (BTREE_CURSOR *)dbc->internal;
+
+	/*
+	 * Get the page with the current item on it.  The caller of this
+	 * routine has to already hold a read lock on the page, so there
+	 * is no additional lock to acquire.
+	 */
+	if ((ret = mpf->get(mpf, &cp->pgno, 0, &h)) != 0)
+		return (ret);
+
+	/* Get a copy of a key from the page. */
+	memset(&dbt, 0, sizeof(DBT));
+	if ((ret = __db_ret(dbp,
+	    h, 0, &dbt, &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+		goto err;
+
+	/* Get a write-locked stack for the page. */
+	exact = 0;
+	ret = __bam_search(dbc, PGNO_INVALID,
+	    &dbt, S_KEYFIRST, 1, NULL, &exact);
+
+err:	/* Discard the key and the page. */
+	if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+		ret = t_ret;
+
+	return (ret);
+}
+
+/*
+ * __bam_isopd --
+ *	Return if the cursor references an off-page duplicate tree via its
+ * page number.
+ */
+static int
+__bam_isopd(dbc, pgnop)
+	DBC *dbc;
+	db_pgno_t *pgnop;
+{
+	BOVERFLOW *bov;
+
+	/* Only leaf Btree pages can reference off-page duplicate trees. */
+	if (TYPE(dbc->internal->page) != P_LBTREE)
+		return (0);
+
+	/* Look at the data item paired with the current key. */
+	bov = GET_BOVERFLOW(dbc->dbp,
+	    dbc->internal->page, dbc->internal->indx + O_INDX);
+	if (B_TYPE(bov->type) != B_DUPLICATE)
+		return (0);
+
+	/* It's a duplicate tree: hand its root page number back. */
+	*pgnop = bov->pgno;
+	return (1);
+}
diff --git a/storage/bdb/btree/bt_delete.c b/storage/bdb/btree/bt_delete.c
new file mode 100644
index 00000000000..8c76ead2922
--- /dev/null
+++ b/storage/bdb/btree/bt_delete.c
@@ -0,0 +1,460 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_delete.c,v 11.44 2002/07/03 19:03:49 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+
+/*
+ * __bam_ditem --
+ *	Delete one or more entries from a page.
+ *
+ * h is the page and indx the index of the item to delete.  Items that
+ * reference off-page chains are handed to __db_doff (presumably to
+ * discard the overflow pages -- see the B_OVERFLOW cases); on-page
+ * duplicate keys are handled by removing only the extra index entry.
+ *
+ * PUBLIC: int __bam_ditem __P((DBC *, PAGE *, u_int32_t));
+ */
+int
+__bam_ditem(dbc, h, indx)
+	DBC *dbc;
+	PAGE *h;
+	u_int32_t indx;
+{
+	BINTERNAL *bi;
+	BKEYDATA *bk;
+	DB *dbp;
+	DB_MPOOLFILE *mpf;
+	u_int32_t nbytes;
+	int ret;
+	db_indx_t *inp;
+
+	dbp = dbc->dbp;
+	mpf = dbp->mpf;
+	inp = P_INP(dbp, h);
+
+	switch (TYPE(h)) {
+	case P_IBTREE:
+		bi = GET_BINTERNAL(dbp, h, indx);
+		switch (B_TYPE(bi->type)) {
+		case B_DUPLICATE:
+		case B_KEYDATA:
+			nbytes = BINTERNAL_SIZE(bi->len);
+			break;
+		case B_OVERFLOW:
+			nbytes = BINTERNAL_SIZE(bi->len);
+			if ((ret =
+			    __db_doff(dbc, ((BOVERFLOW *)bi->data)->pgno)) != 0)
+				return (ret);
+			break;
+		default:
+			return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+		}
+		break;
+	case P_IRECNO:
+		nbytes = RINTERNAL_SIZE;
+		break;
+	case P_LBTREE:
+		/*
+		 * If it's a duplicate key, discard the index and don't touch
+		 * the actual page item.
+		 *
+		 * !!!
+		 * This works because no data item can have an index matching
+		 * any other index so even if the data item is in a key "slot",
+		 * it won't match any other index.
+		 */
+		if ((indx % 2) == 0) {
+			/*
+			 * Check for a duplicate after us on the page.  NOTE:
+			 * we have to delete the key item before deleting the
+			 * data item, otherwise the "indx + P_INDX" calculation
+			 * won't work!
+			 */
+			if (indx + P_INDX < (u_int32_t)NUM_ENT(h) &&
+			    inp[indx] == inp[indx + P_INDX])
+				return (__bam_adjindx(dbc,
+				    h, indx, indx + O_INDX, 0));
+			/*
+			 * Check for a duplicate before us on the page.  It
+			 * doesn't matter if we delete the key item before or
+			 * after the data item for the purposes of this one.
+			 */
+			if (indx > 0 && inp[indx] == inp[indx - P_INDX])
+				return (__bam_adjindx(dbc,
+				    h, indx, indx - P_INDX, 0));
+		}
+		/* FALLTHROUGH */
+	case P_LDUP:
+	case P_LRECNO:
+		bk = GET_BKEYDATA(dbp, h, indx);
+		switch (B_TYPE(bk->type)) {
+		case B_DUPLICATE:
+			nbytes = BOVERFLOW_SIZE;
+			break;
+		case B_OVERFLOW:
+			nbytes = BOVERFLOW_SIZE;
+			if ((ret = __db_doff(
+			    dbc, (GET_BOVERFLOW(dbp, h, indx))->pgno)) != 0)
+				return (ret);
+			break;
+		case B_KEYDATA:
+			nbytes = BKEYDATA_SIZE(bk->len);
+			break;
+		default:
+			return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+		}
+		break;
+	default:
+		return (__db_pgfmt(dbp->dbenv, PGNO(h)));
+	}
+
+	/* Delete the item and mark the page dirty. */
+	if ((ret = __db_ditem(dbc, h, indx, nbytes)) != 0)
+		return (ret);
+	if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
+		return (ret);
+
+	return (0);
+}
+
+/*
+ * __bam_adjindx --
+ *	Adjust an index on the page.
+ *
+ * On insert (is_insert != 0) a slot is opened at indx and filled with a
+ * copy of the index value at indx_copy; on delete the slot at indx is
+ * removed.  The change is logged (or the page LSN marked not-logged) and
+ * the page is marked dirty.
+ *
+ * PUBLIC: int __bam_adjindx __P((DBC *, PAGE *, u_int32_t, u_int32_t, int));
+ */
+int
+__bam_adjindx(dbc, h, indx, indx_copy, is_insert)
+	DBC *dbc;
+	PAGE *h;
+	u_int32_t indx, indx_copy;
+	int is_insert;
+{
+	DB *dbp;
+	DB_MPOOLFILE *mpf;
+	db_indx_t copy, *inp;
+	int ret;
+
+	dbp = dbc->dbp;
+	mpf = dbp->mpf;
+	inp = P_INP(dbp, h);
+
+	/* Log the change. */
+	if (DBC_LOGGING(dbc)) {
+		if ((ret = __bam_adj_log(dbp, dbc->txn, &LSN(h), 0,
+		    PGNO(h), &LSN(h), indx, indx_copy, (u_int32_t)is_insert)) != 0)
+			return (ret);
+	} else
+		LSN_NOT_LOGGED(LSN(h));
+
+	/* Shuffle the indices and mark the page dirty. */
+	if (is_insert) {
+		copy = inp[indx_copy];
+		/* Slide everything from indx up one slot to make room. */
+		if (indx != NUM_ENT(h))
+			memmove(&inp[indx + O_INDX], &inp[indx],
+			    sizeof(db_indx_t) * (NUM_ENT(h) - indx));
+		inp[indx] = copy;
+		++NUM_ENT(h);
+	} else {
+		--NUM_ENT(h);
+		/* Close the gap left by the removed slot. */
+		if (indx != NUM_ENT(h))
+			memmove(&inp[indx], &inp[indx + O_INDX],
+			    sizeof(db_indx_t) * (NUM_ENT(h) - indx));
+	}
+	if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
+		return (ret);
+
+	return (0);
+}
+
+/*
+ * __bam_dpages --
+ * Delete a set of locked pages.
+ *
+ * PUBLIC: int __bam_dpages __P((DBC *, EPG *));
+ */
+int
+__bam_dpages(dbc, stack_epg)
+ DBC *dbc;
+ EPG *stack_epg;
+{
+ BTREE_CURSOR *cp;
+ BINTERNAL *bi;
+ DB *dbp;
+ DBT a, b;
+ DB_LOCK c_lock, p_lock;
+ DB_MPOOLFILE *mpf;
+ EPG *epg;
+ PAGE *child, *parent;
+ db_indx_t nitems;
+ db_pgno_t pgno, root_pgno;
+ db_recno_t rcnt;
+ int done, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * We have the entire stack of deletable pages locked.
+ *
+ * Btree calls us with a pointer to the beginning of a stack, where
+ * the first page in the stack is to have a single item deleted, and
+ * the rest of the pages are to be removed.
+ *
+ * Recno calls us with a pointer into the middle of the stack, where
+ * the referenced page is to have a single item deleted, and pages
+ * after the stack reference are to be removed.
+ *
+ * First, discard any pages that we don't care about.
+ */
+ ret = 0;
+ for (epg = cp->sp; epg < stack_epg; ++epg) {
+ if ((t_ret = mpf->put(mpf, epg->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, epg->lock);
+ }
+ if (ret != 0)
+ goto err;
+
+ /*
+ * !!!
+ * There is an interesting deadlock situation here. We have to relink
+ * the leaf page chain around the leaf page being deleted. Consider
+ * a cursor walking through the leaf pages, that has the previous page
+ * read-locked and is waiting on a lock for the page we're deleting.
+ * It will deadlock here. Before we unlink the subtree, we relink the
+ * leaf page chain.
+ */
+ if ((ret = __db_relink(dbc, DB_REM_PAGE, cp->csp->page, NULL, 1)) != 0)
+ goto err;
+
+ /*
+ * Delete the last item that references the underlying pages that are
+ * to be deleted, and adjust cursors that reference that page. Then,
+ * save that page's page number and item count and release it. If
+ * the application isn't retaining locks because it's running without
+ * transactions, this lets the rest of the tree get back to business
+ * immediately.
+ */
+ if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0)
+ goto err;
+ if ((ret = __bam_ca_di(dbc, PGNO(epg->page), epg->indx, -1)) != 0)
+ goto err;
+
+ pgno = PGNO(epg->page);
+ nitems = NUM_ENT(epg->page);
+
+ if ((ret = mpf->put(mpf, epg->page, 0)) != 0)
+ goto err_inc;
+ (void)__TLPUT(dbc, epg->lock);
+
+ /* Free the rest of the pages in the stack. */
+ while (++epg <= cp->csp) {
+ /*
+ * Delete page entries so they will be restored as part of
+ * recovery. We don't need to do cursor adjustment here as
+ * the pages are being emptied by definition and so cannot
+ * be referenced by a cursor.
+ */
+ if (NUM_ENT(epg->page) != 0) {
+ DB_ASSERT(NUM_ENT(epg->page) == 1);
+
+ if ((ret = __bam_ditem(dbc, epg->page, epg->indx)) != 0)
+ goto err;
+ }
+
+ if ((ret = __db_free(dbc, epg->page)) != 0) {
+ epg->page = NULL;
+ goto err_inc;
+ }
+ (void)__TLPUT(dbc, epg->lock);
+ }
+
+ if (0) {
+err_inc: ++epg;
+err: for (; epg <= cp->csp; ++epg) {
+ if (epg->page != NULL)
+ (void)mpf->put(mpf, epg->page, 0);
+ (void)__TLPUT(dbc, epg->lock);
+ }
+ BT_STK_CLR(cp);
+ return (ret);
+ }
+ BT_STK_CLR(cp);
+
+ /*
+ * If we just deleted the next-to-last item from the root page, the
+ * tree can collapse one or more levels. While there remains only a
+ * single item on the root page, write lock the last page referenced
+ * by the root page and copy it over the root page.
+ */
+ root_pgno = cp->root;
+ if (pgno != root_pgno || nitems != 1)
+ return (0);
+
+ for (done = 0; !done;) {
+ /* Initialize. */
+ parent = child = NULL;
+ LOCK_INIT(p_lock);
+ LOCK_INIT(c_lock);
+
+ /* Lock the root. */
+ pgno = root_pgno;
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &p_lock)) != 0)
+ goto stop;
+ if ((ret = mpf->get(mpf, &pgno, 0, &parent)) != 0)
+ goto stop;
+
+ if (NUM_ENT(parent) != 1)
+ goto stop;
+
+ switch (TYPE(parent)) {
+ case P_IBTREE:
+ /*
+ * If this is overflow, then try to delete it.
+ * The child may or may not still point at it.
+ */
+ bi = GET_BINTERNAL(dbp, parent, 0);
+ if (B_TYPE(bi->type) == B_OVERFLOW)
+ if ((ret = __db_doff(dbc,
+ ((BOVERFLOW *)bi->data)->pgno)) != 0)
+ goto stop;
+ pgno = bi->pgno;
+ break;
+ case P_IRECNO:
+ pgno = GET_RINTERNAL(dbp, parent, 0)->pgno;
+ break;
+ default:
+ goto stop;
+ }
+
+ /* Lock the child page. */
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &c_lock)) != 0)
+ goto stop;
+ if ((ret = mpf->get(mpf, &pgno, 0, &child)) != 0)
+ goto stop;
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ memset(&a, 0, sizeof(a));
+ a.data = child;
+ a.size = dbp->pgsize;
+ memset(&b, 0, sizeof(b));
+ b.data = P_ENTRY(dbp, parent, 0);
+ b.size = TYPE(parent) == P_IRECNO ? RINTERNAL_SIZE :
+ BINTERNAL_SIZE(((BINTERNAL *)b.data)->len);
+ if ((ret = __bam_rsplit_log(dbp, dbc->txn,
+ &child->lsn, 0, PGNO(child), &a, PGNO(parent),
+ RE_NREC(parent), &b, &parent->lsn)) != 0)
+ goto stop;
+ } else
+ LSN_NOT_LOGGED(child->lsn);
+
+ /*
+ * Make the switch.
+ *
+ * One fixup -- internal pages below the top level do not store
+ * a record count, so we have to preserve it if we're not
+ * converting to a leaf page. Note also that we are about to
+ * overwrite the parent page, including its LSN. This is OK
+ * because the log message we wrote describing this update
+ * stores its LSN on the child page. When the child is copied
+ * onto the parent, the correct LSN is copied into place.
+ */
+ COMPQUIET(rcnt, 0);
+ if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL)
+ rcnt = RE_NREC(parent);
+ memcpy(parent, child, dbp->pgsize);
+ PGNO(parent) = root_pgno;
+ if (F_ISSET(cp, C_RECNUM) && LEVEL(child) > LEAFLEVEL)
+ RE_NREC_SET(parent, rcnt);
+
+ /* Mark the pages dirty. */
+ if ((ret = mpf->set(mpf, parent, DB_MPOOL_DIRTY)) != 0)
+ goto stop;
+ if ((ret = mpf->set(mpf, child, DB_MPOOL_DIRTY)) != 0)
+ goto stop;
+
+ /* Adjust the cursors. */
+ if ((ret = __bam_ca_rsplit(dbc, PGNO(child), root_pgno)) != 0)
+ goto stop;
+
+ /*
+ * Free the page copied onto the root page and discard its
+ * lock. (The call to __db_free() discards our reference
+ * to the page.)
+ */
+ if ((ret = __db_free(dbc, child)) != 0) {
+ child = NULL;
+ goto stop;
+ }
+ child = NULL;
+
+ if (0) {
+stop: done = 1;
+ }
+ (void)__TLPUT(dbc, p_lock);
+ if (parent != NULL &&
+ (t_ret = mpf->put(mpf, parent, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, c_lock);
+ if (child != NULL &&
+ (t_ret = mpf->put(mpf, child, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ return (ret);
+}
diff --git a/storage/bdb/btree/bt_method.c b/storage/bdb/btree/bt_method.c
new file mode 100644
index 00000000000..aa27ed6bab9
--- /dev/null
+++ b/storage/bdb/btree/bt_method.c
@@ -0,0 +1,388 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_method.c,v 11.29 2002/04/21 13:17:04 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/qam.h"
+
+static int __bam_set_bt_compare
+ __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+static int __bam_set_bt_maxkey __P((DB *, u_int32_t));
+static int __bam_set_bt_minkey __P((DB *, u_int32_t));
+static int __bam_set_bt_prefix
+ __P((DB *, size_t(*)(DB *, const DBT *, const DBT *)));
+static int __ram_set_re_delim __P((DB *, int));
+static int __ram_set_re_len __P((DB *, u_int32_t));
+static int __ram_set_re_pad __P((DB *, int));
+static int __ram_set_re_source __P((DB *, const char *));
+
+/*
+ * __bam_db_create --
+ * Btree specific initialization of the DB structure.
+ *
+ * PUBLIC: int __bam_db_create __P((DB *));
+ */
+int
+__bam_db_create(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+ int ret;
+
+ /* Allocate and initialize the private btree structure. */
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(BTREE), &t)) != 0)
+ return (ret);
+ dbp->bt_internal = t;
+
+ t->bt_minkey = DEFMINKEYPAGE; /* Btree */
+ t->bt_compare = __bam_defcmp;
+ t->bt_prefix = __bam_defpfx;
+
+ dbp->set_bt_compare = __bam_set_bt_compare;
+ dbp->set_bt_maxkey = __bam_set_bt_maxkey;
+ dbp->set_bt_minkey = __bam_set_bt_minkey;
+ dbp->set_bt_prefix = __bam_set_bt_prefix;
+
+ t->re_pad = ' '; /* Recno */
+ t->re_delim = '\n';
+ t->re_eof = 1;
+
+ dbp->set_re_delim = __ram_set_re_delim;
+ dbp->set_re_len = __ram_set_re_len;
+ dbp->set_re_pad = __ram_set_re_pad;
+ dbp->set_re_source = __ram_set_re_source;
+
+ return (0);
+}
+
+/*
+ * __bam_db_close --
+ * Btree specific discard of the DB structure.
+ *
+ * PUBLIC: int __bam_db_close __P((DB *));
+ */
+int
+__bam_db_close(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+
+ if ((t = dbp->bt_internal) == NULL)
+ return (0);
+ /* Recno */
+ /* Close any backing source file descriptor. */
+ if (t->re_fp != NULL)
+ (void)fclose(t->re_fp);
+
+ /* Free any backing source file name. */
+ if (t->re_source != NULL)
+ __os_free(dbp->dbenv, t->re_source);
+
+ __os_free(dbp->dbenv, t);
+ dbp->bt_internal = NULL;
+
+ return (0);
+}
+
+/*
+ * __bam_set_flags --
+ * Set Btree specific flags.
+ *
+ * PUBLIC: int __bam_set_flags __P((DB *, u_int32_t *flagsp));
+ */
+int
+__bam_set_flags(dbp, flagsp)
+ DB *dbp;
+ u_int32_t *flagsp;
+{
+ u_int32_t flags;
+
+ flags = *flagsp;
+ if (LF_ISSET(DB_DUP | DB_DUPSORT | DB_RECNUM | DB_REVSPLITOFF)) {
+ DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags");
+
+ /*
+ * The DB_DUP and DB_DUPSORT flags are shared by the Hash
+ * and Btree access methods.
+ */
+ if (LF_ISSET(DB_DUP | DB_DUPSORT))
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH);
+
+ if (LF_ISSET(DB_RECNUM | DB_REVSPLITOFF))
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ if (LF_ISSET(DB_DUP | DB_DUPSORT)) {
+ /* DB_DUP/DB_DUPSORT is incompatible with DB_RECNUM. */
+ if (F_ISSET(dbp, DB_AM_RECNUM))
+ goto incompat;
+
+ if (LF_ISSET(DB_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ F_SET(dbp, DB_AM_DUPSORT);
+ }
+
+ F_SET(dbp, DB_AM_DUP);
+ LF_CLR(DB_DUP | DB_DUPSORT);
+ }
+
+ if (LF_ISSET(DB_RECNUM)) {
+ /* DB_RECNUM is incompatible with DB_DUP/DB_DUPSORT. */
+ if (F_ISSET(dbp, DB_AM_DUP))
+ goto incompat;
+
+ F_SET(dbp, DB_AM_RECNUM);
+ LF_CLR(DB_RECNUM);
+ }
+
+ if (LF_ISSET(DB_REVSPLITOFF)) {
+ F_SET(dbp, DB_AM_REVSPLITOFF);
+ LF_CLR(DB_REVSPLITOFF);
+ }
+
+ *flagsp = flags;
+ }
+ return (0);
+
+incompat:
+ return (__db_ferr(dbp->dbenv, "DB->set_flags", 1));
+}
+
+/*
+ * __bam_set_bt_compare --
+ * Set the comparison function.
+ */
+static int
+__bam_set_bt_compare(dbp, func)
+ DB *dbp;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_compare");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ /*
+ * Can't default the prefix routine if the user supplies a comparison
+ * routine; shortening the keys can break their comparison algorithm.
+ */
+ t->bt_compare = func;
+ if (t->bt_prefix == __bam_defpfx)
+ t->bt_prefix = NULL;
+
+ return (0);
+}
+
+/*
+ * __bam_set_bt_maxkey --
+ * Set the maximum keys per page.
+ */
+static int
+__bam_set_bt_maxkey(dbp, bt_maxkey)
+ DB *dbp;
+ u_int32_t bt_maxkey;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_maxkey");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ if (bt_maxkey < 1) {
+ __db_err(dbp->dbenv, "minimum bt_maxkey value is 1");
+ return (EINVAL);
+ }
+
+ t->bt_maxkey = bt_maxkey;
+ return (0);
+}
+
+/*
+ * __bam_set_bt_minkey --
+ * Set the minimum keys per page.
+ */
+static int
+__bam_set_bt_minkey(dbp, bt_minkey)
+ DB *dbp;
+ u_int32_t bt_minkey;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_minkey");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ if (bt_minkey < 2) {
+ __db_err(dbp->dbenv, "minimum bt_minkey value is 2");
+ return (EINVAL);
+ }
+
+ t->bt_minkey = bt_minkey;
+ return (0);
+}
+
+/*
+ * __bam_set_bt_prefix --
+ * Set the prefix function.
+ */
+static int
+__bam_set_bt_prefix(dbp, func)
+ DB *dbp;
+ size_t (*func) __P((DB *, const DBT *, const DBT *));
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_bt_prefix");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ t = dbp->bt_internal;
+
+ t->bt_prefix = func;
+ return (0);
+}
+
+/*
+ * __ram_set_flags --
+ * Set Recno specific flags.
+ *
+ * PUBLIC: int __ram_set_flags __P((DB *, u_int32_t *flagsp));
+ */
+int
+__ram_set_flags(dbp, flagsp)
+ DB *dbp;
+ u_int32_t *flagsp;
+{
+ u_int32_t flags;
+
+ flags = *flagsp;
+ if (LF_ISSET(DB_RENUMBER | DB_SNAPSHOT)) {
+ DB_ILLEGAL_AFTER_OPEN(dbp, "DB->set_flags");
+
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+
+ if (LF_ISSET(DB_RENUMBER)) {
+ F_SET(dbp, DB_AM_RENUMBER);
+ LF_CLR(DB_RENUMBER);
+ }
+
+ if (LF_ISSET(DB_SNAPSHOT)) {
+ F_SET(dbp, DB_AM_SNAPSHOT);
+ LF_CLR(DB_SNAPSHOT);
+ }
+
+ *flagsp = flags;
+ }
+ return (0);
+}
+
+/*
+ * __ram_set_re_delim --
+ * Set the variable-length input record delimiter.
+ */
+static int
+__ram_set_re_delim(dbp, re_delim)
+ DB *dbp;
+ int re_delim;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_delim");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+
+ t->re_delim = re_delim;
+ F_SET(dbp, DB_AM_DELIMITER);
+
+ return (0);
+}
+
+/*
+ * __ram_set_re_len --
+ * Set the variable-length input record length.
+ */
+static int
+__ram_set_re_len(dbp, re_len)
+ DB *dbp;
+ u_int32_t re_len;
+{
+ BTREE *t;
+ QUEUE *q;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_len");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+ t->re_len = re_len;
+
+ q = dbp->q_internal;
+ q->re_len = re_len;
+
+ F_SET(dbp, DB_AM_FIXEDLEN);
+
+ return (0);
+}
+
+/*
+ * __ram_set_re_pad --
+ * Set the fixed-length record pad character.
+ */
+static int
+__ram_set_re_pad(dbp, re_pad)
+ DB *dbp;
+ int re_pad;
+{
+ BTREE *t;
+ QUEUE *q;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_pad");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+ t->re_pad = re_pad;
+
+ q = dbp->q_internal;
+ q->re_pad = re_pad;
+
+ F_SET(dbp, DB_AM_PAD);
+
+ return (0);
+}
+
+/*
+ * __ram_set_re_source --
+ * Set the backing source file name.
+ */
+static int
+__ram_set_re_source(dbp, re_source)
+ DB *dbp;
+ const char *re_source;
+{
+ BTREE *t;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_re_source");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+
+ t = dbp->bt_internal;
+
+ return (__os_strdup(dbp->dbenv, re_source, &t->re_source));
+}
diff --git a/storage/bdb/btree/bt_open.c b/storage/bdb/btree/bt_open.c
new file mode 100644
index 00000000000..24da41e9893
--- /dev/null
+++ b/storage/bdb/btree/bt_open.c
@@ -0,0 +1,605 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_open.c,v 11.76 2002/09/04 19:06:42 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/fop.h"
+
+static void __bam_init_meta __P((DB *, BTMETA *, db_pgno_t, DB_LSN *));
+
+/*
+ * __bam_open --
+ * Open a btree.
+ *
+ * PUBLIC: int __bam_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, db_pgno_t, u_int32_t));
+ */
+int
+__bam_open(dbp, txn, name, base_pgno, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ BTREE *t;
+
+ COMPQUIET(name, NULL);
+ t = dbp->bt_internal;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->key_range = __bam_key_range;
+ dbp->stat = __bam_stat;
+
+ /*
+ * We don't permit the user to specify a prefix routine if they didn't
+ * also specify a comparison routine, they can't know enough about our
+ * comparison routine to get it right.
+ */
+ if (t->bt_compare == __bam_defcmp && t->bt_prefix != __bam_defpfx) {
+ __db_err(dbp->dbenv,
+"prefix comparison may not be specified for default comparison routine");
+ return (EINVAL);
+ }
+
+ /*
+ * Verify that the bt_minkey value specified won't cause the
+ * calculation of ovflsize to underflow [#2406] for this pagesize.
+ */
+ if (B_MINKEY_TO_OVFLSIZE(dbp, t->bt_minkey, dbp->pgsize) >
+ B_MINKEY_TO_OVFLSIZE(dbp, DEFMINKEYPAGE, dbp->pgsize)) {
+ __db_err(dbp->dbenv,
+ "bt_minkey value of %lu too high for page size of %lu",
+ (u_long)t->bt_minkey, (u_long)dbp->pgsize);
+ return (EINVAL);
+ }
+
+ /* Start up the tree. */
+ return (__bam_read_root(dbp, txn, base_pgno, flags));
+}
+
+/*
+ * __bam_metachk --
+ *
+ * PUBLIC: int __bam_metachk __P((DB *, const char *, BTMETA *));
+ */
+int
+__bam_metachk(dbp, name, btm)
+ DB *dbp;
+ const char *name;
+ BTMETA *btm;
+{
+ DB_ENV *dbenv;
+ u_int32_t vers;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * At this point, all we know is that the magic number is for a Btree.
+ * Check the version, the database may be out of date.
+ */
+ vers = btm->dbmeta.version;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(vers);
+ switch (vers) {
+ case 6:
+ case 7:
+ __db_err(dbenv,
+ "%s: btree version %lu requires a version upgrade",
+ name, (u_long)vers);
+ return (DB_OLD_VERSION);
+ case 8:
+ case 9:
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unsupported btree version: %lu", name, (u_long)vers);
+ return (EINVAL);
+ }
+
+ /* Swap the page if we need to. */
+ if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __bam_mswap((PAGE *)btm)) != 0)
+ return (ret);
+
+ /*
+ * Check application info against metadata info, and set info, flags,
+ * and type based on metadata info.
+ */
+ if ((ret =
+ __db_fchk(dbenv, "DB->open", btm->dbmeta.flags, BTM_MASK)) != 0)
+ return (ret);
+
+ if (F_ISSET(&btm->dbmeta, BTM_RECNO)) {
+ if (dbp->type == DB_BTREE)
+ goto wrong_type;
+ dbp->type = DB_RECNO;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
+ } else {
+ if (dbp->type == DB_RECNO)
+ goto wrong_type;
+ dbp->type = DB_BTREE;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ else
+ if (F_ISSET(dbp, DB_AM_DUP)) {
+ __db_err(dbenv,
+ "%s: DB_DUP specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_RECNUM)) {
+ if (dbp->type != DB_BTREE)
+ goto wrong_type;
+ F_SET(dbp, DB_AM_RECNUM);
+
+ if ((ret = __db_fcchk(dbenv,
+ "DB->open", dbp->flags, DB_AM_DUP, DB_AM_RECNUM)) != 0)
+ return (ret);
+ } else
+ if (F_ISSET(dbp, DB_AM_RECNUM)) {
+ __db_err(dbenv,
+ "%s: DB_RECNUM specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_FIXEDLEN)) {
+ if (dbp->type != DB_RECNO)
+ goto wrong_type;
+ F_SET(dbp, DB_AM_FIXEDLEN);
+ } else
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
+ __db_err(dbenv,
+ "%s: DB_FIXEDLEN specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_RENUMBER)) {
+ if (dbp->type != DB_RECNO)
+ goto wrong_type;
+ F_SET(dbp, DB_AM_RENUMBER);
+ } else
+ if (F_ISSET(dbp, DB_AM_RENUMBER)) {
+ __db_err(dbenv,
+ "%s: DB_RENUMBER specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+ else
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "%s: multiple databases specified but not supported by file",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&btm->dbmeta, BTM_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ F_SET(dbp, DB_AM_DUPSORT);
+ } else
+ if (dbp->dup_compare != NULL) {
+ __db_err(dbenv,
+ "%s: duplicate sort specified but not supported in database",
+ name);
+ return (EINVAL);
+ }
+
+ /* Set the page size. */
+ dbp->pgsize = btm->dbmeta.pagesize;
+
+ /* Copy the file's ID. */
+ memcpy(dbp->fileid, btm->dbmeta.uid, DB_FILE_ID_LEN);
+
+ return (0);
+
+wrong_type:
+ if (dbp->type == DB_BTREE)
+ __db_err(dbenv,
+ "open method type is Btree, database type is Recno");
+ else
+ __db_err(dbenv,
+ "open method type is Recno, database type is Btree");
+ return (EINVAL);
+}
+
+/*
+ * __bam_read_root --
+ * Read the root page and check a tree.
+ *
+ * PUBLIC: int __bam_read_root __P((DB *, DB_TXN *, db_pgno_t, u_int32_t));
+ */
+int
+__bam_read_root(dbp, txn, base_pgno, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ BTMETA *meta;
+ BTREE *t;
+ DBC *dbc;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ meta = NULL;
+ t = dbp->bt_internal;
+ LOCK_INIT(metalock);
+ mpf = dbp->mpf;
+ ret = 0;
+
+ /* Get a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ /* Get the metadata page. */
+ if ((ret =
+ __db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &base_pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ /*
+ * If the magic number is set, the tree has been created. Correct
+ * any fields that may not be right. Note, all of the local flags
+ * were set by DB->open.
+ *
+ * Otherwise, we'd better be in recovery or abort, in which case the
+ * metadata page will be created/initialized elsewhere.
+ */
+ DB_ASSERT(meta->dbmeta.magic != 0 ||
+ IS_RECOVERING(dbp->dbenv) || F_ISSET(dbp, DB_AM_RECOVER));
+
+ t->bt_maxkey = meta->maxkey;
+ t->bt_minkey = meta->minkey;
+ t->re_pad = meta->re_pad;
+ t->re_len = meta->re_len;
+
+ t->bt_meta = base_pgno;
+ t->bt_root = meta->root;
+
+ /*
+ * !!!
+ * If creating a subdatabase, we've already done an insert when
+ * we put the subdatabase's entry into the master database, so
+ * our last-page-inserted value is wrongly initialized for the
+ * master database, not the subdatabase we're creating. I'm not
+ * sure where the *right* place to clear this value is, it's not
+ * intuitively obvious that it belongs here.
+ */
+ t->bt_lpgno = PGNO_INVALID;
+
+ /* We must initialize last_pgno, it could be stale. */
+ if (!LF_ISSET(DB_RDONLY) && dbp->meta_pgno == PGNO_BASE_MD) {
+ mpf->last_pgno(mpf, &meta->dbmeta.last_pgno);
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ } else
+ ret = mpf->put(mpf, meta, 0);
+ meta = NULL;
+
+err: /* Put the metadata page back. */
+ if (meta != NULL && (t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __bam_init_meta --
+ *
+ * Initialize a btree meta-data page. The following fields may need
+ * to be updated later: last_pgno, root.
+ */
+static void
+__bam_init_meta(dbp, meta, pgno, lsnp)
+ DB *dbp;
+ BTMETA *meta;
+ db_pgno_t pgno;
+ DB_LSN *lsnp;
+{
+ BTREE *t;
+
+ memset(meta, 0, sizeof(BTMETA));
+ meta->dbmeta.lsn = *lsnp;
+ meta->dbmeta.pgno = pgno;
+ meta->dbmeta.magic = DB_BTREEMAGIC;
+ meta->dbmeta.version = DB_BTREEVERSION;
+ meta->dbmeta.pagesize = dbp->pgsize;
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ meta->dbmeta.encrypt_alg =
+ ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
+ DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
+ meta->crypto_magic = meta->dbmeta.magic;
+ }
+ meta->dbmeta.type = P_BTREEMETA;
+ meta->dbmeta.free = PGNO_INVALID;
+ meta->dbmeta.last_pgno = pgno;
+ if (F_ISSET(dbp, DB_AM_DUP))
+ F_SET(&meta->dbmeta, BTM_DUP);
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN))
+ F_SET(&meta->dbmeta, BTM_FIXEDLEN);
+ if (F_ISSET(dbp, DB_AM_RECNUM))
+ F_SET(&meta->dbmeta, BTM_RECNUM);
+ if (F_ISSET(dbp, DB_AM_RENUMBER))
+ F_SET(&meta->dbmeta, BTM_RENUMBER);
+ if (F_ISSET(dbp, DB_AM_SUBDB))
+ F_SET(&meta->dbmeta, BTM_SUBDB);
+ if (dbp->dup_compare != NULL)
+ F_SET(&meta->dbmeta, BTM_DUPSORT);
+ if (dbp->type == DB_RECNO)
+ F_SET(&meta->dbmeta, BTM_RECNO);
+ memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ t = dbp->bt_internal;
+ meta->maxkey = t->bt_maxkey;
+ meta->minkey = t->bt_minkey;
+ meta->re_len = t->re_len;
+ meta->re_pad = t->re_pad;
+}
+
+/*
+ * __bam_new_file --
+ * Create the necessary pages to begin a new database file.
+ *
+ * This code appears more complex than it is because of the two cases (named
+ * and unnamed). The way to read the code is that for each page being created,
+ * there are three parts: 1) a "get page" chunk (which either uses malloc'd
+ * memory or calls mpf->get), 2) the initialization, and 3) the "put page"
+ * chunk which either does a fop write or an mpf->put.
+ *
+ * PUBLIC: int __bam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__bam_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ BTMETA *meta;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ DBT pdbt;
+ PAGE *root;
+ db_pgno_t pgno;
+ int ret;
+ void *buf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ root = NULL;
+ meta = NULL;
+ memset(&pdbt, 0, sizeof(pdbt));
+
+ /* Build meta-data page. */
+
+ if (name == NULL) {
+ pgno = PGNO_BASE_MD;
+ ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &meta);
+ } else {
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pginfo.type = dbp->type;
+ pdbt.data = &pginfo;
+ pdbt.size = sizeof(pginfo);
+ ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
+ meta = (BTMETA *)buf;
+ }
+ if (ret != 0)
+ return (ret);
+
+ LSN_NOT_LOGGED(lsn);
+ __bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
+ meta->root = 1;
+ meta->dbmeta.last_pgno = 1;
+
+ if (name == NULL)
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv,
+ txn, name, DB_APP_DATA, fhp, 0, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ meta = NULL;
+
+ /* Now build root page. */
+ if (name == NULL) {
+ pgno = 1;
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &root)) != 0)
+ goto err;
+ } else {
+#ifdef DIAGNOSTIC
+ memset(buf, 0, dbp->pgsize);
+#endif
+ root = (PAGE *)buf;
+ }
+
+ P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID,
+ LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE);
+ LSN_NOT_LOGGED(root->lsn);
+
+ if (name == NULL)
+ ret = mpf->put(mpf, root, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, root->pgno, root, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv, txn,
+ name, DB_APP_DATA, fhp, dbp->pgsize, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ root = NULL;
+
+err: if (name != NULL)
+ __os_free(dbenv, buf);
+ else {
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ if (root != NULL)
+ (void)mpf->put(mpf, root, 0);
+ }
+ return (ret);
+}
+
+/*
+ * __bam_new_subdb --
+ * Create a metadata page and a root page for a new btree.
+ *
+ * PUBLIC: int __bam_new_subdb __P((DB *, DB *, DB_TXN *));
+ */
+int
+__bam_new_subdb(mdbp, dbp, txn)
+ DB *mdbp, *dbp;
+ DB_TXN *txn;
+{
+ BTMETA *meta;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK metalock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *root;
+ int ret, t_ret;
+
+ dbenv = mdbp->dbenv;
+ mpf = mdbp->mpf;
+ dbc = NULL;
+ meta = NULL;
+ root = NULL;
+
+ if ((ret = mdbp->cursor(mdbp, txn,
+ &dbc, CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ /* Get, and optionally create the metadata page. */
+ if ((ret = __db_lget(dbc,
+ 0, dbp->meta_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &dbp->meta_pgno, DB_MPOOL_CREATE, &meta)) != 0)
+ goto err;
+
+ /* Build meta-data page. */
+ lsn = meta->dbmeta.lsn;
+ __bam_init_meta(dbp, meta, dbp->meta_pgno, &lsn);
+ if ((ret = __db_log_page(mdbp,
+ txn, &meta->dbmeta.lsn, dbp->meta_pgno, (PAGE *)meta)) != 0)
+ goto err;
+
+ /* Create and initialize a root page. */
+ if ((ret = __db_new(dbc,
+ dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE, &root)) != 0)
+ goto err;
+ root->level = LEAFLEVEL;
+
+ if (DBENV_LOGGING(dbenv) &&
+ (ret = __bam_root_log(mdbp, txn, &meta->dbmeta.lsn, 0,
+ meta->dbmeta.pgno, root->pgno, &meta->dbmeta.lsn)) != 0)
+ goto err;
+
+ meta->root = root->pgno;
+ if ((ret =
+ __db_log_page(mdbp, txn, &root->lsn, root->pgno, root)) != 0)
+ goto err;
+
+ /* Release the metadata and root pages. */
+ if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ meta = NULL;
+ if ((ret = mpf->put(mpf, root, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ root = NULL;
+err:
+ if (meta != NULL)
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (root != NULL)
+ if ((t_ret = mpf->put(mpf, root, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (LOCK_ISSET(metalock))
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
diff --git a/storage/bdb/btree/bt_put.c b/storage/bdb/btree/bt_put.c
new file mode 100644
index 00000000000..39bd2024e76
--- /dev/null
+++ b/storage/bdb/btree/bt_put.c
@@ -0,0 +1,854 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_put.c,v 11.69 2002/08/06 06:11:12 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+static int __bam_build
+ __P((DBC *, u_int32_t, DBT *, PAGE *, u_int32_t, u_int32_t));
+static int __bam_dup_convert __P((DBC *, PAGE *, u_int32_t));
+static int __bam_ovput
+ __P((DBC *, u_int32_t, db_pgno_t, PAGE *, u_int32_t, DBT *));
+static u_int32_t
+ __bam_partsize __P((DB *, u_int32_t, DBT *, PAGE *, u_int32_t));
+
+/*
+ * __bam_iitem --
+ * Insert an item into the tree.
+ *
+ * PUBLIC: int __bam_iitem __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
+ */
+int
+__bam_iitem(dbc, key, data, op, flags)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t op, flags;
+{
+ BKEYDATA *bk, bk_tmp;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT bk_hdr, tdbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_indx_t indx;
+ u_int32_t data_size, have_bytes, need_bytes, needed;
+ int cmp, bigkey, bigdata, dupadjust, padrec, replace, ret, was_deleted;
+
+ COMPQUIET(bk, NULL);
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ h = cp->page;
+ indx = cp->indx;
+ dupadjust = replace = was_deleted = 0;
+
+ /*
+ * Fixed-length records with partial puts: it's an error to specify
+ * anything other than simple overwrite.
+ */
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN) &&
+ F_ISSET(data, DB_DBT_PARTIAL) && data->dlen != data->size) {
+ data_size = data->size;
+ goto len_err;
+ }
+
+ /*
+ * Figure out how much space the data will take, including if it's a
+ * partial record.
+ *
+ * Fixed-length records: it's an error to specify a record that's
+ * longer than the fixed-length, and we never require less than
+ * the fixed-length record size.
+ */
+ data_size = F_ISSET(data, DB_DBT_PARTIAL) ?
+ __bam_partsize(dbp, op, data, h, indx) : data->size;
+ padrec = 0;
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
+ if (data_size > t->re_len) {
+len_err: __db_err(dbp->dbenv,
+ "Length improper for fixed length record %lu",
+ (u_long)data_size);
+ return (EINVAL);
+ }
+
+ /* Records that are deleted anyway needn't be padded out. */
+ if (!LF_ISSET(BI_DELETED) && data_size < t->re_len) {
+ padrec = 1;
+ data_size = t->re_len;
+ }
+ }
+
+ /*
+ * Handle partial puts or short fixed-length records: build the
+ * real record.
+ */
+ if (padrec || F_ISSET(data, DB_DBT_PARTIAL)) {
+ tdbt = *data;
+ if ((ret =
+ __bam_build(dbc, op, &tdbt, h, indx, data_size)) != 0)
+ return (ret);
+ data = &tdbt;
+ }
+
+ /*
+ * If the user has specified a duplicate comparison function, return
+ * an error if DB_CURRENT was specified and the replacement data
+ * doesn't compare equal to the current data. This stops apps from
+ * screwing up the duplicate sort order. We have to do this after
+ * we build the real record so that we're comparing the real items.
+ */
+ if (op == DB_CURRENT && dbp->dup_compare != NULL) {
+ if ((ret = __bam_cmp(dbp, data, h,
+ indx + (TYPE(h) == P_LBTREE ? O_INDX : 0),
+ dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ if (cmp != 0) {
+ __db_err(dbp->dbenv,
+ "Current data differs from put data");
+ return (EINVAL);
+ }
+ }
+
+ /*
+ * If the key or data item won't fit on a page, we'll have to store
+ * them on overflow pages.
+ */
+ needed = 0;
+ bigdata = data_size > cp->ovflsize;
+ switch (op) {
+ case DB_KEYFIRST:
+ /* We're adding a new key and data pair. */
+ bigkey = key->size > cp->ovflsize;
+ if (bigkey)
+ needed += BOVERFLOW_PSIZE;
+ else
+ needed += BKEYDATA_PSIZE(key->size);
+ if (bigdata)
+ needed += BOVERFLOW_PSIZE;
+ else
+ needed += BKEYDATA_PSIZE(data_size);
+ break;
+ case DB_AFTER:
+ case DB_BEFORE:
+ case DB_CURRENT:
+ /*
+ * We're either overwriting the data item of a key/data pair
+ * or we're creating a new on-page duplicate and only adding
+ * a data item.
+ *
+ * !!!
+ * We're not currently correcting for space reclaimed from
+ * already deleted items, but I don't think it's worth the
+ * complexity.
+ */
+ bigkey = 0;
+ if (op == DB_CURRENT) {
+ bk = GET_BKEYDATA(dbp, h,
+ indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
+ if (B_TYPE(bk->type) == B_KEYDATA)
+ have_bytes = BKEYDATA_PSIZE(bk->len);
+ else
+ have_bytes = BOVERFLOW_PSIZE;
+ need_bytes = 0;
+ } else {
+ have_bytes = 0;
+ need_bytes = sizeof(db_indx_t);
+ }
+ if (bigdata)
+ need_bytes += BOVERFLOW_PSIZE;
+ else
+ need_bytes += BKEYDATA_PSIZE(data_size);
+
+ if (have_bytes < need_bytes)
+ needed += need_bytes - have_bytes;
+ break;
+ default:
+ return (__db_unknown_flag(dbp->dbenv, "__bam_iitem", op));
+ }
+
+ /*
+ * If there's not enough room, or the user has put a ceiling on the
+ * number of keys permitted in the page, split the page.
+ *
+ * XXX
+ * The t->bt_maxkey test here may be insufficient -- do we have to
+ * check in the btree split code, so we don't undo it there!?!?
+ */
+ if (P_FREESPACE(dbp, h) < needed ||
+ (t->bt_maxkey != 0 && NUM_ENT(h) > t->bt_maxkey))
+ return (DB_NEEDSPLIT);
+
+ /*
+ * The code breaks it up into five cases:
+ *
+ * 1. Insert a new key/data pair.
+ * 2. Append a new data item (a new duplicate).
+ * 3. Insert a new data item (a new duplicate).
+ * 4. Delete and re-add the data item (overflow item).
+ * 5. Overwrite the data item.
+ */
+ switch (op) {
+ case DB_KEYFIRST: /* 1. Insert a new key/data pair. */
+ if (bigkey) {
+ if ((ret = __bam_ovput(dbc,
+ B_OVERFLOW, PGNO_INVALID, h, indx, key)) != 0)
+ return (ret);
+ } else
+ if ((ret = __db_pitem(dbc, h, indx,
+ BKEYDATA_SIZE(key->size), NULL, key)) != 0)
+ return (ret);
+
+ if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
+ return (ret);
+ ++indx;
+ break;
+ case DB_AFTER: /* 2. Append a new data item. */
+ if (TYPE(h) == P_LBTREE) {
+ /* Copy the key for the duplicate and adjust cursors. */
+ if ((ret =
+ __bam_adjindx(dbc, h, indx + P_INDX, indx, 1)) != 0)
+ return (ret);
+ if ((ret =
+ __bam_ca_di(dbc, PGNO(h), indx + P_INDX, 1)) != 0)
+ return (ret);
+
+ indx += 3;
+ dupadjust = 1;
+
+ cp->indx += 2;
+ } else {
+ ++indx;
+ cp->indx += 1;
+ }
+ break;
+ case DB_BEFORE: /* 3. Insert a new data item. */
+ if (TYPE(h) == P_LBTREE) {
+ /* Copy the key for the duplicate and adjust cursors. */
+ if ((ret = __bam_adjindx(dbc, h, indx, indx, 1)) != 0)
+ return (ret);
+ if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
+ return (ret);
+
+ ++indx;
+ dupadjust = 1;
+ }
+ break;
+ case DB_CURRENT:
+ /*
+ * Clear the cursor's deleted flag. The problem is that if
+ * we deadlock or fail while deleting the overflow item or
+ * replacing the non-overflow item, a subsequent cursor close
+ * will try and remove the item because the cursor's delete
+ * flag is set.
+ */
+ (void)__bam_ca_delete(dbp, PGNO(h), indx, 0);
+
+ if (TYPE(h) == P_LBTREE) {
+ ++indx;
+ dupadjust = 1;
+
+ /*
+ * In a Btree deleted records aren't counted (deleted
+ * records are counted in a Recno because all accesses
+ * are based on record number). If it's a Btree and
+ * it's a DB_CURRENT operation overwriting a previously
+ * deleted record, increment the record count.
+ */
+ was_deleted = B_DISSET(bk->type);
+ }
+
+ /*
+ * 4. Delete and re-add the data item.
+ *
+ * If we're changing the type of the on-page structure, or we
+ * are referencing offpage items, we have to delete and then
+ * re-add the item. We do not do any cursor adjustments here
+ * because we're going to immediately re-add the item into the
+ * same slot.
+ */
+ if (bigdata || B_TYPE(bk->type) != B_KEYDATA) {
+ if ((ret = __bam_ditem(dbc, h, indx)) != 0)
+ return (ret);
+ break;
+ }
+
+ /* 5. Overwrite the data item. */
+ replace = 1;
+ break;
+ default:
+ return (__db_unknown_flag(dbp->dbenv, "__bam_iitem", op));
+ }
+
+ /* Add the data. */
+ if (bigdata) {
+ /*
+ * We do not have to handle deleted (BI_DELETED) records
+ * in this case; the actual records should never be created.
+ */
+ DB_ASSERT(!LF_ISSET(BI_DELETED));
+ if ((ret = __bam_ovput(dbc,
+ B_OVERFLOW, PGNO_INVALID, h, indx, data)) != 0)
+ return (ret);
+ } else {
+ if (LF_ISSET(BI_DELETED)) {
+ B_TSET(bk_tmp.type, B_KEYDATA, 1);
+ bk_tmp.len = data->size;
+ bk_hdr.data = &bk_tmp;
+ bk_hdr.size = SSZA(BKEYDATA, data);
+ ret = __db_pitem(dbc, h, indx,
+ BKEYDATA_SIZE(data->size), &bk_hdr, data);
+ } else if (replace)
+ ret = __bam_ritem(dbc, h, indx, data);
+ else
+ ret = __db_pitem(dbc, h, indx,
+ BKEYDATA_SIZE(data->size), NULL, data);
+ if (ret != 0)
+ return (ret);
+ }
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ /*
+ * Re-position the cursors if necessary and reset the current cursor
+ * to point to the new item.
+ */
+ if (op != DB_CURRENT) {
+ if ((ret = __bam_ca_di(dbc, PGNO(h), indx, 1)) != 0)
+ return (ret);
+ cp->indx = TYPE(h) == P_LBTREE ? indx - O_INDX : indx;
+ }
+
+ /*
+ * If we've changed the record count, update the tree. There's no
+ * need to adjust the count if the operation was not performed on the
+ * current record or when the current record was previously deleted.
+ */
+ if (F_ISSET(cp, C_RECNUM) && (op != DB_CURRENT || was_deleted))
+ if ((ret = __bam_adjust(dbc, 1)) != 0)
+ return (ret);
+
+ /*
+ * If a Btree leaf page is at least 50% full and we may have added or
+ * modified a duplicate data item, see if the set of duplicates takes
+ * up at least 25% of the space on the page. If it does, move it onto
+ * its own page.
+ */
+ if (dupadjust && P_FREESPACE(dbp, h) <= dbp->pgsize / 2) {
+ if ((ret = __bam_dup_convert(dbc, h, indx - O_INDX)) != 0)
+ return (ret);
+ }
+
+ /* If we've modified a recno file, set the flag. */
+ if (dbc->dbtype == DB_RECNO)
+ t->re_modified = 1;
+
+ return (ret);
+}
+
+/*
+ * __bam_partsize --
+ * Figure out how much space a partial data item is in total.
+ */
+static u_int32_t
+__bam_partsize(dbp, op, data, h, indx)
+ DB *dbp;
+ u_int32_t op, indx;
+ DBT *data;
+ PAGE *h;
+{
+ BKEYDATA *bk;
+ u_int32_t nbytes;
+
+ /*
+ * If the record doesn't already exist, it's simply the data we're
+ * provided.
+ */
+ if (op != DB_CURRENT)
+ return (data->doff + data->size);
+
+ /*
+ * Otherwise, it's the data provided plus any already existing data
+ * that we're not replacing.
+ */
+ bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
+ nbytes =
+ B_TYPE(bk->type) == B_OVERFLOW ? ((BOVERFLOW *)bk)->tlen : bk->len;
+
+ return (__db_partsize(nbytes, data));
+}
+
+/*
+ * __bam_build --
+ * Build the real record for a partial put, or short fixed-length record.
+ */
+static int
+__bam_build(dbc, op, dbt, h, indx, nbytes)
+ DBC *dbc;
+ u_int32_t op, indx, nbytes;
+ DBT *dbt;
+ PAGE *h;
+{
+ BKEYDATA *bk, tbk;
+ BOVERFLOW *bo;
+ BTREE *t;
+ DB *dbp;
+ DBT copy, *rdata;
+ u_int32_t len, tlen;
+ u_int8_t *p;
+ int ret;
+
+ COMPQUIET(bo, NULL);
+
+ dbp = dbc->dbp;
+ t = dbp->bt_internal;
+
+ /* We use the record data return memory, it's only a short-term use. */
+ rdata = &dbc->my_rdata;
+ if (rdata->ulen < nbytes) {
+ if ((ret = __os_realloc(dbp->dbenv,
+ nbytes, &rdata->data)) != 0) {
+ rdata->ulen = 0;
+ rdata->data = NULL;
+ return (ret);
+ }
+ rdata->ulen = nbytes;
+ }
+
+ /*
+ * We use nul or pad bytes for any part of the record that isn't
+ * specified; get it over with.
+ */
+ memset(rdata->data,
+ F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_pad : 0, nbytes);
+
+ /*
+ * In the next clauses, we need to do three things: a) set p to point
+ * to the place at which to copy the user's data, b) set tlen to the
+ * total length of the record, not including the bytes contributed by
+ * the user, and c) copy any valid data from an existing record. If
+ * it's not a partial put (this code is called for both partial puts
+ * and fixed-length record padding) or it's a new key, we can cut to
+ * the chase.
+ */
+ if (!F_ISSET(dbt, DB_DBT_PARTIAL) || op != DB_CURRENT) {
+ p = (u_int8_t *)rdata->data + dbt->doff;
+ tlen = dbt->doff;
+ goto user_copy;
+ }
+
+ /* Find the current record. */
+ if (indx < NUM_ENT(h)) {
+ bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ?
+ O_INDX : 0));
+ bo = (BOVERFLOW *)bk;
+ } else {
+ bk = &tbk;
+ B_TSET(bk->type, B_KEYDATA, 0);
+ bk->len = 0;
+ }
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ /*
+ * In the case of an overflow record, we shift things around
+ * in the current record rather than allocate a separate copy.
+ */
+ memset(&copy, 0, sizeof(copy));
+ if ((ret = __db_goff(dbp, &copy, bo->tlen,
+ bo->pgno, &rdata->data, &rdata->ulen)) != 0)
+ return (ret);
+
+ /* Skip any leading data from the original record. */
+ tlen = dbt->doff;
+ p = (u_int8_t *)rdata->data + dbt->doff;
+
+ /*
+ * Copy in any trailing data from the original record.
+ *
+ * If the original record was larger than the original offset
+ * plus the bytes being deleted, there is trailing data in the
+ * original record we need to preserve. If we aren't deleting
+ * the same number of bytes as we're inserting, copy it up or
+ * down, into place.
+ *
+ * Use memmove(), the regions may overlap.
+ */
+ if (bo->tlen > dbt->doff + dbt->dlen) {
+ len = bo->tlen - (dbt->doff + dbt->dlen);
+ if (dbt->dlen != dbt->size)
+ memmove(p + dbt->size, p + dbt->dlen, len);
+ tlen += len;
+ }
+ } else {
+ /* Copy in any leading data from the original record. */
+ memcpy(rdata->data,
+ bk->data, dbt->doff > bk->len ? bk->len : dbt->doff);
+ tlen = dbt->doff;
+ p = (u_int8_t *)rdata->data + dbt->doff;
+
+ /* Copy in any trailing data from the original record. */
+ len = dbt->doff + dbt->dlen;
+ if (bk->len > len) {
+ memcpy(p + dbt->size, bk->data + len, bk->len - len);
+ tlen += bk->len - len;
+ }
+ }
+
+user_copy:
+ /*
+ * Copy in the application provided data -- p and tlen must have been
+ * initialized above.
+ */
+ memcpy(p, dbt->data, dbt->size);
+ tlen += dbt->size;
+
+ /* Set the DBT to reference our new record. */
+ rdata->size = F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_len : tlen;
+ rdata->dlen = 0;
+ rdata->doff = 0;
+ rdata->flags = 0;
+ *dbt = *rdata;
+ return (0);
+}
+
+/*
+ * __bam_ritem --
+ * Replace an item on a page.
+ *
+ * PUBLIC: int __bam_ritem __P((DBC *, PAGE *, u_int32_t, DBT *));
+ */
+int
+__bam_ritem(dbc, h, indx, data)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx;
+ DBT *data;
+{
+ BKEYDATA *bk;
+ DB *dbp;
+ DBT orig, repl;
+ db_indx_t cnt, lo, ln, min, off, prefix, suffix;
+ int32_t nbytes;
+ int ret;
+ db_indx_t *inp;
+ u_int8_t *p, *t;
+
+ dbp = dbc->dbp;
+
+ /*
+ * Replace a single item onto a page. The logic figuring out where
+ * to insert and whether it fits is handled in the caller. All we do
+ * here is manage the page shuffling.
+ */
+ bk = GET_BKEYDATA(dbp, h, indx);
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ /*
+ * We might as well check to see if the two data items share
+ * a common prefix and suffix -- it can save us a lot of log
+ * space if they're large.
+ */
+ min = data->size < bk->len ? data->size : bk->len;
+ for (prefix = 0,
+ p = bk->data, t = data->data;
+ prefix < min && *p == *t; ++prefix, ++p, ++t)
+ ;
+
+ min -= prefix;
+ for (suffix = 0,
+ p = (u_int8_t *)bk->data + bk->len - 1,
+ t = (u_int8_t *)data->data + data->size - 1;
+ suffix < min && *p == *t; ++suffix, --p, --t)
+ ;
+
+ /* We only log the parts of the keys that have changed. */
+ orig.data = (u_int8_t *)bk->data + prefix;
+ orig.size = bk->len - (prefix + suffix);
+ repl.data = (u_int8_t *)data->data + prefix;
+ repl.size = data->size - (prefix + suffix);
+ if ((ret = __bam_repl_log(dbp, dbc->txn, &LSN(h), 0, PGNO(h),
+ &LSN(h), (u_int32_t)indx, (u_int32_t)B_DISSET(bk->type),
+ &orig, &repl, (u_int32_t)prefix, (u_int32_t)suffix)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(h));
+
+ /*
+ * Set references to the first in-use byte on the page and the
+ * first byte of the item being replaced.
+ */
+ inp = P_INP(dbp, h);
+ p = (u_int8_t *)h + HOFFSET(h);
+ t = (u_int8_t *)bk;
+
+ /*
+ * If the entry is growing in size, shift the beginning of the data
+ * part of the page down. If the entry is shrinking in size, shift
+ * the beginning of the data part of the page up. Use memmove(3),
+ * the regions overlap.
+ */
+ lo = BKEYDATA_SIZE(bk->len);
+ ln = (db_indx_t)BKEYDATA_SIZE(data->size);
+ if (lo != ln) {
+ nbytes = lo - ln; /* Signed difference. */
+ if (p == t) /* First index is fast. */
+ inp[indx] += nbytes;
+ else { /* Else, shift the page. */
+ memmove(p + nbytes, p, t - p);
+
+ /* Adjust the indices' offsets. */
+ off = inp[indx];
+ for (cnt = 0; cnt < NUM_ENT(h); ++cnt)
+ if (inp[cnt] <= off)
+ inp[cnt] += nbytes;
+ }
+
+ /* Clean up the page and adjust the item's reference. */
+ HOFFSET(h) += nbytes;
+ t += nbytes;
+ }
+
+ /* Copy the new item onto the page. */
+ bk = (BKEYDATA *)t;
+ B_TSET(bk->type, B_KEYDATA, 0);
+ bk->len = data->size;
+ memcpy(bk->data, data->data, data->size);
+
+ return (0);
+}
+
+/*
+ * __bam_dup_convert --
+ * Check to see if the duplicate set at indx should have its own page.
+ * If it should, create it.
+ */
+static int
+__bam_dup_convert(dbc, h, indx)
+ DBC *dbc;
+ PAGE *h;
+ u_int32_t indx;
+{
+ BKEYDATA *bk;
+ DB *dbp;
+ DBT hdr;
+ DB_MPOOLFILE *mpf;
+ PAGE *dp;
+ db_indx_t cnt, cpindx, dindx, first, *inp, sz;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ inp = P_INP(dbp, h);
+
+ /*
+ * Count the duplicate records and calculate how much room they're
+ * using on the page.
+ */
+ while (indx > 0 && inp[indx] == inp[indx - P_INDX])
+ indx -= P_INDX;
+ for (cnt = 0, sz = 0, first = indx;; ++cnt, indx += P_INDX) {
+ if (indx >= NUM_ENT(h) || inp[first] != inp[indx])
+ break;
+ bk = GET_BKEYDATA(dbp, h, indx);
+ sz += B_TYPE(bk->type) == B_KEYDATA ?
+ BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
+ bk = GET_BKEYDATA(dbp, h, indx + O_INDX);
+ sz += B_TYPE(bk->type) == B_KEYDATA ?
+ BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
+ }
+
+ /*
+ * We have to do these checks when the user is replacing the cursor's
+ * data item -- if the application replaces a duplicate item with a
+ * larger data item, it can increase the amount of space used by the
+ * duplicates, requiring this check. But that means we may have done
+ * this check when it wasn't a duplicate item after all.
+ */
+ if (cnt == 1)
+ return (0);
+
+ /*
+ * If this set of duplicates is using more than 25% of the page, move
+ * them off. The choice of 25% is a WAG, but the value must be small
+ * enough that we can always split a page without putting duplicates
+ * on two different pages.
+ */
+ if (sz < dbp->pgsize / 4)
+ return (0);
+
+ /* Get a new page. */
+ if ((ret = __db_new(dbc,
+ dbp->dup_compare == NULL ? P_LRECNO : P_LDUP, &dp)) != 0)
+ return (ret);
+ P_INIT(dp, dbp->pgsize, dp->pgno,
+ PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, TYPE(dp));
+
+ /*
+ * Move this set of duplicates off the page. First points to the first
+ * key of the first duplicate key/data pair, cnt is the number of pairs
+ * we're dealing with.
+ */
+ memset(&hdr, 0, sizeof(hdr));
+ dindx = first;
+ indx = first;
+ cpindx = 0;
+ do {
+ /* Move cursors referencing the old entry to the new entry. */
+ if ((ret = __bam_ca_dup(dbc, first,
+ PGNO(h), indx, PGNO(dp), cpindx)) != 0)
+ goto err;
+
+ /*
+		 * Copy the entry to the new page.
+		 * If the off-duplicate page is a Btree page (i.e. dup_compare
+ * will be non-NULL, we use Btree pages for sorted dups,
+ * and Recno pages for unsorted dups), move all entries
+ * normally, even deleted ones. If it's a Recno page,
+ * deleted entries are discarded (if the deleted entry is
+ * overflow, then free up those pages).
+ */
+ bk = GET_BKEYDATA(dbp, h, dindx + 1);
+ hdr.data = bk;
+ hdr.size = B_TYPE(bk->type) == B_KEYDATA ?
+ BKEYDATA_SIZE(bk->len) : BOVERFLOW_SIZE;
+ if (dbp->dup_compare == NULL && B_DISSET(bk->type)) {
+ /*
+ * Unsorted dups, i.e. recno page, and we have
+ * a deleted entry, don't move it, but if it was
+ * an overflow entry, we need to free those pages.
+ */
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_doff(dbc,
+ (GET_BOVERFLOW(dbp, h, dindx + 1))->pgno)) != 0)
+ goto err;
+ } else {
+ if ((ret = __db_pitem(
+ dbc, dp, cpindx, hdr.size, &hdr, NULL)) != 0)
+ goto err;
+ ++cpindx;
+ }
+ /* Delete all but the last reference to the key. */
+ if (cnt != 1) {
+ if ((ret = __bam_adjindx(dbc,
+ h, dindx, first + 1, 0)) != 0)
+ goto err;
+ } else
+ dindx++;
+
+ /* Delete the data item. */
+ if ((ret = __db_ditem(dbc, h, dindx, hdr.size)) != 0)
+ goto err;
+ indx += P_INDX;
+ } while (--cnt);
+
+ /* Put in a new data item that points to the duplicates page. */
+ if ((ret = __bam_ovput(dbc,
+ B_DUPLICATE, dp->pgno, h, first + 1, NULL)) != 0)
+ goto err;
+
+	/* Adjust cursors for all the above movements. */
+ if ((ret = __bam_ca_di(dbc,
+ PGNO(h), first + P_INDX, first + P_INDX - indx)) != 0)
+ goto err;
+
+ return (mpf->put(mpf, dp, DB_MPOOL_DIRTY));
+
+err: (void)mpf->put(mpf, dp, 0);
+ return (ret);
+}
+
+/*
+ * __bam_ovput --
+ * Build an item for an off-page duplicates page or overflow page and
+ * insert it on the page.
+ */
+static int
+__bam_ovput(dbc, type, pgno, h, indx, item)
+ DBC *dbc;
+ u_int32_t type, indx;
+ db_pgno_t pgno;
+ PAGE *h;
+ DBT *item;
+{
+ BOVERFLOW bo;
+ DBT hdr;
+ int ret;
+
+ UMRW_SET(bo.unused1);
+ B_TSET(bo.type, type, 0);
+ UMRW_SET(bo.unused2);
+
+ /*
+ * If we're creating an overflow item, do so and acquire the page
+ * number for it. If we're creating an off-page duplicates tree,
+ * we are giving the page number as an argument.
+ */
+ if (type == B_OVERFLOW) {
+ if ((ret = __db_poff(dbc, item, &bo.pgno)) != 0)
+ return (ret);
+ bo.tlen = item->size;
+ } else {
+ bo.pgno = pgno;
+ bo.tlen = 0;
+ }
+
+ /* Store the new record on the page. */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bo;
+ hdr.size = BOVERFLOW_SIZE;
+ return (__db_pitem(dbc, h, indx, BOVERFLOW_SIZE, &hdr, NULL));
+}
diff --git a/storage/bdb/btree/bt_rec.c b/storage/bdb/btree/bt_rec.c
new file mode 100644
index 00000000000..b6443547aa5
--- /dev/null
+++ b/storage/bdb/btree/bt_rec.c
@@ -0,0 +1,971 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_rec.c,v 11.57 2002/08/06 16:53:53 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+
+#define IS_BTREE_PAGE(pagep) \
+ (TYPE(pagep) == P_IBTREE || \
+ TYPE(pagep) == P_LBTREE || TYPE(pagep) == P_LDUP)
+
+/*
+ * __bam_split_recover --
+ * Recovery function for split.
+ *
+ * PUBLIC: int __bam_split_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_split_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_split_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *_lp, *lp, *np, *pp, *_rp, *rp, *sp;
+ db_pgno_t pgno, root_pgno;
+ u_int32_t ptype;
+ int cmp, l_update, p_update, r_update, rc, ret, ret_l, rootsplit, t_ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_split_print);
+
+ mpf = NULL;
+ _lp = lp = np = pp = _rp = rp = NULL;
+ sp = NULL;
+
+ REC_INTRO(__bam_split_read, 1);
+
+ /*
+ * There are two kinds of splits that we have to recover from. The
+ * first is a root-page split, where the root page is split from a
+ * leaf page into an internal page and two new leaf pages are created.
+ * The second is where a page is split into two pages, and a new key
+ * is inserted into the parent page.
+ *
+ * DBTs are not aligned in log records, so we need to copy the page
+ * so that we can access fields within it throughout this routine.
+ * Although we could hardcode the unaligned copies in this routine,
+ * we will be calling into regular btree functions with this page,
+ * so it's got to be aligned. Copying it into allocated memory is
+ * the only way to guarantee this.
+ */
+ if ((ret = __os_malloc(dbenv, argp->pg.size, &sp)) != 0)
+ goto out;
+ memcpy(sp, argp->pg.data, argp->pg.size);
+
+ pgno = PGNO(sp);
+ root_pgno = argp->root_pgno;
+ rootsplit = root_pgno != PGNO_INVALID;
+ if ((ret_l = mpf->get(mpf, &argp->left, 0, &lp)) != 0)
+ lp = NULL;
+ if (mpf->get(mpf, &argp->right, 0, &rp) != 0)
+ rp = NULL;
+
+ if (DB_REDO(op)) {
+ l_update = r_update = p_update = 0;
+ /*
+ * Decide if we need to resplit the page.
+ *
+ * If this is a root split, then the root has to exist, it's
+ * the page we're splitting and it gets modified. If this is
+ * not a root split, then the left page has to exist, for the
+ * same reason.
+ */
+ if (rootsplit) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &pp)) != 0) {
+ __db_pgerr(file_dbp, pgno, ret);
+ pp = NULL;
+ goto out;
+ }
+ cmp = log_compare(&LSN(pp), &LSN(argp->pg.data));
+ CHECK_LSN(op, cmp, &LSN(pp), &LSN(argp->pg.data));
+ p_update = cmp == 0;
+ } else if (lp == NULL) {
+ __db_pgerr(file_dbp, argp->left, ret_l);
+ goto out;
+ }
+
+ if (lp != NULL) {
+ cmp = log_compare(&LSN(lp), &argp->llsn);
+ CHECK_LSN(op, cmp, &LSN(lp), &argp->llsn);
+ if (cmp == 0)
+ l_update = 1;
+ } else
+ l_update = 1;
+
+ if (rp != NULL) {
+ cmp = log_compare(&LSN(rp), &argp->rlsn);
+ CHECK_LSN(op, cmp, &LSN(rp), &argp->rlsn);
+ if (cmp == 0)
+ r_update = 1;
+ } else
+ r_update = 1;
+ if (!p_update && !l_update && !r_update)
+ goto check_next;
+
+ /* Allocate and initialize new left/right child pages. */
+ if ((ret = __os_malloc(dbenv, file_dbp->pgsize, &_lp)) != 0 ||
+ (ret = __os_malloc(dbenv, file_dbp->pgsize, &_rp)) != 0)
+ goto out;
+ if (rootsplit) {
+ P_INIT(_lp, file_dbp->pgsize, argp->left,
+ PGNO_INVALID,
+ ISINTERNAL(sp) ? PGNO_INVALID : argp->right,
+ LEVEL(sp), TYPE(sp));
+ P_INIT(_rp, file_dbp->pgsize, argp->right,
+ ISINTERNAL(sp) ? PGNO_INVALID : argp->left,
+ PGNO_INVALID, LEVEL(sp), TYPE(sp));
+ } else {
+ P_INIT(_lp, file_dbp->pgsize, PGNO(sp),
+ ISINTERNAL(sp) ? PGNO_INVALID : PREV_PGNO(sp),
+ ISINTERNAL(sp) ? PGNO_INVALID : argp->right,
+ LEVEL(sp), TYPE(sp));
+ P_INIT(_rp, file_dbp->pgsize, argp->right,
+ ISINTERNAL(sp) ? PGNO_INVALID : sp->pgno,
+ ISINTERNAL(sp) ? PGNO_INVALID : NEXT_PGNO(sp),
+ LEVEL(sp), TYPE(sp));
+ }
+
+ /* Split the page. */
+ if ((ret = __bam_copy(file_dbp, sp, _lp, 0, argp->indx)) != 0 ||
+ (ret = __bam_copy(file_dbp, sp, _rp, argp->indx,
+ NUM_ENT(sp))) != 0)
+ goto out;
+
+ /* If the left child is wrong, update it. */
+ if (lp == NULL && (ret = mpf->get(
+ mpf, &argp->left, DB_MPOOL_CREATE, &lp)) != 0) {
+ __db_pgerr(file_dbp, argp->left, ret);
+ lp = NULL;
+ goto out;
+ }
+ if (l_update) {
+ memcpy(lp, _lp, file_dbp->pgsize);
+ lp->lsn = *lsnp;
+ if ((ret = mpf->put(mpf, lp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ lp = NULL;
+ }
+
+ /* If the right child is wrong, update it. */
+ if (rp == NULL && (ret = mpf->get(
+ mpf, &argp->right, DB_MPOOL_CREATE, &rp)) != 0) {
+ __db_pgerr(file_dbp, argp->right, ret);
+ rp = NULL;
+ goto out;
+ }
+ if (r_update) {
+ memcpy(rp, _rp, file_dbp->pgsize);
+ rp->lsn = *lsnp;
+ if ((ret = mpf->put(mpf, rp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ rp = NULL;
+ }
+
+ /*
+ * If the parent page is wrong, update it. This is of interest
+ * only if it was a root split, since root splits create parent
+ * pages. All other splits modify a parent page, but those are
+ * separately logged and recovered.
+ */
+ if (rootsplit && p_update) {
+ if (IS_BTREE_PAGE(sp)) {
+ ptype = P_IBTREE;
+ rc = argp->opflags & SPL_NRECS ? 1 : 0;
+ } else {
+ ptype = P_IRECNO;
+ rc = 1;
+ }
+
+ P_INIT(pp, file_dbp->pgsize, root_pgno,
+ PGNO_INVALID, PGNO_INVALID, _lp->level + 1, ptype);
+ RE_NREC_SET(pp, rc ? __bam_total(file_dbp, _lp) +
+ __bam_total(file_dbp, _rp) : 0);
+
+ pp->lsn = *lsnp;
+ if ((ret = mpf->put(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ pp = NULL;
+ }
+
+check_next: /*
+ * Finally, redo the next-page link if necessary. This is of
+ * interest only if it wasn't a root split -- inserting a new
+ * page in the tree requires that any following page have its
+ * previous-page pointer updated to our new page. The next
+ * page must exist because we're redoing the operation.
+ */
+ if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) {
+ if ((ret = mpf->get(mpf, &argp->npgno, 0, &np)) != 0) {
+ __db_pgerr(file_dbp, argp->npgno, ret);
+ np = NULL;
+ goto out;
+ }
+ cmp = log_compare(&LSN(np), &argp->nlsn);
+ CHECK_LSN(op, cmp, &LSN(np), &argp->nlsn);
+ if (cmp == 0) {
+ PREV_PGNO(np) = argp->right;
+ np->lsn = *lsnp;
+ if ((ret =
+ mpf->put(mpf, np, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ np = NULL;
+ }
+ }
+ } else {
+ /*
+ * If the split page is wrong, replace its contents with the
+ * logged page contents. If the page doesn't exist, it means
+ * that the create of the page never happened, nor did any of
+ * the adds onto the page that caused the split, and there's
+ * really no undo-ing to be done.
+ */
+ if ((ret = mpf->get(mpf, &pgno, 0, &pp)) != 0) {
+ pp = NULL;
+ goto lrundo;
+ }
+ if (log_compare(lsnp, &LSN(pp)) == 0) {
+ memcpy(pp, argp->pg.data, argp->pg.size);
+ if ((ret = mpf->put(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ pp = NULL;
+ }
+
+ /*
+ * If it's a root split and the left child ever existed, update
+ * its LSN. (If it's not a root split, we've updated the left
+ * page already -- it's the same as the split page.) If the
+ * right child ever existed, root split or not, update its LSN.
+ * The undo of the page allocation(s) will restore them to the
+ * free list.
+ */
+lrundo: if ((rootsplit && lp != NULL) || rp != NULL) {
+ if (rootsplit && lp != NULL &&
+ log_compare(lsnp, &LSN(lp)) == 0) {
+ lp->lsn = argp->llsn;
+ if ((ret =
+ mpf->put(mpf, lp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ lp = NULL;
+ }
+ if (rp != NULL &&
+ log_compare(lsnp, &LSN(rp)) == 0) {
+ rp->lsn = argp->rlsn;
+ if ((ret =
+ mpf->put(mpf, rp, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ rp = NULL;
+ }
+ }
+
+ /*
+ * Finally, undo the next-page link if necessary. This is of
+ * interest only if it wasn't a root split -- inserting a new
+ * page in the tree requires that any following page have its
+ * previous-page pointer updated to our new page. Since it's
+ * possible that the next-page never existed, we ignore it as
+ * if there's nothing to undo.
+ */
+ if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) {
+ if ((ret = mpf->get(mpf, &argp->npgno, 0, &np)) != 0) {
+ np = NULL;
+ goto done;
+ }
+ if (log_compare(lsnp, &LSN(np)) == 0) {
+ PREV_PGNO(np) = argp->left;
+ np->lsn = argp->nlsn;
+ if (mpf->put(mpf, np, DB_MPOOL_DIRTY))
+ goto out;
+ np = NULL;
+ }
+ }
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: /* Free any pages that weren't dirtied. */
+ if (pp != NULL && (t_ret = mpf->put(mpf, pp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (lp != NULL && (t_ret = mpf->put(mpf, lp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (np != NULL && (t_ret = mpf->put(mpf, np, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (rp != NULL && (t_ret = mpf->put(mpf, rp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Free any allocated space. */
+ if (_lp != NULL)
+ __os_free(dbenv, _lp);
+ if (_rp != NULL)
+ __os_free(dbenv, _rp);
+ if (sp != NULL)
+ __os_free(dbenv, sp);
+
+ REC_CLOSE;
+}
+
+/*
+ * __bam_rsplit_recover --
+ * Recovery function for a reverse split.
+ *
+ * PUBLIC: int __bam_rsplit_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rsplit_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rsplit_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LSN copy_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno, root_pgno;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_rsplit_print);
+ REC_INTRO(__bam_rsplit_read, 1);
+
+ /* Fix the root page. */
+ pgno = root_pgno = argp->root_pgno;
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) {
+ /* The root page must always exist if we are going forward. */
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ }
+ /* This must be the root of an OPD tree. */
+ DB_ASSERT(root_pgno !=
+ ((BTREE *)file_dbp->bt_internal)->bt_root);
+ ret = 0;
+ goto do_page;
+ }
+ modified = 0;
+ /*
+ * cmp_p == 0: the page still carries its pre-operation LSN, so the
+ * logged change must be re-applied when rolling forward.
+ * cmp_n == 0: the page carries this log record's LSN, so the change
+ * must be backed out when rolling backward.
+ */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->rootlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->rootlsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size);
+ pagep->pgno = root_pgno;
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ P_INIT(pagep, file_dbp->pgsize, root_pgno,
+ argp->nrec, PGNO_INVALID, pagep->level + 1,
+ IS_BTREE_PAGE(pagep) ? P_IBTREE : P_IRECNO);
+ if ((ret = __db_pitem(dbc, pagep, 0,
+ argp->rootent.size, &argp->rootent, NULL)) != 0)
+ goto out;
+ pagep->lsn = argp->rootlsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+do_page:
+ /*
+ * Fix the page copied over the root page. It's possible that the
+ * page never made it to disk, so if we're undo-ing and the page
+ * doesn't exist, it's okay and there's nothing further to do.
+ */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+ modified = 0;
+ /*
+ * The logged page image holds the LSN the copied page had before
+ * the operation; extract it with an unaligned copy, as the log
+ * buffer carries no alignment guarantee.
+ */
+ (void)__ua_memcpy(&copy_lsn, &LSN(argp->pgdbt.data), sizeof(DB_LSN));
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &copy_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &copy_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size);
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_adj_recover --
+ * Recovery function for adj.
+ *
+ * PUBLIC: int __bam_adj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_adj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_adj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_adj_print);
+ REC_INTRO(__bam_adj_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ /* cmp_p == 0: change not yet on page; cmp_n == 0: change is on page. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /*
+ * Need to redo update described: re-apply the logged index
+ * adjustment with its original insert/delete sense.
+ */
+ if ((ret = __bam_adjindx(dbc,
+ pagep, argp->indx, argp->indx_copy, argp->is_insert)) != 0)
+ goto out;
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /*
+ * Need to undo update described: re-apply the adjustment with
+ * the insert flag inverted, which reverses the change.
+ */
+ if ((ret = __bam_adjindx(dbc,
+ pagep, argp->indx, argp->indx_copy, !argp->is_insert)) != 0)
+ goto out;
+
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_cadjust_recover --
+ * Recovery function for the adjust of a count change in an internal
+ * page.
+ *
+ * PUBLIC: int __bam_cadjust_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cadjust_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cadjust_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_cadjust_print);
+ REC_INTRO(__bam_cadjust_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ /* cmp_p == 0: change not yet on page; cmp_n == 0: change is on page. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /*
+ * Need to redo update described: add the logged adjustment to
+ * the record count in the internal entry (btree and recno
+ * internal entries have different layouts), and, if
+ * CAD_UPDATEROOT is set, to the page's total as well.
+ */
+ if (IS_BTREE_PAGE(pagep)) {
+ GET_BINTERNAL(file_dbp, pagep, argp->indx)->nrecs +=
+ argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, argp->adjust);
+ } else {
+ GET_RINTERNAL(file_dbp, pagep, argp->indx)->nrecs +=
+ argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, argp->adjust);
+ }
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /*
+ * Need to undo update described: subtract the same adjustment
+ * everywhere it was added above.
+ */
+ if (IS_BTREE_PAGE(pagep)) {
+ GET_BINTERNAL(file_dbp, pagep, argp->indx)->nrecs -=
+ argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, -(argp->adjust));
+ } else {
+ GET_RINTERNAL(file_dbp, pagep, argp->indx)->nrecs -=
+ argp->adjust;
+ if (argp->opflags & CAD_UPDATEROOT)
+ RE_NREC_ADJ(pagep, -(argp->adjust));
+ }
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_cdel_recover --
+ * Recovery function for the intent-to-delete of a cursor record.
+ *
+ * PUBLIC: int __bam_cdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_cdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_cdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t indx;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_cdel_print);
+ REC_INTRO(__bam_cdel_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ /* cmp_p == 0: change not yet on page; cmp_n == 0: change is on page. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /*
+ * Need to redo update described: set the on-page deleted flag
+ * on the data item. On leaf btree pages key and data items
+ * alternate, so step past the key (O_INDX) to the data item.
+ */
+ indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0);
+ B_DSET(GET_BKEYDATA(file_dbp, pagep, indx)->type);
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /*
+ * Need to undo update described: clear the deleted flag and
+ * let the cursor-adjustment code know the delete went away.
+ */
+ indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0);
+ B_DCLR(GET_BKEYDATA(file_dbp, pagep, indx)->type);
+
+ (void)__bam_ca_delete(file_dbp, argp->pgno, argp->indx, 0);
+
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_repl_recover --
+ * Recovery function for page item replacement.
+ *
+ * PUBLIC: int __bam_repl_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_repl_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_repl_args *argp;
+ BKEYDATA *bk;
+ DB *file_dbp;
+ DBC *dbc;
+ DBT dbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+ u_int8_t *p;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_repl_print);
+ REC_INTRO(__bam_repl_read, 1);
+
+ /* Get the page; if it never existed and we're undoing, we're done. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+ bk = GET_BKEYDATA(file_dbp, pagep, argp->indx);
+
+ modified = 0;
+ /* cmp_p == 0: change not yet on page; cmp_n == 0: change is on page. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /*
+ * Need to redo update described.
+ *
+ * Re-build the replacement item: the unchanged prefix bytes
+ * of the current item, then the logged replacement bytes,
+ * then the unchanged suffix bytes.
+ */
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.size = argp->prefix + argp->suffix + argp->repl.size;
+ if ((ret = __os_malloc(dbenv, dbt.size, &dbt.data)) != 0)
+ goto out;
+ p = dbt.data;
+ memcpy(p, bk->data, argp->prefix);
+ p += argp->prefix;
+ memcpy(p, argp->repl.data, argp->repl.size);
+ p += argp->repl.size;
+ memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix);
+
+ ret = __bam_ritem(dbc, pagep, argp->indx, &dbt);
+ __os_free(dbenv, dbt.data);
+ if (ret != 0)
+ goto out;
+
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /*
+ * Need to undo update described.
+ *
+ * Re-build the original item the same way, substituting the
+ * logged original bytes for the replacement bytes.
+ */
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.size = argp->prefix + argp->suffix + argp->orig.size;
+ if ((ret = __os_malloc(dbenv, dbt.size, &dbt.data)) != 0)
+ goto out;
+ p = dbt.data;
+ memcpy(p, bk->data, argp->prefix);
+ p += argp->prefix;
+ memcpy(p, argp->orig.data, argp->orig.size);
+ p += argp->orig.size;
+ memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix);
+
+ ret = __bam_ritem(dbc, pagep, argp->indx, &dbt);
+ __os_free(dbenv, dbt.data);
+ if (ret != 0)
+ goto out;
+
+ /* Reset the deleted flag, if necessary. */
+ if (argp->isdeleted)
+ B_DSET(GET_BKEYDATA(file_dbp, pagep, argp->indx)->type);
+
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_root_recover --
+ * Recovery function for setting the root page on the meta-data page.
+ *
+ * PUBLIC: int __bam_root_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_root_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_root_args *argp;
+ BTMETA *meta;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ int cmp_n, cmp_p, modified, ret;
+
+ meta = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__bam_root_print);
+ REC_INTRO(__bam_root_read, 0);
+
+ if ((ret = mpf->get(mpf, &argp->meta_pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist on redo. */
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, argp->meta_pgno, ret);
+ goto out;
+ } else
+ goto done;
+ }
+
+ modified = 0;
+ /* cmp_p == 0: change not yet on page; cmp_n == 0: change is on page. */
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ meta->root = argp->root_pgno;
+ meta->dbmeta.lsn = *lsnp;
+ /* Keep the in-memory copy of the root pgno in sync. */
+ ((BTREE *)file_dbp->bt_internal)->bt_root = meta->root;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Nothing to undo except lsn. */
+ meta->dbmeta.lsn = argp->meta_lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ meta = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __bam_curadj_recover --
+ * Transaction abort function to undo cursor adjustments.
+ * This should only be triggered by subtransaction aborts.
+ *
+ * PUBLIC: int __bam_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_curadj_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ int ret;
+
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__bam_curadj_print);
+ REC_INTRO(__bam_curadj_read, 0);
+
+ ret = 0;
+ /* Cursor adjustments are only ever backed out on transaction abort. */
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ /* Dispatch on the kind of adjustment originally logged. */
+ switch(argp->mode) {
+ case DB_CA_DI:
+ /* Reverse the index shift by applying its negation. */
+ if ((ret = __bam_ca_di(dbc, argp->from_pgno,
+ argp->from_indx, -(int)argp->first_indx)) != 0)
+ goto out;
+ break;
+ case DB_CA_DUP:
+ if ((ret = __bam_ca_undodup(file_dbp, argp->first_indx,
+ argp->from_pgno, argp->from_indx, argp->to_indx)) != 0)
+ goto out;
+ break;
+
+ case DB_CA_RSPLIT:
+ if ((ret =
+ __bam_ca_rsplit(dbc, argp->to_pgno, argp->from_pgno)) != 0)
+ goto out;
+ break;
+
+ case DB_CA_SPLIT:
+ __bam_ca_undosplit(file_dbp, argp->from_pgno,
+ argp->to_pgno, argp->left_pgno, argp->from_indx);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+out: REC_CLOSE;
+}
+
+/*
+ * __bam_rcuradj_recover --
+ * Transaction abort function to undo cursor adjustments in rrecno.
+ * This should only be triggered by subtransaction aborts.
+ *
+ * PUBLIC: int __bam_rcuradj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __bam_rcuradj_args *argp;
+ BTREE_CURSOR *cp;
+ DB *file_dbp;
+ DBC *dbc, *rdbc;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ COMPQUIET(info, NULL);
+ rdbc = NULL;
+
+ REC_PRINT(__bam_rcuradj_print);
+ REC_INTRO(__bam_rcuradj_read, 0);
+
+ ret = t_ret = 0;
+
+ /* Cursor adjustments are only ever backed out on transaction abort. */
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ /*
+ * We don't know whether we're in an offpage dup set, and
+ * thus don't know whether the dbc REC_INTRO handed us is
+ * of a reasonable type. It's certainly unset, so if this is
+ * an offpage dup set, we don't have an OPD cursor. The
+ * simplest solution is just to allocate a whole new cursor
+ * for our use; we're only really using it to pass some
+ * state into __ram_ca, and this way we don't need to make
+ * this function know anything about how offpage dups work.
+ */
+ if ((ret =
+ __db_icursor(file_dbp,
+ NULL, DB_RECNO, argp->root, 0, DB_LOCK_INVALIDID, &rdbc)) != 0)
+ goto out;
+
+ /* Point the scratch cursor at the logged record. */
+ cp = (BTREE_CURSOR *)rdbc->internal;
+ F_SET(cp, C_RENUMBER);
+ cp->recno = argp->recno;
+
+ switch(argp->mode) {
+ case CA_DELETE:
+ /*
+ * The way to undo a delete is with an insert. Since
+ * we're undoing it, the delete flag must be set.
+ */
+ F_SET(cp, C_DELETED);
+ F_SET(cp, C_RENUMBER); /* Just in case. */
+ cp->order = argp->order;
+ __ram_ca(rdbc, CA_ICURRENT);
+ break;
+ case CA_IAFTER:
+ case CA_IBEFORE:
+ case CA_ICURRENT:
+ /*
+ * The way to undo an insert is with a delete. The delete
+ * flag is unset to start with.
+ */
+ F_CLR(cp, C_DELETED);
+ cp->order = INVALID_ORDER;
+ __ram_ca(rdbc, CA_DELETE);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+out: if (rdbc != NULL && (t_ret = rdbc->c_close(rdbc)) != 0 && ret == 0)
+ ret = t_ret;
+ REC_CLOSE;
+}
diff --git a/storage/bdb/btree/bt_reclaim.c b/storage/bdb/btree/bt_reclaim.c
new file mode 100644
index 00000000000..ae4554ea7d6
--- /dev/null
+++ b/storage/bdb/btree/bt_reclaim.c
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_reclaim.c,v 11.11 2002/03/29 20:46:26 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+/*
+ * __bam_reclaim --
+ * Free a database.
+ *
+ * PUBLIC: int __bam_reclaim __P((DB *, DB_TXN *));
+ */
+int
+__bam_reclaim(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ /* We need a cursor to hand to the traversal code. */
+ ret = dbp->cursor(dbp, txn, &dbc, 0);
+ if (ret != 0)
+ return (ret);
+
+ /* Visit every page in the tree, freeing each via the callback. */
+ ret = __bam_traverse(dbc,
+ DB_LOCK_WRITE, dbc->internal->root, __db_reclaim_callback, dbc);
+
+ /* Close the cursor, preserving the first error encountered. */
+ t_ret = dbc->c_close(dbc);
+ if (ret == 0 && t_ret != 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __bam_truncate --
+ * Truncate a database.
+ *
+ * PUBLIC: int __bam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+ */
+int
+__bam_truncate(dbp, txn, countp)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp;
+{
+ DBC *dbc;
+ db_trunc_param trunc;
+ int ret, t_ret;
+
+ /* We need a cursor to hand to the traversal code. */
+ ret = dbp->cursor(dbp, txn, &dbc, 0);
+ if (ret != 0)
+ return (ret);
+
+ /* The callback accumulates the count of discarded records here. */
+ trunc.count = 0;
+ trunc.dbc = dbc;
+
+ /* Visit every page in the tree, truncating via the callback. */
+ ret = __bam_traverse(dbc,
+ DB_LOCK_WRITE, dbc->internal->root, __db_truncate_callback, &trunc);
+
+ /* Close the cursor, preserving the first error encountered. */
+ t_ret = dbc->c_close(dbc);
+ if (ret == 0 && t_ret != 0)
+ ret = t_ret;
+
+ /* Hand the record count back to the caller. */
+ *countp = trunc.count;
+
+ return (ret);
+}
diff --git a/storage/bdb/btree/bt_recno.c b/storage/bdb/btree/bt_recno.c
new file mode 100644
index 00000000000..fab684f3a5f
--- /dev/null
+++ b/storage/bdb/btree/bt_recno.c
@@ -0,0 +1,1327 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_recno.c,v 11.106 2002/08/16 04:56:30 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+static int __ram_add __P((DBC *, db_recno_t *, DBT *, u_int32_t, u_int32_t));
+static int __ram_source __P((DB *));
+static int __ram_sread __P((DBC *, db_recno_t));
+static int __ram_update __P((DBC *, db_recno_t, int));
+
+/*
+ * In recno, there are two meanings to the on-page "deleted" flag. If we're
+ * re-numbering records, it means the record was implicitly created. We skip
+ * over implicitly created records if doing a cursor "next" or "prev", and
+ * return DB_KEYEMPTY if they're explicitly requested. If not re-numbering
+ * records, it means that the record was implicitly created, or was deleted.
+ * We skip over implicitly created or deleted records if doing a cursor "next"
+ * or "prev", and return DB_KEYEMPTY if they're explicitly requested.
+ *
+ * If we're re-numbering records, then we have to detect in the cursor that
+ * a record was deleted, and adjust the cursor as necessary on the next get.
+ * If we're not re-numbering records, then we can detect that a record has
+ * been deleted by looking at the actual on-page record, so we completely
+ * ignore the cursor's delete flag. This is different from the B+tree code.
+ * It also maintains whether the cursor references a deleted record in the
+ * cursor, and it doesn't always check the on-page value.
+ */
+/*
+ * NOTE: these macros evaluate their cursor argument more than once;
+ * pass only side-effect-free expressions.
+ */
+#define CD_SET(cp) { \
+ if (F_ISSET(cp, C_RENUMBER)) \
+ F_SET(cp, C_DELETED); \
+}
+#define CD_CLR(cp) { \
+ if (F_ISSET(cp, C_RENUMBER)) { \
+ F_CLR(cp, C_DELETED); \
+ cp->order = INVALID_ORDER; \
+ } \
+}
+#define CD_ISSET(cp) \
+ (F_ISSET(cp, C_RENUMBER) && F_ISSET(cp, C_DELETED))
+
+/*
+ * Macros for comparing the ordering of two cursors.
+ * cp1 comes before cp2 iff one of the following holds:
+ * cp1's recno is less than cp2's recno
+ * recnos are equal, both deleted, and cp1's order is less than cp2's
+ * recnos are equal, cp1 deleted, and cp2 not deleted
+ */
+#define C_LESSTHAN(cp1, cp2) \
+ (((cp1)->recno < (cp2)->recno) || \
+ (((cp1)->recno == (cp2)->recno) && \
+ ((CD_ISSET((cp1)) && CD_ISSET((cp2)) && (cp1)->order < (cp2)->order) || \
+ (CD_ISSET((cp1)) && !CD_ISSET((cp2))))))
+
+/*
+ * cp1 is equal to cp2 iff their recnos and delete flags are identical,
+ * and if the delete flag is set their orders are also identical.
+ */
+#define C_EQUAL(cp1, cp2) \
+ ((cp1)->recno == (cp2)->recno && CD_ISSET((cp1)) == CD_ISSET((cp2)) && \
+ (!CD_ISSET((cp1)) || (cp1)->order == (cp2)->order))
+
+/*
+ * Do we need to log the current cursor adjustment?
+ * (Only when logging is enabled and we're in a nested transaction.)
+ */
+#define CURADJ_LOG(dbc) \
+ (DBC_LOGGING((dbc)) && (dbc)->txn != NULL && (dbc)->txn->parent != NULL)
+
+/*
+ * After a search, copy the found page into the cursor, discarding any
+ * currently held lock.
+ */
+#define STACK_TO_CURSOR(cp) { \
+ (cp)->page = (cp)->csp->page; \
+ (cp)->pgno = (cp)->csp->page->pgno; \
+ (cp)->indx = (cp)->csp->indx; \
+ (void)__TLPUT(dbc, (cp)->lock); \
+ (cp)->lock = (cp)->csp->lock; \
+ (cp)->lock_mode = (cp)->csp->lock_mode; \
+}
+
+/*
+ * __ram_open --
+ * Recno open function.
+ *
+ * PUBLIC: int __ram_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, db_pgno_t, u_int32_t));
+ */
+int
+__ram_open(dbp, txn, name, base_pgno, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ BTREE *t;
+ DBC *dbc;
+ int ret, t_ret;
+
+ COMPQUIET(name, NULL);
+ t = dbp->bt_internal;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->stat = __bam_stat;
+
+ /* Start up the tree. */
+ if ((ret = __bam_read_root(dbp, txn, base_pgno, flags)) != 0)
+ return (ret);
+
+ /*
+ * If the user specified a source tree, open it and map it in.
+ *
+ * !!!
+ * We don't complain if the user specified transactions or threads.
+ * It's possible to make it work, but you'd better know what you're
+ * doing!
+ */
+ if (t->re_source != NULL && (ret = __ram_source(dbp)) != 0)
+ return (ret);
+
+ /* If we're snapshotting an underlying source file, do it now. */
+ if (F_ISSET(dbp, DB_AM_SNAPSHOT)) {
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ /*
+ * Do the snapshot: read through DB_MAX_RECORDS to pull in
+ * everything the source has. DB_NOTFOUND here just means
+ * the source ran out of records -- not an error.
+ */
+ if ((ret = __ram_update(dbc,
+ DB_MAX_RECORDS, 0)) != 0 && ret == DB_NOTFOUND)
+ ret = 0;
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ return (ret);
+}
+
+/*
+ * __ram_append --
+ * Recno append function.
+ *
+ * PUBLIC: int __ram_append __P((DBC *, DBT *, DBT *));
+ */
+int
+__ram_append(dbc, key, data)
+ DBC *dbc;
+ DBT *key, *data;
+{
+ BTREE_CURSOR *cp;
+ int ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Make sure all of the backing source file has been read in.
+ * Whether the last record was found or simply never existed,
+ * go ahead and append the user's record.
+ */
+ switch (ret = __ram_update(dbc, DB_MAX_RECORDS, 0)) {
+ case 0:
+ case DB_NOTFOUND:
+ ret = __ram_add(dbc, &cp->recno, data, DB_APPEND, 0);
+ break;
+ default:
+ break;
+ }
+
+ /* Hand the new record number back to the caller. */
+ if (ret == 0)
+ ret = __db_retcopy(dbc->dbp->dbenv, key, &cp->recno,
+ sizeof(cp->recno), &dbc->rkey->data, &dbc->rkey->ulen);
+
+ return (ret);
+}
+
+/*
+ * __ram_c_del --
+ * Recno cursor->c_del function.
+ *
+ * Returns 0 on success, DB_KEYEMPTY or DB_NOTFOUND when the record
+ * isn't deletable, or an error from the underlying tree operations.
+ *
+ * PUBLIC: int __ram_c_del __P((DBC *));
+ */
+int
+__ram_c_del(dbc)
+ DBC *dbc;
+{
+ BKEYDATA bk;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LSN lsn;
+ DBT hdr, data;
+ EPG *epg;
+ int exact, ret, stack;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ stack = 0;
+
+ /*
+ * The semantics of cursors during delete are as follows: in
+ * non-renumbering recnos, records are replaced with a marker
+ * containing a delete flag. If the record referenced by this cursor
+ * has already been deleted, we will detect that as part of the delete
+ * operation, and fail.
+ *
+ * In renumbering recnos, cursors which represent deleted items
+ * are flagged with the C_DELETED flag, and it is an error to
+ * call c_del a second time without an intervening cursor motion.
+ */
+ if (CD_ISSET(cp))
+ return (DB_KEYEMPTY);
+
+ /* Search the tree for the key; delete only deletes exact matches. */
+ if ((ret = __bam_rsearch(dbc, &cp->recno, S_DELETE, 1, &exact)) != 0)
+ goto err;
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ stack = 1;
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
+
+ /*
+ * If re-numbering records, the on-page deleted flag can only mean
+ * that this record was implicitly created. Applications aren't
+ * permitted to delete records they never created, return an error.
+ *
+ * If not re-numbering records, the on-page deleted flag means that
+ * this record was implicitly created, or, was deleted at some time.
+ * The former is an error because applications aren't permitted to
+ * delete records they never created, the latter is an error because
+ * if the record was "deleted", we could never have found it.
+ */
+ if (B_DISSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type)) {
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ if (F_ISSET(cp, C_RENUMBER)) {
+ /* Delete the item, adjust the counts, adjust the cursors. */
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ goto err;
+ /*
+ * Propagate the record-count change up the tree; this can
+ * fail, so check the return instead of discarding it.
+ */
+ if ((ret = __bam_adjust(dbc, -1)) != 0)
+ goto err;
+ if (__ram_ca(dbc, CA_DELETE) > 0 &&
+ CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(dbp, dbc->txn,
+ &lsn, 0, CA_DELETE, cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+
+ /*
+ * If the page is empty, delete it.
+ *
+ * We never delete a root page. First, root pages of primary
+ * databases never go away, recno or otherwise. However, if
+ * it's the root page of an off-page duplicates database, then
+ * it can be deleted. We don't delete it here because we have
+ * no way of telling the primary database page holder (e.g.,
+ * the hash access method) that its page element should be
+ * cleaned up because the underlying tree is gone. So, we
+ * keep the page around until the last cursor referencing the
+ * empty tree is closed, and then clean it up.
+ */
+ if (NUM_ENT(cp->page) == 0 && PGNO(cp->page) != cp->root) {
+ /*
+ * We already have a locked stack of pages. However,
+ * there are likely entries in the stack that aren't
+ * going to be emptied by removing the single reference
+ * to the emptied page (or one of its parents).
+ */
+ for (epg = cp->csp; epg >= cp->sp; --epg)
+ if (NUM_ENT(epg->page) > 1)
+ break;
+
+ /*
+ * We want to delete a single item out of the last page
+ * that we're not deleting.
+ */
+ ret = __bam_dpages(dbc, epg);
+
+ /*
+ * Regardless of the return from __bam_dpages, it will
+ * discard our stack and pinned page.
+ */
+ stack = 0;
+ cp->page = NULL;
+ }
+ } else {
+ /* Use a delete/put pair to replace the record with a marker. */
+ if ((ret = __bam_ditem(dbc, cp->page, cp->indx)) != 0)
+ goto err;
+
+ B_TSET(bk.type, B_KEYDATA, 1);
+ bk.len = 0;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bk;
+ hdr.size = SSZA(BKEYDATA, data);
+ memset(&data, 0, sizeof(data));
+ data.data = (void *)"";
+ data.size = 0;
+ if ((ret = __db_pitem(dbc,
+ cp->page, cp->indx, BKEYDATA_SIZE(0), &hdr, &data)) != 0)
+ goto err;
+ }
+
+ /* Flag the backing source file as out of date. */
+ t->re_modified = 1;
+
+err: if (stack)
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+
+ return (ret);
+}
+
+/*
+ * __ram_c_get --
+ * Recno cursor->c_get function.
+ *
+ * PUBLIC: int __ram_c_get
+ * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ */
+int
+__ram_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ int cmp, exact, ret;
+
+ COMPQUIET(pgnop, NULL);
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ LF_CLR(DB_MULTIPLE|DB_MULTIPLE_KEY);
+retry: switch (flags) {
+ case DB_CURRENT:
+ /*
+ * If we're using mutable records and the deleted flag is
+ * set, the cursor is pointing at a nonexistent record;
+ * return an error.
+ */
+ if (CD_ISSET(cp))
+ return (DB_KEYEMPTY);
+ break;
+ case DB_NEXT_DUP:
+ /*
+ * If we're not in an off-page dup set, we know there's no
+ * next duplicate since recnos don't have them. If we
+ * are in an off-page dup set, the next item assuredly is
+ * a dup, so we set flags to DB_NEXT and keep going.
+ */
+ if (!F_ISSET(dbc, DBC_OPD))
+ return (DB_NOTFOUND);
+ /* FALLTHROUGH */
+ case DB_NEXT_NODUP:
+ /*
+ * Recno databases don't have duplicates, set flags to DB_NEXT
+ * and keep going.
+ */
+ /* FALLTHROUGH */
+ case DB_NEXT:
+ flags = DB_NEXT;
+ /*
+ * If record numbers are mutable: if we just deleted a record,
+ * we have to avoid incrementing the record number so that we
+ * return the right record by virtue of renumbering the tree.
+ */
+ if (CD_ISSET(cp))
+ break;
+
+ if (cp->recno != RECNO_OOB) {
+ ++cp->recno;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST:
+ flags = DB_NEXT;
+ cp->recno = 1;
+ break;
+ case DB_PREV_NODUP:
+ /*
+ * Recno databases don't have duplicates, set flags to DB_PREV
+ * and keep going.
+ */
+ /* FALLTHROUGH */
+ case DB_PREV:
+ flags = DB_PREV;
+ if (cp->recno != RECNO_OOB) {
+ if (cp->recno == 1) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ --cp->recno;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST:
+ flags = DB_PREV;
+ if (((ret = __ram_update(dbc,
+ DB_MAX_RECORDS, 0)) != 0) && ret != DB_NOTFOUND)
+ goto err;
+ if ((ret = __bam_nrecs(dbc, &cp->recno)) != 0)
+ goto err;
+ if (cp->recno == 0) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ break;
+ case DB_GET_BOTHC:
+ /*
+ * If we're doing a join and these are offpage dups,
+ * we want to keep searching forward from after the
+ * current cursor position. Increment the recno by 1,
+ * then proceed as for a DB_SET.
+ *
+ * Otherwise, we know there are no additional matching
+ * data, as recnos don't have dups. return DB_NOTFOUND.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ cp->recno++;
+ break;
+ }
+ ret = DB_NOTFOUND;
+ goto err;
+ /* NOTREACHED */
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ /*
+ * If we're searching a set of off-page dups, we start
+ * a new linear search from the first record. Otherwise,
+ * we compare the single data item associated with the
+ * requested record for a match.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ cp->recno = 1;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_SET:
+ case DB_SET_RANGE:
+ if ((ret = __ram_getno(dbc, key, &cp->recno, 0)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_flag(dbp->dbenv, "__ram_c_get", flags);
+ goto err;
+ }
+
+ /*
+ * For DB_PREV, DB_LAST, DB_SET and DB_SET_RANGE, we have already
+ * called __ram_update() to make sure sufficient records have been
+ * read from the backing source file. Do it now for DB_CURRENT (if
+ * the current record was deleted we may need more records from the
+ * backing file for a DB_CURRENT operation), DB_FIRST and DB_NEXT.
+ * (We don't have to test for flags == DB_FIRST, because the switch
+ * statement above re-set flags to DB_NEXT in that case.)
+ */
+ if ((flags == DB_NEXT || flags == DB_CURRENT) && ((ret =
+ __ram_update(dbc, cp->recno, 0)) != 0) && ret != DB_NOTFOUND)
+ goto err;
+
+ for (;; ++cp->recno) {
+ /* Search the tree for the record. */
+ if ((ret = __bam_rsearch(dbc, &cp->recno,
+ F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND,
+ 1, &exact)) != 0)
+ goto err;
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
+
+ /*
+ * If re-numbering records, the on-page deleted flag means this
+ * record was implicitly created. If not re-numbering records,
+ * the on-page deleted flag means this record was implicitly
+ * created, or, it was deleted at some time. Regardless, we
+ * skip such records if doing cursor next/prev operations or
+ * walking through off-page duplicates, and fail if they were
+ * requested explicitly by the application.
+ */
+ if (B_DISSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type))
+ switch (flags) {
+ case DB_NEXT:
+ case DB_PREV:
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ goto retry;
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ /*
+ * If we're an OPD tree, we don't care about
+ * matching a record number on a DB_GET_BOTH
+ * -- everything belongs to the same tree. A
+ * normal recno should give up and return
+ * DB_NOTFOUND if the matching recno is deleted.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ continue;
+ }
+ ret = DB_NOTFOUND;
+ goto err;
+ default:
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ if (flags == DB_GET_BOTH ||
+ flags == DB_GET_BOTHC || flags == DB_GET_BOTH_RANGE) {
+ if ((ret = __bam_cmp(dbp, data,
+ cp->page, cp->indx, __bam_defcmp, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0)
+ break;
+ if (!F_ISSET(dbc, DBC_OPD)) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ } else
+ break;
+ }
+
+ /* Return the key if the user didn't give us one. */
+ if (!F_ISSET(dbc, DBC_OPD)) {
+ if (flags != DB_GET_BOTH && flags != DB_GET_BOTH_RANGE &&
+ flags != DB_SET && flags != DB_SET_RANGE)
+ ret = __db_retcopy(dbp->dbenv,
+ key, &cp->recno, sizeof(cp->recno),
+ &dbc->rkey->data, &dbc->rkey->ulen);
+ F_SET(key, DB_DBT_ISSET);
+ }
+
+ /* The cursor was reset, no further delete adjustment is necessary. */
+err: CD_CLR(cp);
+
+ return (ret);
+}
+
+/*
+ * __ram_c_put --
+ * Recno cursor->c_put function.
+ *
+ * For DB_KEYFIRST/DB_KEYLAST the key DBT supplies the record number;
+ * for DB_AFTER/DB_BEFORE the new record's number is copied back into
+ * the key DBT on success.  pgnop is not used by this function.
+ * Returns 0 or a DB error code.
+ *
+ * PUBLIC: int __ram_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ */
+int
+__ram_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LSN lsn;
+ int exact, nc, ret, t_ret;
+ u_int32_t iiflags;
+ void *arg;
+
+ /* pgnop is never written here; quiet the compiler. */
+ COMPQUIET(pgnop, NULL);
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * DB_KEYFIRST and DB_KEYLAST mean different things if they're
+ * used in an off-page duplicate tree. If we're an off-page
+ * duplicate tree, they really mean "put at the beginning of the
+ * tree" and "put at the end of the tree" respectively, so translate
+ * them to something else.
+ */
+ if (F_ISSET(dbc, DBC_OPD))
+ switch (flags) {
+ case DB_KEYFIRST:
+ cp->recno = 1;
+ flags = DB_BEFORE;
+ break;
+ case DB_KEYLAST:
+ if ((ret = __ram_add(dbc,
+ &cp->recno, data, DB_APPEND, 0)) != 0)
+ return (ret);
+ /* Log the cursor adjustment when the txn requires it. */
+ if (CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0,
+ CA_ICURRENT, cp->root, cp->recno, cp->order)))
+ return (ret);
+ return (0);
+ }
+
+ /*
+ * Handle normal DB_KEYFIRST/DB_KEYLAST; for a recno, which has
+ * no duplicates, these are identical and mean "put the given
+ * datum at the given recno".
+ *
+ * Note that the code here used to be in __ram_put; now, we
+ * go through the access-method-common __db_put function, which
+ * handles DB_NOOVERWRITE, so we and __ram_add don't have to.
+ */
+ if (flags == DB_KEYFIRST || flags == DB_KEYLAST) {
+ ret = __ram_getno(dbc, key, &cp->recno, 1);
+ if (ret == 0 || ret == DB_NOTFOUND)
+ ret = __ram_add(dbc, &cp->recno, data, 0, 0);
+ return (ret);
+ }
+
+ /*
+ * If we're putting with a cursor that's marked C_DELETED, we need to
+ * take special care; the cursor doesn't "really" reference the item
+ * corresponding to its current recno, but instead is "between" that
+ * record and the current one. Translate the actual insert into
+ * DB_BEFORE, and let the __ram_ca work out the gory details of what
+ * should wind up pointing where.
+ */
+ if (CD_ISSET(cp))
+ iiflags = DB_BEFORE;
+ else
+ iiflags = flags;
+
+ /* Find the insert slot; on DB_NEEDSPLIT we split and retry here. */
+split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0)
+ goto err;
+ /*
+ * An inexact match is okay; it just means we're one record past the
+ * end, which is reasonable if we're marked deleted.
+ */
+ DB_ASSERT(exact || CD_ISSET(cp));
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
+
+ ret = __bam_iitem(dbc, key, data, iiflags, 0);
+ t_ret = __bam_stkrel(dbc, STK_CLRDBC);
+
+ if (t_ret != 0 && (ret == 0 || ret == DB_NEEDSPLIT))
+ ret = t_ret;
+ else if (ret == DB_NEEDSPLIT) {
+ arg = &cp->recno;
+ if ((ret = __bam_split(dbc, arg, NULL)) != 0)
+ goto err;
+ goto split;
+ }
+ if (ret != 0)
+ goto err;
+
+ /*
+ * NOTE(review): only DB_AFTER/DB_BEFORE/DB_CURRENT are expected to
+ * reach this switch; any other flag falls through unadjusted.
+ */
+ switch (flags) { /* Adjust the cursors. */
+ case DB_AFTER:
+ nc = __ram_ca(dbc, CA_IAFTER);
+
+ /*
+ * We only need to adjust this cursor forward if we truly added
+ * the item after the current recno, rather than remapping it
+ * to DB_BEFORE.
+ */
+ if (iiflags == DB_AFTER)
+ ++cp->recno;
+
+ /* Only log if __ram_ca found any relevant cursors. */
+ if (nc > 0 && CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, CA_IAFTER,
+ cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+ break;
+ case DB_BEFORE:
+ nc = __ram_ca(dbc, CA_IBEFORE);
+ --cp->recno;
+
+ /* Only log if __ram_ca found any relevant cursors. */
+ if (nc > 0 && CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, CA_IBEFORE,
+ cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+ break;
+ case DB_CURRENT:
+ /*
+ * We only need to do an adjustment if we actually
+ * added an item, which we only would have done if the
+ * cursor was marked deleted.
+ *
+ * Only log if __ram_ca found any relevant cursors.
+ */
+ if (CD_ISSET(cp) && __ram_ca(dbc, CA_ICURRENT) > 0 &&
+ CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0,
+ CA_ICURRENT, cp->root, cp->recno, cp->order)) != 0)
+ goto err;
+ break;
+ }
+
+ /* Return the key if we've created a new record. */
+ if (!F_ISSET(dbc, DBC_OPD) && (flags == DB_AFTER || flags == DB_BEFORE))
+ ret = __db_retcopy(dbp->dbenv, key, &cp->recno,
+ sizeof(cp->recno), &dbc->rkey->data, &dbc->rkey->ulen);
+
+ /* The cursor was reset, no further delete adjustment is necessary. */
+err: CD_CLR(cp);
+
+ return (ret);
+}
+
+/*
+ * __ram_ca --
+ * Adjust cursors. Returns the number of relevant cursors.
+ *
+ * op selects the adjustment: CA_DELETE (the record at the argument
+ * cursor's recno was deleted), CA_IBEFORE/CA_IAFTER (a record was
+ * inserted before/after it), or CA_ICURRENT (a deleted record at the
+ * cursor was replaced).  "Relevant" cursors are all open cursors
+ * whose root page matches the argument cursor's; they are counted
+ * whether or not their position actually changed.
+ *
+ * PUBLIC: int __ram_ca __P((DBC *, ca_recno_arg));
+ */
+int
+__ram_ca(dbc_arg, op)
+ DBC *dbc_arg;
+ ca_recno_arg op;
+{
+ BTREE_CURSOR *cp, *cp_arg;
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ db_recno_t recno;
+ int adjusted, found;
+ u_int32_t order;
+
+ dbp = dbc_arg->dbp;
+ dbenv = dbp->dbenv;
+ cp_arg = (BTREE_CURSOR *)dbc_arg->internal;
+ recno = cp_arg->recno;
+
+ found = 0;
+
+ /*
+ * It only makes sense to adjust cursors if we're a renumbering
+ * recno; we should only be called if this is one.
+ */
+ DB_ASSERT(F_ISSET(cp_arg, C_RENUMBER));
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ /*
+ * Adjust the cursors. See the comment in __bam_ca_delete().
+ */
+ /*
+ * If we're doing a delete, we need to find the highest
+ * order of any cursor currently pointing at this item,
+ * so we can assign a higher order to the newly deleted
+ * cursor. Unfortunately, this requires a second pass through
+ * the cursor list.
+ */
+ if (op == CA_DELETE) {
+ order = 1;
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if (cp_arg->root == cp->root &&
+ recno == cp->recno && CD_ISSET(cp) &&
+ order <= cp->order)
+ order = cp->order + 1;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ } else
+ /* Insert/replace ops don't need the max-order pass. */
+ order = INVALID_ORDER;
+
+ /* Now go through and do the actual adjustments. */
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (dbc = TAILQ_FIRST(&ldbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if (cp_arg->root != cp->root)
+ continue;
+ ++found;
+ adjusted = 0;
+ switch (op) {
+ case CA_DELETE:
+ if (recno < cp->recno) {
+ --cp->recno;
+ /*
+ * If the adjustment made them equal,
+ * we have to merge the orders.
+ */
+ if (recno == cp->recno && CD_ISSET(cp))
+ cp->order += order;
+ } else if (recno == cp->recno &&
+ !CD_ISSET(cp)) {
+ CD_SET(cp);
+ cp->order = order;
+ }
+ break;
+ case CA_IBEFORE:
+ /*
+ * IBEFORE is just like IAFTER, except that we
+ * adjust cursors on the current record too.
+ */
+ if (C_EQUAL(cp_arg, cp)) {
+ ++cp->recno;
+ adjusted = 1;
+ }
+ goto iafter;
+ case CA_ICURRENT:
+
+ /*
+ * If the original cursor wasn't deleted, we
+ * just did a replacement and so there's no
+ * need to adjust anything--we shouldn't have
+ * gotten this far. Otherwise, we behave
+ * much like an IAFTER, except that all
+ * cursors pointing to the current item get
+ * marked undeleted and point to the new
+ * item.
+ */
+ DB_ASSERT(CD_ISSET(cp_arg));
+ if (C_EQUAL(cp_arg, cp)) {
+ CD_CLR(cp);
+ break;
+ }
+ /* FALLTHROUGH */
+ case CA_IAFTER:
+iafter: if (!adjusted && C_LESSTHAN(cp_arg, cp)) {
+ ++cp->recno;
+ adjusted = 1;
+ }
+ if (recno == cp->recno && adjusted)
+ /*
+ * If we've moved this cursor's recno,
+ * split its order number--i.e.,
+ * decrement it by enough so that
+ * the lowest cursor moved has order 1.
+ * cp_arg->order is the split point,
+ * so decrement by one less than that.
+ */
+ cp->order -= (cp_arg->order - 1);
+ break;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (found);
+}
+
+/*
+ * __ram_getno --
+ * Check the user's record number, and make sure we've seen it.
+ *
+ * key->data must hold a db_recno_t; a value of 0 is rejected with
+ * EINVAL.  The number is stored through rep (if non-NULL), and for
+ * recno databases __ram_update() is called so the record has been
+ * read from any backing source file (created if can_create is set).
+ *
+ * NOTE(review): key->size is not validated against
+ * sizeof(db_recno_t) before the dereference -- confirm callers
+ * guarantee a well-formed key.
+ *
+ * PUBLIC: int __ram_getno __P((DBC *, const DBT *, db_recno_t *, int));
+ */
+int
+__ram_getno(dbc, key, rep, can_create)
+ DBC *dbc;
+ const DBT *key;
+ db_recno_t *rep;
+ int can_create;
+{
+ DB *dbp;
+ db_recno_t recno;
+
+ dbp = dbc->dbp;
+
+ /* Check the user's record number. */
+ if ((recno = *(db_recno_t *)key->data) == 0) {
+ __db_err(dbp->dbenv, "illegal record number of 0");
+ return (EINVAL);
+ }
+ if (rep != NULL)
+ *rep = recno;
+
+ /*
+ * Btree can neither create records nor read them in. Recno can
+ * do both, see if we can find the record.
+ */
+ return (dbc->dbtype == DB_RECNO ?
+ __ram_update(dbc, recno, can_create) : 0);
+}
+
+/*
+ * __ram_update --
+ * Ensure the tree has records up to and including the specified one.
+ *
+ * Missing records are first read from the backing source file (if
+ * any); if can_create is set, any still-missing records below recno
+ * are then added as empty, BI_DELETED placeholders.
+ */
+static int
+__ram_update(dbc, recno, can_create)
+ DBC *dbc;
+ db_recno_t recno;
+ int can_create;
+{
+ BTREE *t;
+ DB *dbp;
+ DBT *rdata;
+ db_recno_t nrecs;
+ int ret;
+
+ dbp = dbc->dbp;
+ t = dbp->bt_internal;
+
+ /*
+ * If we can't create records and we've read the entire backing input
+ * file, we're done.
+ */
+ if (!can_create && t->re_eof)
+ return (0);
+
+ /*
+ * If we haven't seen this record yet, try to get it from the original
+ * file.
+ */
+ if ((ret = __bam_nrecs(dbc, &nrecs)) != 0)
+ return (ret);
+ if (!t->re_eof && recno > nrecs) {
+ if ((ret = __ram_sread(dbc, recno)) != 0 && ret != DB_NOTFOUND)
+ return (ret);
+ if ((ret = __bam_nrecs(dbc, &nrecs)) != 0)
+ return (ret);
+ }
+
+ /*
+ * If we can create records, create empty ones up to the requested
+ * record.
+ */
+ if (!can_create || recno <= nrecs + 1)
+ return (0);
+
+ /* Zero-length datum used for the implicitly-created records. */
+ rdata = &dbc->my_rdata;
+ rdata->flags = 0;
+ rdata->size = 0;
+
+ while (recno > ++nrecs)
+ if ((ret = __ram_add(dbc,
+ &nrecs, rdata, 0, BI_DELETED)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * __ram_source --
+ * Load information about the backing file.
+ *
+ * Resolves t->re_source to a real path (replacing the stored name),
+ * opens it read-only as t->re_fp, and clears the EOF flag.
+ */
+static int
+__ram_source(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+ char *source;
+ int ret;
+
+ t = dbp->bt_internal;
+
+ /* Find the real name, and swap out the one we had before. */
+ if ((ret = __db_appname(dbp->dbenv,
+ DB_APP_DATA, t->re_source, 0, NULL, &source)) != 0)
+ return (ret);
+ __os_free(dbp->dbenv, t->re_source);
+ t->re_source = source;
+
+ /*
+ * !!!
+ * It's possible that the backing source file is read-only. We don't
+ * much care other than we'll complain if there are any modifications
+ * when it comes time to write the database back to the source.
+ */
+ /*
+ * NOTE(review): opened with "r" (text mode); on platforms where text
+ * and binary modes differ this may translate line endings -- confirm
+ * that is intended for fixed-length record data.
+ */
+ if ((t->re_fp = fopen(t->re_source, "r")) == NULL) {
+ ret = errno;
+ __db_err(dbp->dbenv, "%s: %s", t->re_source, db_strerror(ret));
+ return (ret);
+ }
+
+ t->re_eof = 0;
+ return (0);
+}
+
+/*
+ * __ram_writeback --
+ * Rewrite the backing file.
+ *
+ * Flushes every record in the tree back to the re_source file,
+ * clearing the re_modified flag on success.  Returns 0 or an error.
+ *
+ * PUBLIC: int __ram_writeback __P((DB *));
+ */
+int
+__ram_writeback(dbp)
+ DB *dbp;
+{
+ BTREE *t;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ DBT key, data;
+ FILE *fp;
+ db_recno_t keyno;
+ int ret, t_ret;
+ u_int8_t delim, *pad;
+
+ t = dbp->bt_internal;
+ dbenv = dbp->dbenv;
+ fp = NULL;
+ pad = NULL;
+
+ /* If the file wasn't modified, we're done. */
+ if (!t->re_modified)
+ return (0);
+
+ /* If there's no backing source file, we're done. */
+ if (t->re_source == NULL) {
+ t->re_modified = 0;
+ return (0);
+ }
+
+ /*
+ * Initialize the key/data DBTs before anything that can fail, so
+ * the error path below can safely test data.data.
+ */
+ memset(&key, 0, sizeof(key));
+ key.size = sizeof(db_recno_t);
+ key.data = &keyno;
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_REALLOC);
+
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ /*
+ * Read any remaining records into the tree.
+ *
+ * !!!
+ * This is why we can't support transactions when applications specify
+ * backing (re_source) files. At this point we have to read in the
+ * rest of the records from the file so that we can write all of the
+ * records back out again, which could modify a page for which we'd
+ * have to log changes and which we don't have locked. This could be
+ * partially fixed by taking a snapshot of the entire file during the
+ * DB->open as DB->open is transaction protected. But, if a checkpoint
+ * occurs then, the part of the log holding the copy of the file could
+ * be discarded, and that would make it impossible to recover in the
+ * face of disaster. This could all probably be fixed, but it would
+ * require transaction protecting the backing source file.
+ *
+ * XXX
+ * This could be made to work now that we have transactions protecting
+ * file operations. Margo has specifically asked for the privilege of
+ * doing this work.
+ */
+ if ((ret =
+ __ram_update(dbc, DB_MAX_RECORDS, 0)) != 0 && ret != DB_NOTFOUND)
+ goto err; /* Returning directly here leaked the cursor. */
+
+ /*
+ * Close any existing file handle and re-open the file, truncating it.
+ */
+ if (t->re_fp != NULL) {
+ if (fclose(t->re_fp) != 0) {
+ ret = errno;
+ goto err;
+ }
+ t->re_fp = NULL;
+ }
+ if ((fp = fopen(t->re_source, "w")) == NULL) {
+ ret = errno;
+ __db_err(dbenv, "%s: %s", t->re_source, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * We step through the records, writing each one out. Use the record
+ * number and the dbp->get() function, instead of a cursor, so we find
+ * and write out "deleted" or non-existent records. The DB handle may
+ * be threaded, so allocate memory as we go.
+ *
+ * We'll need the delimiter if we're doing variable-length records,
+ * and the pad character if we're doing fixed-length records.
+ */
+ delim = t->re_delim;
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
+ if ((ret = __os_malloc(dbenv, t->re_len, &pad)) != 0)
+ goto err;
+ memset(pad, t->re_pad, t->re_len);
+ }
+ for (keyno = 1;; ++keyno) {
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ if (data.size != 0 && (u_int32_t)fwrite(
+ data.data, 1, data.size, fp) != data.size)
+ goto write_err;
+ break;
+ case DB_KEYEMPTY:
+ /* Deleted/implicit records are written as padding. */
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN) &&
+ (u_int32_t)fwrite(pad, 1, t->re_len, fp) !=
+ t->re_len)
+ goto write_err;
+ break;
+ case DB_NOTFOUND:
+ /* Ran off the end of the tree: success. */
+ ret = 0;
+ goto done;
+ default:
+ goto err;
+ }
+ if (!F_ISSET(dbp, DB_AM_FIXEDLEN) &&
+ fwrite(&delim, 1, 1, fp) != 1) {
+write_err: ret = errno;
+ __db_err(dbp->dbenv,
+ "%s: write failed to backing file: %s",
+ t->re_source, strerror(ret));
+ goto err;
+ }
+ }
+
+err:
+done: /* Close the file descriptor. */
+ if (fp != NULL && fclose(fp) != 0) {
+ if (ret == 0)
+ ret = errno;
+ __db_err(dbenv, "%s: %s", t->re_source, db_strerror(errno));
+ }
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard memory allocated to hold the data items. */
+ if (data.data != NULL)
+ __os_ufree(dbenv, data.data);
+ if (pad != NULL)
+ __os_free(dbenv, pad);
+
+ if (ret == 0)
+ t->re_modified = 0;
+
+ return (ret);
+}
+
+/*
+ * __ram_sread --
+ * Read records from a source file.
+ *
+ * Reads records from t->re_fp into the tree until the tree holds at
+ * least "top" records or the file is exhausted.  At EOF, re_eof is
+ * set and DB_NOTFOUND is returned.  The re_modified flag is restored
+ * afterward if the tree wasn't already dirty, since loading from the
+ * source isn't a user modification.
+ */
+static int
+__ram_sread(dbc, top)
+ DBC *dbc;
+ db_recno_t top;
+{
+ BTREE *t;
+ DB *dbp;
+ DBT data, *rdata;
+ db_recno_t recno;
+ size_t len;
+ int ch, ret, was_modified;
+
+ t = dbc->dbp->bt_internal;
+ dbp = dbc->dbp;
+ was_modified = t->re_modified;
+
+ if ((ret = __bam_nrecs(dbc, &recno)) != 0)
+ return (ret);
+
+ /*
+ * Use the record key return memory, it's only a short-term use.
+ * The record data return memory is used by __bam_iitem, which
+ * we'll indirectly call, so use the key so as not to collide.
+ */
+ len = F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_len : 256;
+ rdata = &dbc->my_rkey;
+ if (rdata->ulen < len) {
+ if ((ret = __os_realloc(
+ dbp->dbenv, len, &rdata->data)) != 0) {
+ rdata->ulen = 0;
+ rdata->data = NULL;
+ return (ret);
+ }
+ rdata->ulen = (u_int32_t)len;
+ }
+
+ memset(&data, 0, sizeof(data));
+ while (recno < top) {
+ data.data = rdata->data;
+ data.size = 0;
+ /* Fixed-length: read exactly re_len bytes per record. */
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN))
+ for (len = t->re_len; len > 0; --len) {
+ if ((ch = getc(t->re_fp)) == EOF) {
+ if (data.size == 0)
+ goto eof;
+ break;
+ }
+ ((u_int8_t *)data.data)[data.size++] = ch;
+ }
+ else
+ /* Variable-length: read up to the delimiter, growing
+ * the buffer by doubling as needed. */
+ for (;;) {
+ if ((ch = getc(t->re_fp)) == EOF) {
+ if (data.size == 0)
+ goto eof;
+ break;
+ }
+ if (ch == t->re_delim)
+ break;
+
+ ((u_int8_t *)data.data)[data.size++] = ch;
+ if (data.size == rdata->ulen) {
+ if ((ret = __os_realloc(dbp->dbenv,
+ rdata->ulen *= 2,
+ &rdata->data)) != 0) {
+ rdata->ulen = 0;
+ rdata->data = NULL;
+ return (ret);
+ } else
+ data.data = rdata->data;
+ }
+ }
+
+ /*
+ * Another process may have read this record from the input
+ * file and stored it into the database already, in which
+ * case we don't need to repeat that operation. We detect
+ * this by checking if the last record we've read is greater
+ * or equal to the number of records in the database.
+ */
+ /* re_last appears to count records consumed from the source
+ * file so far -- only add records not already in the tree. */
+ if (t->re_last >= recno) {
+ ++recno;
+ if ((ret = __ram_add(dbc, &recno, &data, 0, 0)) != 0)
+ goto err;
+ }
+ ++t->re_last;
+ }
+
+ if (0) {
+eof: t->re_eof = 1;
+ ret = DB_NOTFOUND;
+ }
+err: if (!was_modified)
+ t->re_modified = 0;
+
+ return (ret);
+}
+
+/*
+ * __ram_add --
+ * Add records into the tree.
+ *
+ * Stores "data" at record number *recnop.  With flags == DB_APPEND
+ * the search appends past the current end of tree and sets *recnop
+ * to the assigned number.  bi_flags is handed through to
+ * __bam_iitem() (e.g. BI_DELETED for implicitly-created records).
+ * Splits the page and retries as necessary.
+ */
+static int
+__ram_add(dbc, recnop, data, flags, bi_flags)
+ DBC *dbc;
+ db_recno_t *recnop;
+ DBT *data;
+ u_int32_t flags, bi_flags;
+{
+ BTREE_CURSOR *cp;
+ int exact, ret, stack;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+retry: /* Find the slot for insertion. */
+ if ((ret = __bam_rsearch(dbc, recnop,
+ S_INSERT | (flags == DB_APPEND ? S_APPEND : 0), 1, &exact)) != 0)
+ return (ret);
+ stack = 1;
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
+
+ /*
+ * The application may modify the data based on the selected record
+ * number.
+ */
+ if (flags == DB_APPEND && dbc->dbp->db_append_recno != NULL &&
+ (ret = dbc->dbp->db_append_recno(dbc->dbp, data, *recnop)) != 0)
+ goto err;
+
+ /*
+ * Select the arguments for __bam_iitem() and do the insert. If the
+ * key is an exact match, or we're replacing the data item with a
+ * new data item, replace the current item. If the key isn't an exact
+ * match, we're inserting a new key/data pair, before the search
+ * location.
+ */
+ switch (ret = __bam_iitem(dbc,
+ NULL, data, exact ? DB_CURRENT : DB_BEFORE, bi_flags)) {
+ case 0:
+ /*
+ * Don't adjust anything.
+ *
+ * If we inserted a record, no cursors need adjusting because
+ * the only new record it's possible to insert is at the very
+ * end of the tree. The necessary adjustments to the internal
+ * page counts were made by __bam_iitem().
+ *
+ * If we overwrote a record, no cursors need adjusting because
+ * future DBcursor->get calls will simply return the underlying
+ * record (there's no adjustment made for the DB_CURRENT flag
+ * when a cursor get operation immediately follows a cursor
+ * delete operation, and the normal adjustment for the DB_NEXT
+ * flag is still correct).
+ */
+ break;
+ case DB_NEEDSPLIT:
+ /* Discard the stack of pages and split the page. */
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ stack = 0;
+
+ if ((ret = __bam_split(dbc, recnop, NULL)) != 0)
+ goto err;
+
+ goto retry;
+ /* NOTREACHED */
+ default:
+ goto err;
+ }
+
+ /* Success falls through: release the search stack before return. */
+err: if (stack)
+ __bam_stkrel(dbc, STK_CLRDBC);
+
+ return (ret);
+}
diff --git a/storage/bdb/btree/bt_rsearch.c b/storage/bdb/btree/bt_rsearch.c
new file mode 100644
index 00000000000..a75181b44e2
--- /dev/null
+++ b/storage/bdb/btree/bt_rsearch.c
@@ -0,0 +1,442 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_rsearch.c,v 11.34 2002/07/03 19:03:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+/*
+ * __bam_rsearch --
+ * Search a btree for a record number.
+ *
+ * recnop is in/out: on input the record to find; with S_APPEND it is
+ * set to the number assigned to the append slot.  flags are the S_*
+ * search flags, stop is the tree level to stop at when S_PARENT is
+ * set, and *exactp is set non-zero iff the record exists.  On
+ * success the located page/index are left in the cursor's stack.
+ *
+ * PUBLIC: int __bam_rsearch __P((DBC *, db_recno_t *, u_int32_t, int, int *));
+ */
+int
+__bam_rsearch(dbc, recnop, flags, stop, exactp)
+ DBC *dbc;
+ db_recno_t *recnop;
+ u_int32_t flags;
+ int stop, *exactp;
+{
+ BINTERNAL *bi;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ RINTERNAL *ri;
+ db_indx_t adjust, deloffset, indx, top;
+ db_lockmode_t lock_mode;
+ db_pgno_t pg;
+ db_recno_t recno, t_recno, total;
+ int ret, stack;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ BT_STK_CLR(cp);
+
+ /*
+ * There are several ways we search a btree tree. The flags argument
+ * specifies if we're acquiring read or write locks and if we are
+ * locking pairs of pages. In addition, if we're adding or deleting
+ * an item, we have to lock the entire tree, regardless. See btree.h
+ * for more details.
+ *
+ * If write-locking pages, we need to know whether or not to acquire a
+ * write lock on a page before getting it. This depends on how deep it
+ * is in tree, which we don't know until we acquire the root page. So,
+ * if we need to lock the root page we may have to upgrade it later,
+ * because we won't get the correct lock initially.
+ *
+ * Retrieve the root page.
+ */
+ pg = cp->root;
+ stack = LF_ISSET(S_STACK) ? 1 : 0;
+ lock_mode = stack ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+
+ /*
+ * Decide if we need to save this page; if we do, write lock it.
+ * We deliberately don't lock-couple on this call. If the tree
+ * is tiny, i.e., one page, and two threads are busily updating
+ * the root page, we're almost guaranteed deadlocks galore, as
+ * each one gets a read lock and then blocks the other's attempt
+ * for a write lock.
+ */
+ if (!stack &&
+ ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) ||
+ (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
+ (void)mpf->put(mpf, h, 0);
+ (void)__LPUT(dbc, lock);
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+ stack = 1;
+ }
+
+ /*
+ * If appending to the tree, set the record number now -- we have the
+ * root page locked.
+ *
+ * Delete only deletes exact matches, read only returns exact matches.
+ * Note, this is different from __bam_search(), which returns non-exact
+ * matches for read.
+ *
+ * The record may not exist. We can only return the correct location
+ * for the record immediately after the last record in the tree, so do
+ * a fast check now.
+ */
+ total = RE_NREC(h);
+ if (LF_ISSET(S_APPEND)) {
+ *exactp = 0;
+ *recnop = recno = total + 1;
+ } else {
+ recno = *recnop;
+ if (recno <= total)
+ *exactp = 1;
+ else {
+ *exactp = 0;
+ if (!LF_ISSET(S_PAST_EOF) || recno > total + 1) {
+ /*
+ * Keep the page locked for serializability.
+ *
+ * XXX
+ * This leaves the root page locked, which will
+ * eliminate any concurrency. A possible fix
+ * would be to lock the last leaf page instead.
+ */
+ (void)mpf->put(mpf, h, 0);
+ (void)__TLPUT(dbc, lock);
+ return (DB_NOTFOUND);
+ }
+ }
+ }
+
+ /*
+ * !!!
+ * Record numbers in the tree are 0-based, but the recno is
+ * 1-based. All of the calculations below have to take this
+ * into account.
+ */
+ for (total = 0;;) {
+ switch (TYPE(h)) {
+ case P_LBTREE:
+ case P_LDUP:
+ recno -= total;
+ /*
+ * There may be logically deleted records on the page.
+ * If there are enough, the record may not exist.
+ */
+ if (TYPE(h) == P_LBTREE) {
+ adjust = P_INDX;
+ deloffset = O_INDX;
+ } else {
+ adjust = O_INDX;
+ deloffset = 0;
+ }
+ /* Count undeleted records until we reach "recno". */
+ for (t_recno = 0, indx = 0;; indx += adjust) {
+ if (indx >= NUM_ENT(h)) {
+ *exactp = 0;
+ if (!LF_ISSET(S_PAST_EOF) ||
+ recno > t_recno + 1) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ }
+ if (!B_DISSET(GET_BKEYDATA(dbp, h,
+ indx + deloffset)->type) &&
+ ++t_recno == recno)
+ break;
+ }
+
+ /* Correct from 1-based to 0-based for a page offset. */
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ case P_IBTREE:
+ for (indx = 0, top = NUM_ENT(h);;) {
+ bi = GET_BINTERNAL(dbp, h, indx);
+ if (++indx == top || total + bi->nrecs >= recno)
+ break;
+ total += bi->nrecs;
+ }
+ pg = bi->pgno;
+ break;
+ case P_LRECNO:
+ recno -= total;
+
+ /* Correct from 1-based to 0-based for a page offset. */
+ --recno;
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, recno, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ case P_IRECNO:
+ for (indx = 0, top = NUM_ENT(h);;) {
+ ri = GET_RINTERNAL(dbp, h, indx);
+ if (++indx == top || total + ri->nrecs >= recno)
+ break;
+ total += ri->nrecs;
+ }
+ pg = ri->pgno;
+ break;
+ default:
+ /*
+ * NOTE(review): this return leaves h pinned and the
+ * current lock held -- confirm this is intentional
+ * for the bad-page-format case.
+ */
+ return (__db_pgfmt(dbp->dbenv, h->pgno));
+ }
+ /* The internal-page scans post-increment; back up to the
+ * chosen entry's index. */
+ --indx;
+
+ if (stack) {
+ /* Return if this is the lowest page wanted. */
+ if (LF_ISSET(S_PARENT) && stop == h->level) {
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ }
+ BT_STK_PUSH(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret =
+ __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ goto err;
+ } else {
+ /*
+ * Decide if we want to return a pointer to the next
+ * page in the stack. If we do, write lock it and
+ * never unlock it.
+ */
+ if ((LF_ISSET(S_PARENT) &&
+ (u_int8_t)(stop + 1) >= (u_int8_t)(h->level - 1)) ||
+ (h->level - 1) == LEAFLEVEL)
+ stack = 1;
+
+ (void)mpf->put(mpf, h, 0);
+
+ lock_mode = stack &&
+ LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc,
+ LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
+ /*
+ * If we fail, discard the lock we held. This
+ * is OK because this only happens when we are
+ * descending the tree holding read-locks.
+ */
+ __LPUT(dbc, lock);
+ goto err;
+ }
+ }
+
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0)
+ goto err;
+ }
+ /* NOTREACHED */
+
+err: BT_STK_POP(cp);
+ __bam_stkrel(dbc, 0);
+ return (ret);
+}
+
+/*
+ * __bam_adjust --
+ * Adjust the tree after adding or deleting a record.
+ *
+ * Walks the cursor's page stack and adds "adjust" to the nrecs count
+ * of the referenced entry on each internal page, logging the change
+ * (or marking the LSN not-logged), updating the root's record count,
+ * and dirtying the page in the memory pool.
+ *
+ * PUBLIC: int __bam_adjust __P((DBC *, int32_t));
+ */
+int
+__bam_adjust(dbc, adjust)
+ DBC *dbc;
+ int32_t adjust;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ EPG *epg;
+ PAGE *h;
+ db_pgno_t root_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ root_pgno = cp->root;
+
+ /* Update the record counts for the tree. */
+ for (epg = cp->sp; epg <= cp->csp; ++epg) {
+ h = epg->page;
+ if (TYPE(h) == P_IBTREE || TYPE(h) == P_IRECNO) {
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_cadjust_log(dbp, dbc->txn,
+ &LSN(h), 0, PGNO(h), &LSN(h),
+ (u_int32_t)epg->indx, adjust,
+ PGNO(h) == root_pgno ?
+ CAD_UPDATEROOT : 0)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(h));
+
+ if (TYPE(h) == P_IBTREE)
+ GET_BINTERNAL(dbp, h, epg->indx)->nrecs +=
+ adjust;
+ else
+ GET_RINTERNAL(dbp, h, epg->indx)->nrecs +=
+ adjust;
+
+ /* The root page also carries the tree-wide count. */
+ if (PGNO(h) == root_pgno)
+ RE_NREC_ADJ(h, adjust);
+
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+ }
+ }
+ return (0);
+}
+
+/*
+ * __bam_nrecs --
+ * Return the number of records in the tree.
+ *
+ * Read-locks and fetches the root page, copies its record count
+ * (RE_NREC) through rep, then releases the page and lock.
+ *
+ * PUBLIC: int __bam_nrecs __P((DBC *, db_recno_t *));
+ */
+int
+__bam_nrecs(dbc, rep)
+ DBC *dbc;
+ db_recno_t *rep;
+{
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ pgno = dbc->internal->root;
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
+ /* We never got the page: release the lock we acquired
+ * above (it was previously leaked on this path). */
+ (void)__TLPUT(dbc, lock);
+ return (ret);
+ }
+
+ *rep = RE_NREC(h);
+
+ (void)mpf->put(mpf, h, 0);
+ (void)__TLPUT(dbc, lock);
+
+ return (0);
+}
+
+/*
+ * __bam_total --
+ * Return the number of records below a page.
+ *
+ * Leaf pages are counted directly (skipping logically deleted
+ * entries on btree/duplicate leaves); internal pages sum the
+ * per-child nrecs counts.
+ *
+ * PUBLIC: db_recno_t __bam_total __P((DB *, PAGE *));
+ */
+db_recno_t
+__bam_total(dbp, h)
+ DB *dbp;
+ PAGE *h;
+{
+ db_recno_t count;
+ db_indx_t i, n;
+
+ count = 0;
+ n = NUM_ENT(h);
+
+ switch (TYPE(h)) {
+ case P_LRECNO:
+ /* Recno leaf: every entry is a record. */
+ count = n;
+ break;
+ case P_LBTREE:
+ /* Btree leaf: key/data pairs, data item at +O_INDX;
+ * skip logically deleted records. */
+ for (i = 0; i < n; i += P_INDX)
+ if (!B_DISSET(
+ GET_BKEYDATA(dbp, h, i + O_INDX)->type))
+ ++count;
+ break;
+ case P_LDUP:
+ /* Duplicate leaf: one item per entry;
+ * skip logically deleted records. */
+ for (i = 0; i < n; i += O_INDX)
+ if (!B_DISSET(GET_BKEYDATA(dbp, h, i)->type))
+ ++count;
+ break;
+ case P_IBTREE:
+ /* Btree internal: sum the children's counts. */
+ for (i = 0; i < n; i += O_INDX)
+ count += GET_BINTERNAL(dbp, h, i)->nrecs;
+ break;
+ case P_IRECNO:
+ /* Recno internal: sum the children's counts. */
+ for (i = 0; i < n; i += O_INDX)
+ count += GET_RINTERNAL(dbp, h, i)->nrecs;
+ break;
+ }
+
+ return (count);
+}
diff --git a/storage/bdb/btree/bt_search.c b/storage/bdb/btree/bt_search.c
new file mode 100644
index 00000000000..92b2106311d
--- /dev/null
+++ b/storage/bdb/btree/bt_search.c
@@ -0,0 +1,475 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_search.c,v 11.43 2002/07/03 19:03:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+
+/*
+ * __bam_search --
+ * Search a btree for a key.
+ *
+ * PUBLIC: int __bam_search __P((DBC *, db_pgno_t,
+ * PUBLIC: const DBT *, u_int32_t, int, db_recno_t *, int *));
+ */
+int
+__bam_search(dbc, root_pgno, key, flags, stop, recnop, exactp)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ const DBT *key;
+ u_int32_t flags;
+ int stop, *exactp;
+ db_recno_t *recnop;
+{
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_indx_t base, i, indx, *inp, lim;
+ db_lockmode_t lock_mode;
+ db_pgno_t pg;
+ db_recno_t recno;
+ int adjust, cmp, deloffset, ret, stack;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ recno = 0;
+
+ BT_STK_CLR(cp);
+
+ /*
+ * There are several ways we search a btree tree. The flags argument
+ * specifies if we're acquiring read or write locks, if we position
+ * to the first or last item in a set of duplicates, if we return
+ * deleted items, and if we are locking pairs of pages. In addition,
+ * if we're modifying record numbers, we have to lock the entire tree
+ * regardless. See btree.h for more details.
+ *
+ * If write-locking pages, we need to know whether or not to acquire a
+ * write lock on a page before getting it. This depends on how deep it
+ * is in tree, which we don't know until we acquire the root page. So,
+ * if we need to lock the root page we may have to upgrade it later,
+ * because we won't get the correct lock initially.
+ *
+ * Retrieve the root page.
+ */
+try_again:
+ pg = root_pgno == PGNO_INVALID ? cp->root : root_pgno;
+ stack = LF_ISSET(S_STACK) && F_ISSET(cp, C_RECNUM);
+ lock_mode = stack ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+
+ /*
+ * Decide if we need to save this page; if we do, write lock it.
+ * We deliberately don't lock-couple on this call. If the tree
+ * is tiny, i.e., one page, and two threads are busily updating
+ * the root page, we're almost guaranteed deadlocks galore, as
+ * each one gets a read lock and then blocks the other's attempt
+ * for a write lock.
+ */
+ if (!stack &&
+ ((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) ||
+ (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
+ (void)mpf->put(mpf, h, 0);
+ (void)__LPUT(dbc, lock);
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
+ /* Did not read it, so we can release the lock */
+ (void)__LPUT(dbc, lock);
+ return (ret);
+ }
+ if (!((LF_ISSET(S_PARENT) &&
+ (u_int8_t)(stop + 1) >= h->level) ||
+ (LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
+ /* Someone else split the root, start over. */
+ (void)mpf->put(mpf, h, 0);
+ (void)__LPUT(dbc, lock);
+ goto try_again;
+ }
+ stack = 1;
+ }
+
+ /* Choose a comparison function. */
+ func = F_ISSET(dbc, DBC_OPD) ?
+ (dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare) :
+ t->bt_compare;
+
+ for (;;) {
+ inp = P_INP(dbp, h);
+ /*
+ * Do a binary search on the current page. If we're searching
+ * a Btree leaf page, we have to walk the indices in groups of
+ * two. If we're searching an internal page or a off-page dup
+ * page, they're an index per page item. If we find an exact
+ * match on a leaf page, we're done.
+ */
+ adjust = TYPE(h) == P_LBTREE ? P_INDX : O_INDX;
+ for (base = 0,
+ lim = NUM_ENT(h) / (db_indx_t)adjust; lim != 0; lim >>= 1) {
+ indx = base + ((lim >> 1) * adjust);
+ if ((ret =
+ __bam_cmp(dbp, key, h, indx, func, &cmp)) != 0)
+ goto err;
+ if (cmp == 0) {
+ if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP)
+ goto found;
+ goto next;
+ }
+ if (cmp > 0) {
+ base = indx + adjust;
+ --lim;
+ }
+ }
+
+ /*
+ * No match found. Base is the smallest index greater than
+ * key and may be zero or a last + O_INDX index.
+ *
+ * If it's a leaf page, return base as the "found" value.
+ * Delete only deletes exact matches.
+ */
+ if (TYPE(h) == P_LBTREE || TYPE(h) == P_LDUP) {
+ *exactp = 0;
+
+ if (LF_ISSET(S_EXACT))
+ goto notfound;
+
+ if (LF_ISSET(S_STK_ONLY)) {
+ BT_STK_NUM(dbp->dbenv, cp, h, base, ret);
+ __LPUT(dbc, lock);
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+ }
+
+ /*
+ * !!!
+ * Possibly returning a deleted record -- DB_SET_RANGE,
+ * DB_KEYFIRST and DB_KEYLAST don't require an exact
+ * match, and we don't want to walk multiple pages here
+ * to find an undeleted record. This is handled by the
+ * calling routine.
+ */
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, base, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ }
+
+ /*
+ * If it's not a leaf page, record the internal page (which is
+ * a parent page for the key). Decrement the base by 1 if it's
+ * non-zero so that if a split later occurs, the inserted page
+ * will be to the right of the saved page.
+ */
+ indx = base > 0 ? base - O_INDX : base;
+
+ /*
+ * If we're trying to calculate the record number, sum up
+ * all the record numbers on this page up to the indx point.
+ */
+next: if (recnop != NULL)
+ for (i = 0; i < indx; ++i)
+ recno += GET_BINTERNAL(dbp, h, i)->nrecs;
+
+ pg = GET_BINTERNAL(dbp, h, indx)->pgno;
+
+ if (LF_ISSET(S_STK_ONLY)) {
+ if (stop == h->level) {
+ BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
+ __LPUT(dbc, lock);
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+ }
+ BT_STK_NUMPUSH(dbp->dbenv, cp, h, indx, ret);
+ (void)mpf->put(mpf, h, 0);
+ if ((ret = __db_lget(dbc,
+ LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
+ /*
+ * Discard our lock and return on failure. This
+ * is OK because it only happens when descending
+ * the tree holding read-locks.
+ */
+ __LPUT(dbc, lock);
+ return (ret);
+ }
+ } else if (stack) {
+ /* Return if this is the lowest page wanted. */
+ if (LF_ISSET(S_PARENT) && stop == h->level) {
+ BT_STK_ENTER(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ return (0);
+ }
+ BT_STK_PUSH(dbp->dbenv,
+ cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+
+ lock_mode = DB_LOCK_WRITE;
+ if ((ret =
+ __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
+ goto err;
+ } else {
+ /*
+ * Decide if we want to return a reference to the next
+ * page in the return stack. If so, lock it and never
+ * unlock it.
+ */
+ if ((LF_ISSET(S_PARENT) &&
+ (u_int8_t)(stop + 1) >= (u_int8_t)(h->level - 1)) ||
+ (h->level - 1) == LEAFLEVEL)
+ stack = 1;
+
+ (void)mpf->put(mpf, h, 0);
+
+ lock_mode = stack &&
+ LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __db_lget(dbc,
+ LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
+ /*
+ * If we fail, discard the lock we held. This
+ * is OK because this only happens when we are
+ * descending the tree holding read-locks.
+ */
+ __LPUT(dbc, lock);
+ goto err;
+ }
+ }
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0)
+ goto err;
+ }
+ /* NOTREACHED */
+
+found: *exactp = 1;
+
+ /*
+ * If we're trying to calculate the record number, add in the
+ * offset on this page and correct for the fact that records
+ * in the tree are 0-based.
+ */
+ if (recnop != NULL)
+ *recnop = recno + (indx / P_INDX) + 1;
+
+ /*
+ * If we got here, we know that we have a Btree leaf or off-page
+ * duplicates page. If it's a Btree leaf page, we have to handle
+ * on-page duplicates.
+ *
+ * If there are duplicates, go to the first/last one. This is
+ * safe because we know that we're not going to leave the page,
+ * all duplicate sets that are not on overflow pages exist on a
+ * single leaf page.
+ */
+ if (TYPE(h) == P_LBTREE) {
+ if (LF_ISSET(S_DUPLAST))
+ while (indx < (db_indx_t)(NUM_ENT(h) - P_INDX) &&
+ inp[indx] == inp[indx + P_INDX])
+ indx += P_INDX;
+ else
+ while (indx > 0 &&
+ inp[indx] == inp[indx - P_INDX])
+ indx -= P_INDX;
+ }
+
+ /*
+ * Now check if we are allowed to return deleted items; if not, then
+ * find the next (or previous) non-deleted duplicate entry. (We do
+ * not move from the original found key on the basis of the S_DELNO
+ * flag.)
+ */
+ if (LF_ISSET(S_DELNO)) {
+ deloffset = TYPE(h) == P_LBTREE ? O_INDX : 0;
+ if (LF_ISSET(S_DUPLAST))
+ while (B_DISSET(GET_BKEYDATA(dbp,
+ h, indx + deloffset)->type) && indx > 0 &&
+ inp[indx] == inp[indx - adjust])
+ indx -= adjust;
+ else
+ while (B_DISSET(GET_BKEYDATA(dbp,
+ h, indx + deloffset)->type) &&
+ indx < (db_indx_t)(NUM_ENT(h) - adjust) &&
+ inp[indx] == inp[indx + adjust])
+ indx += adjust;
+
+ /*
+ * If we weren't able to find a non-deleted duplicate, return
+ * DB_NOTFOUND.
+ */
+ if (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type))
+ goto notfound;
+ }
+
+ if (LF_ISSET(S_STK_ONLY)) {
+ BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
+ __LPUT(dbc, lock);
+ (void)mpf->put(mpf, h, 0);
+ } else {
+ BT_STK_ENTER(dbp->dbenv, cp, h, indx, lock, lock_mode, ret);
+ if (ret != 0)
+ goto err;
+ }
+ return (0);
+
+notfound:
+ /* Keep the page locked for serializability. */
+ (void)mpf->put(mpf, h, 0);
+ (void)__TLPUT(dbc, lock);
+ ret = DB_NOTFOUND;
+
+err: BT_STK_POP(cp);
+ __bam_stkrel(dbc, 0);
+ return (ret);
+}
+
+/*
+ * __bam_stkrel --
+ * Release all pages currently held in the stack.
+ *
+ * PUBLIC: int __bam_stkrel __P((DBC *, u_int32_t));
+ */
+int
+__bam_stkrel(dbc, flags)
+ DBC *dbc;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ EPG *epg;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * Release inner pages first.
+ *
+ * The caller must be sure that setting STK_NOLOCK will not effect
+ * either serializability or recoverability.
+ */
+ for (ret = 0, epg = cp->sp; epg <= cp->csp; ++epg) {
+ if (epg->page != NULL) {
+ if (LF_ISSET(STK_CLRDBC) && cp->page == epg->page) {
+ cp->page = NULL;
+ LOCK_INIT(cp->lock);
+ }
+ if ((t_ret =
+ mpf->put(mpf, epg->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * XXX
+ * Temporary fix for #3243 -- under certain deadlock
+ * conditions we call here again and re-free the page.
+ * The correct fix is to never release a stack that
+ * doesn't hold items.
+ */
+ epg->page = NULL;
+ }
+ if (LF_ISSET(STK_NOLOCK))
+ (void)__LPUT(dbc, epg->lock);
+ else
+ (void)__TLPUT(dbc, epg->lock);
+ }
+
+ /* Clear the stack, all pages have been released. */
+ BT_STK_CLR(cp);
+
+ return (ret);
+}
+
+/*
+ * __bam_stkgrow --
+ * Grow the stack.
+ *
+ * PUBLIC: int __bam_stkgrow __P((DB_ENV *, BTREE_CURSOR *));
+ */
+int
+__bam_stkgrow(dbenv, cp)
+ DB_ENV *dbenv;
+ BTREE_CURSOR *cp;
+{
+ EPG *p;
+ size_t entries;
+ int ret;
+
+ entries = cp->esp - cp->sp;
+
+ if ((ret = __os_calloc(dbenv, entries * 2, sizeof(EPG), &p)) != 0)
+ return (ret);
+ memcpy(p, cp->sp, entries * sizeof(EPG));
+ if (cp->sp != cp->stack)
+ __os_free(dbenv, cp->sp);
+ cp->sp = p;
+ cp->csp = p + entries;
+ cp->esp = p + entries * 2;
+ return (0);
+}
diff --git a/storage/bdb/btree/bt_split.c b/storage/bdb/btree/bt_split.c
new file mode 100644
index 00000000000..f3302a6905f
--- /dev/null
+++ b/storage/bdb/btree/bt_split.c
@@ -0,0 +1,1177 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_split.c,v 11.58 2002/07/03 19:03:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/btree.h"
+
+static int __bam_broot __P((DBC *, PAGE *, PAGE *, PAGE *));
+static int __bam_page __P((DBC *, EPG *, EPG *));
+static int __bam_pinsert __P((DBC *, EPG *, PAGE *, PAGE *, int));
+static int __bam_psplit __P((DBC *, EPG *, PAGE *, PAGE *, db_indx_t *));
+static int __bam_root __P((DBC *, EPG *));
+static int __ram_root __P((DBC *, PAGE *, PAGE *, PAGE *));
+
+/*
+ * __bam_split --
+ * Split a page.
+ *
+ * PUBLIC: int __bam_split __P((DBC *, void *, db_pgno_t *));
+ */
+int
+__bam_split(dbc, arg, root_pgnop)
+ DBC *dbc;
+ void *arg;
+ db_pgno_t *root_pgnop;
+{
+ BTREE_CURSOR *cp;
+ enum { UP, DOWN } dir;
+ db_pgno_t root_pgno;
+ int exact, level, ret;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+ root_pgno = cp->root;
+
+ /*
+ * The locking protocol we use to avoid deadlock to acquire locks by
+ * walking down the tree, but we do it as lazily as possible, locking
+ * the root only as a last resort. We expect all stack pages to have
+ * been discarded before we're called; we discard all short-term locks.
+ *
+ * When __bam_split is first called, we know that a leaf page was too
+ * full for an insert. We don't know what leaf page it was, but we
+ * have the key/recno that caused the problem. We call XX_search to
+ * reacquire the leaf page, but this time get both the leaf page and
+ * its parent, locked. We then split the leaf page and see if the new
+ * internal key will fit into the parent page. If it will, we're done.
+ *
+ * If it won't, we discard our current locks and repeat the process,
+ * only this time acquiring the parent page and its parent, locked.
+ * This process repeats until we succeed in the split, splitting the
+ * root page as the final resort. The entire process then repeats,
+ * as necessary, until we split a leaf page.
+ *
+ * XXX
+ * A traditional method of speeding this up is to maintain a stack of
+ * the pages traversed in the original search. You can detect if the
+ * stack is correct by storing the page's LSN when it was searched and
+ * comparing that LSN with the current one when it's locked during the
+ * split. This would be an easy change for this code, but I have no
+ * numbers that indicate it's worthwhile.
+ */
+ for (dir = UP, level = LEAFLEVEL;; dir == UP ? ++level : --level) {
+ /*
+ * Acquire a page and its parent, locked.
+ */
+ if ((ret = (dbc->dbtype == DB_BTREE ?
+ __bam_search(dbc, PGNO_INVALID,
+ arg, S_WRPAIR, level, NULL, &exact) :
+ __bam_rsearch(dbc,
+ (db_recno_t *)arg, S_WRPAIR, level, &exact))) != 0)
+ return (ret);
+
+ if (root_pgnop != NULL)
+ *root_pgnop = cp->csp[0].page->pgno == root_pgno ?
+ root_pgno : cp->csp[-1].page->pgno;
+ /*
+ * Split the page if it still needs it (it's possible another
+ * thread of control has already split the page). If we are
+ * guaranteed that two items will fit on the page, the split
+ * is no longer necessary.
+ */
+ if (2 * B_MAXSIZEONPAGE(cp->ovflsize)
+ <= (db_indx_t)P_FREESPACE(dbc->dbp, cp->csp[0].page)) {
+ __bam_stkrel(dbc, STK_NOLOCK);
+ return (0);
+ }
+ ret = cp->csp[0].page->pgno == root_pgno ?
+ __bam_root(dbc, &cp->csp[0]) :
+ __bam_page(dbc, &cp->csp[-1], &cp->csp[0]);
+ BT_STK_CLR(cp);
+
+ switch (ret) {
+ case 0:
+ /* Once we've split the leaf page, we're done. */
+ if (level == LEAFLEVEL)
+ return (0);
+
+ /* Switch directions. */
+ if (dir == UP)
+ dir = DOWN;
+ break;
+ case DB_NEEDSPLIT:
+ /*
+ * It's possible to fail to split repeatedly, as other
+ * threads may be modifying the tree, or the page usage
+ * is sufficiently bad that we don't get enough space
+ * the first time.
+ */
+ if (dir == DOWN)
+ dir = UP;
+ break;
+ default:
+ return (ret);
+ }
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * __bam_root --
+ * Split the root page of a btree.
+ */
+static int
+__bam_root(dbc, cp)
+ DBC *dbc;
+ EPG *cp;
+{
+ DB *dbp;
+ DBT log_dbt;
+ DB_LSN log_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *lp, *rp;
+ db_indx_t split;
+ u_int32_t opflags;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ /* Yeah, right. */
+ if (cp->page->level >= MAXBTREELEVEL) {
+ __db_err(dbp->dbenv,
+ "Too many btree levels: %d", cp->page->level);
+ ret = ENOSPC;
+ goto err;
+ }
+
+ /* Create new left and right pages for the split. */
+ lp = rp = NULL;
+ if ((ret = __db_new(dbc, TYPE(cp->page), &lp)) != 0 ||
+ (ret = __db_new(dbc, TYPE(cp->page), &rp)) != 0)
+ goto err;
+ P_INIT(lp, dbp->pgsize, lp->pgno,
+ PGNO_INVALID, ISINTERNAL(cp->page) ? PGNO_INVALID : rp->pgno,
+ cp->page->level, TYPE(cp->page));
+ P_INIT(rp, dbp->pgsize, rp->pgno,
+ ISINTERNAL(cp->page) ? PGNO_INVALID : lp->pgno, PGNO_INVALID,
+ cp->page->level, TYPE(cp->page));
+
+ /* Split the page. */
+ if ((ret = __bam_psplit(dbc, cp, lp, rp, &split)) != 0)
+ goto err;
+
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ memset(&log_dbt, 0, sizeof(log_dbt));
+ log_dbt.data = cp->page;
+ log_dbt.size = dbp->pgsize;
+ ZERO_LSN(log_lsn);
+ opflags = F_ISSET(
+ (BTREE_CURSOR *)dbc->internal, C_RECNUM) ? SPL_NRECS : 0;
+ if ((ret = __bam_split_log(dbp,
+ dbc->txn, &LSN(cp->page), 0, PGNO(lp), &LSN(lp), PGNO(rp),
+ &LSN(rp), (u_int32_t)NUM_ENT(lp), 0, &log_lsn,
+ dbc->internal->root, &log_dbt, opflags)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(LSN(cp->page));
+ LSN(lp) = LSN(cp->page);
+ LSN(rp) = LSN(cp->page);
+
+ /* Clean up the new root page. */
+ if ((ret = (dbc->dbtype == DB_RECNO ?
+ __ram_root(dbc, cp->page, lp, rp) :
+ __bam_broot(dbc, cp->page, lp, rp))) != 0)
+ goto err;
+
+ /* Adjust any cursors. */
+ if ((ret = __bam_ca_split(dbc,
+ cp->page->pgno, lp->pgno, rp->pgno, split, 1)) != 0)
+ goto err;
+
+ /* Success -- write the real pages back to the store. */
+ (void)mpf->put(mpf, cp->page, DB_MPOOL_DIRTY);
+ (void)__TLPUT(dbc, cp->lock);
+ (void)mpf->put(mpf, lp, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, rp, DB_MPOOL_DIRTY);
+
+ return (0);
+
+err: if (lp != NULL)
+ (void)mpf->put(mpf, lp, 0);
+ if (rp != NULL)
+ (void)mpf->put(mpf, rp, 0);
+ (void)mpf->put(mpf, cp->page, 0);
+ (void)__TLPUT(dbc, cp->lock);
+ return (ret);
+}
+
+/*
+ * __bam_page --
+ * Split the non-root page of a btree.
+ */
+static int
+__bam_page(dbc, pp, cp)
+ DBC *dbc;
+ EPG *pp, *cp;
+{
+ BTREE_CURSOR *bc;
+ DBT log_dbt;
+ DB_LSN log_lsn;
+ DB *dbp;
+ DB_LOCK rplock, tplock;
+ DB_MPOOLFILE *mpf;
+ DB_LSN save_lsn;
+ PAGE *lp, *rp, *alloc_rp, *tp;
+ db_indx_t split;
+ u_int32_t opflags;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ alloc_rp = lp = rp = tp = NULL;
+ LOCK_INIT(rplock);
+ LOCK_INIT(tplock);
+ ret = -1;
+
+ /*
+ * Create a new right page for the split, and fill in everything
+ * except its LSN and page number.
+ *
+ * We malloc space for both the left and right pages, so we don't get
+ * a new page from the underlying buffer pool until we know the split
+ * is going to succeed. The reason is that we can't release locks
+ * acquired during the get-a-new-page process because metadata page
+ * locks can't be discarded on failure since we may have modified the
+ * free list. So, if you assume that we're holding a write lock on the
+ * leaf page which ran out of space and started this split (e.g., we
+ * have already written records to the page, or we retrieved a record
+ * from it with the DB_RMW flag set), failing in a split with both a
+ * leaf page locked and the metadata page locked can potentially lock
+ * up the tree badly, because we've violated the rule of always locking
+ * down the tree, and never up.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &rp)) != 0)
+ goto err;
+ P_INIT(rp, dbp->pgsize, 0,
+ ISINTERNAL(cp->page) ? PGNO_INVALID : PGNO(cp->page),
+ ISINTERNAL(cp->page) ? PGNO_INVALID : NEXT_PGNO(cp->page),
+ cp->page->level, TYPE(cp->page));
+
+ /*
+ * Create new left page for the split, and fill in everything
+ * except its LSN and next-page page number.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &lp)) != 0)
+ goto err;
+ P_INIT(lp, dbp->pgsize, PGNO(cp->page),
+ ISINTERNAL(cp->page) ? PGNO_INVALID : PREV_PGNO(cp->page),
+ ISINTERNAL(cp->page) ? PGNO_INVALID : 0,
+ cp->page->level, TYPE(cp->page));
+
+ /*
+ * Split right.
+ *
+ * Only the indices are sorted on the page, i.e., the key/data pairs
+ * aren't, so it's simpler to copy the data from the split page onto
+ * two new pages instead of copying half the data to a new right page
+ * and compacting the left page in place. Since the left page can't
+ * change, we swap the original and the allocated left page after the
+ * split.
+ */
+ if ((ret = __bam_psplit(dbc, cp, lp, rp, &split)) != 0)
+ goto err;
+
+ /*
+ * Test to see if we are going to be able to insert the new pages into
+ * the parent page. The interesting failure here is that the parent
+ * page can't hold the new keys, and has to be split in turn, in which
+ * case we want to release all the locks we can.
+ */
+ if ((ret = __bam_pinsert(dbc, pp, lp, rp, 1)) != 0)
+ goto err;
+
+ /*
+ * Fix up the previous pointer of any leaf page following the split
+ * page.
+ *
+ * There's interesting deadlock situations here as we try to write-lock
+ * a page that's not in our direct ancestry. Consider a cursor walking
+ * backward through the leaf pages, that has our following page locked,
+ * and is waiting on a lock for the page we're splitting. In that case
+ * we're going to deadlock here . It's probably OK, stepping backward
+ * through the tree isn't a common operation.
+ */
+ if (ISLEAF(cp->page) && NEXT_PGNO(cp->page) != PGNO_INVALID) {
+ if ((ret = __db_lget(dbc,
+ 0, NEXT_PGNO(cp->page), DB_LOCK_WRITE, 0, &tplock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &NEXT_PGNO(cp->page), 0, &tp)) != 0)
+ goto err;
+ }
+
+ /*
+ * We've got everything locked down we need, and we know the split
+ * is going to succeed. Go and get the additional page we'll need.
+ */
+ if ((ret = __db_new(dbc, TYPE(cp->page), &alloc_rp)) != 0)
+ goto err;
+
+ /*
+ * Lock the new page. We need to do this because someone
+ * could get here through bt_lpgno if this page was recently
+ * dealocated. They can't look at it before we commit.
+ */
+ if ((ret = __db_lget(dbc,
+ 0, PGNO(alloc_rp), DB_LOCK_WRITE, 0, &rplock)) != 0)
+ goto err;
+
+ /*
+ * Fix up the page numbers we didn't have before. We have to do this
+ * before calling __bam_pinsert because it may copy a page number onto
+ * the parent page and it takes the page number from its page argument.
+ */
+ PGNO(rp) = NEXT_PGNO(lp) = PGNO(alloc_rp);
+
+ /* Actually update the parent page. */
+ if ((ret = __bam_pinsert(dbc, pp, lp, rp, 0)) != 0)
+ goto err;
+
+ bc = (BTREE_CURSOR *)dbc->internal;
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ memset(&log_dbt, 0, sizeof(log_dbt));
+ log_dbt.data = cp->page;
+ log_dbt.size = dbp->pgsize;
+ if (tp == NULL)
+ ZERO_LSN(log_lsn);
+ opflags = F_ISSET(bc, C_RECNUM) ? SPL_NRECS : 0;
+ if ((ret = __bam_split_log(dbp, dbc->txn, &LSN(cp->page), 0,
+ PGNO(cp->page), &LSN(cp->page), PGNO(alloc_rp),
+ &LSN(alloc_rp), (u_int32_t)NUM_ENT(lp),
+ tp == NULL ? 0 : PGNO(tp),
+ tp == NULL ? &log_lsn : &LSN(tp),
+ PGNO_INVALID, &log_dbt, opflags)) != 0)
+ goto err;
+
+ } else
+ LSN_NOT_LOGGED(LSN(cp->page));
+
+ /* Update the LSNs for all involved pages. */
+ LSN(alloc_rp) = LSN(cp->page);
+ LSN(lp) = LSN(cp->page);
+ LSN(rp) = LSN(cp->page);
+ if (tp != NULL)
+ LSN(tp) = LSN(cp->page);
+
+ /*
+ * Copy the left and right pages into place. There are two paths
+ * through here. Either we are logging and we set the LSNs in the
+ * logging path. However, if we are not logging, then we do not
+ * have valid LSNs on lp or rp. The correct LSNs to use are the
+ * ones on the page we got from __db_new or the one that was
+ * originally on cp->page. In both cases, we save the LSN from the
+ * real database page (not a malloc'd one) and reapply it after we
+ * do the copy.
+ */
+ save_lsn = alloc_rp->lsn;
+ memcpy(alloc_rp, rp, LOFFSET(dbp, rp));
+ memcpy((u_int8_t *)alloc_rp + HOFFSET(rp),
+ (u_int8_t *)rp + HOFFSET(rp), dbp->pgsize - HOFFSET(rp));
+ alloc_rp->lsn = save_lsn;
+
+ save_lsn = cp->page->lsn;
+ memcpy(cp->page, lp, LOFFSET(dbp, lp));
+ memcpy((u_int8_t *)cp->page + HOFFSET(lp),
+ (u_int8_t *)lp + HOFFSET(lp), dbp->pgsize - HOFFSET(lp));
+ cp->page->lsn = save_lsn;
+
+ /* Fix up the next-page link. */
+ if (tp != NULL)
+ PREV_PGNO(tp) = PGNO(rp);
+
+ /* Adjust any cursors. */
+ if ((ret = __bam_ca_split(dbc,
+ PGNO(cp->page), PGNO(cp->page), PGNO(rp), split, 0)) != 0)
+ goto err;
+
+ __os_free(dbp->dbenv, lp);
+ __os_free(dbp->dbenv, rp);
+
+ /*
+ * Success -- write the real pages back to the store. As we never
+ * acquired any sort of lock on the new page, we release it before
+ * releasing locks on the pages that reference it. We're finished
+ * modifying the page so it's not really necessary, but it's neater.
+ */
+ if ((t_ret = mpf->put(mpf, alloc_rp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, rplock);
+ if ((t_ret = mpf->put(mpf, pp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, pp->lock);
+ if ((t_ret = mpf->put(mpf, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, cp->lock);
+ if (tp != NULL) {
+ if ((t_ret =
+ mpf->put(mpf, tp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ (void)__TLPUT(dbc, tplock);
+ }
+ return (ret);
+
+err: if (lp != NULL)
+ __os_free(dbp->dbenv, lp);
+ if (rp != NULL)
+ __os_free(dbp->dbenv, rp);
+ if (alloc_rp != NULL)
+ (void)mpf->put(mpf, alloc_rp, 0);
+ if (tp != NULL)
+ (void)mpf->put(mpf, tp, 0);
+
+ /* We never updated the new or next pages, we can release them. */
+ (void)__LPUT(dbc, rplock);
+ (void)__LPUT(dbc, tplock);
+
+ (void)mpf->put(mpf, pp->page, 0);
+ if (ret == DB_NEEDSPLIT)
+ (void)__LPUT(dbc, pp->lock);
+ else
+ (void)__TLPUT(dbc, pp->lock);
+
+ (void)mpf->put(mpf, cp->page, 0);
+ if (ret == DB_NEEDSPLIT)
+ (void)__LPUT(dbc, cp->lock);
+ else
+ (void)__TLPUT(dbc, cp->lock);
+
+ return (ret);
+}
+
+/*
+ * __bam_broot --
+ * Fix up the btree root page after it has been split.
+ */
+static int
+__bam_broot(dbc, rootp, lp, rp)
+ DBC *dbc;
+ PAGE *rootp, *lp, *rp;
+{
+ BINTERNAL bi, *child_bi;
+ BKEYDATA *child_bk;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT hdr, data;
+ db_pgno_t root_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * If the root page was a leaf page, change it into an internal page.
+ * We copy the key we split on (but not the key's data, in the case of
+ * a leaf page) to the new root page.
+ */
+ root_pgno = cp->root;
+ P_INIT(rootp, dbp->pgsize,
+ root_pgno, PGNO_INVALID, PGNO_INVALID, lp->level + 1, P_IBTREE);
+
+ memset(&data, 0, sizeof(data));
+ memset(&hdr, 0, sizeof(hdr));
+
+ /*
+ * The btree comparison code guarantees that the left-most key on any
+ * internal btree page is never used, so it doesn't need to be filled
+ * in. Set the record count if necessary.
+ */
+ memset(&bi, 0, sizeof(bi));
+ bi.len = 0;
+ B_TSET(bi.type, B_KEYDATA, 0);
+ bi.pgno = lp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(dbp, lp);
+ RE_NREC_SET(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ if ((ret =
+ __db_pitem(dbc, rootp, 0, BINTERNAL_SIZE(0), &hdr, NULL)) != 0)
+ return (ret);
+
+ switch (TYPE(rp)) {
+ case P_IBTREE:
+ /* Copy the first key of the child page onto the root page. */
+ child_bi = GET_BINTERNAL(dbp, rp, 0);
+
+ bi.len = child_bi->len;
+ B_TSET(bi.type, child_bi->type, 0);
+ bi.pgno = rp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(dbp, rp);
+ RE_NREC_ADJ(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ data.data = child_bi->data;
+ data.size = child_bi->len;
+ if ((ret = __db_pitem(dbc, rootp, 1,
+ BINTERNAL_SIZE(child_bi->len), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bi->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)(child_bi->data))->pgno, 1)) != 0)
+ return (ret);
+ break;
+ case P_LDUP:
+ case P_LBTREE:
+ /* Copy the first key of the child page onto the root page. */
+ child_bk = GET_BKEYDATA(dbp, rp, 0);
+ switch (B_TYPE(child_bk->type)) {
+ case B_KEYDATA:
+ bi.len = child_bk->len;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(dbp, rp);
+ RE_NREC_ADJ(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ data.data = child_bk->data;
+ data.size = child_bk->len;
+ if ((ret = __db_pitem(dbc, rootp, 1,
+ BINTERNAL_SIZE(child_bk->len), &hdr, &data)) != 0)
+ return (ret);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ bi.len = BOVERFLOW_SIZE;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rp->pgno;
+ if (F_ISSET(cp, C_RECNUM)) {
+ bi.nrecs = __bam_total(dbp, rp);
+ RE_NREC_ADJ(rootp, bi.nrecs);
+ }
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ data.data = child_bk;
+ data.size = BOVERFLOW_SIZE;
+ if ((ret = __db_pitem(dbc, rootp, 1,
+ BINTERNAL_SIZE(BOVERFLOW_SIZE), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bk->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)child_bk)->pgno, 1)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, rp->pgno));
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, rp->pgno));
+ }
+ return (0);
+}
+
+/*
+ * __ram_root --
+ * Fix up the recno root page after it has been split.
+ */
+static int
+__ram_root(dbc, rootp, lp, rp)
+ DBC *dbc;
+ PAGE *rootp, *lp, *rp;
+{
+ DB *dbp;
+ DBT hdr;
+ RINTERNAL ri;
+ db_pgno_t root_pgno;
+ int ret;
+
+ /*
+  * lp/rp are the left and right children produced by the split;
+  * rootp is the root page being rebuilt one level above them
+  * (note the lp->level + 1 passed to P_INIT below).
+  */
+ dbp = dbc->dbp;
+ root_pgno = dbc->internal->root;
+
+ /* Initialize the page. */
+ P_INIT(rootp, dbp->pgsize,
+ root_pgno, PGNO_INVALID, PGNO_INVALID, lp->level + 1, P_IRECNO);
+
+ /*
+  * Initialize the header.  The same hdr/ri pair is reused for both
+  * inserts below; only ri's fields change between the two calls.
+  */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &ri;
+ hdr.size = RINTERNAL_SIZE;
+
+ /* Insert the left and right keys, set the header information. */
+ ri.pgno = lp->pgno;
+ ri.nrecs = __bam_total(dbp, lp);
+ if ((ret = __db_pitem(dbc, rootp, 0, RINTERNAL_SIZE, &hdr, NULL)) != 0)
+ return (ret);
+ RE_NREC_SET(rootp, ri.nrecs);
+ ri.pgno = rp->pgno;
+ ri.nrecs = __bam_total(dbp, rp);
+ if ((ret = __db_pitem(dbc, rootp, 1, RINTERNAL_SIZE, &hdr, NULL)) != 0)
+ return (ret);
+ RE_NREC_ADJ(rootp, ri.nrecs);
+ return (0);
+}
+
+/*
+ * __bam_pinsert --
+ * Insert a new key into a parent page, completing the split.
+ */
+static int
+__bam_pinsert(dbc, parent, lchild, rchild, space_check)
+ DBC *dbc;
+ EPG *parent;
+ PAGE *lchild, *rchild;
+ int space_check;
+{
+ BINTERNAL bi, *child_bi;
+ BKEYDATA *child_bk, *tmp_bk;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DB *dbp;
+ DBT a, b, hdr, data;
+ PAGE *ppage;
+ RINTERNAL ri;
+ db_indx_t off;
+ db_recno_t nrecs;
+ size_t (*func) __P((DB *, const DBT *, const DBT *));
+ u_int32_t n, nbytes, nksize;
+ int ret;
+
+ /*
+  * space_check != 0 means: only probe whether the parent page has
+  * room for the new entry.  Every branch below returns DB_NEEDSPLIT
+  * or 0 before modifying anything when space_check is set.
+  */
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ t = dbp->bt_internal;
+ ppage = parent->page;
+
+ /* If handling record numbers, count records split to the right page. */
+ nrecs = F_ISSET(cp, C_RECNUM) &&
+ !space_check ? __bam_total(dbp, rchild) : 0;
+
+ /*
+  * Now we insert the new page's first key into the parent page, which
+  * completes the split. The parent points to a PAGE and a page index
+  * offset, where the new key goes ONE AFTER the index, because we split
+  * to the right.
+  *
+  * XXX
+  * Some btree algorithms replace the key for the old page as well as
+  * the new page. We don't, as there's no reason to believe that the
+  * first key on the old page is any better than the key we have, and,
+  * in the case of a key being placed at index 0 causing the split, the
+  * key is unavailable.
+  */
+ off = parent->indx + O_INDX;
+
+ /*
+  * Calculate the space needed on the parent page.
+  *
+  * Prefix trees: space hack used when inserting into BINTERNAL pages.
+  * Retain only what's needed to distinguish between the new entry and
+  * the LAST entry on the page to its left. If the keys compare equal,
+  * retain the entire key. We ignore overflow keys, and the entire key
+  * must be retained for the next-to-leftmost key on the leftmost page
+  * of each level, or the search will fail. Applicable ONLY to internal
+  * pages that have leaf pages as children. Further reduction of the
+  * key between pairs of internal pages loses too much information.
+  */
+ switch (TYPE(rchild)) {
+ case P_IBTREE:
+ child_bi = GET_BINTERNAL(dbp, rchild, 0);
+ nbytes = BINTERNAL_PSIZE(child_bi->len);
+
+ if (P_FREESPACE(dbp, ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ /* Add a new record for the right page. */
+ memset(&bi, 0, sizeof(bi));
+ bi.len = child_bi->len;
+ B_TSET(bi.type, child_bi->type, 0);
+ bi.pgno = rchild->pgno;
+ bi.nrecs = nrecs;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ memset(&data, 0, sizeof(data));
+ data.data = child_bi->data;
+ data.size = child_bi->len;
+ if ((ret = __db_pitem(dbc, ppage, off,
+ BINTERNAL_SIZE(child_bi->len), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bi->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)(child_bi->data))->pgno, 1)) != 0)
+ return (ret);
+ break;
+ case P_LDUP:
+ case P_LBTREE:
+ child_bk = GET_BKEYDATA(dbp, rchild, 0);
+ switch (B_TYPE(child_bk->type)) {
+ case B_KEYDATA:
+ /*
+ * We set t->bt_prefix to NULL if we have a comparison
+ * callback but no prefix compression callback. But,
+ * if we're splitting in an off-page duplicates tree,
+ * we still have to do some checking. If using the
+ * default off-page duplicates comparison routine we
+ * can use the default prefix compression callback. If
+ * not using the default off-page duplicates comparison
+ * routine, we can't do any kind of prefix compression
+ * as there's no way for an application to specify a
+ * prefix compression callback that corresponds to its
+ * comparison callback.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ if (dbp->dup_compare == __bam_defcmp)
+ func = __bam_defpfx;
+ else
+ func = NULL;
+ } else
+ func = t->bt_prefix;
+
+ nbytes = BINTERNAL_PSIZE(child_bk->len);
+ nksize = child_bk->len;
+ if (func == NULL)
+ goto noprefix;
+ if (ppage->prev_pgno == PGNO_INVALID && off <= 1)
+ goto noprefix;
+ /*
+ * Last key on the left child: P_LDUP pages use one
+ * index slot per item, P_LBTREE two (key/data pairs).
+ */
+ tmp_bk = GET_BKEYDATA(dbp, lchild, NUM_ENT(lchild) -
+ (TYPE(lchild) == P_LDUP ? O_INDX : P_INDX));
+ if (B_TYPE(tmp_bk->type) != B_KEYDATA)
+ goto noprefix;
+ memset(&a, 0, sizeof(a));
+ a.size = tmp_bk->len;
+ a.data = tmp_bk->data;
+ memset(&b, 0, sizeof(b));
+ b.size = child_bk->len;
+ b.data = child_bk->data;
+ nksize = (u_int32_t)func(dbp, &a, &b);
+ if ((n = BINTERNAL_PSIZE(nksize)) < nbytes)
+ nbytes = n;
+ else
+noprefix: nksize = child_bk->len;
+
+ if (P_FREESPACE(dbp, ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ memset(&bi, 0, sizeof(bi));
+ bi.len = nksize;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rchild->pgno;
+ bi.nrecs = nrecs;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ memset(&data, 0, sizeof(data));
+ data.data = child_bk->data;
+ data.size = nksize;
+ if ((ret = __db_pitem(dbc, ppage, off,
+ BINTERNAL_SIZE(nksize), &hdr, &data)) != 0)
+ return (ret);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ nbytes = BINTERNAL_PSIZE(BOVERFLOW_SIZE);
+
+ if (P_FREESPACE(dbp, ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ memset(&bi, 0, sizeof(bi));
+ bi.len = BOVERFLOW_SIZE;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = rchild->pgno;
+ bi.nrecs = nrecs;
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &bi;
+ hdr.size = SSZA(BINTERNAL, data);
+ memset(&data, 0, sizeof(data));
+ data.data = child_bk;
+ data.size = BOVERFLOW_SIZE;
+ if ((ret = __db_pitem(dbc, ppage, off,
+ BINTERNAL_SIZE(BOVERFLOW_SIZE), &hdr, &data)) != 0)
+ return (ret);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bk->type) == B_OVERFLOW)
+ if ((ret = __db_ovref(dbc,
+ ((BOVERFLOW *)child_bk)->pgno, 1)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, rchild->pgno));
+ }
+ break;
+ case P_IRECNO:
+ case P_LRECNO:
+ nbytes = RINTERNAL_PSIZE;
+
+ if (P_FREESPACE(dbp, ppage) < nbytes)
+ return (DB_NEEDSPLIT);
+ if (space_check)
+ return (0);
+
+ /* Add a new record for the right page. */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.data = &ri;
+ hdr.size = RINTERNAL_SIZE;
+ ri.pgno = rchild->pgno;
+ ri.nrecs = nrecs;
+ if ((ret = __db_pitem(dbc,
+ ppage, off, RINTERNAL_SIZE, &hdr, NULL)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, rchild->pgno));
+ }
+
+ /*
+  * If a Recno or Btree with record numbers AM page, or an off-page
+  * duplicates tree, adjust the parent page's left page record count.
+  */
+ if (F_ISSET(cp, C_RECNUM)) {
+ /* Log the change. */
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_cadjust_log(dbp, dbc->txn,
+ &LSN(ppage), 0, PGNO(ppage),
+ &LSN(ppage), parent->indx, -(int32_t)nrecs, 0)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(ppage));
+
+ /* Update the left page count. */
+ if (dbc->dbtype == DB_RECNO)
+ GET_RINTERNAL(dbp, ppage, parent->indx)->nrecs -= nrecs;
+ else
+ GET_BINTERNAL(dbp, ppage, parent->indx)->nrecs -= nrecs;
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_psplit --
+ * Do the real work of splitting the page.
+ */
+static int
+__bam_psplit(dbc, cp, lp, rp, splitret)
+ DBC *dbc;
+ EPG *cp;
+ PAGE *lp, *rp;
+ db_indx_t *splitret;
+{
+ DB *dbp;
+ PAGE *pp;
+ db_indx_t half, *inp, nbytes, off, splitp, top;
+ int adjust, cnt, iflag, isbigkey, ret;
+
+ dbp = dbc->dbp;
+ pp = cp->page;
+ inp = P_INP(dbp, pp);
+ /*
+  * P_LBTREE leaves store key/data pairs (two index slots per item);
+  * every other page type here stores one slot per item, so "adjust"
+  * is the per-item stride used throughout.
+  */
+ adjust = TYPE(pp) == P_LBTREE ? P_INDX : O_INDX;
+
+ /*
+  * If we're splitting the first (last) page on a level because we're
+  * inserting (appending) a key to it, it's likely that the data is
+  * sorted. Moving a single item to the new page is less work and can
+  * push the fill factor higher than normal. This is trivial when we
+  * are splitting a new page before the beginning of the tree, all of
+  * the interesting tests are against values of 0.
+  *
+  * Catching appends to the tree is harder. In a simple append, we're
+  * inserting an item that sorts past the end of the tree; the cursor
+  * will point past the last element on the page. But, in trees with
+  * duplicates, the cursor may point to the last entry on the page --
+  * in this case, the entry will also be the last element of a duplicate
+  * set (the last because the search call specified the S_DUPLAST flag).
+  * The only way to differentiate between an insert immediately before
+  * the last item in a tree or an append after a duplicate set which is
+  * also the last item in the tree is to call the comparison function.
+  * When splitting internal pages during an append, the search code
+  * guarantees the cursor always points to the largest page item less
+  * than the new internal entry. To summarize, we want to catch three
+  * possible index values:
+  *
+  * NUM_ENT(page) Btree/Recno leaf insert past end-of-tree
+  * NUM_ENT(page) - O_INDX Btree or Recno internal insert past EOT
+  * NUM_ENT(page) - P_INDX Btree leaf insert past EOT after a set
+  * of duplicates
+  *
+  * two of which, (NUM_ENT(page) - O_INDX or P_INDX) might be an insert
+  * near the end of the tree, and not after the end of the tree at all.
+  * Do a simple test which might be wrong because calling the comparison
+  * functions is expensive. Regardless, it's not a big deal if we're
+  * wrong, we'll do the split the right way next time.
+  */
+ off = 0;
+ if (NEXT_PGNO(pp) == PGNO_INVALID && cp->indx >= NUM_ENT(pp) - adjust)
+ off = NUM_ENT(pp) - adjust;
+ else if (PREV_PGNO(pp) == PGNO_INVALID && cp->indx == 0)
+ off = adjust;
+ if (off != 0)
+ goto sort;
+
+ /*
+  * Split the data to the left and right pages. Try not to split on
+  * an overflow key. (Overflow keys on internal pages will slow down
+  * searches.) Refuse to split in the middle of a set of duplicates.
+  *
+  * First, find the optimum place to split.
+  *
+  * It's possible to try and split past the last record on the page if
+  * there's a very large record at the end of the page. Make sure this
+  * doesn't happen by bounding the check at the next-to-last entry on
+  * the page.
+  *
+  * Note, we try and split half the data present on the page. This is
+  * because another process may have already split the page and left
+  * it half empty. We don't try and skip the split -- we don't know
+  * how much space we're going to need on the page, and we may need up
+  * to half the page for a big item, so there's no easy test to decide
+  * if we need to split or not. Besides, if two threads are inserting
+  * data into the same place in the database, we're probably going to
+  * need more space soon anyway.
+  */
+ top = NUM_ENT(pp) - adjust;
+ half = (dbp->pgsize - HOFFSET(pp)) / 2;
+ for (nbytes = 0, off = 0; off < top && nbytes < half; ++off)
+ switch (TYPE(pp)) {
+ case P_IBTREE:
+ if (B_TYPE(
+ GET_BINTERNAL(dbp, pp, off)->type) == B_KEYDATA)
+ nbytes += BINTERNAL_SIZE(
+ GET_BINTERNAL(dbp, pp, off)->len);
+ else
+ nbytes += BINTERNAL_SIZE(BOVERFLOW_SIZE);
+ break;
+ case P_LBTREE:
+ if (B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA)
+ nbytes += BKEYDATA_SIZE(GET_BKEYDATA(dbp,
+ pp, off)->len);
+ else
+ nbytes += BOVERFLOW_SIZE;
+
+ /* Account for the key, then fall through for the data. */
+ ++off;
+ /* FALLTHROUGH */
+ case P_LDUP:
+ case P_LRECNO:
+ if (B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA)
+ nbytes += BKEYDATA_SIZE(GET_BKEYDATA(dbp,
+ pp, off)->len);
+ else
+ nbytes += BOVERFLOW_SIZE;
+ break;
+ case P_IRECNO:
+ nbytes += RINTERNAL_SIZE;
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, pp->pgno));
+ }
+sort: splitp = off;
+
+ /*
+  * Splitp is either at or just past the optimum split point. If the
+  * tree type is such that we're going to promote a key to an internal
+  * page, and our current choice is an overflow key, look for something
+  * close by that's smaller.
+  */
+ switch (TYPE(pp)) {
+ case P_IBTREE:
+ iflag = 1;
+ isbigkey =
+ B_TYPE(GET_BINTERNAL(dbp, pp, off)->type) != B_KEYDATA;
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ iflag = 0;
+ isbigkey = B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) !=
+ B_KEYDATA;
+ break;
+ default:
+ iflag = isbigkey = 0;
+ }
+ /* Probe up to three items on either side for a non-overflow key. */
+ if (isbigkey)
+ for (cnt = 1; cnt <= 3; ++cnt) {
+ off = splitp + cnt * adjust;
+ if (off < (db_indx_t)NUM_ENT(pp) &&
+ ((iflag && B_TYPE(
+ GET_BINTERNAL(dbp, pp,off)->type) == B_KEYDATA) ||
+ B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA)) {
+ splitp = off;
+ break;
+ }
+ if (splitp <= (db_indx_t)(cnt * adjust))
+ continue;
+ off = splitp - cnt * adjust;
+ if (iflag ? B_TYPE(
+ GET_BINTERNAL(dbp, pp, off)->type) == B_KEYDATA :
+ B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA) {
+ splitp = off;
+ break;
+ }
+ }
+
+ /*
+  * We can't split in the middle a set of duplicates. We know that
+  * no duplicate set can take up more than about 25% of the page,
+  * because that's the point where we push it off onto a duplicate
+  * page set. So, this loop can't be unbounded.
+  */
+ if (TYPE(pp) == P_LBTREE &&
+ inp[splitp] == inp[splitp - adjust])
+ for (cnt = 1;; ++cnt) {
+ off = splitp + cnt * adjust;
+ if (off < NUM_ENT(pp) &&
+ inp[splitp] != inp[off]) {
+ splitp = off;
+ break;
+ }
+ if (splitp <= (db_indx_t)(cnt * adjust))
+ continue;
+ off = splitp - cnt * adjust;
+ if (inp[splitp] != inp[off]) {
+ splitp = off + adjust;
+ break;
+ }
+ }
+
+ /* We're going to split at splitp. */
+ if ((ret = __bam_copy(dbp, pp, lp, 0, splitp)) != 0)
+ return (ret);
+ if ((ret = __bam_copy(dbp, pp, rp, splitp, NUM_ENT(pp))) != 0)
+ return (ret);
+
+ *splitret = splitp;
+ return (0);
+}
+
+/*
+ * __bam_copy --
+ * Copy a set of records from one page to another.
+ *
+ * PUBLIC: int __bam_copy __P((DB *, PAGE *, PAGE *, u_int32_t, u_int32_t));
+ */
+int
+__bam_copy(dbp, pp, cp, nxt, stop)
+ DB *dbp;
+ PAGE *pp, *cp;
+ u_int32_t nxt, stop;
+{
+ db_indx_t *cinp, nbytes, off, *pinp;
+
+ cinp = P_INP(dbp, cp);
+ pinp = P_INP(dbp, pp);
+ /*
+  * Nxt is the offset of the next record to be placed on the target page.
+  * Copies source entries [nxt, stop) onto cp starting at index 0,
+  * bumping NUM_ENT(cp) as it goes.
+  */
+ for (off = 0; nxt < stop; ++nxt, ++NUM_ENT(cp), ++off) {
+ switch (TYPE(pp)) {
+ case P_IBTREE:
+ if (B_TYPE(
+ GET_BINTERNAL(dbp, pp, nxt)->type) == B_KEYDATA)
+ nbytes = BINTERNAL_SIZE(
+ GET_BINTERNAL(dbp, pp, nxt)->len);
+ else
+ nbytes = BINTERNAL_SIZE(BOVERFLOW_SIZE);
+ break;
+ case P_LBTREE:
+ /*
+ * If we're on a key and it's a duplicate, just copy
+ * the offset.
+ */
+ if (off != 0 && (nxt % P_INDX) == 0 &&
+ pinp[nxt] == pinp[nxt - P_INDX]) {
+ cinp[off] = cinp[off - P_INDX];
+ continue;
+ }
+ /* FALLTHROUGH */
+ case P_LDUP:
+ case P_LRECNO:
+ if (B_TYPE(GET_BKEYDATA(dbp, pp, nxt)->type) ==
+ B_KEYDATA)
+ nbytes = BKEYDATA_SIZE(GET_BKEYDATA(dbp,
+ pp, nxt)->len);
+ else
+ nbytes = BOVERFLOW_SIZE;
+ break;
+ case P_IRECNO:
+ nbytes = RINTERNAL_SIZE;
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, pp->pgno));
+ }
+ /* Claim nbytes from the free region and record the entry offset. */
+ cinp[off] = HOFFSET(cp) -= nbytes;
+ memcpy(P_ENTRY(dbp, cp, off), P_ENTRY(dbp, pp, nxt), nbytes);
+ }
+ return (0);
+}
diff --git a/storage/bdb/btree/bt_stat.c b/storage/bdb/btree/bt_stat.c
new file mode 100644
index 00000000000..4428de98294
--- /dev/null
+++ b/storage/bdb/btree/bt_stat.c
@@ -0,0 +1,481 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_stat.c,v 11.52 2002/05/30 15:40:27 krinsky Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+
+/*
+ * __bam_stat --
+ * Gather/print the btree statistics
+ *
+ * PUBLIC: int __bam_stat __P((DB *, void *, u_int32_t));
+ */
+int
+__bam_stat(dbp, spp, flags)
+ DB *dbp;
+ void *spp;
+ u_int32_t flags;
+{
+ BTMETA *meta;
+ BTREE *t;
+ BTREE_CURSOR *cp;
+ DBC *dbc;
+ DB_BTREE_STAT *sp;
+ DB_LOCK lock, metalock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret, t_ret, write_meta;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ /*
+  * NULL/zero everything that the err: cleanup below inspects, so a
+  * goto err from any point releases only what was acquired.
+  */
+ meta = NULL;
+ t = dbp->bt_internal;
+ sp = NULL;
+ LOCK_INIT(metalock);
+ LOCK_INIT(lock);
+ mpf = dbp->mpf;
+ h = NULL;
+ ret = 0;
+ write_meta = 0;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_statchk(dbp, flags)) != 0)
+ return (ret);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ DEBUG_LWRITE(dbc, NULL, "bam_stat", NULL, NULL, flags);
+
+ /* Allocate and clear the structure. */
+ if ((ret = __os_umalloc(dbp->dbenv, sizeof(*sp), &sp)) != 0)
+ goto err;
+ memset(sp, 0, sizeof(*sp));
+
+ /* Get the metadata page for the entire database. */
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ /* The deprecated count flags are aliases for DB_FAST_STAT. */
+ if (flags == DB_RECORDCOUNT || flags == DB_CACHED_COUNTS)
+ flags = DB_FAST_STAT;
+ if (flags == DB_FAST_STAT)
+ goto meta_only;
+
+ /* Walk the metadata free list, counting pages. */
+ for (sp->bt_free = 0, pgno = meta->dbmeta.free; pgno != PGNO_INVALID;) {
+ ++sp->bt_free;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ pgno = h->next_pgno;
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ goto err;
+ h = NULL;
+ }
+
+ /* Get the root page. */
+ pgno = cp->root;
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ /* Get the levels from the root page. */
+ sp->bt_levels = h->level;
+
+ /* Discard the root page. */
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ goto err;
+ h = NULL;
+ __LPUT(dbc, lock);
+
+ /* Walk the tree. */
+ if ((ret = __bam_traverse(dbc,
+ DB_LOCK_READ, cp->root, __bam_stat_callback, sp)) != 0)
+ goto err;
+
+ /*
+  * Get the subdatabase metadata page if it's not the same as the
+  * one we already have.
+  */
+ /* Only write counts back to the meta page on a writable handle. */
+ write_meta = !F_ISSET(dbp, DB_AM_RDONLY);
+meta_only:
+ if (t->bt_meta != PGNO_BASE_MD || write_meta != 0) {
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ meta = NULL;
+ __LPUT(dbc, metalock);
+
+ if ((ret = __db_lget(dbc,
+ 0, t->bt_meta, write_meta == 0 ?
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &t->bt_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ }
+ if (flags == DB_FAST_STAT) {
+ if (dbp->type == DB_RECNO ||
+ (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM))) {
+ if ((ret = __db_lget(dbc, 0,
+ cp->root, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret =
+ mpf->get(mpf, &cp->root, 0, (PAGE **)&h)) != 0)
+ goto err;
+
+ sp->bt_nkeys = RE_NREC(h);
+ } else
+ sp->bt_nkeys = meta->dbmeta.key_count;
+ sp->bt_ndata = meta->dbmeta.record_count;
+ }
+
+ /* Get metadata page statistics. */
+ sp->bt_metaflags = meta->dbmeta.flags;
+ sp->bt_maxkey = meta->maxkey;
+ sp->bt_minkey = meta->minkey;
+ sp->bt_re_len = meta->re_len;
+ sp->bt_re_pad = meta->re_pad;
+ sp->bt_pagesize = meta->dbmeta.pagesize;
+ sp->bt_magic = meta->dbmeta.magic;
+ sp->bt_version = meta->dbmeta.version;
+
+ if (write_meta != 0) {
+ meta->dbmeta.key_count = sp->bt_nkeys;
+ meta->dbmeta.record_count = sp->bt_ndata;
+ }
+
+ *(DB_BTREE_STAT **)spp = sp;
+
+err: /* Discard the second page. */
+ __LPUT(dbc, lock);
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the metadata page. */
+ __LPUT(dbc, metalock);
+ if (meta != NULL && (t_ret = mpf->put(
+ mpf, meta, write_meta == 0 ? 0 : DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0 && sp != NULL) {
+ __os_ufree(dbp->dbenv, sp);
+ *(DB_BTREE_STAT **)spp = NULL;
+ }
+
+ return (ret);
+}
+
+/*
+ * __bam_traverse --
+ * Walk a Btree database.
+ *
+ * PUBLIC: int __bam_traverse __P((DBC *, db_lockmode_t,
+ * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+ */
+int
+__bam_traverse(dbc, mode, root_pgno, callback, cookie)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t root_pgno;
+ int (*callback)__P((DB *, PAGE *, void *, int *));
+ void *cookie;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ RINTERNAL *ri;
+ db_indx_t indx;
+ int already_put, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ already_put = 0;
+
+ /*
+  * Lock and fetch the page, recursively visit its children (and any
+  * overflow chains they reference), then hand the page itself to the
+  * callback.  The callback may consume the page itself, signalling
+  * that by setting already_put so we don't release it twice.
+  */
+ if ((ret = __db_lget(dbc, 0, root_pgno, mode, 0, &lock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &root_pgno, 0, &h)) != 0) {
+ __LPUT(dbc, lock);
+ return (ret);
+ }
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
+ bi = GET_BINTERNAL(dbp, h, indx);
+ if (B_TYPE(bi->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ ((BOVERFLOW *)bi->data)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ if ((ret = __bam_traverse(
+ dbc, mode, bi->pgno, callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ case P_IRECNO:
+ for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
+ ri = GET_RINTERNAL(dbp, h, indx);
+ if ((ret = __bam_traverse(
+ dbc, mode, ri->pgno, callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ case P_LBTREE:
+ /* Key/data pairs: check the key, then the data item. */
+ for (indx = 0; indx < NUM_ENT(h); indx += P_INDX) {
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ GET_BOVERFLOW(dbp, h, indx)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ bk = GET_BKEYDATA(dbp, h, indx + O_INDX);
+ if (B_TYPE(bk->type) == B_DUPLICATE &&
+ (ret = __bam_traverse(dbc, mode,
+ GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ case P_LDUP:
+ case P_LRECNO:
+ for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW &&
+ (ret = __db_traverse_big(dbp,
+ GET_BOVERFLOW(dbp, h, indx)->pgno,
+ callback, cookie)) != 0)
+ goto err;
+ }
+ break;
+ }
+
+ ret = callback(dbp, h, cookie, &already_put);
+
+ /*
+  * Release the page, keeping the first error: the original test was
+  * "ret != 0", which both dropped a failing put on the success path
+  * and overwrote a real traversal error with the put error.  Every
+  * other cleanup in this file uses the "ret == 0" idiom.
+  */
+err: if (!already_put && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ __LPUT(dbc, lock);
+
+ return (ret);
+}
+
+/*
+ * __bam_stat_callback --
+ * Statistics callback.
+ *
+ * PUBLIC: int __bam_stat_callback __P((DB *, PAGE *, void *, int *));
+ */
+int
+__bam_stat_callback(dbp, h, cookie, putp)
+ DB *dbp;
+ PAGE *h;
+ void *cookie;
+ int *putp;
+{
+ DB_BTREE_STAT *sp;
+ db_indx_t i, *offsets, nent;
+ u_int8_t bktype;
+
+ /* Accumulate per-page statistics into the caller's DB_BTREE_STAT. */
+ sp = cookie;
+ *putp = 0;
+ nent = NUM_ENT(h);
+ offsets = P_INP(dbp, h);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ sp->bt_int_pg++;
+ sp->bt_int_pgfree += P_FREESPACE(dbp, h);
+ break;
+ case P_LBTREE:
+ /* Walk key/data pairs, skipping on-page dups and deletions. */
+ for (i = 0; i < nent; i += P_INDX) {
+ /* A key counts once per on-page duplicate set. */
+ if (i + P_INDX >= nent ||
+ offsets[i] != offsets[i + P_INDX])
+ sp->bt_nkeys++;
+
+ bktype = GET_BKEYDATA(dbp, h, i + O_INDX)->type;
+ if (!B_DISSET(bktype) && B_TYPE(bktype) != B_DUPLICATE)
+ sp->bt_ndata++;
+ }
+
+ sp->bt_leaf_pg++;
+ sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
+ break;
+ case P_LRECNO:
+ if (dbp->type != DB_RECNO) {
+ /* An off-page duplicate leaf: every item is data. */
+ sp->bt_ndata += nent;
+
+ sp->bt_dup_pg++;
+ sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
+ break;
+ }
+
+ /* A true recno leaf: every item is a key. */
+ sp->bt_nkeys += nent;
+
+ /*
+  * Non-renumbering recno databases can hold deleted items,
+  * so count the live ones individually.
+  */
+ if (F_ISSET(dbp, DB_AM_RENUMBER))
+ sp->bt_ndata += nent;
+ else
+ for (i = 0; i < nent; i += O_INDX) {
+ bktype = GET_BKEYDATA(dbp, h, i)->type;
+ if (!B_DISSET(bktype))
+ sp->bt_ndata++;
+ }
+
+ sp->bt_leaf_pg++;
+ sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
+ break;
+ case P_LDUP:
+ /* Count only undeleted items. */
+ for (i = 0; i < nent; i += O_INDX) {
+ bktype = GET_BKEYDATA(dbp, h, i)->type;
+ if (!B_DISSET(bktype))
+ sp->bt_ndata++;
+ }
+
+ sp->bt_dup_pg++;
+ sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
+ break;
+ case P_OVERFLOW:
+ sp->bt_over_pg++;
+ sp->bt_over_pgfree += P_OVFLSPACE(dbp, dbp->pgsize, h);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, h->pgno));
+ }
+ return (0);
+}
+
+/*
+ * __bam_key_range --
+ * Return proportion of keys relative to given key. The numbers are
+ * slightly skewed due to on page duplicates.
+ *
+ * PUBLIC: int __bam_key_range __P((DB *,
+ * PUBLIC: DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+ */
+int
+__bam_key_range(dbp, txn, dbt, kp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *dbt;
+ DB_KEY_RANGE *kp;
+ u_int32_t flags;
+{
+ BTREE_CURSOR *cp;
+ DBC *dbc;
+ EPG *sp;
+ double factor;
+ int exact, ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->key_range");
+
+ if (flags != 0)
+ return (__db_ferr(dbp->dbenv, "DB->key_range", 0));
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+ return (ret);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, NULL, "bam_key_range", NULL, NULL, 0);
+
+ /* Search down to the leaf, leaving the page stack on the cursor. */
+ if ((ret = __bam_search(dbc, PGNO_INVALID,
+ dbt, S_STK_ONLY, 1, NULL, &exact)) != 0)
+ goto err;
+
+ cp = (BTREE_CURSOR *)dbc->internal;
+ kp->less = kp->greater = 0.0;
+
+ factor = 1.0;
+ /*
+  * Correct the leaf page.  Leaf entries/indx are halved, presumably
+  * converting key/data slot counts to key counts -- TODO confirm
+  * against P_INDX on leaf btree pages.
+  */
+ cp->csp->entries /= 2;
+ cp->csp->indx /= 2;
+ /* factor is the fraction of the key space this level represents. */
+ for (sp = cp->sp; sp <= cp->csp; ++sp) {
+ /*
+ * At each level we know that pages greater than indx contain
+ * keys greater than what we are looking for and those less
+ * than indx are less than. The one pointed to by indx may
+ * have some less, some greater or even equal. If indx is
+ * equal to the number of entries, then the key is out of range
+ * and everything is less.
+ */
+ if (sp->indx == 0)
+ kp->greater += factor * (sp->entries - 1)/sp->entries;
+ else if (sp->indx == sp->entries)
+ kp->less += factor;
+ else {
+ kp->less += factor * sp->indx / sp->entries;
+ kp->greater += factor *
+ (sp->entries - sp->indx - 1) / sp->entries;
+ }
+ factor *= 1.0/sp->entries;
+ }
+
+ /*
+  * If there was an exact match then assign 1 n'th to the key itself.
+  * Otherwise that factor belongs to those greater than the key, unless
+  * the key was out of range.
+  */
+ if (exact)
+ kp->equal = factor;
+ else {
+ if (kp->less != 1)
+ kp->greater += factor;
+ kp->equal = 0;
+ }
+
+ BT_STK_CLR(cp);
+
+err: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/storage/bdb/btree/bt_upgrade.c b/storage/bdb/btree/bt_upgrade.c
new file mode 100644
index 00000000000..9f92648d739
--- /dev/null
+++ b/storage/bdb/btree/bt_upgrade.c
@@ -0,0 +1,162 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_upgrade.c,v 11.25 2002/08/06 06:11:13 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_upgrade.h"
+
+/*
+ * __bam_30_btreemeta --
+ * Upgrade the metadata pages from version 6 to version 7.
+ *
+ * PUBLIC: int __bam_30_btreemeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__bam_30_btreemeta(dbp, real_name, buf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *buf;
+{
+ BTMETA30 *newmeta;
+ BTMETA2X *oldmeta;
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /* Both layouts alias the same buffer; the copy is in-place. */
+ newmeta = (BTMETA30 *)buf;
+ oldmeta = (BTMETA2X *)buf;
+
+ /*
+ * Move things from the end up, so we do not overwrite things.
+ * We are going to create a new uid, so we can move the stuff
+ * at the end of the structure first, overwriting the uid.
+ */
+
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->minkey = oldmeta->minkey;
+ newmeta->maxkey = oldmeta->maxkey;
+ newmeta->dbmeta.free = oldmeta->free;
+ newmeta->dbmeta.flags = oldmeta->flags;
+ newmeta->dbmeta.type = P_BTREEMETA;
+
+ newmeta->dbmeta.version = 7;
+ /*
+  * Replace the unique ID.  NOTE(review): buf + 36 is presumably the
+  * byte offset of dbmeta.uid in the version-7 layout -- confirm
+  * against the BTMETA30 declaration.
+  */
+ if ((ret = __os_fileid(dbenv, real_name, 1, buf + 36)) != 0)
+ return (ret);
+
+ newmeta->root = 1;
+
+ return (0);
+}
+
+/*
+ * __bam_31_btreemeta --
+ * Upgrade the database from version 7 to version 8.
+ *
+ * PUBLIC: int __bam_31_btreemeta
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__bam_31_btreemeta(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ BTMETA31 *newmeta;
+ BTMETA30 *oldmeta;
+
+ /* These parameters are unused in this upgrade step. */
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+ COMPQUIET(fhp, NULL);
+
+ /* Both layouts alias the same page; the rewrite is in-place. */
+ newmeta = (BTMETA31 *)h;
+ oldmeta = (BTMETA30 *)h;
+
+ /*
+ * Copy the effected fields down the page.
+ * The fields may overlap each other so we
+ * start at the bottom and use memmove.
+ */
+ newmeta->root = oldmeta->root;
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->minkey = oldmeta->minkey;
+ newmeta->maxkey = oldmeta->maxkey;
+ memmove(newmeta->dbmeta.uid,
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
+ newmeta->dbmeta.record_count = 0;
+ newmeta->dbmeta.key_count = 0;
+ ZERO_LSN(newmeta->dbmeta.unused3);
+
+ /* Set the version number. */
+ newmeta->dbmeta.version = 8;
+
+ /* Upgrade the flags: the caller passes DB_DUPSORT in "flags". */
+ if (LF_ISSET(DB_DUPSORT))
+ F_SET(&newmeta->dbmeta, BTM_DUPSORT);
+
+ /* Tell the caller the page was modified and must be written back. */
+ *dirtyp = 1;
+ return (0);
+}
+
+/*
+ * __bam_31_lbtree --
+ * Upgrade the database btree leaf pages.
+ *
+ * PUBLIC: int __bam_31_lbtree
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__bam_31_lbtree(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ BKEYDATA *bk;
+ db_pgno_t pgno;
+ db_indx_t indx;
+ int ret;
+
+ ret = 0;
+ /* Walk the data slots only: start at O_INDX, step by P_INDX. */
+ for (indx = O_INDX; indx < NUM_ENT(h); indx += P_INDX) {
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_DUPLICATE) {
+ pgno = GET_BOVERFLOW(dbp, h, indx)->pgno;
+ if ((ret = __db_31_offdup(dbp, real_name, fhp,
+ LF_ISSET(DB_DUPSORT) ? 1 : 0, &pgno)) != 0)
+ break;
+ /* Only dirty the page if the off-page root moved. */
+ if (pgno != GET_BOVERFLOW(dbp, h, indx)->pgno) {
+ *dirtyp = 1;
+ GET_BOVERFLOW(dbp, h, indx)->pgno = pgno;
+ }
+ }
+ }
+
+ return (ret);
+}
diff --git a/storage/bdb/btree/bt_verify.c b/storage/bdb/btree/bt_verify.c
new file mode 100644
index 00000000000..0cf8a47e476
--- /dev/null
+++ b/storage/bdb/btree/bt_verify.c
@@ -0,0 +1,2387 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: bt_verify.c,v 1.76 2002/07/03 19:03:51 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: bt_verify.c,v 1.76 2002/07/03 19:03:51 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/btree.h"
+
+static int __bam_safe_getdata __P((DB *, PAGE *, u_int32_t, int, DBT *, int *));
+static int __bam_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ db_indx_t *, u_int32_t));
+static int __bam_vrfy_treeorder __P((DB *, db_pgno_t, PAGE *, BINTERNAL *,
+ BINTERNAL *, int (*)(DB *, const DBT *, const DBT *), u_int32_t));
+static int __ram_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ db_indx_t *, u_int32_t));
+
+#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE)
+
+/*
+ * __bam_vrfy_meta --
+ * Verify the btree-specific part of a metadata page.
+ *
+ * PUBLIC: int __bam_vrfy_meta __P((DB *, VRFY_DBINFO *, BTMETA *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ BTMETA *meta;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, t_ret, ret;
+ db_indx_t ovflsize;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /*
+ * isbad records recoverable corruption (reported as DB_VERIFY_BAD
+ * at the end); hard errors jump to err and are returned as-is.
+ */
+ isbad = 0;
+
+ /*
+ * If VRFY_INCOMPLETE is not set, then we didn't come through
+ * __db_vrfy_pagezero and didn't incompletely
+ * check this page--we haven't checked it at all.
+ * Thus we need to call __db_vrfy_meta and check the common fields.
+ *
+ * If VRFY_INCOMPLETE is set, we've already done all the same work
+ * in __db_vrfy_pagezero, so skip the check.
+ */
+ if (!F_ISSET(pip, VRFY_INCOMPLETE) &&
+ (ret = __db_vrfy_meta(dbp, vdp, &meta->dbmeta, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /* bt_minkey: must be >= 2; must produce sensible ovflsize */
+
+ /* avoid division by zero */
+ ovflsize = meta->minkey > 0 ?
+ B_MINKEY_TO_OVFLSIZE(dbp, meta->minkey, dbp->pgsize) : 0;
+
+ if (meta->minkey < 2 ||
+ ovflsize > B_MINKEY_TO_OVFLSIZE(dbp, DEFMINKEYPAGE, dbp->pgsize)) {
+ pip->bt_minkey = 0;
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: nonsensical bt_minkey value %lu on metadata page",
+ (u_long)pgno, (u_long)meta->minkey));
+ } else
+ pip->bt_minkey = meta->minkey;
+
+ /* bt_maxkey: no constraints (XXX: right?) */
+ pip->bt_maxkey = meta->maxkey;
+
+ /* re_len: no constraints on this (may be zero or huge--we make rope) */
+ pip->re_len = meta->re_len;
+
+ /*
+ * The root must not be current page or 0 and it must be within
+ * database. If this metadata page is the master meta data page
+ * of the file, then the root page had better be page 1.
+ */
+ pip->root = 0;
+ if (meta->root == PGNO_INVALID ||
+ meta->root == pgno || !IS_VALID_PGNO(meta->root) ||
+ (pgno == PGNO_BASE_MD && meta->root != 1)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: nonsensical root page %lu on metadata page",
+ (u_long)pgno, (u_long)meta->root));
+ } else
+ pip->root = meta->root;
+
+ /* Flags. */
+ if (F_ISSET(&meta->dbmeta, BTM_RENUMBER))
+ F_SET(pip, VRFY_IS_RRECNO);
+
+ if (F_ISSET(&meta->dbmeta, BTM_SUBDB)) {
+ /*
+ * If this is a master db meta page, it had better not have
+ * duplicates.
+ */
+ if (F_ISSET(&meta->dbmeta, BTM_DUP) && pgno == PGNO_BASE_MD) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+"Page %lu: Btree metadata page has both duplicates and multiple databases",
+ (u_long)pgno));
+ }
+ F_SET(pip, VRFY_HAS_SUBDBS);
+ }
+
+ /* Mirror the metadata flags into the page-info flags for later checks. */
+ if (F_ISSET(&meta->dbmeta, BTM_DUP))
+ F_SET(pip, VRFY_HAS_DUPS);
+ if (F_ISSET(&meta->dbmeta, BTM_DUPSORT))
+ F_SET(pip, VRFY_HAS_DUPSORT);
+ if (F_ISSET(&meta->dbmeta, BTM_RECNUM))
+ F_SET(pip, VRFY_HAS_RECNUMS);
+ if (F_ISSET(pip, VRFY_HAS_RECNUMS) && F_ISSET(pip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: Btree metadata page illegally has both recnums and dups",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ if (F_ISSET(&meta->dbmeta, BTM_RECNO)) {
+ F_SET(pip, VRFY_IS_RECNO);
+ /*
+ * NOTE(review): mutates the handle's type in place,
+ * presumably so subsequent checks treat this database as
+ * a recno tree -- confirm against callers.
+ */
+ dbp->type = DB_RECNO;
+ } else if (F_ISSET(pip, VRFY_IS_RRECNO)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: metadata page has renumber flag set but is not recno",
+ (u_long)pgno));
+ }
+
+ if (F_ISSET(pip, VRFY_IS_RECNO) && F_ISSET(pip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: recno metadata page specifies duplicates",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ if (F_ISSET(&meta->dbmeta, BTM_FIXEDLEN))
+ F_SET(pip, VRFY_IS_FIXEDLEN);
+ else if (pip->re_len > 0) {
+ /*
+ * It's wrong to have an re_len if it's not a fixed-length
+ * database
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: re_len of %lu in non-fixed-length database",
+ (u_long)pgno, (u_long)pip->re_len));
+ }
+
+ /*
+ * We do not check that the rest of the page is 0, because it may
+ * not be and may still be correct.
+ */
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ram_vrfy_leaf --
+ * Verify a recno leaf page.
+ *
+ * PUBLIC: int __ram_vrfy_leaf __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ram_vrfy_leaf(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ BKEYDATA *bk;
+ VRFY_PAGEINFO *pip;
+ db_indx_t i;
+ int ret, t_ret, isbad;
+ u_int32_t re_len_guess, len;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* Reject any caller-supplied flags outside the supported set. */
+ if ((ret = __db_fchk(dbp->dbenv,
+ "__ram_vrfy_leaf", flags, OKFLAGS)) != 0)
+ goto err;
+
+ if (TYPE(h) != P_LRECNO) {
+ /* We should not have been called. */
+ TYPE_ERR_PRINT(dbp->dbenv, "__ram_vrfy_leaf", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Verify (and, if relevant, save off) page fields common to
+ * all PAGEs.
+ */
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * Verify inp[]. Return immediately if it returns DB_VERIFY_BAD;
+ * further checks are dangerous.
+ */
+ if ((ret = __bam_vrfy_inp(dbp,
+ vdp, h, pgno, &pip->entries, flags)) != 0)
+ goto err;
+
+ if (F_ISSET(pip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: Recno database has dups", (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Walk through inp and see if the lengths of all the records are the
+ * same--if so, this may be a fixed-length database, and we want to
+ * save off this value. We know inp to be safe if we've gotten this
+ * far.
+ */
+ re_len_guess = 0;
+ for (i = 0; i < NUM_ENT(h); i++) {
+ bk = GET_BKEYDATA(dbp, h, i);
+ /* KEYEMPTY. Go on. */
+ if (B_DISSET(bk->type))
+ continue;
+ /*
+ * NOTE(review): compares the raw type byte, not B_TYPE();
+ * items with the delete bit set were skipped above, so the
+ * raw compare should be equivalent here.
+ */
+ if (bk->type == B_OVERFLOW)
+ len = ((BOVERFLOW *)bk)->tlen;
+ else if (bk->type == B_KEYDATA)
+ len = bk->len;
+ else {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: nonsensical type for item %lu",
+ (u_long)pgno, (u_long)i));
+ continue;
+ }
+ if (re_len_guess == 0)
+ re_len_guess = len;
+
+ /*
+ * Is this item's len the same as the last one's? If not,
+ * reset to 0 and break--we don't have a single re_len.
+ * Otherwise, go on to the next item.
+ */
+ if (re_len_guess != len) {
+ re_len_guess = 0;
+ break;
+ }
+ }
+ /* 0 here means the records had varying lengths (or the page is empty). */
+ pip->re_len = re_len_guess;
+
+ /* Save off record count. */
+ pip->rec_cnt = NUM_ENT(h);
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy --
+ * Verify a btree leaf or internal page.
+ *
+ * PUBLIC: int __bam_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_vrfy(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret, isbad;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* This routine handles only btree internal/leaf and sorted-dup pages. */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ break;
+ default:
+ TYPE_ERR_PRINT(dbp->dbenv, "__bam_vrfy", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Verify (and, if relevant, save off) page fields common to
+ * all PAGEs.
+ */
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * The record count is, on internal pages, stored in an overloaded
+ * next_pgno field. Save it off; we'll verify it when we check
+ * overall database structure. We could overload the field
+ * in VRFY_PAGEINFO, too, but this seems gross, and space
+ * is not at such a premium.
+ */
+ pip->rec_cnt = RE_NREC(h);
+
+ /*
+ * Verify inp[]. If the inp check reports DB_VERIFY_BAD, the item
+ * array is suspect and the item-order check below is skipped.
+ */
+ if (TYPE(h) == P_IRECNO) {
+ if ((ret = __ram_vrfy_inp(dbp,
+ vdp, h, pgno, &pip->entries, flags)) != 0)
+ goto err;
+ } else if ((ret = __bam_vrfy_inp(dbp,
+ vdp, h, pgno, &pip->entries, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ /* Reached only in the DB_VERIFY_BAD case. */
+ EPRINT((dbp->dbenv,
+ "Page %lu: item order check unsafe: skipping",
+ (u_long)pgno));
+ } else if (!LF_ISSET(DB_NOORDERCHK) && (ret =
+ __bam_vrfy_itemorder(dbp, vdp, h, pgno, 0, 0, 0, flags)) != 0) {
+ /*
+ * We know that the elements of inp are reasonable.
+ *
+ * Check that elements fall in the proper order.
+ */
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ram_vrfy_inp --
+ * Verify that all entries in a P_IRECNO inp[] array are reasonable,
+ * and count them. Note that P_LRECNO uses __bam_vrfy_inp;
+ * P_IRECNOs are a special, and simpler, case, since they have
+ * RINTERNALs rather than BKEYDATA/BINTERNALs.
+ */
+static int
+__ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ db_indx_t *nentriesp;
+ u_int32_t flags;
+{
+ RINTERNAL *ri;
+ VRFY_CHILDINFO child;
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret, isbad;
+ u_int32_t himark, i, offset, nentries;
+ db_indx_t *inp;
+ u_int8_t *pagelayout, *p;
+
+ isbad = 0;
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ nentries = 0;
+ pagelayout = NULL;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ if (TYPE(h) != P_IRECNO) {
+ TYPE_ERR_PRINT(dbp->dbenv, "__ram_vrfy_inp", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* himark tracks the lowest item offset seen; it is what HOFFSET should be. */
+ himark = dbp->pgsize;
+ /*
+ * pagelayout is a per-byte map of the page; a 1 marks the start of
+ * an RINTERNAL, letting us catch double references and gaps below.
+ */
+ if ((ret =
+ __os_malloc(dbp->dbenv, dbp->pgsize, &pagelayout)) != 0)
+ goto err;
+ memset(pagelayout, 0, dbp->pgsize);
+ inp = P_INP(dbp, h);
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if ((u_int8_t *)inp + i >= (u_int8_t *)h + himark) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: entries listing %lu overlaps data",
+ (u_long)pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ offset = inp[i];
+ /*
+ * Check that the item offset is reasonable: it points
+ * somewhere after the inp array and before the end of the
+ * page.
+ */
+ if (offset <= (u_int32_t)((u_int8_t *)inp + i -
+ (u_int8_t *)h) ||
+ offset > (u_int32_t)(dbp->pgsize - RINTERNAL_SIZE)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad offset %lu at index %lu",
+ (u_long)pgno, (u_long)offset, (u_long)i));
+ continue;
+ }
+
+ /* Update the high-water mark (what HOFFSET should be) */
+ if (offset < himark)
+ himark = offset;
+
+ nentries++;
+
+ /* Make sure this RINTERNAL is not multiply referenced. */
+ ri = GET_RINTERNAL(dbp, h, i);
+ if (pagelayout[offset] == 0) {
+ pagelayout[offset] = 1;
+ /* Record the child page for the later structure check. */
+ child.pgno = ri->pgno;
+ child.type = V_RECNO;
+ child.nrecs = ri->nrecs;
+ if ((ret = __db_vrfy_childput(vdp, pgno, &child)) != 0)
+ goto err;
+ } else {
+ EPRINT((dbp->dbenv,
+ "Page %lu: RINTERNAL structure at offset %lu referenced twice",
+ (u_long)pgno, (u_long)offset));
+ isbad = 1;
+ }
+ }
+
+ /*
+ * RINTERNALs are fixed-size, so the region from himark to the end
+ * of the page must be exactly covered by marked starts.
+ */
+ for (p = pagelayout + himark;
+ p < pagelayout + dbp->pgsize;
+ p += RINTERNAL_SIZE)
+ if (*p != 1) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: gap between items at offset %lu",
+ (u_long)pgno, (u_long)(p - pagelayout)));
+ isbad = 1;
+ }
+
+ if ((db_indx_t)himark != HOFFSET(h)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad HOFFSET %lu, appears to be %lu",
+ (u_long)pgno, (u_long)(HOFFSET(h)), (u_long)himark));
+ isbad = 1;
+ }
+
+ *nentriesp = nentries;
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pagelayout != NULL)
+ __os_free(dbp->dbenv, pagelayout);
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy_inp --
+ * Verify that all entries in inp[] array are reasonable;
+ * count them.
+ */
+static int
+__bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ db_indx_t *nentriesp;
+ u_int32_t flags;
+{
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ VRFY_CHILDINFO child;
+ VRFY_PAGEINFO *pip;
+ int isbad, initem, isdupitem, ret, t_ret;
+ u_int32_t himark, offset; /* These would be db_indx_ts but for algnmt.*/
+ u_int32_t i, endoff, nentries;
+ u_int8_t *pagelayout;
+
+ isbad = isdupitem = 0;
+ nentries = 0;
+ /*
+ * NULL the layout map now so the error path can tell whether it
+ * was allocated; it was previously leaked by any "goto err" taken
+ * between its allocation and the post-loop free.
+ */
+ pagelayout = NULL;
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ break;
+ default:
+ /*
+ * In the salvager, we might call this from a page which
+ * we merely suspect is a btree page. Otherwise, it
+ * shouldn't get called--if it is, that's a verifier bug.
+ */
+ if (LF_ISSET(DB_SALVAGE))
+ break;
+ TYPE_ERR_PRINT(dbp->dbenv, "__bam_vrfy_inp", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Loop through inp[], the array of items, until we either
+ * run out of entries or collide with the data. Keep track
+ * of h_offset in himark.
+ *
+ * For each element in inp[i], make sure it references a region
+ * that starts after the end of the inp array (as defined by
+ * NUM_ENT(h)), ends before the beginning of the page, doesn't
+ * overlap any other regions, and doesn't have a gap between
+ * it and the region immediately after it.
+ */
+ himark = dbp->pgsize;
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &pagelayout)) != 0)
+ goto err;
+ memset(pagelayout, 0, dbp->pgsize);
+ for (i = 0; i < NUM_ENT(h); i++) {
+ switch (ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 1, flags, &himark, &offset)) {
+ case 0:
+ break;
+ case DB_VERIFY_BAD:
+ isbad = 1;
+ continue;
+ case DB_VERIFY_FATAL:
+ isbad = 1;
+ goto err;
+ default:
+ /*
+ * NOTE(review): any other nonzero ret falls
+ * through, and is returned after the scan.
+ */
+ DB_ASSERT(ret != 0);
+ break;
+ }
+
+ /*
+ * We now have a plausible beginning for the item, and we know
+ * its length is safe.
+ *
+ * Mark the beginning and end in pagelayout so we can make sure
+ * items have no overlaps or gaps.
+ */
+ bk = GET_BKEYDATA(dbp, h, i);
+#define ITEM_BEGIN 1
+#define ITEM_END 2
+ if (pagelayout[offset] == 0)
+ pagelayout[offset] = ITEM_BEGIN;
+ else if (pagelayout[offset] == ITEM_BEGIN) {
+ /*
+ * Having two inp entries that point at the same patch
+ * of page is legal if and only if the page is
+ * a btree leaf and they're onpage duplicate keys--
+ * that is, if (i % P_INDX) == 0.
+ */
+ if ((i % P_INDX == 0) && (TYPE(h) == P_LBTREE)) {
+ /* Flag for later. */
+ F_SET(pip, VRFY_HAS_DUPS);
+
+ /* Bump up nentries so we don't undercount. */
+ nentries++;
+
+ /*
+ * We'll check to make sure the end is
+ * equal, too.
+ */
+ isdupitem = 1;
+ } else {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicated item %lu",
+ (u_long)pgno, (u_long)i));
+ }
+ }
+
+ /*
+ * Mark the end. Its location varies with the page type
+ * and the item type.
+ *
+ * If the end already has a sign other than 0, do nothing--
+ * it's an overlap that we'll catch later.
+ */
+ switch(B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ if (TYPE(h) == P_IBTREE)
+ /* It's a BINTERNAL. */
+ endoff = offset + BINTERNAL_SIZE(bk->len) - 1;
+ else
+ endoff = offset + BKEYDATA_SIZE(bk->len) - 1;
+ break;
+ case B_DUPLICATE:
+ /*
+ * Flag that we have dups; we'll check whether
+ * that's okay during the structure check.
+ */
+ F_SET(pip, VRFY_HAS_DUPS);
+ /* FALLTHROUGH */
+ case B_OVERFLOW:
+ /*
+ * Overflow entries on internal pages are stored
+ * as the _data_ of a BINTERNAL; overflow entries
+ * on leaf pages are stored as the entire entry.
+ */
+ endoff = offset +
+ ((TYPE(h) == P_IBTREE) ?
+ BINTERNAL_SIZE(BOVERFLOW_SIZE) :
+ BOVERFLOW_SIZE) - 1;
+ break;
+ default:
+ /*
+ * We'll complain later; for now, just mark
+ * a minimum.
+ */
+ endoff = offset + BKEYDATA_SIZE(0) - 1;
+ break;
+ }
+
+ /*
+ * If this is an onpage duplicate key we've seen before,
+ * the end had better coincide too.
+ */
+ if (isdupitem && pagelayout[endoff] != ITEM_END) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicated item %lu",
+ (u_long)pgno, (u_long)i));
+ isbad = 1;
+ } else if (pagelayout[endoff] == 0)
+ pagelayout[endoff] = ITEM_END;
+ isdupitem = 0;
+
+ /*
+ * There should be no deleted items in a quiescent tree,
+ * except in recno.
+ */
+ if (B_DISSET(bk->type) && TYPE(h) != P_LRECNO) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu marked deleted",
+ (u_long)pgno, (u_long)i));
+ }
+
+ /*
+ * Check the type and such of bk--make sure it's reasonable
+ * for the pagetype.
+ */
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ /*
+ * This is a normal, non-overflow BKEYDATA or BINTERNAL.
+ * The only thing to check is the len, and that's
+ * already been done.
+ */
+ break;
+ case B_DUPLICATE:
+ if (TYPE(h) == P_IBTREE) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate page referenced by internal btree page at item %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ } else if (TYPE(h) == P_LRECNO) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate page referenced by recno page at item %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ }
+ /* FALLTHROUGH */
+ case B_OVERFLOW:
+ bo = (TYPE(h) == P_IBTREE) ?
+ (BOVERFLOW *)(((BINTERNAL *)bk)->data) :
+ (BOVERFLOW *)bk;
+
+ if (B_TYPE(bk->type) == B_OVERFLOW)
+ /* Make sure tlen is reasonable. */
+ if (bo->tlen > dbp->pgsize * vdp->last_pgno) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: impossible tlen %lu, item %lu",
+ (u_long)pgno,
+ (u_long)bo->tlen, (u_long)i));
+ /* Don't save as a child. */
+ break;
+ }
+
+ if (!IS_VALID_PGNO(bo->pgno) || bo->pgno == pgno ||
+ bo->pgno == PGNO_INVALID) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: offpage item %lu has bad pgno %lu",
+ (u_long)pgno, (u_long)i, (u_long)bo->pgno));
+ /* Don't save as a child. */
+ break;
+ }
+
+ child.pgno = bo->pgno;
+ child.type = (B_TYPE(bk->type) == B_OVERFLOW ?
+ V_OVERFLOW : V_DUPLICATE);
+ child.tlen = bo->tlen;
+ if ((ret = __db_vrfy_childput(vdp, pgno, &child)) != 0)
+ goto err;
+ break;
+ default:
+ isbad = 1;
+ /*
+ * Pass the offending type so the third %lu has an
+ * argument; it was missing, which is undefined
+ * behavior in the varargs format call.
+ */
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu of invalid type %lu",
+ (u_long)pgno, (u_long)i,
+ (u_long)B_TYPE(bk->type)));
+ break;
+ }
+ }
+
+ /*
+ * Now, loop through and make sure the items are contiguous and
+ * non-overlapping.
+ */
+ initem = 0;
+ for (i = himark; i < dbp->pgsize; i++)
+ if (initem == 0)
+ switch (pagelayout[i]) {
+ case 0:
+ /* May be just for alignment. */
+ if (i != ALIGN(i, sizeof(u_int32_t)))
+ continue;
+
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: gap between items at offset %lu",
+ (u_long)pgno, (u_long)i));
+ /* Find the end of the gap */
+ for ( ; pagelayout[i + 1] == 0 &&
+ (size_t)(i + 1) < dbp->pgsize; i++)
+ ;
+ break;
+ case ITEM_BEGIN:
+ /* We've found an item. Check its alignment. */
+ if (i != ALIGN(i, sizeof(u_int32_t))) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: offset %lu unaligned",
+ (u_long)pgno, (u_long)i));
+ }
+ initem = 1;
+ nentries++;
+ break;
+ case ITEM_END:
+ /*
+ * We've hit the end of an item even though
+ * we don't think we're in one; must
+ * be an overlap.
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: overlapping items at offset %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ default:
+ /* Should be impossible. */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+ else
+ switch (pagelayout[i]) {
+ case 0:
+ /* In the middle of an item somewhere. Okay. */
+ break;
+ case ITEM_END:
+ /* End of an item; switch to out-of-item mode.*/
+ initem = 0;
+ break;
+ case ITEM_BEGIN:
+ /*
+ * Hit a second item beginning without an
+ * end. Overlap.
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: overlapping items at offset %lu",
+ (u_long)pgno, (u_long)i));
+ break;
+ }
+
+ (void)__os_free(dbp->dbenv, pagelayout);
+ pagelayout = NULL;
+
+ /* Verify HOFFSET. */
+ if ((db_indx_t)himark != HOFFSET(h)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad HOFFSET %lu, appears to be %lu",
+ (u_long)pgno, (u_long)HOFFSET(h), (u_long)himark));
+ isbad = 1;
+ }
+
+err: if (nentriesp != NULL)
+ *nentriesp = nentries;
+
+ /* Free the layout map if an error path skipped the free above. */
+ if (pagelayout != NULL)
+ __os_free(dbp->dbenv, pagelayout);
+
+ if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy_itemorder --
+ * Make sure the items on a page sort correctly.
+ *
+ * Assumes that NUM_ENT(h) and inp[0]..inp[NUM_ENT(h) - 1] are
+ * reasonable; be sure that __bam_vrfy_inp has been called first.
+ *
+ * If ovflok is set, it also assumes that overflow page chains
+ * hanging off the current page have been sanity-checked, and so we
+ * can use __bam_cmp to verify their ordering. If it is not set,
+ * and we run into an overflow page, carp and return DB_VERIFY_BAD;
+ * we shouldn't be called if any exist.
+ *
+ * PUBLIC: int __bam_vrfy_itemorder __P((DB *, VRFY_DBINFO *, PAGE *,
+ * PUBLIC: db_pgno_t, u_int32_t, int, int, u_int32_t));
+ */
+int
+__bam_vrfy_itemorder(dbp, vdp, h, pgno, nentries, ovflok, hasdups, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t nentries;
+ int ovflok, hasdups;
+ u_int32_t flags;
+{
+ DBT dbta, dbtb, dup_1, dup_2, *p1, *p2, *tmp;
+ BTREE *bt;
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ VRFY_PAGEINFO *pip;
+ db_indx_t i;
+ int cmp, freedup_1, freedup_2, isbad, ret, t_ret;
+ int (*dupfunc) __P((DB *, const DBT *, const DBT *));
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ void *buf1, *buf2, *tmpbuf;
+
+ /*
+ * We need to work in the ORDERCHKONLY environment where we might
+ * not have a pip, but we also may need to work in contexts where
+ * NUM_ENT isn't safe.
+ */
+ if (vdp != NULL) {
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ nentries = pip->entries;
+ } else
+ pip = NULL;
+
+ ret = isbad = 0;
+ bo = NULL; /* Shut up compiler. */
+
+ memset(&dbta, 0, sizeof(DBT));
+ F_SET(&dbta, DB_DBT_REALLOC);
+
+ memset(&dbtb, 0, sizeof(DBT));
+ F_SET(&dbtb, DB_DBT_REALLOC);
+
+ buf1 = buf2 = NULL;
+
+ DB_ASSERT(!LF_ISSET(DB_NOORDERCHK));
+
+ /* Pick the key comparator; P_LDUP pages sort by the dup comparator. */
+ dupfunc = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+ if (TYPE(h) == P_LDUP)
+ func = dupfunc;
+ else {
+ func = __bam_defcmp;
+ if (dbp->bt_internal != NULL) {
+ bt = (BTREE *)dbp->bt_internal;
+ if (bt->bt_compare != NULL)
+ func = bt->bt_compare;
+ }
+ }
+
+ /*
+ * We alternate our use of dbta and dbtb so that we can walk
+ * through the page key-by-key without copying a dbt twice.
+ * p1 is always the dbt for index i - 1, and p2 for index i.
+ */
+ p1 = &dbta;
+ p2 = &dbtb;
+
+ /*
+ * Loop through the entries. nentries ought to contain the
+ * actual count, and so is a safe way to terminate the loop; whether
+ * we inc. by one or two depends on whether we're a leaf page--
+ * on a leaf page, we care only about keys. On internal pages
+ * and LDUP pages, we want to check the order of all entries.
+ *
+ * Note that on IBTREE pages, we start with item 1, since item
+ * 0 doesn't get looked at by __bam_cmp.
+ */
+ for (i = (TYPE(h) == P_IBTREE) ? 1 : 0; i < nentries;
+ i += (TYPE(h) == P_LBTREE) ? P_INDX : O_INDX) {
+ /*
+ * Put key i-1, now in p2, into p1, by swapping DBTs and bufs.
+ */
+ tmp = p1;
+ p1 = p2;
+ p2 = tmp;
+ tmpbuf = buf1;
+ buf1 = buf2;
+ buf2 = tmpbuf;
+
+ /*
+ * Get key i into p2.
+ */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ bi = GET_BINTERNAL(dbp, h, i);
+ if (B_TYPE(bi->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)(bi->data);
+ goto overflow;
+ } else {
+ p2->data = bi->data;
+ p2->size = bi->len;
+ }
+
+ /*
+ * The leftmost key on an internal page must be
+ * len 0, since it's just a placeholder and
+ * automatically sorts less than all keys.
+ *
+ * XXX
+ * This criterion does not currently hold!
+ * See todo list item #1686. Meanwhile, it's harmless
+ * to just not check for it.
+ */
+#if 0
+ if (i == 0 && bi->len != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: lowest key on internal page of nonzero length",
+ (u_long)pgno));
+ }
+#endif
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ bk = GET_BKEYDATA(dbp, h, i);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ goto overflow;
+ } else {
+ p2->data = bk->data;
+ p2->size = bk->len;
+ }
+ break;
+ default:
+ /*
+ * This means our caller screwed up and sent us
+ * an inappropriate page.
+ */
+ TYPE_ERR_PRINT(dbp->dbenv,
+ "__bam_vrfy_itemorder", pgno, TYPE(h))
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * This block is never entered sequentially; it is reached
+ * only by the "goto overflow" jumps above.
+ */
+ if (0) {
+ /*
+ * If ovflok != 1, we can't safely go chasing
+ * overflow pages with the normal routines now;
+ * they might be unsafe or nonexistent. Mark this
+ * page as incomplete and return.
+ *
+ * Note that we don't need to worry about freeing
+ * buffers, since they can't have been allocated
+ * if overflow items are unsafe.
+ */
+overflow: if (!ovflok) {
+ /*
+ * NOTE(review): pip is NULL in ORDERCHKONLY
+ * mode (vdp == NULL); this path appears to
+ * assume ovflok is set in that mode --
+ * confirm against callers.
+ */
+ F_SET(pip, VRFY_INCOMPLETE);
+ goto err;
+ }
+
+ /*
+ * Overflow items are safe to chase. Do so.
+ * Fetch the overflow item into p2->data,
+ * NULLing it or reallocing it as appropriate.
+ *
+ * (We set p2->data to buf2 before the call
+ * so we're sure to realloc if we can and if p2
+ * was just pointing at a non-overflow item.)
+ */
+ p2->data = buf2;
+ if ((ret = __db_goff(dbp,
+ p2, bo->tlen, bo->pgno, NULL, NULL)) != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: error %lu in fetching overflow item %lu",
+ (u_long)pgno, (u_long)ret, (u_long)i));
+ }
+ /* In case it got realloc'ed and thus changed. */
+ buf2 = p2->data;
+ }
+
+ /* Compare with the last key. */
+ if (p1->data != NULL && p2->data != NULL) {
+ cmp = func(dbp, p1, p2);
+
+ /* comparison succeeded */
+ if (cmp > 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: out-of-order key at entry %lu",
+ (u_long)pgno, (u_long)i));
+ /* proceed */
+ } else if (cmp == 0) {
+ /*
+ * If they compared equally, this
+ * had better be a (sub)database with dups.
+ * Mark it so we can check during the
+ * structure check.
+ */
+ if (pip != NULL)
+ F_SET(pip, VRFY_HAS_DUPS);
+ else if (hasdups == 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: database with no duplicates has duplicated keys",
+ (u_long)pgno));
+ }
+
+ /*
+ * If we're a btree leaf, check to see
+ * if the data items of these on-page dups are
+ * in sorted order. If not, flag this, so
+ * that we can make sure during the
+ * structure checks that the DUPSORT flag
+ * is unset.
+ *
+ * At this point i points to a duplicate key.
+ * Compare the datum before it (same key)
+ * to the datum after it, i.e. i-1 to i+1.
+ */
+ if (TYPE(h) == P_LBTREE) {
+ /*
+ * Unsafe; continue and we'll pick
+ * up the bogus nentries later.
+ */
+ if (i + 1 >= (db_indx_t)nentries)
+ continue;
+
+ /*
+ * We don't bother with clever memory
+ * management with on-page dups,
+ * as it's only really a big win
+ * in the overflow case, and overflow
+ * dups are probably (?) rare.
+ */
+ if (((ret = __bam_safe_getdata(dbp,
+ h, i - 1, ovflok, &dup_1,
+ &freedup_1)) != 0) ||
+ ((ret = __bam_safe_getdata(dbp,
+ h, i + 1, ovflok, &dup_2,
+ &freedup_2)) != 0))
+ goto err;
+
+ /*
+ * If either of the data are NULL,
+ * it's because they're overflows and
+ * it's not safe to chase them now.
+ * Mark an incomplete and return.
+ */
+ if (dup_1.data == NULL ||
+ dup_2.data == NULL) {
+ DB_ASSERT(!ovflok);
+ F_SET(pip, VRFY_INCOMPLETE);
+ goto err;
+ }
+
+ /*
+ * If the dups are out of order,
+ * flag this. It's not an error
+ * until we do the structure check
+ * and see whether DUPSORT is set.
+ */
+ if (dupfunc(dbp, &dup_1, &dup_2) > 0)
+ F_SET(pip, VRFY_DUPS_UNSORTED);
+
+ if (freedup_1)
+ __os_ufree(dbp->dbenv,
+ dup_1.data);
+ if (freedup_2)
+ __os_ufree(dbp->dbenv,
+ dup_2.data);
+ }
+ }
+ }
+ }
+
+err: if (pip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0) && ret == 0)
+ ret = t_ret;
+
+ if (buf1 != NULL)
+ __os_ufree(dbp->dbenv, buf1);
+ if (buf2 != NULL)
+ __os_ufree(dbp->dbenv, buf2);
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy_structure --
+ * Verify the tree structure of a btree database (including the master
+ * database containing subdbs).
+ *
+ * PUBLIC: int __bam_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_vrfy_structure(dbp, vdp, meta_pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta_pgno;
+ u_int32_t flags;
+{
+ DB *pgset;
+ VRFY_PAGEINFO *mip, *rip;
+ db_pgno_t root, p;
+ int t_ret, ret;
+ u_int32_t nrecs, level, relen, stflags;
+
+ mip = rip = 0;
+ pgset = vdp->pgset;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &mip)) != 0)
+ return (ret);
+
+ /*
+ * Each metadata page may be referenced only once; a nonzero count
+ * in the page set means we have already seen it.
+ */
+ if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, (int *)&p)) != 0)
+ goto err;
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: btree metadata page observed twice",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0)
+ goto err;
+
+ /* mip->root is 0 if the metadata check found a nonsensical root. */
+ root = mip->root;
+
+ if (root == 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: btree metadata page has no root",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, root, &rip)) != 0)
+ goto err;
+
+ /* Walk the tree below the root with flags matching the metadata. */
+ switch (rip->type) {
+ case P_IBTREE:
+ case P_LBTREE:
+ stflags = flags | ST_TOPLEVEL;
+ if (F_ISSET(mip, VRFY_HAS_DUPS))
+ stflags |= ST_DUPOK;
+ if (F_ISSET(mip, VRFY_HAS_DUPSORT))
+ stflags |= ST_DUPSORT;
+ if (F_ISSET(mip, VRFY_HAS_RECNUMS))
+ stflags |= ST_RECNUM;
+ ret = __bam_vrfy_subtree(dbp,
+ vdp, root, NULL, NULL, stflags, NULL, NULL, NULL);
+ break;
+ case P_IRECNO:
+ case P_LRECNO:
+ stflags = flags | ST_RECNUM | ST_IS_RECNO | ST_TOPLEVEL;
+ if (mip->re_len > 0)
+ stflags |= ST_RELEN;
+ if ((ret = __bam_vrfy_subtree(dbp, vdp,
+ root, NULL, NULL, stflags, &level, &nrecs, &relen)) != 0)
+ goto err;
+ /*
+ * Even if mip->re_len > 0, re_len may come back zero if the
+ * tree is empty. It should be okay to just skip the check in
+ * this case, as if there are any non-deleted keys at all,
+ * that should never happen.
+ */
+ if (mip->re_len > 0 && relen > 0 && mip->re_len != relen) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: recno database has bad re_len %lu",
+ (u_long)meta_pgno, (u_long)relen));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ ret = 0;
+ break;
+ case P_LDUP:
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate tree referenced from metadata page",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ break;
+ default:
+ EPRINT((dbp->dbenv,
+ "Page %lu: btree root of incorrect type %lu on metadata page",
+ (u_long)meta_pgno, (u_long)rip->type));
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+err: if (mip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, mip)) != 0) && ret == 0)
+ ret = t_ret;
+ if (rip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, rip)) != 0) && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __bam_vrfy_subtree--
+ * Verify a subtree (or entire) btree with specified root.
+ *
+ * Note that this is public because it must be called to verify
+ * offpage dup trees, including from hash.
+ *
+ * PUBLIC: int __bam_vrfy_subtree __P((DB *, VRFY_DBINFO *, db_pgno_t, void *,
+ * PUBLIC: void *, u_int32_t, u_int32_t *, u_int32_t *, u_int32_t *));
+ */
+int
+__bam_vrfy_subtree(dbp,
+ vdp, pgno, l, r, flags, levelp, nrecsp, relenp)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ void *l, *r;
+ u_int32_t flags, *levelp, *nrecsp, *relenp;
+{
+ BINTERNAL *li, *ri, *lp, *rp;
+ DB *pgset;
+ DB_MPOOLFILE *mpf;
+ DBC *cc;
+ PAGE *h;
+ VRFY_CHILDINFO *child;
+ VRFY_PAGEINFO *pip;
+ db_indx_t i;
+ db_pgno_t next_pgno, prev_pgno;
+ db_recno_t child_nrecs, nrecs;
+ u_int32_t child_level, child_relen, level, relen, stflags;
+ u_int8_t leaf_type;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ int isbad, p, ret, t_ret, toplevel;
+
+ mpf = dbp->mpf;
+ ret = isbad = 0;
+ nrecs = 0;
+ h = NULL;
+ relen = 0;
+ leaf_type = P_INVALID;
+ next_pgno = prev_pgno = PGNO_INVALID;
+ rp = (BINTERNAL *)r;
+ lp = (BINTERNAL *)l;
+
+ /* Provide feedback on our progress to the application. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ cc = NULL;
+ level = pip->bt_level;
+
+ toplevel = LF_ISSET(ST_TOPLEVEL) ? 1 : 0;
+ LF_CLR(ST_TOPLEVEL);
+
+ /*
+ * If this is the root, initialize the vdp's prev- and next-pgno
+ * accounting.
+ *
+ * For each leaf page we hit, we'll want to make sure that
+ * vdp->prev_pgno is the same as pip->prev_pgno and vdp->next_pgno is
+ * our page number. Then, we'll set vdp->next_pgno to pip->next_pgno
+ * and vdp->prev_pgno to our page number, and the next leaf page in
+ * line should be able to do the same verification.
+ */
+ if (toplevel) {
+ /*
+ * Cache the values stored in the vdp so that if we're an
+ * auxiliary tree such as an off-page duplicate set, our
+ * caller's leaf page chain doesn't get lost.
+ */
+ prev_pgno = vdp->prev_pgno;
+ next_pgno = vdp->next_pgno;
+ leaf_type = vdp->leaf_type;
+ vdp->next_pgno = vdp->prev_pgno = PGNO_INVALID;
+ vdp->leaf_type = P_INVALID;
+ }
+
+ /*
+ * We are recursively descending a btree, starting from the root
+ * and working our way out to the leaves.
+ *
+ * There are four cases we need to deal with:
+ * 1. pgno is a recno leaf page. Any children are overflows.
+ * 2. pgno is a duplicate leaf page. Any children
+ * are overflow pages; traverse them, and then return
+ * level and nrecs.
+ * 3. pgno is an ordinary leaf page. Check whether dups are
+ * allowed, and if so, traverse any off-page dups or
+ * overflows. Then return nrecs and level.
+ * 4. pgno is a recno internal page. Recursively check any
+ * child pages, making sure their levels are one lower
+ * and their nrecs sum to ours.
+ * 5. pgno is a btree internal page. Same as #4, plus we
+ * must verify that for each pair of BINTERNAL entries
+ * N and N+1, the leftmost item on N's child sorts
+ * greater than N, and the rightmost item on N's child
+ * sorts less than N+1.
+ *
+ * Furthermore, in any sorted page type (P_LDUP, P_LBTREE, P_IBTREE),
+ * we need to verify the internal sort order is correct if,
+ * due to overflow items, we were not able to do so earlier.
+ */
+ switch (pip->type) {
+ case P_LRECNO:
+ case P_LDUP:
+ case P_LBTREE:
+ /*
+ * Cases 1, 2 and 3.
+ *
+ * We're some sort of leaf page; verify
+ * that our linked list of leaves is consistent.
+ */
+ if (vdp->leaf_type == P_INVALID) {
+ /*
+ * First leaf page. Set the type that all its
+ * successors should be, and verify that our prev_pgno
+ * is PGNO_INVALID.
+ */
+ vdp->leaf_type = pip->type;
+ if (pip->prev_pgno != PGNO_INVALID)
+ goto bad_prev;
+ } else {
+ /*
+ * Successor leaf page. Check our type, the previous
+ * page's next_pgno, and our prev_pgno.
+ */
+ if (pip->type != vdp->leaf_type) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: unexpected page type %lu found in leaf chain (expected %lu)",
+ (u_long)pip->pgno, (u_long)pip->type,
+ (u_long)vdp->leaf_type));
+ isbad = 1;
+ }
+ if (pip->pgno != vdp->next_pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: incorrect next_pgno %lu found in leaf chain (should be %lu)",
+ (u_long)vdp->prev_pgno,
+ (u_long)vdp->next_pgno, (u_long)pip->pgno));
+ isbad = 1;
+ }
+ if (pip->prev_pgno != vdp->prev_pgno) {
+bad_prev: EPRINT((dbp->dbenv,
+ "Page %lu: incorrect prev_pgno %lu found in leaf chain (should be %lu)",
+ (u_long)pip->pgno, (u_long)pip->prev_pgno,
+ (u_long)vdp->prev_pgno));
+ isbad = 1;
+ }
+ }
+ vdp->prev_pgno = pip->pgno;
+ vdp->next_pgno = pip->next_pgno;
+
+ /*
+ * Overflow pages are common to all three leaf types;
+ * traverse the child list, looking for overflows.
+ */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pgno, &child); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child))
+ if (child->type == V_OVERFLOW &&
+ (ret = __db_vrfy_ovfl_structure(dbp, vdp,
+ child->pgno, child->tlen,
+ flags | ST_OVFL_LEAF)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto done;
+ }
+
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /* Case 1 */
+ if (pip->type == P_LRECNO) {
+ if (!LF_ISSET(ST_IS_RECNO) &&
+ !(LF_ISSET(ST_DUPOK) && !LF_ISSET(ST_DUPSORT))) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: recno leaf page non-recno tree",
+ (u_long)pgno));
+ goto done;
+ }
+ goto leaf;
+ } else if (LF_ISSET(ST_IS_RECNO)) {
+ /*
+ * It's a non-recno leaf. Had better not be a recno
+ * subtree.
+ */
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: non-recno leaf page in recno tree",
+ (u_long)pgno));
+ goto done;
+ }
+
+ /* Case 2--no more work. */
+ if (pip->type == P_LDUP)
+ goto leaf;
+
+ /* Case 3 */
+
+ /* Check if we have any dups. */
+ if (F_ISSET(pip, VRFY_HAS_DUPS)) {
+ /* If dups aren't allowed in this btree, trouble. */
+ if (!LF_ISSET(ST_DUPOK)) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicates in non-dup btree",
+ (u_long)pgno));
+ } else {
+ /*
+ * We correctly have dups. If any are off-page,
+ * traverse those btrees recursively.
+ */
+ if ((ret =
+ __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pgno, &child);
+ ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child)) {
+ stflags = flags | ST_RECNUM | ST_DUPSET;
+ /* Skip any overflow entries. */
+ if (child->type == V_DUPLICATE) {
+ if ((ret = __db_vrfy_duptype(
+ dbp, vdp, child->pgno,
+ stflags)) != 0) {
+ isbad = 1;
+ /* Next child. */
+ continue;
+ }
+ if ((ret = __bam_vrfy_subtree(
+ dbp, vdp, child->pgno, NULL,
+ NULL, stflags | ST_TOPLEVEL,
+ NULL, NULL, NULL)) != 0) {
+ if (ret !=
+ DB_VERIFY_BAD)
+ goto err;
+ else
+ isbad = 1;
+ }
+ }
+ }
+
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /*
+ * If VRFY_DUPS_UNSORTED is set,
+ * ST_DUPSORT had better not be.
+ */
+ if (F_ISSET(pip, VRFY_DUPS_UNSORTED) &&
+ LF_ISSET(ST_DUPSORT)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: unsorted duplicate set in sorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+ }
+ }
+ goto leaf;
+ case P_IBTREE:
+ case P_IRECNO:
+ /* We handle these below. */
+ break;
+ default:
+ /*
+ * If a P_IBTREE or P_IRECNO contains a reference to an
+ * invalid page, we'll wind up here; handle it gracefully.
+ * Note that the code at the "done" label assumes that the
+ * current page is a btree/recno one of some sort; this
+ * is not the case here, so we goto err.
+ *
+ * If the page is entirely zeroed, its pip->type will be a lie
+ * (we assumed it was a hash page, as they're allowed to be
+ * zeroed); handle this case specially.
+ */
+ if (F_ISSET(pip, VRFY_IS_ALLZEROES))
+ ZEROPG_ERR_PRINT(dbp->dbenv,
+ pgno, "btree or recno page");
+ else
+ EPRINT((dbp->dbenv,
+ "Page %lu: btree or recno page is of inappropriate type %lu",
+ (u_long)pgno, (u_long)pip->type));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Cases 4 & 5: This is a btree or recno internal page. For each child,
+ * recurse, keeping a running count of nrecs and making sure the level
+ * is always reasonable.
+ */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pgno, &child); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child))
+ if (child->type == V_RECNO) {
+ if (pip->type != P_IRECNO) {
+ TYPE_ERR_PRINT(dbp->dbenv, "__bam_vrfy_subtree",
+ pgno, pip->type);
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+ if ((ret = __bam_vrfy_subtree(dbp, vdp, child->pgno,
+ NULL, NULL, flags, &child_level, &child_nrecs,
+ &child_relen)) != 0) {
+ if (ret != DB_VERIFY_BAD)
+ goto done;
+ else
+ isbad = 1;
+ }
+
+ if (LF_ISSET(ST_RELEN)) {
+ if (relen == 0)
+ relen = child_relen;
+ /*
+ * child_relen may be zero if the child subtree
+ * is empty.
+ */
+ else if (child_relen > 0 &&
+ relen != child_relen) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: recno page returned bad re_len %lu",
+ (u_long)child->pgno,
+ (u_long)child_relen));
+ }
+ if (relenp)
+ *relenp = relen;
+ }
+ if (LF_ISSET(ST_RECNUM))
+ nrecs += child_nrecs;
+ if (level != child_level + 1) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "Page %lu: recno level incorrect: got %lu, expected %lu",
+ (u_long)child->pgno, (u_long)child_level,
+ (u_long)(level - 1)));
+ }
+ } else if (child->type == V_OVERFLOW &&
+ (ret = __db_vrfy_ovfl_structure(dbp, vdp,
+ child->pgno, child->tlen, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto done;
+ }
+
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /* We're done with case 4. */
+ if (pip->type == P_IRECNO)
+ goto done;
+
+ /*
+ * Case 5. Btree internal pages.
+ * As described above, we need to iterate through all the
+ * items on the page and make sure that our children sort appropriately
+ * with respect to them.
+ *
+ * For each entry, li will be the "left-hand" key for the entry
+ * itself, which must sort lower than all entries on its child;
+ * ri will be the key to its right, which must sort greater.
+ */
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+ for (i = 0; i < pip->entries; i += O_INDX) {
+ li = GET_BINTERNAL(dbp, h, i);
+ ri = (i + O_INDX < pip->entries) ?
+ GET_BINTERNAL(dbp, h, i + O_INDX) : NULL;
+
+ /*
+ * The leftmost key is forcibly sorted less than all entries,
+ * so don't bother passing it.
+ */
+ if ((ret = __bam_vrfy_subtree(dbp, vdp, li->pgno,
+ i == 0 ? NULL : li, ri, flags, &child_level,
+ &child_nrecs, NULL)) != 0) {
+ if (ret != DB_VERIFY_BAD)
+ goto done;
+ else
+ isbad = 1;
+ }
+
+ if (LF_ISSET(ST_RECNUM)) {
+ /*
+ * Keep a running tally on the actual record count so
+ * we can return it to our parent (if we have one) or
+ * compare it to the NRECS field if we're a root page.
+ */
+ nrecs += child_nrecs;
+
+ /*
+ * Make sure the actual record count of the child
+ * is equal to the value in the BINTERNAL structure.
+ */
+ if (li->nrecs != child_nrecs) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu has incorrect record count of %lu, should be %lu",
+ (u_long)pgno, (u_long)i, (u_long)li->nrecs,
+ (u_long)child_nrecs));
+ }
+ }
+
+ if (level != child_level + 1) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: Btree level incorrect: got %lu, expected %lu",
+ (u_long)li->pgno,
+ (u_long)child_level, (u_long)(level - 1)));
+ }
+ }
+
+ /*
+ * The "leaf" target below is reachable only via goto from the leaf
+ * cases above; the if (0) guard keeps the internal-page code from
+ * falling through into it.
+ */
+ if (0) {
+leaf: level = LEAFLEVEL;
+ if (LF_ISSET(ST_RECNUM))
+ nrecs = pip->rec_cnt;
+
+ /* XXX
+ * We should verify that the record count on a leaf page
+ * is the sum of the number of keys and the number of
+ * records in its off-page dups. This requires looking
+ * at the page again, however, and it may all be changing
+ * soon, so for now we don't bother.
+ */
+
+ if (LF_ISSET(ST_RELEN) && relenp)
+ *relenp = pip->re_len;
+ }
+done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) {
+ /*
+ * During the page-by-page pass, item order verification was
+ * not finished due to the presence of overflow items. If
+ * isbad == 0, though, it's now safe to do so, as we've
+ * traversed any child overflow pages. Do it.
+ */
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+ if ((ret = __bam_vrfy_itemorder(dbp,
+ vdp, h, pgno, 0, 1, 0, flags)) != 0)
+ goto err;
+ F_CLR(pip, VRFY_INCOMPLETE);
+ }
+
+ /*
+ * It's possible to get to this point with a page that has no
+ * items, but without having detected any sort of failure yet.
+ * Having zero items is legal if it's a leaf--it may be the
+ * root page in an empty tree, or the tree may have been
+ * modified with the DB_REVSPLITOFF flag set (there's no way
+ * to tell from what's on disk). For an internal page,
+ * though, having no items is a problem (all internal pages
+ * must have children).
+ */
+ if (isbad == 0 && ret == 0) {
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ if (NUM_ENT(h) == 0 && ISINTERNAL(h)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: internal page is empty and should not be",
+ (u_long)pgno));
+ isbad = 1;
+ goto err;
+ }
+ }
+
+ /*
+ * Our parent has sent us BINTERNAL pointers to parent records
+ * so that we can verify our place with respect to them. If it's
+ * appropriate--we have a default sort function--verify this.
+ */
+ if (isbad == 0 && ret == 0 && !LF_ISSET(DB_NOORDERCHK) && lp != NULL) {
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ /*
+ * __bam_vrfy_treeorder needs to know what comparison function
+ * to use. If ST_DUPSET is set, we're in a duplicate tree
+ * and we use the duplicate comparison function; otherwise,
+ * use the btree one. If unset, use the default, of course.
+ */
+ func = LF_ISSET(ST_DUPSET) ? dbp->dup_compare :
+ ((BTREE *)dbp->bt_internal)->bt_compare;
+ if (func == NULL)
+ func = __bam_defcmp;
+
+ if ((ret = __bam_vrfy_treeorder(
+ dbp, pgno, h, lp, rp, func, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ }
+
+ /*
+ * This is guaranteed to succeed for leaf pages, but no harm done.
+ *
+ * Internal pages below the top level do not store their own
+ * record numbers, so we skip them.
+ */
+ if (LF_ISSET(ST_RECNUM) && nrecs != pip->rec_cnt && toplevel) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad record count: has %lu records, claims %lu",
+ (u_long)pgno, (u_long)nrecs, (u_long)pip->rec_cnt));
+ }
+
+ if (levelp)
+ *levelp = level;
+ if (nrecsp)
+ *nrecsp = nrecs;
+
+ /* Mark this page as seen; hitting it twice means a corrupt linkage. */
+ pgset = vdp->pgset;
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ goto err;
+ if (p != 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv, "Page %lu: linked twice", (u_long)pgno));
+ } else if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ goto err;
+
+ if (toplevel)
+ /*
+ * The last page's next_pgno in the leaf chain should have been
+ * PGNO_INVALID.
+ */
+ if (vdp->next_pgno != PGNO_INVALID) {
+ EPRINT((dbp->dbenv, "Page %lu: unterminated leaf chain",
+ (u_long)vdp->prev_pgno));
+ isbad = 1;
+ }
+
+err: if (toplevel) {
+ /* Restore our caller's settings. */
+ vdp->next_pgno = next_pgno;
+ vdp->prev_pgno = prev_pgno;
+ vdp->leaf_type = leaf_type;
+ }
+
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __bam_vrfy_treeorder --
+ * Verify that the lowest key on a page sorts greater than the
+ * BINTERNAL which points to it (lp), and the highest key
+ * sorts less than the BINTERNAL above that (rp).
+ *
+ * If lp is NULL, this means that it was the leftmost key on the
+ * parent, which (regardless of sort function) sorts less than
+ * all keys. No need to check it.
+ *
+ * If rp is NULL, lp was the highest key on the parent, so there's
+ * no higher key we must sort less than.
+ */
+static int
+__bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags)
+ DB *dbp;
+ db_pgno_t pgno;
+ PAGE *h;
+ BINTERNAL *lp, *rp;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ u_int32_t flags;
+{
+ BOVERFLOW *bo;
+ DBT dbt;
+ db_indx_t last;
+ int ret, cmp;
+
+ memset(&dbt, 0, sizeof(DBT));
+ F_SET(&dbt, DB_DBT_MALLOC);
+ ret = 0;
+
+ /*
+ * Empty pages are sorted correctly by definition. We check
+ * to see whether they ought to be empty elsewhere; leaf
+ * pages legally may be.
+ */
+ if (NUM_ENT(h) == 0)
+ return (0);
+
+ /*
+ * Find the index of the page's last key-bearing entry; P_LBTREE
+ * pages store key/data pairs, so step back a full pair (P_INDX).
+ */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_LDUP:
+ last = NUM_ENT(h) - O_INDX;
+ break;
+ case P_LBTREE:
+ last = NUM_ENT(h) - P_INDX;
+ break;
+ default:
+ TYPE_ERR_PRINT(dbp->dbenv,
+ "__bam_vrfy_treeorder", pgno, TYPE(h));
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ /*
+ * The key on page h, the child page, is more likely to be
+ * an overflow page, so we pass its offset, rather than lp/rp's,
+ * into __bam_cmp. This will take advantage of __db_moff.
+ */
+
+ /*
+ * Skip first-item check if we're an internal page--the first
+ * entry on an internal page is treated specially by __bam_cmp,
+ * so what's on the page shouldn't matter. (Plus, since we're passing
+ * our page and item 0 as to __bam_cmp, we'll sort before our
+ * parent and falsely report a failure.)
+ */
+ if (lp != NULL && TYPE(h) != P_IBTREE) {
+ if (lp->type == B_KEYDATA) {
+ dbt.data = lp->data;
+ dbt.size = lp->len;
+ } else if (lp->type == B_OVERFLOW) {
+ bo = (BOVERFLOW *)lp->data;
+ if ((ret = __db_goff(dbp, &dbt, bo->tlen, bo->pgno,
+ NULL, NULL)) != 0)
+ return (ret);
+ } else {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Page %lu: unknown type for internal record",
+ (u_long)PGNO(h)));
+ return (EINVAL);
+ }
+
+ /* On error, fall through, free if needed, and return. */
+ if ((ret = __bam_cmp(dbp, &dbt, h, 0, func, &cmp)) == 0) {
+ if (cmp > 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: first item on page sorted greater than parent entry",
+ (u_long)PGNO(h)));
+ ret = DB_VERIFY_BAD;
+ }
+ } else
+ EPRINT((dbp->dbenv,
+ "Page %lu: first item on page had comparison error",
+ (u_long)PGNO(h)));
+
+ if (dbt.data != lp->data)
+ __os_ufree(dbp->dbenv, dbt.data);
+ if (ret != 0)
+ return (ret);
+ }
+
+ if (rp != NULL) {
+ if (rp->type == B_KEYDATA) {
+ dbt.data = rp->data;
+ dbt.size = rp->len;
+ } else if (rp->type == B_OVERFLOW) {
+ bo = (BOVERFLOW *)rp->data;
+ if ((ret = __db_goff(dbp, &dbt, bo->tlen, bo->pgno,
+ NULL, NULL)) != 0)
+ return (ret);
+ } else {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Page %lu: unknown type for internal record",
+ (u_long)PGNO(h)));
+ return (EINVAL);
+ }
+
+ /* On error, fall through, free if needed, and return. */
+ if ((ret = __bam_cmp(dbp, &dbt, h, last, func, &cmp)) == 0) {
+ if (cmp < 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: last item on page sorted greater than parent entry",
+ (u_long)PGNO(h)));
+ ret = DB_VERIFY_BAD;
+ }
+ } else
+ EPRINT((dbp->dbenv,
+ "Page %lu: last item on page had comparison error",
+ (u_long)PGNO(h)));
+
+ if (dbt.data != rp->data)
+ __os_ufree(dbp->dbenv, dbt.data);
+ }
+
+ return (ret);
+}
+
+/*
+ * __bam_salvage --
+ * Safely dump out anything that looks like a key on an alleged
+ * btree leaf page.
+ *
+ * PUBLIC: int __bam_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t,
+ * PUBLIC: PAGE *, void *, int (*)(void *, const void *), DBT *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+ PAGE *h;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ DBT *key;
+ u_int32_t flags;
+{
+ DBT dbt, unkdbt;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ db_indx_t i, beg, end, *inp;
+ u_int32_t himark;
+ u_int8_t *pgmap;
+ void *ovflbuf;
+ int t_ret, ret, err_ret;
+
+ /* Shut up lint. */
+ COMPQUIET(end, 0);
+
+ ovflbuf = pgmap = NULL;
+ err_ret = ret = 0;
+ inp = P_INP(dbp, h);
+
+ memset(&dbt, 0, sizeof(DBT));
+ dbt.flags = DB_DBT_REALLOC;
+
+ memset(&unkdbt, 0, sizeof(DBT));
+ unkdbt.size = (u_int32_t)(strlen("UNKNOWN") + 1);
+ unkdbt.data = "UNKNOWN";
+
+ /*
+ * Allocate a buffer for overflow items. Start at one page;
+ * __db_safe_goff will realloc as needed.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &ovflbuf)) != 0)
+ return (ret);
+
+ if (LF_ISSET(DB_AGGRESSIVE)) {
+ if ((ret =
+ __os_malloc(dbp->dbenv, dbp->pgsize, &pgmap)) != 0)
+ goto err;
+ memset(pgmap, 0, dbp->pgsize);
+ }
+
+ /*
+ * Loop through the inp array, spitting out key/data pairs.
+ *
+ * If we're salvaging normally, loop from 0 through NUM_ENT(h).
+ * If we're being aggressive, loop until we hit the end of the page--
+ * NUM_ENT() may be bogus.
+ */
+ himark = dbp->pgsize;
+ for (i = 0;; i += O_INDX) {
+ /* If we're not aggressive, break when we hit NUM_ENT(h). */
+ if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h))
+ break;
+
+ /* Verify the current item. */
+ ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 1, flags, &himark, NULL);
+ /* If this returned a fatality, it's time to break. */
+ if (ret == DB_VERIFY_FATAL) {
+ /*
+ * Don't return DB_VERIFY_FATAL; it's private
+ * and means only that we can't go on with this
+ * page, not with the whole database. It's
+ * not even an error if we've run into it
+ * after NUM_ENT(h).
+ */
+ ret = (i < NUM_ENT(h)) ? DB_VERIFY_BAD : 0;
+ break;
+ }
+
+ /*
+ * If this returned 0, it's safe to print or (carefully)
+ * try to fetch.
+ */
+ if (ret == 0) {
+ /*
+ * We only want to print deleted items if
+ * DB_AGGRESSIVE is set.
+ */
+ bk = GET_BKEYDATA(dbp, h, i);
+ if (!LF_ISSET(DB_AGGRESSIVE) && B_DISSET(bk->type))
+ continue;
+
+ /*
+ * We're going to go try to print the next item. If
+ * key is non-NULL, we're a dup page, so we've got to
+ * print the key first, unless SA_SKIPFIRSTKEY is set
+ * and we're on the first entry.
+ */
+ if (key != NULL &&
+ (i != 0 || !LF_ISSET(SA_SKIPFIRSTKEY)))
+ if ((ret = __db_prdbt(key,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+
+ beg = inp[i];
+ switch (B_TYPE(bk->type)) {
+ case B_DUPLICATE:
+ end = beg + BOVERFLOW_SIZE - 1;
+ /*
+ * If we're not on a normal btree leaf page,
+ * there shouldn't be off-page
+ * dup sets. Something's confused; just
+ * drop it, and the code to pick up unlinked
+ * offpage dup sets will print it out
+ * with key "UNKNOWN" later.
+ */
+ if (pgtype != P_LBTREE)
+ break;
+
+ bo = (BOVERFLOW *)bk;
+
+ /*
+ * If the page number is unreasonable, or
+ * if this is supposed to be a key item,
+ * just spit out "UNKNOWN"--the best we
+ * can do is run into the data items in the
+ * unlinked offpage dup pass.
+ */
+ if (!IS_VALID_PGNO(bo->pgno) ||
+ (i % P_INDX == 0)) {
+ /* Not much to do on failure. */
+ if ((ret = __db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ }
+
+ if ((ret = __db_salvage_duptree(dbp,
+ vdp, bo->pgno, &dbt, handle, callback,
+ flags | SA_SKIPFIRSTKEY)) != 0)
+ err_ret = ret;
+
+ break;
+ case B_KEYDATA:
+ end =
+ ALIGN(beg + bk->len, sizeof(u_int32_t)) - 1;
+ dbt.data = bk->data;
+ dbt.size = bk->len;
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ case B_OVERFLOW:
+ end = beg + BOVERFLOW_SIZE - 1;
+ bo = (BOVERFLOW *)bk;
+ if ((ret = __db_safe_goff(dbp, vdp,
+ bo->pgno, &dbt, &ovflbuf, flags)) != 0) {
+ err_ret = ret;
+ /* We care about err_ret more. */
+ (void)__db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, vdp);
+ break;
+ }
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ default:
+ /*
+ * We should never get here; __db_vrfy_inpitem
+ * should not be returning 0 if bk->type
+ * is unrecognizable.
+ *
+ * Route through the err label rather than
+ * returning directly so that ovflbuf and
+ * pgmap are freed on this path too.
+ */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * If we're being aggressive, mark the beginning
+ * and end of the item; we'll come back and print
+ * whatever "junk" is in the gaps in case we had
+ * any bogus inp elements and thereby missed stuff.
+ */
+ if (LF_ISSET(DB_AGGRESSIVE)) {
+ pgmap[beg] = ITEM_BEGIN;
+ pgmap[end] = ITEM_END;
+ }
+ }
+ }
+
+ /*
+ * If i is odd and this is a btree leaf, we've printed out a key but not
+ * a datum; fix this imbalance by printing an "UNKNOWN".
+ */
+ if (pgtype == P_LBTREE && (i % P_INDX == 1) && ((ret =
+ __db_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp)) != 0))
+ err_ret = ret;
+
+err: if (pgmap != NULL)
+ __os_free(dbp->dbenv, pgmap);
+ __os_free(dbp->dbenv, ovflbuf);
+
+ /* Mark this page as done. */
+ if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ return (t_ret);
+
+ return ((err_ret != 0) ? err_ret : ret);
+}
+
+/*
+ * __bam_salvage_walkdupint --
+ * Walk a known-good btree or recno internal page which is part of
+ * a dup tree, calling __db_salvage_duptree on each child page.
+ *
+ * PUBLIC: int __bam_salvage_walkdupint __P((DB *, VRFY_DBINFO *, PAGE *,
+ * PUBLIC: DBT *, void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__bam_salvage_walkdupint(dbp, vdp, h, key, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ DBT *key;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ BINTERNAL *bint;
+ RINTERNAL *rint;
+ db_pgno_t child_pgno;
+ db_indx_t indx;
+ int final_ret, t_ret;
+
+ final_ret = 0;
+ for (indx = 0; indx < NUM_ENT(h); indx++) {
+ /* Pull the child page number out of the internal entry. */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ bint = GET_BINTERNAL(dbp, h, indx);
+ child_pgno = bint->pgno;
+ break;
+ case P_IRECNO:
+ rint = GET_RINTERNAL(dbp, h, indx);
+ child_pgno = rint->pgno;
+ break;
+ default:
+ __db_err(dbp->dbenv,
+ "__bam_salvage_walkdupint called on non-int. page");
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+ /* Salvage the child; remember the last error but keep going. */
+ if ((t_ret = __db_salvage_duptree(dbp,
+ vdp, child_pgno, key, handle, callback, flags)) != 0)
+ final_ret = t_ret;
+ /* Pass SA_SKIPFIRSTKEY, if set, on to the 0th child only. */
+ flags &= ~LF_ISSET(SA_SKIPFIRSTKEY);
+ }
+
+ return (final_ret);
+}
+
+/*
+ * __bam_meta2pgset --
+ * Given a known-good meta page, return in pgsetp a 0-terminated list of
+ * db_pgno_t's corresponding to the pages in the btree.
+ *
+ * We do this by a somewhat sleazy method, to avoid having to traverse the
+ * btree structure neatly: we walk down the left side to the very
+ * first leaf page, then we mark all the pages in the chain of
+ * NEXT_PGNOs (being wary of cycles and invalid ones), then we
+ * consolidate our scratch array into a nice list, and return. This
+ * avoids the memory management hassles of recursion and the
+ * trouble of walking internal pages--they just don't matter, except
+ * for the left branch.
+ *
+ * PUBLIC: int __bam_meta2pgset __P((DB *, VRFY_DBINFO *, BTMETA *,
+ * PUBLIC: u_int32_t, DB *));
+ */
+int
+__bam_meta2pgset(dbp, vdp, btmeta, flags, pgset)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ BTMETA *btmeta;
+ u_int32_t flags;
+ DB *pgset;
+{
+ BINTERNAL *bi;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ RINTERNAL *ri;
+ db_pgno_t current, p;
+ int err_ret, ret;
+
+ mpf = dbp->mpf;
+ h = NULL;
+ ret = err_ret = 0;
+ DB_ASSERT(pgset != NULL);
+ /* Walk the leftmost branch from the root down to the 0th leaf. */
+ for (current = btmeta->root;;) {
+ if (!IS_VALID_PGNO(current) || current == PGNO(btmeta)) {
+ err_ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if ((ret = mpf->get(mpf, &current, 0, &h)) != 0) {
+ err_ret = ret;
+ goto err;
+ }
+
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ if ((ret = __bam_vrfy(dbp,
+ vdp, h, current, flags | DB_NOORDERCHK)) != 0) {
+ err_ret = ret;
+ goto err;
+ }
+ /* Descend through the 0th (leftmost) child. */
+ if (TYPE(h) == P_IBTREE) {
+ bi = GET_BINTERNAL(dbp, h, 0);
+ current = bi->pgno;
+ } else { /* P_IRECNO */
+ ri = GET_RINTERNAL(dbp, h, 0);
+ current = ri->pgno;
+ }
+ break;
+ case P_LBTREE:
+ case P_LRECNO:
+ goto traverse;
+ default:
+ err_ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ err_ret = ret;
+ h = NULL;
+ }
+
+ /*
+ * At this point, current is the pgno of leaf page h, the 0th in the
+ * tree we're concerned with.
+ */
+traverse:
+ while (IS_VALID_PGNO(current) && current != PGNO_INVALID) {
+ if (h == NULL && (ret = mpf->get(mpf, &current, 0, &h)) != 0) {
+ err_ret = ret;
+ break;
+ }
+
+ /* p serves only as __db_vrfy_pgset_get's int out-parameter. */
+ if ((ret = __db_vrfy_pgset_get(pgset, current, (int *)&p)) != 0)
+ goto err;
+
+ if (p != 0) {
+ /*
+ * We've found a cycle. Return success anyway--
+ * our caller may as well use however much of
+ * the pgset we've come up with.
+ */
+ break;
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, current)) != 0)
+ goto err;
+
+ current = NEXT_PGNO(h);
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ err_ret = ret;
+ h = NULL;
+ }
+
+err: if (h != NULL)
+ (void)mpf->put(mpf, h, 0);
+
+ return (ret == 0 ? err_ret : ret);
+}
+
+/*
+ * __bam_safe_getdata --
+ *
+ * Utility function for __bam_vrfy_itemorder. Safely retrieves the
+ * datum at index i on page h and places it in DBT dbt. If the item
+ * is an overflow item and ovflok is 1, we fetch it off-page and set
+ * *freedbtp so the caller knows to free dbt->data; if ovflok is 0,
+ * we leave the DBT zeroed.
+ */
+static int
+__bam_safe_getdata(dbp, h, i, ovflok, dbt, freedbtp)
+ DB *dbp;
+ PAGE *h;
+ u_int32_t i;
+ int ovflok;
+ DBT *dbt;
+ int *freedbtp;
+{
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+
+ memset(dbt, 0, sizeof(DBT));
+ *freedbtp = 0;
+
+ bk = GET_BKEYDATA(dbp, h, i);
+
+ /* On-page items can be returned by reference; nothing to free. */
+ if (B_TYPE(bk->type) != B_OVERFLOW) {
+ dbt->data = bk->data;
+ dbt->size = bk->len;
+ return (0);
+ }
+
+ /* Overflow item: leave the DBT zeroed unless the caller opted in. */
+ if (!ovflok)
+ return (0);
+
+ bo = (BOVERFLOW *)bk;
+ F_SET(dbt, DB_DBT_MALLOC);
+ *freedbtp = 1;
+ return (__db_goff(dbp, dbt, bo->tlen, bo->pgno, NULL, NULL));
+}
diff --git a/storage/bdb/btree/btree.src b/storage/bdb/btree/btree.src
new file mode 100644
index 00000000000..73f4abac874
--- /dev/null
+++ b/storage/bdb/btree/btree.src
@@ -0,0 +1,208 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: btree.src,v 10.35 2002/04/17 19:02:56 krinsky Exp $
+ */
+
+PREFIX __bam
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/btree.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * NOTE: pg_alloc and pg_free have been moved to db.src, where they belong.
+ */
+
+/*
+ * BTREE-split: used to log a page split.
+ *
+ * left: the page number for the low-order contents.
+ * llsn: the left page's original LSN.
+ * right: the page number for the high-order contents.
+ * rlsn: the right page's original LSN.
+ * indx: the number of entries that went to the left page.
+ * npgno: the next page number
+ * nlsn: the next page's original LSN (or 0 if no next page).
+ * root_pgno: the root page number
+ * pg: the split page's contents before the split.
+ * opflags: SPL_NRECS: if splitting a tree that maintains a record count.
+ */
+BEGIN split 62
+DB fileid int32_t ld
+WRLOCK left db_pgno_t lu
+POINTER llsn DB_LSN * lu
+WRLOCK right db_pgno_t lu
+POINTER rlsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG npgno db_pgno_t lu
+POINTER nlsn DB_LSN * lu
+WRLOCKNZ root_pgno db_pgno_t lu
+PGDBT pg DBT s
+ARG opflags u_int32_t lu
+END
+
+/*
+ * BTREE-rsplit: used to log a reverse-split
+ *
+ * pgno: the page number of the page copied over the root.
+ * pgdbt: the page being copied on the root page.
+ * root_pgno: the root page number.
+ * nrec: the tree's record count.
+ * rootent: last entry on the root page.
+ * rootlsn: the root page's original lsn.
+ */
+BEGIN rsplit 63
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+PGDBT pgdbt DBT s
+WRLOCK root_pgno db_pgno_t lu
+ARG nrec db_pgno_t lu
+DBT rootent DBT s
+POINTER rootlsn DB_LSN * lu
+END
+
+/*
+ * BTREE-adj: used to log the adjustment of an index.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index adjusted.
+ * indx_copy: the index to copy if inserting.
+ * is_insert: 0 if a delete, 1 if an insert.
+ */
+BEGIN adj 55
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG indx_copy u_int32_t lu
+ARG is_insert u_int32_t lu
+END
+
+/*
+ * BTREE-cadjust: used to adjust the count change in an internal page.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index to be adjusted.
+ * adjust: the signed adjustment.
+ * opflags: CAD_UPDATEROOT: if root page count was adjusted.
+ */
+BEGIN cadjust 56
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG adjust int32_t ld
+ARG opflags u_int32_t lu
+END
+
+/*
+ * BTREE-cdel: used to log the intent-to-delete of a cursor record.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index to be deleted.
+ */
+BEGIN cdel 57
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+END
+
+/*
+ * BTREE-repl: used to log the replacement of an item.
+ *
+ * pgno: the page modified.
+ * lsn: the page's original lsn.
+ * indx: the index of the replaced item.
+ * isdeleted: nonzero if the item was marked deleted.
+ * orig: the original data.
+ * repl: the replacement data.
+ * prefix: the prefix of the replacement that matches the original.
+ * suffix: the suffix of the replacement that matches the original.
+ */
+BEGIN repl 58
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+ARG indx u_int32_t lu
+ARG isdeleted u_int32_t lu
+DBT orig DBT s
+DBT repl DBT s
+ARG prefix u_int32_t lu
+ARG suffix u_int32_t lu
+END
+
+/*
+ * BTREE-root: log the assignment of a root btree page.
+ */
+BEGIN root 59
+DB fileid int32_t ld
+WRLOCK meta_pgno db_pgno_t lu
+WRLOCK root_pgno db_pgno_t lu
+POINTER meta_lsn DB_LSN * lu
+END
+
+/*
+ * BTREE-curadj: undo cursor adjustments on txn abort.
+ * Should only be processed during DB_TXN_ABORT.
+ * NOTE: the first_indx field gets used to hold
+ * signed index adjustment in one case.
+ * care should be taken if its size is changed.
+ */
+BEGIN curadj 64
+/* Fileid of db affected. */
+DB fileid int32_t ld
+/* Which adjustment. */
+ARG mode db_ca_mode ld
+/* Page entry is from. */
+ARG from_pgno db_pgno_t lu
+/* Page entry went to. */
+ARG to_pgno db_pgno_t lu
+/* Left page of root split. */
+ARG left_pgno db_pgno_t lu
+/* First index of dup set. Also used as adjustment. */
+ARG first_indx u_int32_t lu
+/* Index entry is from. */
+ARG from_indx u_int32_t lu
+/* Index where entry went. */
+ARG to_indx u_int32_t lu
+END
+
+/*
+ * BTREE-rcuradj: undo cursor adjustments on txn abort in
+ * renumbering recno trees.
+ * Should only be processed during DB_TXN_ABORT.
+ */
+BEGIN rcuradj 65
+/* Fileid of db affected. */
+DB fileid int32_t ld
+/* Which adjustment. */
+ARG mode ca_recno_arg ld
+/* Root page number. */
+ARG root db_pgno_t ld
+/* Recno of the adjustment. */
+ARG recno db_recno_t ld
+/* Order number of the adjustment. */
+ARG order u_int32_t ld
+END
diff --git a/storage/bdb/build_unix/.IGNORE_ME b/storage/bdb/build_unix/.IGNORE_ME
new file mode 100644
index 00000000000..558fd496f0c
--- /dev/null
+++ b/storage/bdb/build_unix/.IGNORE_ME
@@ -0,0 +1,3 @@
+Some combinations of the gzip and tar archive exploders found
+on Linux systems ignore directories that don't have any files
+(other than symbolic links) in them. So, here's a file.
diff --git a/storage/bdb/build_vxworks/BerkeleyDB.wsp b/storage/bdb/build_vxworks/BerkeleyDB.wsp
new file mode 100644
index 00000000000..ce2e71b0eb3
--- /dev/null
+++ b/storage/bdb/build_vxworks/BerkeleyDB.wsp
@@ -0,0 +1,29 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+Workspace
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> projectList
+$(PRJ_DIR)/BerkeleyDB.wpj \
+ $(PRJ_DIR)/db_archive/db_archive.wpj \
+ $(PRJ_DIR)/db_checkpoint/db_checkpoint.wpj \
+ $(PRJ_DIR)/db_deadlock/db_deadlock.wpj \
+ $(PRJ_DIR)/db_dump/db_dump.wpj \
+ $(PRJ_DIR)/db_load/db_load.wpj \
+ $(PRJ_DIR)/db_printlog/db_printlog.wpj \
+ $(PRJ_DIR)/db_recover/db_recover.wpj \
+ $(PRJ_DIR)/db_stat/db_stat.wpj \
+ $(PRJ_DIR)/db_upgrade/db_upgrade.wpj \
+ $(PRJ_DIR)/db_verify/db_verify.wpj \
+ $(PRJ_DIR)/dbdemo/dbdemo.wpj
+<END>
+
+<BEGIN> userComments
+
+<END>
+
diff --git a/storage/bdb/build_vxworks/dbdemo/README b/storage/bdb/build_vxworks/dbdemo/README
new file mode 100644
index 00000000000..1a2c7c7d073
--- /dev/null
+++ b/storage/bdb/build_vxworks/dbdemo/README
@@ -0,0 +1,39 @@
+This README describes the steps needed to run a demo example of BerkeleyDB.
+
+1. Read the pages in the Reference Guide that describe building
+ BerkeleyDB on VxWorks:
+
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/intro.html
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/notes.html
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/faq.html
+
+2. Launch Tornado 2.0 and open up the BerkeleyDB project.
+
+3. Add the demo project to that workspace:
+
+ $(WIND_BASE)/target/src/BerkeleyDB/build_vxworks/demo/dbdemo.wpj
+
+4. Build BerkeleyDB as described in the Reference Guide.
+
+5. Build the dbdemo project.
+
+6. Download BerkeleyDB onto the target.
+
+7. Download the dbdemo project onto the target.
+
+8. Open a windsh to the target and run the demo:
+
+ -> dbdemo "<pathname>/<dbname>"
+
+ Where pathname is a pathname string pointing to a directory that the
+ demo can create a database in. That directory should already exist.
+ The dbname is the name for the database. For example:
+
+ -> dbdemo "/tmp/demo.db"
+
+9. The demo program will ask for input. You can type in any string.
+ The program will add an entry to the database with that string as
+ the key and the reverse of that string as the data item for that key.
+ It will continue asking for input until you hit ^D or enter "quit".
+ Upon doing so, the demo program will display all the keys you have
+ entered as input and their data items.
diff --git a/storage/bdb/build_win32/Berkeley_DB.dsw b/storage/bdb/build_win32/Berkeley_DB.dsw
new file mode 100644
index 00000000000..899e31ad58d
--- /dev/null
+++ b/storage/bdb/build_win32/Berkeley_DB.dsw
@@ -0,0 +1,568 @@
+Microsoft Developer Studio Workspace File, Format Version 5.00
+# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
+
+###############################################################################
+
+Project: "DB_DLL"=.\db_dll.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
+Project: "DB_Static"=.\db_static.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
+Project: "db_archive"=.\db_archive.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_buildall"=.\db_buildall.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_archive
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_checkpoint
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_deadlock
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_dump
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_load
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_printlog
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_recover
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_stat
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_upgrade
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_verify
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_access
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_btrec
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_env
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_lock
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_mpool
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name ex_tpcb
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_access
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_btrec
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_env
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_lock
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_mpool
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name excxx_tpcb
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_checkpoint"=.\db_checkpoint.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_deadlock"=.\db_deadlock.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_dump"=.\db_dump.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_java"=.\db_java.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_load"=.\db_load.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_printlog"=.\db_printlog.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_recover"=.\db_recover.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_stat"=.\db_stat.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_tcl"=.\db_tcl.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_test"=.\db_test.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name db_buildall
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_tcl
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_upgrade"=.\db_upgrade.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_verify"=.\db_verify.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_access"=.\ex_access.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_btrec"=.\ex_btrec.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_env"=.\ex_env.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_lock"=.\ex_lock.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_mpool"=.\ex_mpool.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "ex_tpcb"=.\ex_tpcb.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_access"=.\excxx_access.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_btrec"=.\excxx_btrec.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_env"=.\excxx_env.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_lock"=.\excxx_lock.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_mpool"=.\excxx_mpool.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "excxx_tpcb"=.\excxx_tpcb.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name DB_DLL
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name DB_Static
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Global:
+
+Package=<5>
+{{{
+}}}
+
+Package=<3>
+{{{
+}}}
+
+###############################################################################
diff --git a/storage/bdb/build_win32/app_dsp.src b/storage/bdb/build_win32/app_dsp.src
new file mode 100644
index 00000000000..ff98d39ec79
--- /dev/null
+++ b/storage/bdb/build_win32/app_dsp.src
@@ -0,0 +1,145 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=@project_name@ - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+# Name "@project_name@ - Win32 Release Static"
+# Name "@project_name@ - Win32 Debug Static"
+@SOURCE_FILES@
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/storage/bdb/build_win32/build_all.dsp b/storage/bdb/build_win32/build_all.dsp
new file mode 100644
index 00000000000..7ae1f9bb031
--- /dev/null
+++ b/storage/bdb/build_win32/build_all.dsp
@@ -0,0 +1,96 @@
+# Microsoft Developer Studio Project File - Name="build_all" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Generic Project" 0x010a
+
+CFG=build_all - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "build_all.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "build_all.mak" CFG="build_all - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "build_all - Win32 Release" (based on "Win32 (x86) External Target")
+!MESSAGE "build_all - Win32 Debug" (based on "Win32 (x86) External Target")
+!MESSAGE "build_all - Win32 Release Static" (based on "Win32 (x86) External Target")
+!MESSAGE "build_all - Win32 Debug Static" (based on "Win32 (x86) External Target")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "build_all - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Cmd_Line "echo DB Release version built."
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "build_all - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Cmd_Line "echo DB Debug version built."
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "build_all - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Cmd_Line "echo DB Release Static version built."
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "build_all - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_Static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_Static"
+# PROP Cmd_Line "echo DB Debug Static version built."
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "build_all - Win32 Release"
+# Name "build_all - Win32 Debug"
+# Name "build_all - Win32 Release Static"
+# Name "build_all - Win32 Debug Static"
+# End Target
+# End Project
diff --git a/storage/bdb/build_win32/db_java_xa.dsp b/storage/bdb/build_win32/db_java_xa.dsp
new file mode 100644
index 00000000000..9c700ffeed4
--- /dev/null
+++ b/storage/bdb/build_win32/db_java_xa.dsp
@@ -0,0 +1,85 @@
+# Microsoft Developer Studio Project File - Name="db_java_xa" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) External Target" 0x0106
+
+CFG=db_java_xa - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_java_xa.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_java_xa.mak" CFG="db_java_xa - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_java_xa - Win32 Release" (based on "Win32 (x86) External Target")
+!MESSAGE "db_java_xa - Win32 Debug" (based on "Win32 (x86) External Target")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "db_java_xa - Win32 Release"
+
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Cmd_Line "NMAKE /f db_java_xaj.mak"
+# PROP BASE Rebuild_Opt "/a"
+# PROP BASE Target_File "db_java_xaj.exe"
+# PROP BASE Bsc_Name "db_java_xaj.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Cmd_Line "NMAKE /f db_java_xaj.mak Release/dbxa.jar"
+# PROP Rebuild_Opt "/a"
+# PROP Target_File "Release/dbxa.jar"
+# PROP Bsc_Name ""
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_java_xa - Win32 Debug"
+
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Cmd_Line "NMAKE /f db_java_xaj.mak"
+# PROP BASE Rebuild_Opt "/a"
+# PROP BASE Target_File "db_java_xaj.exe"
+# PROP BASE Bsc_Name "db_java_xaj.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Cmd_Line "NMAKE /f db_java_xaj.mak Debug/dbxa.jar"
+# PROP Rebuild_Opt "/a"
+# PROP Target_File "Debug/dbxa.jar"
+# PROP Bsc_Name ""
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_java_xa - Win32 Release"
+# Name "db_java_xa - Win32 Debug"
+
+!IF "$(CFG)" == "db_java_xa - Win32 Release"
+
+!ELSEIF "$(CFG)" == "db_java_xa - Win32 Debug"
+
+!ENDIF
+
+# Begin Source File
+
+SOURCE=.\db_java_xaj.mak
+# End Source File
+# End Target
+# End Project
diff --git a/storage/bdb/build_win32/db_java_xaj.mak b/storage/bdb/build_win32/db_java_xaj.mak
new file mode 100644
index 00000000000..c2dbc920d17
--- /dev/null
+++ b/storage/bdb/build_win32/db_java_xaj.mak
@@ -0,0 +1,21 @@
+JAVA_XADIR=../java/src/com/sleepycat/db/xa
+
+JAVA_XASRCS=\
+ $(JAVA_XADIR)/DbXAResource.java \
+ $(JAVA_XADIR)/DbXid.java
+
+Release/dbxa.jar : $(JAVA_XASRCS)
+ @echo compiling Berkeley DB XA classes
+ @javac -g -d ./Release/classes -classpath "$(CLASSPATH);./Release/classes" $(JAVA_XASRCS)
+ @echo creating jar file
+ @cd .\Release\classes
+ @jar cf ../dbxa.jar com\sleepycat\db\xa\*.class
+ @echo Java XA build finished
+
+Debug/dbxa.jar : $(JAVA_XASRCS)
+ @echo compiling Berkeley DB XA classes
+ @javac -g -d ./Debug/classes -classpath "$(CLASSPATH);./Debug/classes" $(JAVA_XASRCS)
+ @echo creating jar file
+ @cd .\Debug\classes
+ @jar cf ../dbxa.jar com\sleepycat\db\xa\*.class
+ @echo Java XA build finished
diff --git a/storage/bdb/build_win32/db_lib.dsp b/storage/bdb/build_win32/db_lib.dsp
new file mode 100644
index 00000000000..a7fb4157909
--- /dev/null
+++ b/storage/bdb/build_win32/db_lib.dsp
@@ -0,0 +1,92 @@
+# Microsoft Developer Studio Project File - Name="db_lib" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Generic Project" 0x010a
+
+CFG=db_lib - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_lib.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_lib.mak" CFG="db_lib - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_lib - Win32 Release" (based on "Win32 (x86) Generic Project")
+!MESSAGE "db_lib - Win32 Debug" (based on "Win32 (x86) Generic Project")
+!MESSAGE "db_lib - Win32 Release Static" (based on "Win32 (x86) Generic Project")
+!MESSAGE "db_lib - Win32 Debug Static" (based on "Win32 (x86) Generic Project")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "db_lib - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_lib - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_lib - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_lib - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_Static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_Static"
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_lib - Win32 Release"
+# Name "db_lib - Win32 Debug"
+# Name "db_lib - Win32 Release Static"
+# Name "db_lib - Win32 Debug Static"
+# End Target
+# End Project
diff --git a/storage/bdb/build_win32/db_test.src b/storage/bdb/build_win32/db_test.src
new file mode 100644
index 00000000000..73479d3856a
--- /dev/null
+++ b/storage/bdb/build_win32/db_test.src
@@ -0,0 +1,97 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386
+# Begin Special Build Tool
+SOURCE="$(InputPath)"
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Release\*.exe .
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no
+# Begin Special Build Tool
+SOURCE="$(InputPath)"
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Debug\*.exe .
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/storage/bdb/build_win32/dbkill.cpp b/storage/bdb/build_win32/dbkill.cpp
new file mode 100644
index 00000000000..23dc87b0e85
--- /dev/null
+++ b/storage/bdb/build_win32/dbkill.cpp
@@ -0,0 +1,131 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: dbkill.cpp,v 11.7 2002/01/11 15:51:27 bostic Exp $
+ */
+/*
+ * Kill -
+ * Simulate Unix kill on Windows/NT and Windows/9X.
+ * This good enough to support the Berkeley DB test suite,
+ * but may be missing some favorite features.
+ *
+ * Would have used MKS kill, but it didn't seem to work well
+ * on Win/9X. Cygnus kill works within the Gnu/Cygnus environment
+ * (where processes are given small pids, with presumably a translation
+ * table between small pids and actual process handles), but our test
+ * environment, via Tcl, does not use the Cygnus environment.
+ *
+ * Compile this and install it as c:/tools/kill.exe (or as indicated
+ * by build_win32/include.tcl ).
+ */
+
+#include <windows.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+
+/*
+ * Like atol, with specified base. Would use stdlib, but
+ * strtol("0xFFFF1234", NULL, 16) returns 0x7FFFFFFF and
+ * strtol("4294712487", NULL, 16) returns 0x7FFFFFFF w/ VC++
+ */
+long
+myatol(char *s, int base)
+{
+ long result = 0;
+ char ch;
+ int sign = 1; /* + */
+ if (base == 0)
+ base = 10;
+ if (base != 10 && base != 16)
+ return LONG_MAX;
+ while ((ch = *s++) != '\0') {
+ if (ch == '-') {
+ sign = -sign;
+ }
+ else if (ch >= '0' && ch <= '9') {
+ result = result * base + (ch - '0');
+ }
+ else if (ch == 'x' || ch == 'X') {
+ /* Allow leading 0x..., and switch to base 16 */
+ base = 16;
+ }
+ else if (base == 16 && ch >= 'a' && ch <= 'f') {
+ result = result * base + (ch - 'a' + 10);
+ }
+ else if (base == 16 && ch >= 'A' && ch <= 'F') {
+ result = result * base + (ch - 'A' + 10);
+ }
+ else {
+ if (sign > 1)
+ return LONG_MAX;
+ else
+ return LONG_MIN;
+ }
+ }
+ return sign * result;
+}
+
+void
+usage_exit()
+{
+ fprintf(stderr, "Usage: kill [ -sig ] pid\n");
+ fprintf(stderr, " for win32, sig must be or 0, 15 (TERM)\n");
+ exit(EXIT_FAILURE);
+}
+
+int
+main(int argc, char **argv)
+{
+ HANDLE hProcess ;
+ DWORD accessflag;
+ long pid;
+ int sig = 15;
+
+ if (argc > 2) {
+ if (argv[1][0] != '-')
+ usage_exit();
+
+ if (strcmp(argv[1], "-TERM") == 0)
+ sig = 15;
+ else {
+ /* currently sig is more or less ignored,
+ * we only care if it is zero or not
+ */
+ sig = atoi(&argv[1][1]);
+ if (sig < 0)
+ usage_exit();
+ }
+ argc--;
+ argv++;
+ }
+ if (argc < 2)
+ usage_exit();
+
+ pid = myatol(argv[1], 10);
+ /*printf("pid = %ld (0x%lx) (command line %s)\n", pid, pid, argv[1]);*/
+ if (pid == LONG_MAX || pid == LONG_MIN)
+ usage_exit();
+
+ if (sig == 0)
+ accessflag = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
+ else
+ accessflag = STANDARD_RIGHTS_REQUIRED | PROCESS_TERMINATE;
+ hProcess = OpenProcess(accessflag, FALSE, pid);
+ if (hProcess == NULL) {
+ fprintf(stderr, "dbkill: %s: no such process\n", argv[1]);
+ exit(EXIT_FAILURE);
+ }
+ if (sig == 0)
+ exit(EXIT_SUCCESS);
+ if (!TerminateProcess(hProcess, 99)) {
+ DWORD err = GetLastError();
+ fprintf(stderr,
+ "dbkill: cannot kill process: error %d (0x%lx)\n", err, err);
+ exit(EXIT_FAILURE);
+ }
+ return EXIT_SUCCESS;
+}
diff --git a/storage/bdb/build_win32/dllmain.c b/storage/bdb/build_win32/dllmain.c
new file mode 100644
index 00000000000..70c2e849d66
--- /dev/null
+++ b/storage/bdb/build_win32/dllmain.c
@@ -0,0 +1,97 @@
+/*
+ * --------------------------------------------------------------------------
+ * Copyright (C) 1997 Netscape Communications Corporation
+ * --------------------------------------------------------------------------
+ *
+ * dllmain.c
+ *
+ * $Id: dllmain.c,v 1.3 2000/10/26 21:58:48 bostic Exp $
+ */
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+static int ProcessesAttached = 0;
+static HINSTANCE Instance; /* Global library instance handle. */
+
+/*
+ * The following declaration is for the VC++ DLL entry point.
+ */
+
+BOOL APIENTRY DllMain (HINSTANCE hInst,
+ DWORD reason, LPVOID reserved);
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * DllEntryPoint --
+ *
+ * This wrapper function is used by Borland to invoke the
+ * initialization code for Tcl. It simply calls the DllMain
+ * routine.
+ *
+ * Results:
+ * See DllMain.
+ *
+ * Side effects:
+ * See DllMain.
+ *
+ *----------------------------------------------------------------------
+ */
+
+BOOL APIENTRY
+DllEntryPoint(hInst, reason, reserved)
+ HINSTANCE hInst; /* Library instance handle. */
+ DWORD reason; /* Reason this function is being called. */
+ LPVOID reserved; /* Not used. */
+{
+ return DllMain(hInst, reason, reserved);
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * DllMain --
+ *
+ * This routine is called by the VC++ C run time library init
+ * code, or the DllEntryPoint routine. It is responsible for
+ * initializing various dynamically loaded libraries.
+ *
+ * Results:
+ * TRUE on sucess, FALSE on failure.
+ *
+ * Side effects:
+ * Establishes 32-to-16 bit thunk and initializes sockets library.
+ *
+ *----------------------------------------------------------------------
+ */
+BOOL APIENTRY
+DllMain(hInst, reason, reserved)
+ HINSTANCE hInst; /* Library instance handle. */
+ DWORD reason; /* Reason this function is being called. */
+ LPVOID reserved; /* Not used. */
+{
+ switch (reason) {
+ case DLL_PROCESS_ATTACH:
+
+ /*
+ * Registration of UT need to be done only once for first
+ * attaching process. At that time set the tclWin32s flag
+ * to indicate if the DLL is executing under Win32s or not.
+ */
+
+ if (ProcessesAttached++) {
+ return FALSE; /* Not the first initialization. */
+ }
+
+ Instance = hInst;
+ return TRUE;
+
+ case DLL_PROCESS_DETACH:
+
+ ProcessesAttached--;
+ break;
+ }
+
+ return TRUE;
+}
diff --git a/storage/bdb/build_win32/dynamic_dsp.src b/storage/bdb/build_win32/dynamic_dsp.src
new file mode 100644
index 00000000000..a92906a51f4
--- /dev/null
+++ b/storage/bdb/build_win32/dynamic_dsp.src
@@ -0,0 +1,93 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/storage/bdb/build_win32/java_dsp.src b/storage/bdb/build_win32/java_dsp.src
new file mode 100644
index 00000000000..15941bcab67
--- /dev/null
+++ b/storage/bdb/build_win32/java_dsp.src
@@ -0,0 +1,129 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Release\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll
+SOURCE="$(InputPath)"
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ mkdir $(ProjDir)\Release\classes
+ echo compiling Berkeley DB classes
+ javac -g -d $(ProjDir)/Release/classes -classpath "$(CLASSPATH);$(ProjDir)/Release/classes" ..\java\src\com\sleepycat\db\*.java
+ echo compiling examples
+ javac -g -d $(ProjDir)/Release/classes -classpath "$(CLASSPATH);$(ProjDir)/Release/classes" ..\java\src\com\sleepycat\examples\*.java
+ echo creating jar files
+ cd $(ProjDir)\Release\classes
+ jar cf ../db.jar com\sleepycat\db\*.class
+ jar cf ../dbexamples.jar com\sleepycat\examples\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+# Begin Custom Build - Compiling java files using javac
+ProjDir=.
+InputPath=.\Debug\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll
+SOURCE="$(InputPath)"
+
+"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
+ mkdir $(ProjDir)\Debug\classes
+ echo compiling Berkeley DB classes
+ javac -g -d $(ProjDir)/Debug/classes -classpath "$(CLASSPATH);$(ProjDir)/Debug/classes" ..\java\src\com\sleepycat\db\*.java
+ echo compiling examples
+ javac -g -d $(ProjDir)/Debug/classes -classpath "$(CLASSPATH);$(ProjDir)/Debug/classes" ..\java\src\com\sleepycat\examples\*.java
+ echo creating jar files
+ cd $(ProjDir)\Debug\classes
+ jar cf ../db.jar com\sleepycat\db\*.class
+ jar cf ../dbexamples.jar com\sleepycat\examples\*.class
+ echo Java build finished
+
+# End Custom Build
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/storage/bdb/build_win32/libdb_tcl.def b/storage/bdb/build_win32/libdb_tcl.def
new file mode 100644
index 00000000000..b6323c66bc6
--- /dev/null
+++ b/storage/bdb/build_win32/libdb_tcl.def
@@ -0,0 +1,27 @@
+; $Id: libdb_tcl.def,v 11.5 2002/04/03 12:01:27 mjc Exp $
+
+DESCRIPTION 'Berkeley DB TCL interface Library'
+EXPORTS
+ Db_tcl_Init
+ db_Cmd
+ dbc_Cmd
+ env_Cmd
+ tcl_EnvRemove
+ tcl_LockDetect
+ tcl_LockGet
+ tcl_LockStat
+ tcl_LockVec
+ tcl_LogArchive
+ tcl_LogCompare
+ tcl_LogFile
+ tcl_LogFlush
+ tcl_LogGet
+ tcl_LogPut
+ tcl_LogStat
+ tcl_Mp
+ tcl_MpStat
+ tcl_MpSync
+ tcl_MpTrickle
+ tcl_Txn
+ tcl_TxnCheckpoint
+ tcl_TxnStat
diff --git a/storage/bdb/build_win32/libdbrc.src b/storage/bdb/build_win32/libdbrc.src
new file mode 100644
index 00000000000..3e5d8deec6f
--- /dev/null
+++ b/storage/bdb/build_win32/libdbrc.src
@@ -0,0 +1,33 @@
+1 VERSIONINFO
+ FILEVERSION %MAJOR%,0,%MINOR%,%PATCH%
+ PRODUCTVERSION %MAJOR%,0,%MINOR%,%PATCH%
+ FILEFLAGSMASK 0x3fL
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x4L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904b0"
+ BEGIN
+ VALUE "CompanyName", "Sleepycat Software\0"
+ VALUE "FileDescription", "Berkeley DB 3.0 DLL\0"
+ VALUE "FileVersion", "%MAJOR%.%MINOR%.%PATCH%\0"
+ VALUE "InternalName", "libdb.dll\0"
+ VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997-2002\0"
+ VALUE "OriginalFilename", "libdb.dll\0"
+ VALUE "ProductName", "Sleepycat Software libdb\0"
+ VALUE "ProductVersion", "%MAJOR%.%MINOR%.%PATCH%\0"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1200
+ END
+END
diff --git a/storage/bdb/build_win32/srcfile_dsp.src b/storage/bdb/build_win32/srcfile_dsp.src
new file mode 100644
index 00000000000..572350e6356
--- /dev/null
+++ b/storage/bdb/build_win32/srcfile_dsp.src
@@ -0,0 +1,4 @@
+# Begin Source File
+
+SOURCE=@srcdir@\@srcfile@
+# End Source File
diff --git a/storage/bdb/build_win32/static_dsp.src b/storage/bdb/build_win32/static_dsp.src
new file mode 100644
index 00000000000..0c66c851025
--- /dev/null
+++ b/storage/bdb/build_win32/static_dsp.src
@@ -0,0 +1,85 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+CFG=@project_name@ - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Static Library")
+!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Static Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE RSC /l 0xc09
+# ADD RSC /l 0xc09
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib"
+# ADD LIB32 /nologo /out:"Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static"
+
+# PROP BASE Use_MFC 1
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 1
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE RSC /l 0xc09
+# ADD RSC /l 0xc09
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib"
+# ADD LIB32 /nologo /out:"Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib"
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release Static"
+# Name "@project_name@ - Win32 Debug Static"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/storage/bdb/build_win32/tcl_dsp.src b/storage/bdb/build_win32/tcl_dsp.src
new file mode 100644
index 00000000000..4de41e6934e
--- /dev/null
+++ b/storage/bdb/build_win32/tcl_dsp.src
@@ -0,0 +1,93 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+MTL=midl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib tcl83.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 2
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# SUBTRACT CPP /Fr
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib tcl83d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/storage/bdb/clib/getcwd.c b/storage/bdb/clib/getcwd.c
new file mode 100644
index 00000000000..bae50dfe90c
--- /dev/null
+++ b/storage/bdb/clib/getcwd.c
@@ -0,0 +1,272 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: getcwd.c,v 11.13 2002/02/28 21:27:18 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#if HAVE_DIRENT_H
+# include <dirent.h>
+# define NAMLEN(dirent) strlen((dirent)->d_name)
+#else
+# define dirent direct
+# define NAMLEN(dirent) (dirent)->d_namlen
+# if HAVE_SYS_NDIR_H
+# include <sys/ndir.h>
+# endif
+# if HAVE_SYS_DIR_H
+# include <sys/dir.h>
+# endif
+# if HAVE_NDIR_H
+# include <ndir.h>
+# endif
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#define ISDOT(dp) \
+ (dp->d_name[0] == '.' && (dp->d_name[1] == '\0' || \
+ (dp->d_name[1] == '.' && dp->d_name[2] == '\0')))
+
+#ifndef dirfd
+#define dirfd(dirp) ((dirp)->dd_fd)
+#endif
+
+/*
+ * getcwd --
+ * Get the current working directory.
+ *
+ * PUBLIC: #ifndef HAVE_GETCWD
+ * PUBLIC: char *getcwd __P((char *, size_t));
+ * PUBLIC: #endif
+ */
+char *
+getcwd(pt, size)
+ char *pt;
+ size_t size;
+{
+ register struct dirent *dp;
+ register DIR *dir;
+ register dev_t dev;
+ register ino_t ino;
+ register int first;
+ register char *bpt, *bup;
+ struct stat s;
+ dev_t root_dev;
+ ino_t root_ino;
+ size_t ptsize, upsize;
+ int ret, save_errno;
+ char *ept, *eup, *up;
+
+ /*
+ * If no buffer specified by the user, allocate one as necessary.
+ * If a buffer is specified, the size has to be non-zero. The path
+ * is built from the end of the buffer backwards.
+ */
+ if (pt) {
+ ptsize = 0;
+ if (!size) {
+ __os_set_errno(EINVAL);
+ return (NULL);
+ }
+ if (size == 1) {
+ __os_set_errno(ERANGE);
+ return (NULL);
+ }
+ ept = pt + size;
+ } else {
+ if ((ret =
+ __os_malloc(NULL, ptsize = 1024 - 4, &pt)) != 0) {
+ __os_set_errno(ret);
+ return (NULL);
+ }
+ ept = pt + ptsize;
+ }
+ bpt = ept - 1;
+ *bpt = '\0';
+
+ /*
+ * Allocate bytes (1024 - malloc space) for the string of "../"'s.
+ * Should always be enough (it's 340 levels). If it's not, allocate
+ * as necessary. Special case the first stat, it's ".", not "..".
+ */
+ if ((ret = __os_malloc(NULL, upsize = 1024 - 4, &up)) != 0)
+ goto err;
+ eup = up + 1024;
+ bup = up;
+ up[0] = '.';
+ up[1] = '\0';
+
+ /* Save root values, so know when to stop. */
+ if (stat("/", &s))
+ goto err;
+ root_dev = s.st_dev;
+ root_ino = s.st_ino;
+
+ __os_set_errno(0); /* XXX readdir has no error return. */
+
+ for (first = 1;; first = 0) {
+ /* Stat the current level. */
+ if (lstat(up, &s))
+ goto err;
+
+ /* Save current node values. */
+ ino = s.st_ino;
+ dev = s.st_dev;
+
+ /* Check for reaching root. */
+ if (root_dev == dev && root_ino == ino) {
+ *--bpt = PATH_SEPARATOR[0];
+ /*
+ * It's unclear that it's a requirement to copy the
+ * path to the beginning of the buffer, but it's always
+ * been that way and stuff would probably break.
+ */
+ bcopy(bpt, pt, ept - bpt);
+ __os_free(NULL, up);
+ return (pt);
+ }
+
+ /*
+ * Build pointer to the parent directory, allocating memory
+ * as necessary. Max length is 3 for "../", the largest
+ * possible component name, plus a trailing NULL.
+ */
+ if (bup + 3 + MAXNAMLEN + 1 >= eup) {
+ if (__os_realloc(NULL, upsize *= 2, &up) != 0)
+ goto err;
+ bup = up;
+ eup = up + upsize;
+ }
+ *bup++ = '.';
+ *bup++ = '.';
+ *bup = '\0';
+
+ /* Open and stat parent directory. */
+ if (!(dir = opendir(up)) || fstat(dirfd(dir), &s))
+ goto err;
+
+ /* Add trailing slash for next directory. */
+ *bup++ = PATH_SEPARATOR[0];
+
+ /*
+ * If it's a mount point, have to stat each element because
+ * the inode number in the directory is for the entry in the
+ * parent directory, not the inode number of the mounted file.
+ */
+ save_errno = 0;
+ if (s.st_dev == dev) {
+ for (;;) {
+ if (!(dp = readdir(dir)))
+ goto notfound;
+ if (dp->d_fileno == ino)
+ break;
+ }
+ } else
+ for (;;) {
+ if (!(dp = readdir(dir)))
+ goto notfound;
+ if (ISDOT(dp))
+ continue;
+ bcopy(dp->d_name, bup, dp->d_namlen + 1);
+
+ /* Save the first error for later. */
+ if (lstat(up, &s)) {
+ if (save_errno == 0)
+ save_errno = __os_get_errno();
+ __os_set_errno(0);
+ continue;
+ }
+ if (s.st_dev == dev && s.st_ino == ino)
+ break;
+ }
+
+ /*
+ * Check for length of the current name, preceding slash,
+ * leading slash.
+ */
+ if (bpt - pt < dp->d_namlen + (first ? 1 : 2)) {
+ size_t len, off;
+
+ if (!ptsize) {
+ __os_set_errno(ERANGE);
+ goto err;
+ }
+ off = bpt - pt;
+ len = ept - bpt;
+ if (__os_realloc(NULL, ptsize *= 2, &pt) != 0)
+ goto err;
+ bpt = pt + off;
+ ept = pt + ptsize;
+ bcopy(bpt, ept - len, len);
+ bpt = ept - len;
+ }
+ if (!first)
+ *--bpt = PATH_SEPARATOR[0];
+ bpt -= dp->d_namlen;
+ bcopy(dp->d_name, bpt, dp->d_namlen);
+ (void)closedir(dir);
+
+ /* Truncate any file name. */
+ *bup = '\0';
+ }
+
+notfound:
+ /*
+ * If readdir set errno, use it, not any saved error; otherwise,
+ * didn't find the current directory in its parent directory, set
+ * errno to ENOENT.
+ */
+ if (__os_get_errno_ret_zero() == 0)
+ __os_set_errno(save_errno == 0 ? ENOENT : save_errno);
+ /* FALLTHROUGH */
+err:
+ if (ptsize)
+ __os_free(NULL, pt);
+ __os_free(NULL, up);
+ return (NULL);
+}
diff --git a/storage/bdb/clib/getopt.c b/storage/bdb/clib/getopt.c
new file mode 100644
index 00000000000..3f6659ea6e6
--- /dev/null
+++ b/storage/bdb/clib/getopt.c
@@ -0,0 +1,154 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1987, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: getopt.c,v 11.7 2002/01/11 15:51:28 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+int __db_getopt_reset; /* global reset for VxWorks. */
+
+int opterr = 1, /* if error message should be printed */
+ optind = 1, /* index into parent argv vector */
+ optopt, /* character checked for validity */
+ optreset; /* reset getopt */
+char *optarg; /* argument associated with option */
+
+#undef BADCH
+#define BADCH (int)'?'
+#undef BADARG
+#define BADARG (int)':'
+#undef EMSG
+#define EMSG ""
+
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ *
+ * PUBLIC: #ifndef HAVE_GETOPT
+ * PUBLIC: int getopt __P((int, char * const *, const char *));
+ * PUBLIC: #endif
+ */
+int
+getopt(nargc, nargv, ostr)
+ int nargc;
+ char * const *nargv;
+ const char *ostr;
+{
+ static char *progname;
+ static char *place = EMSG; /* option letter processing */
+ char *oli; /* option letter list index */
+
+ /*
+ * VxWorks needs to be able to repeatedly call getopt from multiple
+ * programs within its global name space.
+ */
+ if (__db_getopt_reset) {
+ __db_getopt_reset = 0;
+
+ opterr = optind = 1;
+ optopt = optreset = 0;
+ optarg = NULL;
+ progname = NULL;
+ place = EMSG;
+ }
+ if (!progname) {
+ if ((progname = __db_rpath(*nargv)) == NULL)
+ progname = *nargv;
+ else
+ ++progname;
+ }
+
+ if (optreset || !*place) { /* update scanning pointer */
+ optreset = 0;
+ if (optind >= nargc || *(place = nargv[optind]) != '-') {
+ place = EMSG;
+ return (EOF);
+ }
+ if (place[1] && *++place == '-') { /* found "--" */
+ ++optind;
+ place = EMSG;
+ return (EOF);
+ }
+ } /* option letter okay? */
+ if ((optopt = (int)*place++) == (int)':' ||
+ !(oli = strchr(ostr, optopt))) {
+ /*
+ * if the user didn't specify '-' as an option,
+ * assume it means EOF.
+ */
+ if (optopt == (int)'-')
+ return (EOF);
+ if (!*place)
+ ++optind;
+ if (opterr && *ostr != ':')
+ (void)fprintf(stderr,
+ "%s: illegal option -- %c\n", progname, optopt);
+ return (BADCH);
+ }
+ if (*++oli != ':') { /* don't need argument */
+ optarg = NULL;
+ if (!*place)
+ ++optind;
+ }
+ else { /* need an argument */
+ if (*place) /* no white space */
+ optarg = place;
+ else if (nargc <= ++optind) { /* no arg */
+ place = EMSG;
+ if (*ostr == ':')
+ return (BADARG);
+ if (opterr)
+ (void)fprintf(stderr,
+ "%s: option requires an argument -- %c\n",
+ progname, optopt);
+ return (BADCH);
+ }
+ else /* white space */
+ optarg = nargv[optind];
+ place = EMSG;
+ ++optind;
+ }
+ return (optopt); /* dump back option letter */
+}
diff --git a/storage/bdb/clib/memcmp.c b/storage/bdb/clib/memcmp.c
new file mode 100644
index 00000000000..979badaef30
--- /dev/null
+++ b/storage/bdb/clib/memcmp.c
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: memcmp.c,v 11.7 2002/01/11 15:51:28 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+/*
+ * memcmp --
+ *
+ * PUBLIC: #ifndef HAVE_MEMCMP
+ * PUBLIC: int memcmp __P((const void *, const void *, size_t));
+ * PUBLIC: #endif
+ */
+int
+memcmp(s1, s2, n)
+ char *s1, *s2;
+ size_t n;
+{
+ if (n != 0) {
+ unsigned char *p1 = (unsigned char *)s1,
+ *p2 = (unsigned char *)s2;
+ do {
+ if (*p1++ != *p2++)
+ return (*--p1 - *--p2);
+ } while (--n != 0);
+ }
+ return (0);
+}
diff --git a/storage/bdb/clib/memmove.c b/storage/bdb/clib/memmove.c
new file mode 100644
index 00000000000..632d50788da
--- /dev/null
+++ b/storage/bdb/clib/memmove.c
@@ -0,0 +1,155 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: memmove.c,v 11.6 2002/01/11 15:51:28 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+/*
+ * sizeof(word) MUST BE A POWER OF TWO
+ * SO THAT wmask BELOW IS ALL ONES
+ */
+typedef int word; /* "word" used for optimal copy speed */
+
+#undef wsize
+#define wsize sizeof(word)
+#undef wmask
+#define wmask (wsize - 1)
+
+/*
+ * Copy a block of memory, handling overlap.
+ * This is the routine that actually implements
+ * (the portable versions of) bcopy, memcpy, and memmove.
+ */
+#ifdef MEMCOPY
+/*
+ * PUBLIC: #ifndef HAVE_MEMCPY
+ * PUBLIC: void *memcpy __P((void *, const void *, size_t));
+ * PUBLIC: #endif
+ */
+void *
+memcpy(dst0, src0, length)
+#else
+#ifdef MEMMOVE
+/*
+ * PUBLIC: #ifndef HAVE_MEMMOVE
+ * PUBLIC: void *memmove __P((void *, const void *, size_t));
+ * PUBLIC: #endif
+ */
+void *
+memmove(dst0, src0, length)
+#else
+void
+bcopy(src0, dst0, length)
+#endif
+#endif
+ void *dst0;
+ const void *src0;
+ register size_t length;
+{
+ register char *dst = dst0;
+ register const char *src = src0;
+ register size_t t;
+
+ if (length == 0 || dst == src) /* nothing to do */
+ goto done;
+
+ /*
+ * Macros: loop-t-times; and loop-t-times, t>0
+ */
+#undef TLOOP
+#define TLOOP(s) if (t) TLOOP1(s)
+#undef TLOOP1
+#define TLOOP1(s) do { s; } while (--t)
+
+ if ((unsigned long)dst < (unsigned long)src) {
+ /*
+ * Copy forward.
+ */
+ t = (int)src; /* only need low bits */
+ if ((t | (int)dst) & wmask) {
+ /*
+ * Try to align operands. This cannot be done
+ * unless the low bits match.
+ */
+ if ((t ^ (int)dst) & wmask || length < wsize)
+ t = length;
+ else
+ t = wsize - (t & wmask);
+ length -= t;
+ TLOOP1(*dst++ = *src++);
+ }
+ /*
+ * Copy whole words, then mop up any trailing bytes.
+ */
+ t = length / wsize;
+ TLOOP(*(word *)dst = *(word *)src; src += wsize; dst += wsize);
+ t = length & wmask;
+ TLOOP(*dst++ = *src++);
+ } else {
+ /*
+ * Copy backwards. Otherwise essentially the same.
+ * Alignment works as before, except that it takes
+ * (t&wmask) bytes to align, not wsize-(t&wmask).
+ */
+ src += length;
+ dst += length;
+ t = (int)src;
+ if ((t | (int)dst) & wmask) {
+ if ((t ^ (int)dst) & wmask || length <= wsize)
+ t = length;
+ else
+ t &= wmask;
+ length -= t;
+ TLOOP1(*--dst = *--src);
+ }
+ t = length / wsize;
+ TLOOP(src -= wsize; dst -= wsize; *(word *)dst = *(word *)src);
+ t = length & wmask;
+ TLOOP(*--dst = *--src);
+ }
+done:
+#if defined(MEMCOPY) || defined(MEMMOVE)
+ return (dst0);
+#else
+ return;
+#endif
+}
diff --git a/storage/bdb/clib/raise.c b/storage/bdb/clib/raise.c
new file mode 100644
index 00000000000..fcf3bbcbd7f
--- /dev/null
+++ b/storage/bdb/clib/raise.c
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: raise.c,v 11.6 2002/01/11 15:51:28 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <signal.h>
+#include <unistd.h>
+#endif
+
+/*
+ * raise --
+ * Send a signal to the current process.
+ *
+ * PUBLIC: #ifndef HAVE_RAISE
+ * PUBLIC: int raise __P((int));
+ * PUBLIC: #endif
+ */
+int
+raise(s)
+ int s;
+{
+ /*
+ * Do not use __os_id(), as it may not return the process ID -- any
+ * system with kill(3) probably has getpid(3).
+ */
+ return (kill(getpid(), s));
+}
diff --git a/storage/bdb/clib/snprintf.c b/storage/bdb/clib/snprintf.c
new file mode 100644
index 00000000000..fa1a63425e8
--- /dev/null
+++ b/storage/bdb/clib/snprintf.c
@@ -0,0 +1,74 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: snprintf.c,v 11.10 2002/01/11 15:51:28 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * snprintf --
+ * Bounded version of sprintf.
+ *
+ * PUBLIC: #ifndef HAVE_SNPRINTF
+ * PUBLIC: int snprintf __P((char *, size_t, const char *, ...));
+ * PUBLIC: #endif
+ */
+#ifndef HAVE_SNPRINTF
+int
+#ifdef __STDC__
+snprintf(char *str, size_t n, const char *fmt, ...)
+#else
+snprintf(str, n, fmt, va_alist)
+ char *str;
+ size_t n;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ static int ret_charpnt = -1;
+ va_list ap;
+ int len;
+
+ COMPQUIET(n, 0);
+
+ /*
+ * Some old versions of sprintf return a pointer to the first argument
+ * instead of a character count. Assume the return value of snprintf,
+ * vsprintf, etc. will be the same as sprintf, and check the easy one.
+ *
+ * We do this test at run-time because it's not a test we can do in a
+ * cross-compilation environment.
+ */
+ if (ret_charpnt == -1) {
+ char buf[10];
+
+ ret_charpnt =
+ sprintf(buf, "123") != 3 ||
+ sprintf(buf, "123456789") != 9 ||
+ sprintf(buf, "1234") != 4;
+ }
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ len = vsprintf(str, fmt, ap);
+ va_end(ap);
+ return (ret_charpnt ? (int)strlen(str) : len);
+}
+#endif
diff --git a/storage/bdb/clib/strcasecmp.c b/storage/bdb/clib/strcasecmp.c
new file mode 100644
index 00000000000..d5ce6d76d5f
--- /dev/null
+++ b/storage/bdb/clib/strcasecmp.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 1987, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: strcasecmp.c,v 1.7 2001/11/15 17:51:38 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+/*
+ * This array is designed for mapping upper and lower case letter
+ * together for a case independent comparison. The mappings are
+ * based upon ascii character sequences.
+ */
+static const unsigned char charmap[] = {
+ '\000', '\001', '\002', '\003', '\004', '\005', '\006', '\007',
+ '\010', '\011', '\012', '\013', '\014', '\015', '\016', '\017',
+ '\020', '\021', '\022', '\023', '\024', '\025', '\026', '\027',
+ '\030', '\031', '\032', '\033', '\034', '\035', '\036', '\037',
+ '\040', '\041', '\042', '\043', '\044', '\045', '\046', '\047',
+ '\050', '\051', '\052', '\053', '\054', '\055', '\056', '\057',
+ '\060', '\061', '\062', '\063', '\064', '\065', '\066', '\067',
+ '\070', '\071', '\072', '\073', '\074', '\075', '\076', '\077',
+ '\100', '\141', '\142', '\143', '\144', '\145', '\146', '\147',
+ '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157',
+ '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167',
+ '\170', '\171', '\172', '\133', '\134', '\135', '\136', '\137',
+ '\140', '\141', '\142', '\143', '\144', '\145', '\146', '\147',
+ '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157',
+ '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167',
+ '\170', '\171', '\172', '\173', '\174', '\175', '\176', '\177',
+ '\200', '\201', '\202', '\203', '\204', '\205', '\206', '\207',
+ '\210', '\211', '\212', '\213', '\214', '\215', '\216', '\217',
+ '\220', '\221', '\222', '\223', '\224', '\225', '\226', '\227',
+ '\230', '\231', '\232', '\233', '\234', '\235', '\236', '\237',
+ '\240', '\241', '\242', '\243', '\244', '\245', '\246', '\247',
+ '\250', '\251', '\252', '\253', '\254', '\255', '\256', '\257',
+ '\260', '\261', '\262', '\263', '\264', '\265', '\266', '\267',
+ '\270', '\271', '\272', '\273', '\274', '\275', '\276', '\277',
+ '\300', '\301', '\302', '\303', '\304', '\305', '\306', '\307',
+ '\310', '\311', '\312', '\313', '\314', '\315', '\316', '\317',
+ '\320', '\321', '\322', '\323', '\324', '\325', '\326', '\327',
+ '\330', '\331', '\332', '\333', '\334', '\335', '\336', '\337',
+ '\340', '\341', '\342', '\343', '\344', '\345', '\346', '\347',
+ '\350', '\351', '\352', '\353', '\354', '\355', '\356', '\357',
+ '\360', '\361', '\362', '\363', '\364', '\365', '\366', '\367',
+ '\370', '\371', '\372', '\373', '\374', '\375', '\376', '\377'
+};
+
+/*
+ * strcasecmp --
+ * Do strcmp(3) in a case-insensitive manner.
+ *
+ * PUBLIC: #ifndef HAVE_STRCASECMP
+ * PUBLIC: int strcasecmp __P((const char *, const char *));
+ * PUBLIC: #endif
+ */
+int
+strcasecmp(s1, s2)
+ const char *s1, *s2;
+{
+ register const unsigned char *cm = charmap,
+ *us1 = (const unsigned char *)s1,
+ *us2 = (const unsigned char *)s2;
+
+ while (cm[*us1] == cm[*us2++])
+ if (*us1++ == '\0')
+ return (0);
+ return (cm[*us1] - cm[*--us2]);
+}
+
+/*
+ * strncasecmp --
+ * Do strncmp(3) in a case-insensitive manner.
+ *
+ * PUBLIC: #ifndef HAVE_STRCASECMP
+ * PUBLIC: int strncasecmp __P((const char *, const char *, size_t));
+ * PUBLIC: #endif
+ */
+int
+strncasecmp(s1, s2, n)
+ const char *s1, *s2;
+ register size_t n;
+{
+ if (n != 0) {
+ register const unsigned char *cm = charmap,
+ *us1 = (const unsigned char *)s1,
+ *us2 = (const unsigned char *)s2;
+
+ do {
+ if (cm[*us1] != cm[*us2++])
+ return (cm[*us1] - cm[*--us2]);
+ if (*us1++ == '\0')
+ break;
+ } while (--n != 0);
+ }
+ return (0);
+}
diff --git a/storage/bdb/clib/strdup.c b/storage/bdb/clib/strdup.c
new file mode 100644
index 00000000000..e68623f1407
--- /dev/null
+++ b/storage/bdb/clib/strdup.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: strdup.c,v 1.5 2002/05/01 18:40:05 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+/*
+ * strdup --
+ *
+ * PUBLIC: #ifndef HAVE_STRDUP
+ * PUBLIC: char *strdup __P((const char *));
+ * PUBLIC: #endif
+ */
+char *
+strdup(str)
+ const char *str;
+{
+ size_t len;
+ char *copy;
+
+ len = strlen(str) + 1;
+ if (!(copy = malloc((u_int)len)))
+ return (NULL);
+ memcpy(copy, str, len);
+ return (copy);
+}
diff --git a/storage/bdb/clib/strerror.c b/storage/bdb/clib/strerror.c
new file mode 100644
index 00000000000..06c28946b88
--- /dev/null
+++ b/storage/bdb/clib/strerror.c
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: strerror.c,v 11.6 2002/01/11 15:51:29 bostic Exp $";
+#endif /* not lint */
+
+/*
+ * strerror --
+ * Return the string associated with an errno.
+ *
+ * PUBLIC: #ifndef HAVE_STRERROR
+ * PUBLIC: char *strerror __P((int));
+ * PUBLIC: #endif
+ */
+char *
+strerror(num)
+ int num;
+{
+ extern int sys_nerr;
+ extern char *sys_errlist[];
+#undef UPREFIX
+#define UPREFIX "Unknown error: "
+ static char ebuf[40] = UPREFIX; /* 64-bit number + slop */
+ int errnum;
+ char *p, *t, tmp[40];
+
+ errnum = num; /* convert to unsigned */
+ if (errnum < sys_nerr)
+ return(sys_errlist[errnum]);
+
+ /* Do this by hand, so we don't include stdio(3). */
+ t = tmp;
+ do {
+ *t++ = "0123456789"[errnum % 10];
+ } while (errnum /= 10);
+ for (p = ebuf + sizeof(UPREFIX) - 1;;) {
+ *p++ = *--t;
+ if (t <= tmp)
+ break;
+ }
+ return(ebuf);
+}
diff --git a/storage/bdb/clib/vsnprintf.c b/storage/bdb/clib/vsnprintf.c
new file mode 100644
index 00000000000..4ffea8cb0ad
--- /dev/null
+++ b/storage/bdb/clib/vsnprintf.c
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: vsnprintf.c,v 11.7 2002/01/11 15:51:29 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * vsnprintf --
+ * Bounded version of vsprintf.
+ *
+ * PUBLIC: #ifndef HAVE_VSNPRINTF
+ * PUBLIC: int vsnprintf __P((char *, size_t, const char *, va_list));
+ * PUBLIC: #endif
+ */
+#ifndef HAVE_VSNPRINTF
+int
+vsnprintf(str, n, fmt, ap)
+ char *str;
+ size_t n;
+ const char *fmt;
+ va_list ap;
+{
+ COMPQUIET(n, 0);
+
+#ifdef SPRINTF_RET_CHARPNT
+ (void)vsprintf(str, fmt, ap);
+ return (strlen(str));
+#else
+ return (vsprintf(str, fmt, ap));
+#endif
+}
+#endif
diff --git a/storage/bdb/common/db_byteorder.c b/storage/bdb/common/db_byteorder.c
new file mode 100644
index 00000000000..d42d8e6a958
--- /dev/null
+++ b/storage/bdb/common/db_byteorder.c
@@ -0,0 +1,74 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_byteorder.c,v 11.8 2002/02/01 18:15:29 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_isbigendian --
+ * Return 1 if big-endian (Motorola and Sparc), not little-endian
+ * (Intel and Vax). We do this work at run-time, rather than at
+ * configuration time so cross-compilation and general embedded
+ * system support is simpler.
+ *
+ * PUBLIC: int __db_isbigendian __P((void));
+ */
+int
+__db_isbigendian()
+{
+ union { /* From Harbison & Steele. */
+ long l;
+ char c[sizeof(long)];
+ } u;
+
+ u.l = 1;
+ return (u.c[sizeof(long) - 1] == 1);
+}
+
+/*
+ * __db_byteorder --
+ * Return if we need to do byte swapping, checking for illegal
+ * values.
+ *
+ * PUBLIC: int __db_byteorder __P((DB_ENV *, int));
+ */
+int
+__db_byteorder(dbenv, lorder)
+ DB_ENV *dbenv;
+ int lorder;
+{
+ int is_bigendian;
+
+ is_bigendian = __db_isbigendian();
+
+ switch (lorder) {
+ case 0:
+ break;
+ case 1234:
+ if (is_bigendian)
+ return (DB_SWAPBYTES);
+ break;
+ case 4321:
+ if (!is_bigendian)
+ return (DB_SWAPBYTES);
+ break;
+ default:
+ __db_err(dbenv,
+ "unsupported byte order, only big and little-endian supported");
+ return (EINVAL);
+ }
+ return (0);
+}
diff --git a/storage/bdb/common/db_err.c b/storage/bdb/common/db_err.c
new file mode 100644
index 00000000000..7c9ee3c4fde
--- /dev/null
+++ b/storage/bdb/common/db_err.c
@@ -0,0 +1,579 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_err.c,v 11.80 2002/07/30 01:21:53 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+/*
+ * __db_fchk --
+ * General flags checking routine.
+ *
+ * PUBLIC: int __db_fchk __P((DB_ENV *, const char *, u_int32_t, u_int32_t));
+ */
+int
+__db_fchk(dbenv, name, flags, ok_flags)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags, ok_flags;
+{
+ return (LF_ISSET(~ok_flags) ? __db_ferr(dbenv, name, 0) : 0);
+}
+
+/*
+ * __db_fcchk --
+ * General combination flags checking routine.
+ *
+ * PUBLIC: int __db_fcchk
+ * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, u_int32_t, u_int32_t));
+ */
+int
+__db_fcchk(dbenv, name, flags, flag1, flag2)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags, flag1, flag2;
+{
+ return (LF_ISSET(flag1) &&
+ LF_ISSET(flag2) ? __db_ferr(dbenv, name, 1) : 0);
+}
+
+/*
+ * __db_ferr --
+ * Common flag errors.
+ *
+ * PUBLIC: int __db_ferr __P((const DB_ENV *, const char *, int));
+ */
+int
+__db_ferr(dbenv, name, iscombo)
+ const DB_ENV *dbenv;
+ const char *name;
+ int iscombo;
+{
+ __db_err(dbenv, "illegal flag %sspecified to %s",
+ iscombo ? "combination " : "", name);
+ return (EINVAL);
+}
+
+/*
+ * __db_pgerr --
+ * Error when unable to retrieve a specified page.
+ *
+ * PUBLIC: void __db_pgerr __P((DB *, db_pgno_t, int));
+ */
+void
+__db_pgerr(dbp, pgno, errval)
+ DB *dbp;
+ db_pgno_t pgno;
+ int errval;
+{
+ /*
+ * Three things are certain:
+ * Death, taxes, and lost data.
+ * Guess which has occurred.
+ */
+ __db_err(dbp->dbenv,
+ "unable to create/retrieve page %lu", (u_long)pgno);
+ (void)__db_panic(dbp->dbenv, errval);
+}
+
+/*
+ * __db_pgfmt --
+ * Error when a page has the wrong format.
+ *
+ * PUBLIC: int __db_pgfmt __P((DB_ENV *, db_pgno_t));
+ */
+int
+__db_pgfmt(dbenv, pgno)
+ DB_ENV *dbenv;
+ db_pgno_t pgno;
+{
+ __db_err(dbenv, "page %lu: illegal page type or format", (u_long)pgno);
+ return (__db_panic(dbenv, EINVAL));
+}
+
+/*
+ * __db_eopnotsup --
+ * Common operation not supported message.
+ *
+ * PUBLIC: int __db_eopnotsup __P((const DB_ENV *));
+ */
+int
+__db_eopnotsup(dbenv)
+ const DB_ENV *dbenv;
+{
+ __db_err(dbenv, "operation not supported");
+#ifdef EOPNOTSUPP
+ return (EOPNOTSUPP);
+#else
+ return (EINVAL);
+#endif
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * __db_assert --
+ * Error when an assertion fails. Only checked if #DIAGNOSTIC defined.
+ *
+ * PUBLIC: #ifdef DIAGNOSTIC
+ * PUBLIC: void __db_assert __P((const char *, const char *, int));
+ * PUBLIC: #endif
+ */
+void
+__db_assert(failedexpr, file, line)
+ const char *failedexpr, *file;
+ int line;
+{
+ (void)fprintf(stderr,
+ "__db_assert: \"%s\" failed: file \"%s\", line %d\n",
+ failedexpr, file, line);
+ (void)fflush(stderr);
+
+ /* We want a stack trace of how this could possibly happen. */
+ abort();
+
+ /* NOTREACHED */
+}
+#endif
+
+/*
+ * __db_panic_msg --
+ * Just report that someone else panicked.
+ *
+ * PUBLIC: int __db_panic_msg __P((DB_ENV *));
+ */
+int
+__db_panic_msg(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "fatal region error detected; run recovery");
+ return (DB_RUNRECOVERY);
+}
+
+/*
+ * __db_panic --
+ * Lock out the tree due to unrecoverable error.
+ *
+ * PUBLIC: int __db_panic __P((DB_ENV *, int));
+ */
+int
+__db_panic(dbenv, errval)
+ DB_ENV *dbenv;
+ int errval;
+{
+ if (dbenv != NULL) {
+ PANIC_SET(dbenv, 1);
+
+ dbenv->panic_errval = errval;
+
+ __db_err(dbenv, "PANIC: %s", db_strerror(errval));
+
+ if (dbenv->db_paniccall != NULL)
+ dbenv->db_paniccall(dbenv, errval);
+ }
+
+#if defined(DIAGNOSTIC) && !defined(CONFIG_TEST)
+ /*
+ * We want a stack trace of how this could possibly happen.
+ *
+ * Don't drop core if it's the test suite -- it's reasonable for the
+ * test suite to check to make sure that DB_RUNRECOVERY is returned
+ * under certain conditions.
+ */
+ abort();
+#endif
+
+ /*
+ * Chaos reigns within.
+ * Reflect, repent, and reboot.
+ * Order shall return.
+ */
+ return (DB_RUNRECOVERY);
+}
+
+/*
+ * db_strerror --
+ * ANSI C strerror(3) for DB.
+ *
+ * EXTERN: char *db_strerror __P((int));
+ */
+char *
+db_strerror(error)
+ int error;
+{
+ if (error == 0)
+ return ("Successful return: 0");
+ if (error > 0)
+ return (strerror(error));
+
+ /*
+ * !!!
+ * The Tcl API requires that some of these return strings be compared
+ * against strings stored in application scripts. So, any of these
+ * errors that do not invariably result in a Tcl exception may not be
+ * altered.
+ */
+ switch (error) {
+ case DB_DONOTINDEX:
+ return ("DB_DONOTINDEX: Secondary index callback returns null");
+ case DB_KEYEMPTY:
+ return ("DB_KEYEMPTY: Non-existent key/data pair");
+ case DB_KEYEXIST:
+ return ("DB_KEYEXIST: Key/data pair already exists");
+ case DB_LOCK_DEADLOCK:
+ return
+ ("DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock");
+ case DB_LOCK_NOTGRANTED:
+ return ("DB_LOCK_NOTGRANTED: Lock not granted");
+ case DB_NOSERVER:
+ return ("DB_NOSERVER: Fatal error, no server");
+ case DB_NOSERVER_HOME:
+ return ("DB_NOSERVER_HOME: Home unrecognized at server");
+ case DB_NOSERVER_ID:
+ return ("DB_NOSERVER_ID: Identifier unrecognized at server");
+ case DB_NOTFOUND:
+ return ("DB_NOTFOUND: No matching key/data pair found");
+ case DB_OLD_VERSION:
+ return ("DB_OLDVERSION: Database requires a version upgrade");
+ case DB_PAGE_NOTFOUND:
+ return ("DB_PAGE_NOTFOUND: Requested page not found");
+ case DB_REP_DUPMASTER:
+ return ("DB_REP_DUPMASTER: A second master site appeared");
+ case DB_REP_HOLDELECTION:
+ return ("DB_REP_HOLDELECTION: Need to hold an election");
+ case DB_REP_NEWMASTER:
+ return ("DB_REP_NEWMASTER: A new master has declared itself");
+ case DB_REP_NEWSITE:
+ return ("DB_REP_NEWSITE: A new site has entered the system");
+ case DB_REP_OUTDATED:
+ return
+ ("DB_REP_OUTDATED: Insufficient logs on master to recover");
+ case DB_REP_UNAVAIL:
+ return ("DB_REP_UNAVAIL: Unable to elect a master");
+ case DB_RUNRECOVERY:
+ return ("DB_RUNRECOVERY: Fatal error, run database recovery");
+ case DB_SECONDARY_BAD:
+ return
+ ("DB_SECONDARY_BAD: Secondary index item missing from primary");
+ case DB_VERIFY_BAD:
+ return ("DB_VERIFY_BAD: Database verification failed");
+ default: {
+ /*
+ * !!!
+ * Room for a 64-bit number + slop. This buffer is only used
+ * if we're given an unknown error, which should never happen.
+ * Note, however, we're no longer thread-safe if it does.
+ */
+ static char ebuf[40];
+
+ (void)snprintf(ebuf, sizeof(ebuf), "Unknown error: %d", error);
+ return (ebuf);
+ }
+ }
+}
+
+/*
+ * __db_err --
+ * Standard DB error routine. The same as errx, except we don't write
+ * to stderr if no output mechanism was specified.
+ *
+ * PUBLIC: void __db_err __P((const DB_ENV *, const char *, ...));
+ */
+void
+#ifdef __STDC__
+__db_err(const DB_ENV *dbenv, const char *fmt, ...)
+#else
+__db_err(dbenv, fmt, va_alist)
+ const DB_ENV *dbenv;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, 0, 0, 0, fmt);
+}
+
+/*
+ * __db_errcall --
+ * Do the error message work for callback functions.
+ *
+ * PUBLIC: void __db_errcall
+ * PUBLIC: __P((const DB_ENV *, int, int, const char *, va_list));
+ */
+void
+__db_errcall(dbenv, error, error_set, fmt, ap)
+ const DB_ENV *dbenv;
+ int error, error_set;
+ const char *fmt;
+ va_list ap;
+{
+ char *p;
+ char errbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
+
+ p = errbuf;
+ if (fmt != NULL)
+ p += vsnprintf(errbuf, sizeof(errbuf), fmt, ap);
+ if (error_set)
+ p += snprintf(p,
+ sizeof(errbuf) - (p - errbuf), ": %s", db_strerror(error));
+ /*
+ * !!!
+ * We're potentially manipulating strings handed us by the application,
+ * and on systems without a real snprintf() the sprintf() calls could
+ * have overflowed the buffer. We can't do anything about it now, but
+ * we don't want to return control to the application, we might have
+ * overwritten the stack with a Trojan horse. We're not trying to do
+ * anything recoverable here because systems without snprintf support
+ * are pretty rare anymore.
+ */
+ if ((size_t)(p - errbuf) > sizeof(errbuf)) {
+ (void)fprintf(stderr,
+ "Berkeley DB: error callback interface buffer overflow\n");
+ (void)fflush(stderr);
+
+ abort();
+ /* NOTREACHED */
+ }
+
+ dbenv->db_errcall(dbenv->db_errpfx, errbuf);
+}
+
+/*
+ * __db_errfile --
+ * Do the error message work for FILE *s.
+ *
+ * PUBLIC: void __db_errfile
+ * PUBLIC: __P((const DB_ENV *, int, int, const char *, va_list));
+ */
+void
+__db_errfile(dbenv, error, error_set, fmt, ap)
+ const DB_ENV *dbenv;
+ int error, error_set;
+ const char *fmt;
+ va_list ap;
+{
+ FILE *fp;
+
+ fp = dbenv == NULL ||
+ dbenv->db_errfile == NULL ? stderr : dbenv->db_errfile;
+
+ if (dbenv != NULL && dbenv->db_errpfx != NULL)
+ (void)fprintf(fp, "%s: ", dbenv->db_errpfx);
+ if (fmt != NULL) {
+ (void)vfprintf(fp, fmt, ap);
+ if (error_set)
+ (void)fprintf(fp, ": ");
+ }
+ if (error_set)
+ (void)fprintf(fp, "%s", db_strerror(error));
+ (void)fprintf(fp, "\n");
+ (void)fflush(fp);
+}
+
+/*
+ * __db_logmsg --
+ * Write information into the DB log.
+ *
+ * PUBLIC: void __db_logmsg __P((const DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, const char *, ...));
+ */
+void
+#ifdef __STDC__
+__db_logmsg(const DB_ENV *dbenv,
+ DB_TXN *txnid, const char *opname, u_int32_t flags, const char *fmt, ...)
+#else
+__db_logmsg(dbenv, txnid, opname, flags, fmt, va_alist)
+ const DB_ENV *dbenv;
+ DB_TXN *txnid;
+ const char *opname, *fmt;
+ u_int32_t flags;
+ va_dcl
+#endif
+{
+ DBT opdbt, msgdbt;
+ DB_LSN lsn;
+ va_list ap;
+ char __logbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
+
+ if (!LOGGING_ON(dbenv))
+ return;
+
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
+ memset(&opdbt, 0, sizeof(opdbt));
+ opdbt.data = (void *)opname;
+ opdbt.size = (u_int32_t)(strlen(opname) + 1);
+
+ memset(&msgdbt, 0, sizeof(msgdbt));
+ msgdbt.data = __logbuf;
+ msgdbt.size = vsnprintf(__logbuf, sizeof(__logbuf), fmt, ap);
+
+ /*
+ * XXX
+ * Explicitly discard the const. Otherwise, we have to const DB_ENV
+ * references throughout the logging subsystem.
+ */
+ __db_debug_log(
+ (DB_ENV *)dbenv, txnid, &lsn, flags, &opdbt, -1, &msgdbt, NULL, 0);
+
+ va_end(ap);
+}
+
+/*
+ * __db_unknown_flag -- report internal error
+ *
+ * PUBLIC: int __db_unknown_flag __P((DB_ENV *, char *, u_int32_t));
+ */
+int
+__db_unknown_flag(dbenv, routine, flag)
+ DB_ENV *dbenv;
+ char *routine;
+ u_int32_t flag;
+{
+ __db_err(dbenv, "%s: Unknown flag: 0x%x", routine, flag);
+ DB_ASSERT(0);
+ return (EINVAL);
+}
+
+/*
+ * __db_unknown_type -- report internal error
+ *
+ * PUBLIC: int __db_unknown_type __P((DB_ENV *, char *, DBTYPE));
+ */
+int
+__db_unknown_type(dbenv, routine, type)
+ DB_ENV *dbenv;
+ char *routine;
+ DBTYPE type;
+{
+ __db_err(dbenv, "%s: Unknown db type: 0x%x", routine, type);
+ DB_ASSERT(0);
+ return (EINVAL);
+}
+
+/*
+ * __db_check_txn --
+ * Check for common transaction errors.
+ *
+ * PUBLIC: int __db_check_txn __P((DB *, DB_TXN *, u_int32_t, int));
+ */
+int
+__db_check_txn(dbp, txn, assoc_lid, read_op)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t assoc_lid;
+ int read_op;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * If we are in recovery or aborting a transaction, then we
+ * don't need to enforce the rules about dbp's not allowing
+ * transactional operations in non-transactional dbps and
+ * vica-versa. This happens all the time as the dbp during
+ * an abort may be transactional, but we undo operations
+ * outside a transaction since we're aborting.
+ */
+ if (IS_RECOVERING(dbenv) || F_ISSET(dbp, DB_AM_RECOVER))
+ return (0);
+
+ /*
+ * Check for common transaction errors:
+ * Failure to pass a transaction handle to a DB operation
+ * Failure to configure the DB handle in a proper environment
+ * Operation on a handle whose open commit hasn't completed.
+ *
+ * Read operations don't require a txn even if we've used one before
+ * with this handle, although if they do have a txn, we'd better be
+ * prepared for it.
+ */
+ if (txn == NULL) {
+ if (!read_op && F_ISSET(dbp, DB_AM_TXN)) {
+ __db_err(dbenv,
+ "DB handle previously used in transaction, missing transaction handle");
+ return (EINVAL);
+ }
+
+ if (dbp->cur_lid >= TXN_MINIMUM)
+ goto open_err;
+ } else {
+ if (dbp->cur_lid >= TXN_MINIMUM && dbp->cur_lid != txn->txnid)
+ goto open_err;
+
+ if (!TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ if (!F_ISSET(dbp, DB_AM_TXN)) {
+ __db_err(dbenv,
+ "Transaction specified for a DB handle opened outside a transaction");
+ return (EINVAL);
+ }
+ }
+
+ /*
+ * If dbp->associate_lid is not DB_LOCK_INVALIDID, that means we're in
+ * the middle of a DB->associate with DB_CREATE (i.e., a secondary index
+ * creation).
+ *
+ * In addition to the usual transaction rules, we need to lock out
+ * non-transactional updates that aren't part of the associate (and
+ * thus are using some other locker ID).
+ *
+ * Transactional updates should simply block; from the time we
+ * decide to build the secondary until commit, we'll hold a write
+ * lock on all of its pages, so it should be safe to attempt to update
+ * the secondary in another transaction (presumably by updating the
+ * primary).
+ */
+ if (!read_op && dbp->associate_lid != DB_LOCK_INVALIDID &&
+ txn != NULL && dbp->associate_lid != assoc_lid) {
+ __db_err(dbenv,
+ "Operation forbidden while secondary index is being created");
+ return (EINVAL);
+ }
+
+ return (0);
+open_err:
+ __db_err(dbenv,
+ "Transaction that opened the DB handle is still active");
+ return (EINVAL);
+}
+
+/*
+ * __db_not_txn_env --
+ * DB handle must be in an environment that supports transactions.
+ *
+ * PUBLIC: int __db_not_txn_env __P((DB_ENV *));
+ */
+int
+__db_not_txn_env(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "DB environment not configured for transactions");
+ return (EINVAL);
+}
diff --git a/storage/bdb/common/db_getlong.c b/storage/bdb/common/db_getlong.c
new file mode 100644
index 00000000000..6ba8ebfcdaa
--- /dev/null
+++ b/storage/bdb/common/db_getlong.c
@@ -0,0 +1,154 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_getlong.c,v 11.18 2002/03/28 20:13:33 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_getlong --
+ * Return a long value inside of basic parameters.
+ *
+ * PUBLIC: int __db_getlong
+ * PUBLIC: __P((DB *, const char *, char *, long, long, long *));
+ */
+int
+__db_getlong(dbp, progname, p, min, max, storep)
+ DB *dbp;
+ const char *progname;
+ char *p;
+ long min, max, *storep;
+{
+ long val;
+ char *end;
+
+ __os_set_errno(0);
+ val = strtol(p, &end, 10);
+ if ((val == LONG_MIN || val == LONG_MAX) &&
+ __os_get_errno() == ERANGE) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, p, strerror(ERANGE));
+ else
+ dbp->err(dbp, ERANGE, "%s", p);
+ return (1);
+ }
+ if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Invalid numeric argument\n", progname, p);
+ else
+ dbp->errx(dbp, "%s: Invalid numeric argument", p);
+ return (1);
+ }
+ if (val < min) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Less than minimum value (%ld)\n",
+ progname, p, min);
+ else
+ dbp->errx(dbp,
+ "%s: Less than minimum value (%ld)", p, min);
+ return (1);
+ }
+ if (val > max) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Greater than maximum value (%ld)\n",
+ progname, p, max);
+ else
+ dbp->errx(dbp,
+ "%s: Greater than maximum value (%ld)", p, max);
+ return (1);
+ }
+ *storep = val;
+ return (0);
+}
+
+/*
+ * __db_getulong --
+ * Return an unsigned long value inside of basic parameters.
+ *
+ * PUBLIC: int __db_getulong
+ * PUBLIC: __P((DB *, const char *, char *, u_long, u_long, u_long *));
+ */
+int
+__db_getulong(dbp, progname, p, min, max, storep)
+ DB *dbp;
+ const char *progname;
+ char *p;
+ u_long min, max, *storep;
+{
+#if !defined(HAVE_STRTOUL)
+ COMPQUIET(min, 0);
+
+ return (__db_getlong(dbp, progname, p, 0, max, (long *)storep));
+#else
+ u_long val;
+ char *end;
+
+ __os_set_errno(0);
+ val = strtoul(p, &end, 10);
+ if (val == ULONG_MAX && __os_get_errno() == ERANGE) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: %s\n", progname, p, strerror(ERANGE));
+ else
+ dbp->err(dbp, ERANGE, "%s", p);
+ return (1);
+ }
+ if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Invalid numeric argument\n", progname, p);
+ else
+ dbp->errx(dbp, "%s: Invalid numeric argument", p);
+ return (1);
+ }
+ if (val < min) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Less than minimum value (%lu)\n",
+ progname, p, min);
+ else
+ dbp->errx(dbp,
+ "%s: Less than minimum value (%lu)", p, min);
+ return (1);
+ }
+
+ /*
+ * We allow a 0 to substitute as a max value for ULONG_MAX because
+ * 1) accepting only a 0 value is unlikely to be necessary, and 2)
+ * we don't want callers to have to use ULONG_MAX explicitly, as it
+ * may not exist on all platforms.
+ */
+ if (max != 0 && val > max) {
+ if (dbp == NULL)
+ fprintf(stderr,
+ "%s: %s: Greater than maximum value (%lu)\n",
+ progname, p, max);
+ else
+ dbp->errx(dbp,
+ "%s: Greater than maximum value (%lu)", p, max);
+ return (1);
+ }
+ *storep = val;
+ return (0);
+#endif /* !defined(HAVE_STRTOUL) */
+}
diff --git a/storage/bdb/common/db_idspace.c b/storage/bdb/common/db_idspace.c
new file mode 100644
index 00000000000..588ffd9fca9
--- /dev/null
+++ b/storage/bdb/common/db_idspace.c
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_idspace.c,v 1.5 2002/02/01 18:15:29 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+
+static int __db_idcmp __P((const void *, const void *));
+
+static int
+__db_idcmp(a, b)
+ const void *a;
+ const void *b;
+{
+ u_int32_t i, j;
+
+ i = *(u_int32_t *)a;
+ j = *(u_int32_t *)b;
+
+ if (i < j)
+ return (-1);
+ else if (i > j)
+ return (1);
+ else
+ return (0);
+}
+
+/*
+ * __db_idspace --
+ *
+ * On input, minp and maxp contain the minimum and maximum valid values for
+ * the name space and on return, they contain the minimum and maximum ids
+ * available (by finding the biggest gap).
+ *
+ * PUBLIC: void __db_idspace __P((u_int32_t *, int, u_int32_t *, u_int32_t *));
+ */
+void
+__db_idspace(inuse, n, minp, maxp)
+ u_int32_t *inuse;
+ int n;
+ u_int32_t *minp, *maxp;
+{
+ int i, low;
+ u_int32_t gap, t;
+
+ /* A single locker ID is a special case. */
+ if (n == 1) {
+ /*
+ * If the single item in use is the last one in the range,
+ * then we've got to perform wrap which means that we set
+ * the min to the minimum ID, which is what we came in with,
+ * so we don't do anything.
+ */
+ if (inuse[0] != *maxp)
+ *minp = inuse[0];
+ *maxp = inuse[0] - 1;
+ return;
+ }
+
+ gap = 0;
+ low = 0;
+ qsort(inuse, n, sizeof(u_int32_t), __db_idcmp);
+ for (i = 0; i < n - 1; i++)
+ if ((t = (inuse[i + 1] - inuse[i])) > gap) {
+ gap = t;
+ low = i;
+ }
+
+ /* Check for largest gap at the end of the space. */
+ if ((*maxp - inuse[n - 1]) + (inuse[0] - *minp) > gap) {
+ /* Do same check as we do in the n == 1 case. */
+ if (inuse[n - 1] != *maxp)
+ *minp = inuse[n - 1];
+ *maxp = inuse[0];
+ } else {
+ *minp = inuse[low];
+ *maxp = inuse[low + 1];
+ }
+}
diff --git a/storage/bdb/common/db_log2.c b/storage/bdb/common/db_log2.c
new file mode 100644
index 00000000000..cdd87dda11d
--- /dev/null
+++ b/storage/bdb/common/db_log2.c
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_log2.c,v 11.7 2002/02/01 18:15:30 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * PUBLIC: u_int32_t __db_log2 __P((u_int32_t));
+ */
+u_int32_t
+__db_log2(num)
+ u_int32_t num;
+{
+ u_int32_t i, limit;
+
+ limit = 1;
+ for (i = 0; limit < num; limit = limit << 1)
+ ++i;
+ return (i);
+}
diff --git a/storage/bdb/common/util_arg.c b/storage/bdb/common/util_arg.c
new file mode 100644
index 00000000000..e034e3bd194
--- /dev/null
+++ b/storage/bdb/common/util_arg.c
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: util_arg.c,v 1.4 2002/02/01 18:15:30 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+static char *__db_strsep __P((char **, const char *));
+
+/*
+ * __db_util_arg --
+ * Convert a string into an argc/argv pair.
+ *
+ * PUBLIC: int __db_util_arg __P((char *, char *, int *, char ***));
+ */
+int
+__db_util_arg(arg0, str, argcp, argvp)
+ char *arg0, *str, ***argvp;
+ int *argcp;
+{
+ int n, ret;
+ char **ap, **argv;
+
+#define MAXARGS 25
+ if ((ret =
+ __os_malloc(NULL, (MAXARGS + 1) * sizeof(char **), &argv)) != 0)
+ return (ret);
+
+ ap = argv;
+ *ap++ = arg0;
+ for (n = 1; (*ap = __db_strsep(&str, " \t")) != NULL;)
+ if (**ap != '\0') {
+ ++ap;
+ if (++n == MAXARGS)
+ break;
+ }
+ *ap = NULL;
+
+ *argcp = ap - argv;
+ *argvp = argv;
+
+ return (0);
+}
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Get next token from string *stringp, where tokens are possibly-empty
+ * strings separated by characters from delim.
+ *
+ * Writes NULs into the string at *stringp to end tokens.
+ * delim need not remain constant from call to call.
+ * On return, *stringp points past the last NUL written (if there might
+ * be further tokens), or is NULL (if there are definitely no more tokens).
+ *
+ * If *stringp is NULL, strsep returns NULL.
+ */
+static char *
+__db_strsep(stringp, delim)
+ char **stringp;
+ const char *delim;
+{
+ const char *spanp;
+ int c, sc;
+ char *s, *tok;
+
+ if ((s = *stringp) == NULL)
+ return (NULL);
+ for (tok = s;;) {
+ c = *s++;
+ spanp = delim;
+ do {
+ if ((sc = *spanp++) == c) {
+ if (c == 0)
+ s = NULL;
+ else
+ s[-1] = 0;
+ *stringp = s;
+ return (tok);
+ }
+ } while (sc != 0);
+ }
+ /* NOTREACHED */
+}
diff --git a/storage/bdb/common/util_cache.c b/storage/bdb/common/util_cache.c
new file mode 100644
index 00000000000..5ca88665cc7
--- /dev/null
+++ b/storage/bdb/common/util_cache.c
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: util_cache.c,v 1.3 2002/04/04 18:50:10 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_util_cache --
+ * Compute if we have enough cache.
+ *
+ * PUBLIC: int __db_util_cache __P((DB_ENV *, DB *, u_int32_t *, int *));
+ */
+int
+__db_util_cache(dbenv, dbp, cachep, resizep)
+ DB_ENV *dbenv;
+ DB *dbp;
+ u_int32_t *cachep;
+ int *resizep;
+{
+ DBTYPE type;
+ DB_BTREE_STAT *bsp;
+ DB_HASH_STAT *hsp;
+ DB_QUEUE_STAT *qsp;
+ u_int32_t pgsize;
+ int ret;
+ void *sp;
+
+ /*
+ * The current cache size is in cachep. If it's insufficient, set
+ * the memory referenced by resizep to 1 and set cachep to the minimum
+ * size needed.
+ */
+ if ((ret = dbp->get_type(dbp, &type)) != 0) {
+ dbenv->err(dbenv, ret, "DB->get_type");
+ return (ret);
+ }
+
+ if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) {
+ dbenv->err(dbenv, ret, "DB->stat");
+ return (ret);
+ }
+
+ switch (type) {
+ case DB_QUEUE:
+ qsp = (DB_QUEUE_STAT *)sp;
+ pgsize = qsp->qs_pagesize;
+ break;
+ case DB_HASH:
+ hsp = (DB_HASH_STAT *)sp;
+ pgsize = hsp->hash_pagesize;
+ break;
+ case DB_BTREE:
+ case DB_RECNO:
+ bsp = (DB_BTREE_STAT *)sp;
+ pgsize = bsp->bt_pagesize;
+ break;
+ default:
+ dbenv->err(dbenv, ret, "unknown database type: %d", type);
+ return (EINVAL);
+ }
+ free(sp);
+
+ /*
+ * Make sure our current cache is big enough. We want at least
+ * DB_MINPAGECACHE pages in the cache.
+ */
+ if ((*cachep / pgsize) < DB_MINPAGECACHE) {
+ *resizep = 1;
+ *cachep = pgsize * DB_MINPAGECACHE;
+ } else
+ *resizep = 0;
+
+ return (0);
+}
diff --git a/storage/bdb/common/util_log.c b/storage/bdb/common/util_log.c
new file mode 100644
index 00000000000..ae215fca64a
--- /dev/null
+++ b/storage/bdb/common/util_log.c
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: util_log.c,v 1.11 2002/02/01 18:15:30 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_util_logset --
+ * Log that we're running.
+ *
+ * PUBLIC: int __db_util_logset __P((const char *, char *));
+ */
+int
+__db_util_logset(progname, fname)
+ const char *progname;
+ char *fname;
+{
+ FILE *fp;
+ time_t now;
+ u_int32_t id;
+
+ /* Truncate/create the pid file; any failure reports errno and returns 1. */
+ if ((fp = fopen(fname, "w")) == NULL)
+  goto err;
+
+ (void)time(&now);
+ /* NOTE(review): __os_id appears to supply the process id -- confirm in os layer. */
+ __os_id(&id);
+ /* ctime() output is already newline-terminated, so none is added here. */
+ fprintf(fp, "%s: %lu %s", progname, (u_long)id, ctime(&now));
+
+ if (fclose(fp) == EOF)
+  goto err;
+
+ return (0);
+
+err: fprintf(stderr, "%s: %s: %s\n", progname, fname, strerror(errno));
+ return (1);
+}
diff --git a/storage/bdb/common/util_sig.c b/storage/bdb/common/util_sig.c
new file mode 100644
index 00000000000..9714427ad33
--- /dev/null
+++ b/storage/bdb/common/util_sig.c
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: util_sig.c,v 1.7 2002/02/02 17:04:42 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <signal.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * Signal-handler state.  Bug fix: volatile sig_atomic_t is the only
+ * object type the C standard guarantees safe to write from an async
+ * signal handler; the original plain "static int" was formally unsafe.
+ */
+static volatile sig_atomic_t interrupt;
+static void onint __P((int));
+
+/*
+ * onint --
+ * Interrupt signal handler.
+ */
+static void
+onint(signo)
+ int signo;
+{
+ /* Remember which signal fired; map an impossible 0 to SIGINT. */
+ if ((interrupt = signo) == 0)
+  interrupt = SIGINT;
+}
+
+/*
+ * __db_util_siginit --
+ *
+ * PUBLIC: void __db_util_siginit __P((void));
+ */
+void
+__db_util_siginit()
+{
+ /*
+  * Initialize the set of signals for which we want to clean up.
+  * Generally, we try not to leave the shared regions locked if
+  * we can.
+  *
+  * SIGHUP and SIGPIPE are guarded because they do not exist on
+  * all supported platforms (e.g., Windows).
+  */
+#ifdef SIGHUP
+ (void)signal(SIGHUP, onint);
+#endif
+ (void)signal(SIGINT, onint);
+#ifdef SIGPIPE
+ (void)signal(SIGPIPE, onint);
+#endif
+ (void)signal(SIGTERM, onint);
+}
+
+/*
+ * __db_util_interrupted --
+ * Return if interrupted.
+ *
+ * PUBLIC: int __db_util_interrupted __P((void));
+ */
+int
+__db_util_interrupted()
+{
+ /* Non-zero iff a signal has been caught by onint(). */
+ return (interrupt != 0);
+}
+
+/*
+ * __db_util_sigresend --
+ *
+ * PUBLIC: void __db_util_sigresend __P((void));
+ */
+void
+__db_util_sigresend()
+{
+ /*
+  * Resend any caught signal: restore the default disposition and
+  * re-raise it so the process exits with the correct signal status.
+  */
+ if (interrupt != 0) {
+  (void)signal(interrupt, SIG_DFL);
+  (void)raise(interrupt);
+  /* NOTREACHED */
+ }
+}
diff --git a/storage/bdb/cxx/cxx_db.cpp b/storage/bdb/cxx/cxx_db.cpp
new file mode 100644
index 00000000000..7e50a9b3f27
--- /dev/null
+++ b/storage/bdb/cxx/cxx_db.cpp
@@ -0,0 +1,605 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_db.cpp,v 11.71 2002/08/26 22:13:36 mjc Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// Helper macros for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(db, arg)")
+//
+#define DB_METHOD(_name, _argspec, _arglist, _retok) \
+int Db::_name _argspec \
+{ \
+ int ret; \
+ DB *db = unwrap(this); \
+ \
+ ret = db->_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("Db::" # _name, ret, error_policy()); \
+ return (ret); \
+}
+
+#define DB_METHOD_CHECKED(_name, _cleanup, _argspec, _arglist, _retok) \
+int Db::_name _argspec \
+{ \
+ int ret; \
+ DB *db = unwrap(this); \
+ \
+ if (!db) { \
+ DB_ERROR("Db::" # _name, EINVAL, error_policy()); \
+ return (EINVAL); \
+ } \
+ if (_cleanup) \
+ cleanup(); \
+ ret = db->_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("Db::" # _name, ret, error_policy()); \
+ return (ret); \
+}
+
+#define DB_METHOD_QUIET(_name, _argspec, _arglist) \
+int Db::_name _argspec \
+{ \
+ DB *db = unwrap(this); \
+ \
+ return (db->_name _arglist); \
+}
+
+#define DB_METHOD_VOID(_name, _argspec, _arglist) \
+void Db::_name _argspec \
+{ \
+ DB *db = unwrap(this); \
+ \
+ db->_name _arglist; \
+}
+
+// A truism for the Db object is that there is a valid
+// DB handle from the constructor until close().
+// After the close, the DB handle is invalid and
+// no operations are permitted on the Db (other than
+// destructor). Leaving the Db handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow Db objects to be closed and reopened.
+// This implied always keeping a valid DB object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
+// Constructor.  All callback pointers start at 0; a null env means this
+// Db owns a private DB_ENV, created in initialize().
+Db::Db(DbEnv *env, u_int32_t flags)
+: imp_(0)
+, env_(env)
+, construct_error_(0)
+, flags_(0)
+, construct_flags_(flags)
+, append_recno_callback_(0)
+, associate_callback_(0)
+, bt_compare_callback_(0)
+, bt_prefix_callback_(0)
+, dup_compare_callback_(0)
+, feedback_callback_(0)
+, h_hash_callback_(0)
+{
+ // No environment supplied: mark that a private DbEnv is created in
+ // initialize() and deleted in cleanup().
+ if (env_ == 0)
+  flags_ |= DB_CXX_PRIVATE_ENV;
+
+ // Any constructor failure is recorded and re-reported by Db::open().
+ if ((construct_error_ = initialize()) != 0)
+  DB_ERROR("Db::Db", construct_error_, error_policy());
+}
+
+// If the DB handle is still open, we close it. This is to make stack
+// allocation of Db objects easier so that they are cleaned up in the error
+// path. If the environment was closed prior to this, it may cause a trap, but
+// an error message is generated during the environment close. Applications
+// should call close explicitly in normal (non-exceptional) cases to check the
+// return value.
+//
+Db::~Db()
+{
+ DB *db;
+
+ // If the handle was never closed, close it now (see comment above);
+ // any error from the close is necessarily ignored here.
+ db = unwrap(this);
+ if (db != NULL) {
+  cleanup();
+  (void)db->close(db, 0);
+ }
+}
+
+// private method to initialize during constructor.
+// initialize must create a backing DB object,
+// and if that creates a new DB_ENV, it must be tied to a new DbEnv.
+//
+int Db::initialize()
+{
+ DB *db;
+ DB_ENV *cenv = unwrap(env_);
+ int ret;
+ u_int32_t cxx_flags;
+
+ // C++-only flags must be masked off before calling the C layer.
+ cxx_flags = construct_flags_ & DB_CXX_NO_EXCEPTIONS;
+
+ // Create a new underlying DB object.
+ // We rely on the fact that if a NULL DB_ENV* is given,
+ // one is allocated by DB.
+ //
+ if ((ret = db_create(&db, cenv,
+ construct_flags_ & ~cxx_flags)) != 0)
+  return (ret);
+
+ // Associate the DB with this object.  api_internal is the C layer's
+ // back-pointer used by the callback glue to recover "this".
+ imp_ = wrap(db);
+ db->api_internal = this;
+
+ // Create a new DbEnv from a DB_ENV* if it was created locally.
+ // It is deleted in Db::close().
+ //
+ if ((flags_ & DB_CXX_PRIVATE_ENV) != 0)
+  env_ = new DbEnv(db->dbenv, cxx_flags);
+
+ return (0);
+}
+
+// private method to cleanup after destructor or during close.
+// If the environment was created by this Db object, we optionally
+// delete it, or return it so the caller can delete it after
+// last use.
+//
+// Called from both Db::close() and the destructor; must be idempotent,
+// hence the NULL check on the unwrapped handle.
+void Db::cleanup()
+{
+ DB *db = unwrap(this);
+
+ if (db != NULL) {
+  // extra safety: sever both directions of the Db <-> DB link
+  db->api_internal = 0;
+  imp_ = 0;
+
+  // we must dispose of the DbEnv object if
+  // we created it. This will be the case
+  // if a NULL DbEnv was passed into the constructor.
+  // The underlying DB_ENV object will be inaccessible
+  // after the close, so we must clean it up now.
+  //
+  if ((flags_ & DB_CXX_PRIVATE_ENV) != 0) {
+   env_->cleanup();
+   delete env_;
+   env_ = 0;
+  }
+ }
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int Db::error_policy()
+{
+ // With an attached environment, defer to its policy.
+ if (env_ != NULL)
+  return (env_->error_policy());
+
+ // No environment was attached, so the policy is deduced from the
+ // construction flags of this Db alone.
+ return ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0 ?
+     ON_ERROR_RETURN : ON_ERROR_THROW);
+}
+
+int Db::close(u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ // after a DB->close (no matter if success or failure),
+ // the underlying DB object must not be accessed,
+ // so we clean up in advance.
+ //
+ // NOTE(review): a second close() would find a NULL handle here and
+ // dereference it below -- callers must close a Db exactly once.
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((ret = db->close(db, flags)) != 0)
+  DB_ERROR("Db::close", ret, error_policy());
+
+ return (ret);
+}
+
+// The following cast implies that Dbc can be no larger than DBC
+DB_METHOD(cursor, (DbTxn *txnid, Dbc **cursorp, u_int32_t flags),
+ (db, unwrap(txnid), (DBC **)cursorp, flags),
+ DB_RETOK_STD)
+
+DB_METHOD(del, (DbTxn *txnid, Dbt *key, u_int32_t flags),
+ (db, unwrap(txnid), key, flags),
+ DB_RETOK_DBDEL)
+
+// Report an error with errno-style code; the DB_REAL_ERR macro consumes
+// the varargs starting from "format".
+void Db::err(int error, const char *format, ...)
+{
+ DB *db = unwrap(this);
+
+ DB_REAL_ERR(db->dbenv, error, 1, 1, format);
+}
+
+// Report an error without an error code (errx convention).
+void Db::errx(const char *format, ...)
+{
+ DB *db = unwrap(this);
+
+ DB_REAL_ERR(db->dbenv, 0, 0, 1, format);
+}
+
+DB_METHOD(fd, (int *fdp),
+ (db, fdp),
+ DB_RETOK_STD)
+
+int Db::get(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ ret = db->get(db, unwrap(txnid), key, value, flags);
+
+ if (!DB_RETOK_DBGET(ret)) {
+  // An ENOMEM with an overflowed user buffer gets the special
+  // DBT error path (DbMemoryException under throwing policy).
+  if (ret == ENOMEM && DB_OVERFLOWED_DBT(value))
+   DB_ERROR_DBT("Db::get", value, error_policy());
+  else
+   DB_ERROR("Db::get", ret, error_policy());
+ }
+
+ return (ret);
+}
+
+// Pass-through to DB->get_byteswapped; no error translation needed.
+int Db::get_byteswapped(int *isswapped)
+{
+ DB *db = (DB *)unwrapConst(this);
+ return (db->get_byteswapped(db, isswapped));
+}
+
+// Pass-through to DB->get_type; no error translation needed.
+int Db::get_type(DBTYPE *dbtype)
+{
+ DB *db = (DB *)unwrapConst(this);
+ return (db->get_type(db, dbtype));
+}
+
+// Dbc is a "compatible" subclass of DBC - that is, no virtual functions
+// or even extra data members, so these casts, although technically
+// non-portable, "should" always be okay.
+DB_METHOD(join, (Dbc **curslist, Dbc **cursorp, u_int32_t flags),
+ (db, (DBC **)curslist, (DBC **)cursorp, flags),
+ DB_RETOK_STD)
+
+DB_METHOD(key_range,
+ (DbTxn *txnid, Dbt *key, DB_KEY_RANGE *results, u_int32_t flags),
+ (db, unwrap(txnid), key, results, flags),
+ DB_RETOK_STD)
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB->open method.
+//
+int Db::open(DbTxn *txnid, const char *file, const char *database,
+ DBTYPE type, u_int32_t flags, int mode)
+{
+ int ret;
+ DB *db = unwrap(this);
+
+ // A constructor failure was only recorded there; surface it here so
+ // non-throwing callers see it on the first real operation.
+ if (construct_error_ != 0)
+  ret = construct_error_;
+ else
+  ret = db->open(db, unwrap(txnid), file, database, type, flags,
+  mode);
+
+ if (!DB_RETOK_STD(ret))
+  DB_ERROR("Db::open", ret, error_policy());
+
+ return (ret);
+}
+
+int Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *value, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ ret = db->pget(db, unwrap(txnid), key, pkey, value, flags);
+
+ /*
+  * The logic here is identical to Db::get, except that the primary
+  * key DBT can overflow too.  Bug fix: the original never tested
+  * pkey for DB_OVERFLOWED_DBT, so a too-small primary-key buffer
+  * missed the DBT-specific (DbMemoryException) error path.
+  */
+ if (!DB_RETOK_DBGET(ret)) {
+  if (ret == ENOMEM && DB_OVERFLOWED_DBT(value))
+   DB_ERROR_DBT("Db::pget", value, error_policy());
+  else if (ret == ENOMEM && DB_OVERFLOWED_DBT(pkey))
+   DB_ERROR_DBT("Db::pget", pkey, error_policy());
+  else
+   DB_ERROR("Db::pget", ret, error_policy());
+ }
+
+ return (ret);
+}
+
+DB_METHOD(put,
+ (DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags),
+ (db, unwrap(txnid), key, value, flags),
+ DB_RETOK_DBPUT)
+
+DB_METHOD_CHECKED(rename, 1,
+ (const char *file, const char *database, const char *newname,
+ u_int32_t flags),
+ (db, file, database, newname, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(remove, 1,
+ (const char *file, const char *database, u_int32_t flags),
+ (db, file, database, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(truncate, 0,
+ (DbTxn *txnid, u_int32_t *countp, u_int32_t flags),
+ (db, unwrap(txnid), countp, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(stat, 0,
+ (void *sp, u_int32_t flags), (db, sp, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(sync, 0,
+ (u_int32_t flags), (db, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(upgrade, 0,
+ (const char *name, u_int32_t flags), (db, name, flags), DB_RETOK_STD)
+
+////////////////////////////////////////////////////////////////////////
+//
+// callbacks
+//
+// *_intercept_c are 'glue' functions that must be declared
+// as extern "C" so to be typesafe. Using a C++ method, even
+// a static class method with 'correct' arguments, will not pass
+// the test; some picky compilers do not allow mixing of function
+// pointers to 'C' functions with function pointers to C++ functions.
+//
+// One wart with this scheme is that the *_callback_ method pointer
+// must be declared public to be accessible by the C intercept.
+// It's possible to accomplish the goal without this, and with
+// another public transfer method, but it's just too much overhead.
+// These callbacks are supposed to be *fast*.
+//
+// The DBTs we receive in these callbacks from the C layer may be
+// manufactured there, but we want to treat them as Dbts.
+// Technically speaking, these DBTs were not constructed as Dbts,
+// but it should be safe to cast them as such given that Dbt is a
+// *very* thin extension of the DBT. That is, Dbt has no additional
+// data elements, does not use virtual functions, virtual inheritance,
+// multiple inheritance, RTI, or any other language feature that
+// causes the structure to grow or be displaced. Although this may
+// sound risky, a design goal of C++ is complete structure
+// compatibility with C, and has the philosophy 'if you don't use it,
+// you shouldn't incur the overhead'. If the C/C++ compilers you're
+// using on a given machine do not have matching struct layouts, then
+// a lot more things will be broken than just this.
+//
+// The alternative, creating a Dbt here in the callback, and populating
+// it from the DBT, is just too slow and cumbersome to be very useful.
+
+// These macros avoid a lot of boilerplate code for callbacks
+
+#define DB_CALLBACK_C_INTERCEPT(_name, _rettype, _cargspec, \
+ _return, _cxxargs) \
+extern "C" _rettype _db_##_name##_intercept_c _cargspec \
+{ \
+ Db *cxxthis; \
+ \
+ DB_ASSERT(cthis != NULL); \
+ cxxthis = (Db *)cthis->api_internal; \
+ DB_ASSERT(cxxthis != NULL); \
+ DB_ASSERT(cxxthis->_name##_callback_ != 0); \
+ \
+ _return (*cxxthis->_name##_callback_) _cxxargs; \
+}
+
+#define DB_SET_CALLBACK(_cxxname, _name, _cxxargspec, _cb) \
+int Db::_cxxname _cxxargspec \
+{ \
+ DB *cthis = unwrap(this); \
+ \
+ _name##_callback_ = _cb; \
+ return ((*(cthis->_cxxname))(cthis, \
+ (_cb) ? _db_##_name##_intercept_c : NULL)); \
+}
+
+/* associate callback - doesn't quite fit the pattern because of the flags */
+DB_CALLBACK_C_INTERCEPT(associate,
+ int, (DB *cthis, const DBT *key, const DBT *data, DBT *retval),
+ return, (cxxthis, Dbt::get_const_Dbt(key), Dbt::get_const_Dbt(data),
+ Dbt::get_Dbt(retval)))
+
+int Db::associate(DbTxn *txn, Db *secondary, int (*callback)(Db *, const Dbt *,
+ const Dbt *, Dbt *), u_int32_t flags)
+{
+ DB *cthis = unwrap(this);
+
+ /* Since the secondary Db is used as the first argument
+  * to the callback, we store the C++ callback on it
+  * rather than on 'this'.
+  */
+ secondary->associate_callback_ = callback;
+ // A null callback clears the association glue in the C layer too.
+ return ((*(cthis->associate))(cthis, unwrap(txn), unwrap(secondary),
+ (callback) ? _db_associate_intercept_c : NULL, flags));
+}
+
+DB_CALLBACK_C_INTERCEPT(feedback,
+ void, (DB *cthis, int opcode, int pct),
+ /* no return */ (void), (cxxthis, opcode, pct))
+
+DB_SET_CALLBACK(set_feedback, feedback,
+ (void (*arg)(Db *cxxthis, int opcode, int pct)), arg)
+
+DB_CALLBACK_C_INTERCEPT(append_recno,
+ int, (DB *cthis, DBT *data, db_recno_t recno),
+ return, (cxxthis, Dbt::get_Dbt(data), recno))
+
+DB_SET_CALLBACK(set_append_recno, append_recno,
+ (int (*arg)(Db *cxxthis, Dbt *data, db_recno_t recno)), arg)
+
+DB_CALLBACK_C_INTERCEPT(bt_compare,
+ int, (DB *cthis, const DBT *data1, const DBT *data2),
+ return,
+ (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2)))
+
+DB_SET_CALLBACK(set_bt_compare, bt_compare,
+ (int (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg)
+
+DB_CALLBACK_C_INTERCEPT(bt_prefix,
+ size_t, (DB *cthis, const DBT *data1, const DBT *data2),
+ return,
+ (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2)))
+
+DB_SET_CALLBACK(set_bt_prefix, bt_prefix,
+ (size_t (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg)
+
+DB_CALLBACK_C_INTERCEPT(dup_compare,
+ int, (DB *cthis, const DBT *data1, const DBT *data2),
+ return,
+ (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2)))
+
+DB_SET_CALLBACK(set_dup_compare, dup_compare,
+ (int (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg)
+
+DB_CALLBACK_C_INTERCEPT(h_hash,
+ u_int32_t, (DB *cthis, const void *data, u_int32_t len),
+ return, (cxxthis, data, len))
+
+DB_SET_CALLBACK(set_h_hash, h_hash,
+ (u_int32_t (*arg)(Db *cxxthis, const void *data, u_int32_t len)), arg)
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+int _verify_callback_c(void *handle, const void *str_arg)
+{
+ char *str;
+ __DB_OSTREAMCLASS *out;
+
+ // handle is the output stream passed by Db::verify; str_arg is the
+ // message text produced by the C verifier.
+ str = (char *)str_arg;
+ out = (__DB_OSTREAMCLASS *)handle;
+
+ (*out) << str;
+ // Report stream failure as an I/O error so verification stops.
+ if (out->fail())
+  return (EIO);
+
+ return (0);
+}
+
+int Db::verify(const char *name, const char *subdb,
+ __DB_OSTREAMCLASS *ostr, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ // A closed/never-opened handle cannot be verified.
+ if (!db)
+  ret = EINVAL;
+ else
+  // Route verifier output through _verify_callback_c to ostr.
+  ret = __db_verify_internal(db, name, subdb, ostr,
+  _verify_callback_c, flags);
+
+ if (!DB_RETOK_STD(ret))
+  DB_ERROR("Db::verify", ret, error_policy());
+
+ return (ret);
+}
+
+DB_METHOD(set_bt_compare, (bt_compare_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_bt_maxkey, (u_int32_t bt_maxkey),
+ (db, bt_maxkey), DB_RETOK_STD)
+DB_METHOD(set_bt_minkey, (u_int32_t bt_minkey),
+ (db, bt_minkey), DB_RETOK_STD)
+DB_METHOD(set_bt_prefix, (bt_prefix_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_dup_compare, (dup_compare_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_encrypt, (const char *passwd, int flags),
+ (db, passwd, flags), DB_RETOK_STD)
+DB_METHOD_VOID(set_errfile, (FILE *errfile), (db, errfile))
+DB_METHOD_VOID(set_errpfx, (const char *errpfx), (db, errpfx))
+DB_METHOD(set_flags, (u_int32_t flags), (db, flags),
+ DB_RETOK_STD)
+DB_METHOD(set_h_ffactor, (u_int32_t h_ffactor),
+ (db, h_ffactor), DB_RETOK_STD)
+DB_METHOD(set_h_hash, (h_hash_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_h_nelem, (u_int32_t h_nelem),
+ (db, h_nelem), DB_RETOK_STD)
+DB_METHOD(set_lorder, (int db_lorder), (db, db_lorder),
+ DB_RETOK_STD)
+DB_METHOD(set_pagesize, (u_int32_t db_pagesize),
+ (db, db_pagesize), DB_RETOK_STD)
+DB_METHOD(set_re_delim, (int re_delim),
+ (db, re_delim), DB_RETOK_STD)
+DB_METHOD(set_re_len, (u_int32_t re_len),
+ (db, re_len), DB_RETOK_STD)
+DB_METHOD(set_re_pad, (int re_pad),
+ (db, re_pad), DB_RETOK_STD)
+DB_METHOD(set_re_source, (char *re_source),
+ (db, re_source), DB_RETOK_STD)
+DB_METHOD(set_q_extentsize, (u_int32_t extentsize),
+ (db, extentsize), DB_RETOK_STD)
+
+DB_METHOD_QUIET(set_alloc, (db_malloc_fcn_type malloc_fcn,
+ db_realloc_fcn_type realloc_fcn, db_free_fcn_type free_fcn),
+ (db, malloc_fcn, realloc_fcn, free_fcn))
+
+// Delegates to the (always non-null after construction) DbEnv.
+void Db::set_errcall(void (*arg)(const char *, char *))
+{
+ env_->set_errcall(arg);
+}
+
+// Read the application-private pointer stored on the underlying DB.
+void *Db::get_app_private() const
+{
+ return unwrapConst(this)->app_private;
+}
+
+// Store an application-private pointer on the underlying DB.
+void Db::set_app_private(void *value)
+{
+ unwrap(this)->app_private = value;
+}
+
+DB_METHOD(set_cachesize, (u_int32_t gbytes, u_int32_t bytes, int ncache),
+ (db, gbytes, bytes, ncache), DB_RETOK_STD)
+DB_METHOD(set_cache_priority, (DB_CACHE_PRIORITY priority),
+ (db, priority), DB_RETOK_STD)
+
+// Panic callbacks are an environment property; delegate to the DbEnv.
+int Db::set_paniccall(void (*callback)(DbEnv *, int))
+{
+ return (env_->set_paniccall(callback));
+}
+
+// Error streams are an environment property; delegate to the DbEnv.
+void Db::set_error_stream(__DB_OSTREAMCLASS *error_stream)
+{
+ env_->set_error_stream(error_stream);
+}
diff --git a/storage/bdb/cxx/cxx_dbc.cpp b/storage/bdb/cxx/cxx_dbc.cpp
new file mode 100644
index 00000000000..4d5844f922f
--- /dev/null
+++ b/storage/bdb/cxx/cxx_dbc.cpp
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_dbc.cpp,v 11.55 2002/07/03 21:03:52 bostic Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// Helper macro for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(db, arg)")
+//
+#define DBC_METHOD(_name, _argspec, _arglist, _retok) \
+int Dbc::_name _argspec \
+{ \
+ int ret; \
+ DBC *dbc = this; \
+ \
+ ret = dbc->c_##_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("Dbc::" # _name, ret, ON_ERROR_UNKNOWN); \
+ return (ret); \
+}
+
+// It's private, and should never be called, but VC4.0 needs it resolved
+//
+// Private and never called; defined only so VC4.0 can resolve it.
+Dbc::~Dbc()
+{
+}
+
+DBC_METHOD(close, (void), (dbc), DB_RETOK_STD)
+DBC_METHOD(count, (db_recno_t *countp, u_int32_t _flags),
+ (dbc, countp, _flags), DB_RETOK_STD)
+DBC_METHOD(del, (u_int32_t _flags),
+ (dbc, _flags), DB_RETOK_DBCDEL)
+
+int Dbc::dup(Dbc** cursorp, u_int32_t _flags)
+{
+ int ret;
+ DBC *dbc = this;
+ DBC *new_cursor = 0;
+
+ ret = dbc->c_dup(dbc, &new_cursor, _flags);
+
+ if (DB_RETOK_STD(ret))
+  // The following cast implies that Dbc can be no larger than DBC
+  *cursorp = (Dbc*)new_cursor;
+ else
+  // *cursorp is left untouched on failure.
+  DB_ERROR("Dbc::dup", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+int Dbc::get(Dbt* key, Dbt *data, u_int32_t _flags)
+{
+ int ret;
+ DBC *dbc = this;
+
+ ret = dbc->c_get(dbc, key, data, _flags);
+
+ if (!DB_RETOK_DBCGET(ret)) {
+  // ENOMEM with an overflowed user DBT gets the DBT-specific
+  // error path; key is checked before data.
+  if (ret == ENOMEM && DB_OVERFLOWED_DBT(key))
+   DB_ERROR_DBT("Dbc::get", key, ON_ERROR_UNKNOWN);
+  else if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+   DB_ERROR_DBT("Dbc::get", data, ON_ERROR_UNKNOWN);
+  else
+   DB_ERROR("Dbc::get", ret, ON_ERROR_UNKNOWN);
+ }
+
+ return (ret);
+}
+
+int Dbc::pget(Dbt* key, Dbt *pkey, Dbt *data, u_int32_t _flags)
+{
+ int ret;
+ DBC *dbc = this;
+
+ ret = dbc->c_pget(dbc, key, pkey, data, _flags);
+
+ /*
+  * Logic is the same as for Dbc::get, except the primary key DBT
+  * can overflow too.  Bug fix: the original never tested pkey for
+  * DB_OVERFLOWED_DBT, so a too-small primary-key buffer missed the
+  * DBT-specific error path.
+  */
+ if (!DB_RETOK_DBCGET(ret)) {
+  if (ret == ENOMEM && DB_OVERFLOWED_DBT(key))
+   DB_ERROR_DBT("Dbc::pget", key, ON_ERROR_UNKNOWN);
+  else if (ret == ENOMEM && DB_OVERFLOWED_DBT(pkey))
+   DB_ERROR_DBT("Dbc::pget", pkey, ON_ERROR_UNKNOWN);
+  else if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+   DB_ERROR_DBT("Dbc::pget", data, ON_ERROR_UNKNOWN);
+  else
+   DB_ERROR("Dbc::pget", ret, ON_ERROR_UNKNOWN);
+ }
+
+ return (ret);
+}
+
+DBC_METHOD(put, (Dbt* key, Dbt *data, u_int32_t _flags),
+ (dbc, key, data, _flags), DB_RETOK_DBCPUT)
diff --git a/storage/bdb/cxx/cxx_dbt.cpp b/storage/bdb/cxx/cxx_dbt.cpp
new file mode 100644
index 00000000000..7a4224503ee
--- /dev/null
+++ b/storage/bdb/cxx/cxx_dbt.cpp
@@ -0,0 +1,61 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_dbt.cpp,v 11.53 2002/03/27 04:31:14 bostic Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// Default constructor: zero the entire underlying DBT.
+Dbt::Dbt()
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+}
+
+// Construct from a caller-owned buffer; the Dbt does not take ownership.
+Dbt::Dbt(void *data_arg, u_int32_t size_arg)
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+ set_data(data_arg);
+ set_size(size_arg);
+}
+
+// No-op destructor: the Dbt never owns its data buffer.
+Dbt::~Dbt()
+{
+}
+
+// Shallow copy of the underlying DBT (data pointer is shared, not duplicated).
+Dbt::Dbt(const Dbt &that)
+{
+ const DBT *from = &that;
+ DBT *to = this;
+ memcpy(to, from, sizeof(DBT));
+}
+
+// Shallow assignment with the usual self-assignment guard.
+Dbt &Dbt::operator = (const Dbt &that)
+{
+ if (this != &that) {
+  const DBT *from = &that;
+  DBT *to = this;
+  memcpy(to, from, sizeof(DBT));
+ }
+ return (*this);
+}
diff --git a/storage/bdb/cxx/cxx_env.cpp b/storage/bdb/cxx/cxx_env.cpp
new file mode 100644
index 00000000000..c78c6e9fa47
--- /dev/null
+++ b/storage/bdb/cxx/cxx_env.cpp
@@ -0,0 +1,802 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_env.cpp,v 11.88 2002/08/26 22:13:36 mjc Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <stdio.h> // needed for set_error_stream
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc_auto/common_ext.h"
+
+#ifdef HAVE_CXX_STDHEADERS
+using std::cerr;
+#endif
+
+// Helper macros for simple methods that pass through to the
+// underlying C method. They may return an error or raise an exception.
+// These macros expect that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(dbenv, arg)")
+//
+#define DBENV_METHOD_ERR(_name, _argspec, _arglist, _on_err) \
+int DbEnv::_name _argspec \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ int ret; \
+ \
+ if ((ret = dbenv->_name _arglist) != 0) { \
+ _on_err; \
+ } \
+ return (ret); \
+}
+
+#define DBENV_METHOD(_name, _argspec, _arglist) \
+ DBENV_METHOD_ERR(_name, _argspec, _arglist, \
+ DB_ERROR("DbEnv::" # _name, ret, error_policy()))
+
+#define DBENV_METHOD_QUIET(_name, _argspec, _arglist) \
+int DbEnv::_name _argspec \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ return (dbenv->_name _arglist); \
+}
+
+#define DBENV_METHOD_VOID(_name, _argspec, _arglist) \
+void DbEnv::_name _argspec \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ dbenv->_name _arglist; \
+}
+
+// This datatype is needed for picky compilers.
+//
+extern "C" {
+ typedef void (*db_errcall_fcn_type)
+ (const char *, char *);
+};
+
+// The reason for a static variable is that some structures
+// (like Dbts) have no connection to any Db or DbEnv, so when
+// errors occur in their methods, we must have some reasonable
+// way to determine whether to throw or return errors.
+//
+// This variable is taken from flags whenever a DbEnv is constructed.
+// Normally there is only one DbEnv per program, and even if not,
+// there is typically a single policy of throwing or returning.
+//
+static int last_known_error_policy = ON_ERROR_UNKNOWN;
+
+__DB_OSTREAMCLASS *DbEnv::error_stream_ = 0;
+
+// These 'glue' functions are declared as extern "C" so they will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+void _feedback_intercept_c(DB_ENV *env, int opcode, int pct)
+{
+ DbEnv::_feedback_intercept(env, opcode, pct);
+}
+
+extern "C"
+void _paniccall_intercept_c(DB_ENV *env, int errval)
+{
+ DbEnv::_paniccall_intercept(env, errval);
+}
+
+extern "C"
+void _stream_error_function_c(const char *prefix, char *message)
+{
+ DbEnv::_stream_error_function(prefix, message);
+}
+
+extern "C"
+int _app_dispatch_intercept_c(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ return (DbEnv::_app_dispatch_intercept(env, dbt, lsn, op));
+}
+
+extern "C"
+int _rep_send_intercept_c(DB_ENV *env, const DBT *cntrl,
+ const DBT *data, int id, u_int32_t flags)
+{
+ return (DbEnv::_rep_send_intercept(env,
+ cntrl, data, id, flags));
+}
+
+void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ if (cxxenv->feedback_callback_ == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL,
+ cxxenv->error_policy());
+ return;
+ }
+ (*cxxenv->feedback_callback_)(cxxenv, opcode, pct);
+}
+
+void DbEnv::_paniccall_intercept(DB_ENV *env, int errval)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ if (cxxenv->paniccall_callback_ == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL,
+ cxxenv->error_policy());
+ }
+ (*cxxenv->paniccall_callback_)(cxxenv, errval);
+}
+
+int DbEnv::_app_dispatch_intercept(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::app_dispatch_callback",
+ EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::app_dispatch_callback",
+ EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ if (cxxenv->app_dispatch_callback_ == 0) {
+ DB_ERROR("DbEnv::app_dispatch_callback",
+ EINVAL, cxxenv->error_policy());
+ return (EINVAL);
+ }
+ Dbt *cxxdbt = (Dbt *)dbt;
+ DbLsn *cxxlsn = (DbLsn *)lsn;
+ return ((*cxxenv->app_dispatch_callback_)(cxxenv, cxxdbt, cxxlsn, op));
+}
+
+int DbEnv::_rep_send_intercept(DB_ENV *env, const DBT *cntrl,
+ const DBT *data, int id, u_int32_t flags)
+{
+
+ if (env == 0) {
+ DB_ERROR("DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ const Dbt *cxxcntrl = (const Dbt *)cntrl;
+ Dbt *cxxdata = (Dbt *)data;
+ return ((*cxxenv->rep_send_callback_)(cxxenv,
+ cxxcntrl, cxxdata, id, flags));
+}
+
+// A truism for the DbEnv object is that there is a valid
+// DB_ENV handle from the constructor until close().
+// After the close, the DB_ENV handle is invalid and
+// no operations are permitted on the DbEnv (other than
+// destructor). Leaving the DbEnv handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow DbEnv objects to be closed and reopened.
+// This implied always keeping a valid DB_ENV object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
+DbEnv::DbEnv(u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, app_dispatch_callback_(0)
+, feedback_callback_(0)
+, paniccall_callback_(0)
+, pgin_callback_(0)
+, pgout_callback_(0)
+, rep_send_callback_(0)
+{
+ if ((construct_error_ = initialize(0)) != 0)
+ DB_ERROR("DbEnv::DbEnv", construct_error_, error_policy());
+}
+
+DbEnv::DbEnv(DB_ENV *env, u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, app_dispatch_callback_(0)
+, feedback_callback_(0)
+, paniccall_callback_(0)
+, pgin_callback_(0)
+, pgout_callback_(0)
+, rep_send_callback_(0)
+{
+ if ((construct_error_ = initialize(env)) != 0)
+ DB_ERROR("DbEnv::DbEnv", construct_error_, error_policy());
+}
+
+// If the DB_ENV handle is still open, we close it. This is to make stack
+// allocation of DbEnv objects easier so that they are cleaned up in the error
+// path. Note that the C layer catches cases where handles are open in the
+// environment at close time and reports an error. Applications should call
+// close explicitly in normal (non-exceptional) cases to check the return
+// value.
+//
+DbEnv::~DbEnv()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ cleanup();
+ (void)env->close(env, 0);
+ }
+}
+
+// called by destructors before the DB_ENV is destroyed.
+void DbEnv::cleanup()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ env->api1_internal = 0;
+ imp_ = 0;
+ }
+}
+
+int DbEnv::close(u_int32_t flags)
+{
+ int ret;
+ DB_ENV *env = unwrap(this);
+
+ // after a close (no matter if success or failure),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((ret = env->close(env, flags)) != 0)
+ DB_ERROR("DbEnv::close", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(dbremove,
+ (DbTxn *txn, const char *name, const char *subdb, u_int32_t flags),
+ (dbenv, unwrap(txn), name, subdb, flags))
+DBENV_METHOD(dbrename, (DbTxn *txn, const char *name, const char *subdb,
+ const char *newname, u_int32_t flags),
+ (dbenv, unwrap(txn), name, subdb, newname, flags))
+
+void DbEnv::err(int error, const char *format, ...)
+{
+ DB_ENV *env = unwrap(this);
+
+ DB_REAL_ERR(env, error, 1, 1, format);
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int DbEnv::error_policy()
+{
+ if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
+ return (ON_ERROR_RETURN);
+ }
+ else {
+ return (ON_ERROR_THROW);
+ }
+}
+
+void DbEnv::errx(const char *format, ...)
+{
+ DB_ENV *env = unwrap(this);
+
+ DB_REAL_ERR(env, 0, 0, 1, format);
+}
+
+void *DbEnv::get_app_private() const
+{
+ return unwrapConst(this)->app_private;
+}
+
+// used internally during constructor
+// to associate an existing DB_ENV with this DbEnv,
+// or create a new one.
+//
+int DbEnv::initialize(DB_ENV *env)
+{
+ int ret;
+
+ last_known_error_policy = error_policy();
+
+ if (env == 0) {
+ // Create a new DB_ENV environment.
+ if ((ret = ::db_env_create(&env,
+ construct_flags_ & ~DB_CXX_NO_EXCEPTIONS)) != 0)
+ return (ret);
+ }
+ imp_ = wrap(env);
+ env->api1_internal = this; // for DB_ENV* to DbEnv* conversion
+ return (0);
+}
+
+// lock methods
+DBENV_METHOD(lock_detect, (u_int32_t flags, u_int32_t atype, int *aborted),
+ (dbenv, flags, atype, aborted))
+DBENV_METHOD_ERR(lock_get,
+ (u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock),
+ (dbenv, locker, flags, obj, lock_mode, &lock->lock_),
+ DbEnv::runtime_error_lock_get("DbEnv::lock_get", ret,
+ DB_LOCK_GET, lock_mode, obj, *lock,
+ -1, error_policy()))
+DBENV_METHOD(lock_id, (u_int32_t *idp), (dbenv, idp))
+DBENV_METHOD(lock_id_free, (u_int32_t id), (dbenv, id))
+DBENV_METHOD(lock_put, (DbLock *lock), (dbenv, &lock->lock_))
+DBENV_METHOD(lock_stat, (DB_LOCK_STAT **statp, u_int32_t flags),
+ (dbenv, statp, flags))
+DBENV_METHOD_ERR(lock_vec,
+ (u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elist_returned),
+ (dbenv, locker, flags, list, nlist, elist_returned),
+ DbEnv::runtime_error_lock_get("DbEnv::lock_vec", ret,
+ (*elist_returned)->op, (*elist_returned)->mode,
+ Dbt::get_Dbt((*elist_returned)->obj), DbLock((*elist_returned)->lock),
+ (*elist_returned) - list, error_policy()))
+// log methods
+DBENV_METHOD(log_archive, (char **list[], u_int32_t flags),
+ (dbenv, list, flags))
+
+int DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1)
+{
+ return (::log_compare(lsn0, lsn1));
+}
+
+// The following cast implies that DbLogc can be no larger than DB_LOGC
+DBENV_METHOD(log_cursor, (DbLogc **cursorp, u_int32_t flags),
+ (dbenv, (DB_LOGC **)cursorp, flags))
+DBENV_METHOD(log_file, (DbLsn *lsn, char *namep, size_t len),
+ (dbenv, lsn, namep, len))
+DBENV_METHOD(log_flush, (const DbLsn *lsn), (dbenv, lsn))
+DBENV_METHOD(log_put, (DbLsn *lsn, const Dbt *data, u_int32_t flags),
+ (dbenv, lsn, data, flags))
+DBENV_METHOD(log_stat, (DB_LOG_STAT **spp, u_int32_t flags),
+ (dbenv, spp, flags))
+
+int DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags)
+{
+ DB_ENV *env = unwrap(this);
+ int ret;
+ DB_MPOOLFILE *mpf;
+
+ if (env == NULL)
+ ret = EINVAL;
+ else
+ ret = env->memp_fcreate(env, &mpf, flags);
+
+ if (DB_RETOK_STD(ret)) {
+ *dbmfp = new DbMpoolFile();
+ (*dbmfp)->imp_ = wrap(mpf);
+ } else
+ DB_ERROR("DbMpoolFile::f_create", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+DBENV_METHOD(memp_register,
+ (int ftype, pgin_fcn_type pgin_fcn, pgout_fcn_type pgout_fcn),
+ (dbenv, ftype, pgin_fcn, pgout_fcn))
+
+// memory pool methods
+DBENV_METHOD(memp_stat,
+ (DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags),
+ (dbenv, gsp, fsp, flags))
+
+DBENV_METHOD(memp_sync, (DbLsn *sn), (dbenv, sn))
+
+DBENV_METHOD(memp_trickle, (int pct, int *nwrotep), (dbenv, pct, nwrotep))
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB_ENV->open method.
+//
+int DbEnv::open(const char *db_home, u_int32_t flags, int mode)
+{
+ int ret;
+ DB_ENV *env = unwrap(this);
+
+ if (construct_error_ != 0)
+ ret = construct_error_;
+ else
+ ret = env->open(env, db_home, flags, mode);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("DbEnv::open", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::remove(const char *db_home, u_int32_t flags)
+{
+ int ret;
+ DB_ENV *env = unwrap(this);
+
+ // after a remove (no matter if success or failure),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ if ((ret = env->remove(env, db_home, flags)) != 0)
+ DB_ERROR("DbEnv::remove", ret, error_policy());
+
+ return (ret);
+}
+
+// Report an error associated with the DbEnv.
+// error_policy is one of:
+// ON_ERROR_THROW throw an error
+// ON_ERROR_RETURN do nothing here, the caller will return an error
+// ON_ERROR_UNKNOWN defer the policy to policy saved in DbEnv::DbEnv
+//
+void DbEnv::runtime_error(const char *caller, int error, int error_policy)
+{
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ switch (error) {
+ case DB_LOCK_DEADLOCK:
+ {
+ DbDeadlockException dl_except(caller);
+ throw dl_except;
+ }
+ break;
+ case DB_RUNRECOVERY:
+ {
+ DbRunRecoveryException rr_except(caller);
+ throw rr_except;
+ }
+ break;
+ default:
+ {
+ DbException except(caller, error);
+ throw except;
+ }
+ break;
+ }
+ }
+}
+
+// Like DbEnv::runtime_error, but issue a DbMemoryException
+// based on the fact that this Dbt is not large enough.
+void DbEnv::runtime_error_dbt(const char *caller, Dbt *dbt, int error_policy)
+{
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ DbMemoryException except(caller, dbt);
+ throw except;
+ }
+}
+
+// Like DbEnv::runtime_error, but issue a DbLockNotGrantedException,
+// or a regular runtime error.
+// Calls the regular runtime_error if the error is not DB_LOCK_NOTGRANTED.
+void DbEnv::runtime_error_lock_get(const char *caller, int error,
+ db_lockop_t op, db_lockmode_t mode, const Dbt *obj,
+ DbLock lock, int index, int error_policy)
+{
+ if (error != DB_LOCK_NOTGRANTED) {
+ runtime_error(caller, error, error_policy);
+ return;
+ }
+
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ DbLockNotGrantedException except(caller, op, mode,
+ obj, lock, index);
+ throw except;
+ }
+}
+
+// static method
+char *DbEnv::strerror(int error)
+{
+ return (db_strerror(error));
+}
+
+void DbEnv::_stream_error_function(const char *prefix, char *message)
+{
+ // HP compilers need the extra casts, we don't know why.
+ if (error_stream_) {
+ if (prefix) {
+ (*error_stream_) << prefix << (const char *)": ";
+ }
+ if (message) {
+ (*error_stream_) << (const char *)message;
+ }
+ (*error_stream_) << (const char *)"\n";
+ }
+}
+
+// set methods
+
+DBENV_METHOD_VOID(set_errfile, (FILE *errfile), (dbenv, errfile))
+DBENV_METHOD_VOID(set_errpfx, (const char *errpfx), (dbenv, errpfx))
+
+// We keep these alphabetical by field name,
+// for comparison with Java's list.
+//
+DBENV_METHOD(set_data_dir, (const char *dir), (dbenv, dir))
+DBENV_METHOD(set_encrypt, (const char *passwd, int flags),
+ (dbenv, passwd, flags))
+DBENV_METHOD(set_lg_bsize, (u_int32_t bsize), (dbenv, bsize))
+DBENV_METHOD(set_lg_dir, (const char *dir), (dbenv, dir))
+DBENV_METHOD(set_lg_max, (u_int32_t max), (dbenv, max))
+DBENV_METHOD(set_lg_regionmax, (u_int32_t regionmax), (dbenv, regionmax))
+DBENV_METHOD(set_lk_detect, (u_int32_t detect), (dbenv, detect))
+DBENV_METHOD(set_lk_max, (u_int32_t max), (dbenv, max))
+DBENV_METHOD(set_lk_max_lockers, (u_int32_t max_lockers), (dbenv, max_lockers))
+DBENV_METHOD(set_lk_max_locks, (u_int32_t max_locks), (dbenv, max_locks))
+DBENV_METHOD(set_lk_max_objects, (u_int32_t max_objects), (dbenv, max_objects))
+DBENV_METHOD(set_mp_mmapsize, (size_t mmapsize), (dbenv, mmapsize))
+DBENV_METHOD(set_tmp_dir, (const char *tmp_dir), (dbenv, tmp_dir))
+DBENV_METHOD(set_tx_max, (u_int32_t tx_max), (dbenv, tx_max))
+
+DBENV_METHOD_QUIET(set_alloc,
+ (db_malloc_fcn_type malloc_fcn, db_realloc_fcn_type realloc_fcn,
+ db_free_fcn_type free_fcn),
+ (dbenv, malloc_fcn, realloc_fcn, free_fcn))
+
+void DbEnv::set_app_private(void *value)
+{
+ unwrap(this)->app_private = value;
+}
+
+DBENV_METHOD(set_cachesize,
+ (u_int32_t gbytes, u_int32_t bytes, int ncache),
+ (dbenv, gbytes, bytes, ncache))
+
+void DbEnv::set_errcall(void (*arg)(const char *, char *))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ // XXX
+ // We are casting from a function ptr declared with C++
+ // linkage to one (same arg types) declared with C
+ // linkage. It's hard to imagine a pair of C/C++
+ // compilers from the same vendor for which this
+ // won't work. Unfortunately, we can't use a
+ // intercept function like the others since the
+ // function does not have a (DbEnv*) as one of
+ // the args. If this causes trouble, we can pull
+ // the same trick we use in Java, namely stuffing
+ // a (DbEnv*) pointer into the prefix. We're
+ // avoiding this for the moment because it obfuscates.
+ //
+ (*(dbenv->set_errcall))(dbenv, (db_errcall_fcn_type)arg);
+}
+
+// Note: This actually behaves a bit like a static function,
+// since DB_ENV.db_errcall has no information about which
+// db_env triggered the call. A user that has multiple DB_ENVs
+// will simply not be able to have different streams for each one.
+//
+void DbEnv::set_error_stream(__DB_OSTREAMCLASS *stream)
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ error_stream_ = stream;
+ dbenv->set_errcall(dbenv, (stream == 0) ? 0 :
+ _stream_error_function_c);
+}
+
+int DbEnv::set_feedback(void (*arg)(DbEnv *, int, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ feedback_callback_ = arg;
+
+ return ((*(dbenv->set_feedback))(dbenv, _feedback_intercept_c));
+}
+
+DBENV_METHOD(set_flags, (u_int32_t flags, int onoff), (dbenv, flags, onoff))
+DBENV_METHOD(set_lk_conflicts, (u_int8_t *lk_conflicts, int lk_max),
+ (dbenv, lk_conflicts, lk_max))
+
+int DbEnv::set_paniccall(void (*arg)(DbEnv *, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ paniccall_callback_ = arg;
+
+ return ((*(dbenv->set_paniccall))(dbenv, _paniccall_intercept_c));
+}
+
+DBENV_METHOD(set_rpc_server,
+ (void *cl, char *host, long tsec, long ssec, u_int32_t flags),
+ (dbenv, cl, host, tsec, ssec, flags))
+DBENV_METHOD(set_shm_key, (long shm_key), (dbenv, shm_key))
+// Note: this changes from last_known_error_policy to error_policy()
+DBENV_METHOD(set_tas_spins, (u_int32_t arg), (dbenv, arg))
+
+int DbEnv::set_app_dispatch
+ (int (*arg)(DbEnv *, Dbt *, DbLsn *, db_recops))
+{
+ DB_ENV *dbenv = unwrap(this);
+ int ret;
+
+ app_dispatch_callback_ = arg;
+ if ((ret = (*(dbenv->set_app_dispatch))(dbenv,
+ _app_dispatch_intercept_c)) != 0)
+ DB_ERROR("DbEnv::set_app_dispatch", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(set_tx_timestamp, (time_t *timestamp), (dbenv, timestamp))
+DBENV_METHOD(set_verbose, (u_int32_t which, int onoff), (dbenv, which, onoff))
+
+int DbEnv::txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags)
+{
+ DB_ENV *env = unwrap(this);
+ DB_TXN *txn;
+ int ret;
+
+ ret = env->txn_begin(env, unwrap(pid), &txn, flags);
+ if (DB_RETOK_STD(ret))
+ *tid = new DbTxn(txn);
+ else
+ DB_ERROR("DbEnv::txn_begin", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(txn_checkpoint, (u_int32_t kbyte, u_int32_t min, u_int32_t flags),
+ (dbenv, kbyte, min, flags))
+
+int DbEnv::txn_recover(DbPreplist *preplist, long count,
+ long *retp, u_int32_t flags)
+{
+ DB_ENV *dbenv = unwrap(this);
+ DB_PREPLIST *c_preplist;
+ long i;
+ int ret;
+
+ /*
+ * We need to allocate some local storage for the
+ * returned preplist, and that requires us to do
+ * our own argument validation.
+ */
+ if (count <= 0)
+ ret = EINVAL;
+ else
+ ret = __os_malloc(dbenv, sizeof(DB_PREPLIST) * count,
+ &c_preplist);
+
+ if (ret != 0) {
+ DB_ERROR("DbEnv::txn_recover", ret, error_policy());
+ return (ret);
+ }
+
+ if ((ret =
+ dbenv->txn_recover(dbenv, c_preplist, count, retp, flags)) != 0) {
+ __os_free(dbenv, c_preplist);
+ DB_ERROR("DbEnv::txn_recover", ret, error_policy());
+ return (ret);
+ }
+
+ for (i = 0; i < *retp; i++) {
+ preplist[i].txn = new DbTxn();
+ preplist[i].txn->imp_ = wrap(c_preplist[i].txn);
+ memcpy(preplist[i].gid, c_preplist[i].gid,
+ sizeof(preplist[i].gid));
+ }
+
+ __os_free(dbenv, c_preplist);
+
+ return (0);
+}
+
+DBENV_METHOD(txn_stat, (DB_TXN_STAT **statp, u_int32_t flags),
+ (dbenv, statp, flags))
+
+int DbEnv::set_rep_transport(u_int32_t myid,
+ int (*f_send)(DbEnv *, const Dbt *, const Dbt *, int, u_int32_t))
+{
+ DB_ENV *dbenv = unwrap(this);
+ int ret;
+
+ rep_send_callback_ = f_send;
+ if ((ret = dbenv->set_rep_transport(dbenv,
+ myid, _rep_send_intercept_c)) != 0)
+ DB_ERROR("DbEnv::set_rep_transport", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(rep_elect,
+ (int nsites, int pri, u_int32_t timeout, int *idp),
+ (dbenv, nsites, pri, timeout, idp))
+
+int DbEnv::rep_process_message(Dbt *control, Dbt *rec, int *idp)
+{
+ DB_ENV *dbenv = unwrap(this);
+ int ret;
+
+ ret = dbenv->rep_process_message(dbenv, control, rec, idp);
+ if (!DB_RETOK_REPPMSG(ret))
+ DB_ERROR("DbEnv::rep_process_message", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(rep_start,
+ (Dbt *cookie, u_int32_t flags),
+ (dbenv, (DBT *)cookie, flags))
+
+DBENV_METHOD(rep_stat, (DB_REP_STAT **statp, u_int32_t flags),
+ (dbenv, statp, flags))
+
+DBENV_METHOD(set_rep_limit, (u_int32_t gbytes, u_int32_t bytes),
+ (dbenv, gbytes, bytes))
+
+DBENV_METHOD(set_timeout,
+ (db_timeout_t timeout, u_int32_t flags),
+ (dbenv, timeout, flags))
+
+// static method
+char *DbEnv::version(int *major, int *minor, int *patch)
+{
+ return (db_version(major, minor, patch));
+}
+
+// static method
+DbEnv *DbEnv::wrap_DB_ENV(DB_ENV *dbenv)
+{
+ DbEnv *wrapped_env = get_DbEnv(dbenv);
+ if (wrapped_env == NULL)
+ wrapped_env = new DbEnv(dbenv, 0);
+ return wrapped_env;
+}
diff --git a/storage/bdb/cxx/cxx_except.cpp b/storage/bdb/cxx/cxx_except.cpp
new file mode 100644
index 00000000000..40fdeae69d6
--- /dev/null
+++ b/storage/bdb/cxx/cxx_except.cpp
@@ -0,0 +1,330 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_except.cpp,v 11.17 2002/08/23 01:07:27 mjc Exp $";
+#endif /* not lint */
+
+#include <string.h>
+#include <errno.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+// tmpString is used to create strings on the stack
+//
+class tmpString
+{
+public:
+ tmpString(const char *str1,
+ const char *str2 = 0,
+ const char *str3 = 0,
+ const char *str4 = 0,
+ const char *str5 = 0);
+ ~tmpString() { delete [] s_; }
+ operator const char *() { return (s_); }
+
+private:
+ char *s_;
+};
+
+tmpString::tmpString(const char *str1,
+ const char *str2,
+ const char *str3,
+ const char *str4,
+ const char *str5)
+{
+ size_t len = strlen(str1);
+ if (str2)
+ len += strlen(str2);
+ if (str3)
+ len += strlen(str3);
+ if (str4)
+ len += strlen(str4);
+ if (str5)
+ len += strlen(str5);
+
+ s_ = new char[len+1];
+
+ strcpy(s_, str1);
+ if (str2)
+ strcat(s_, str2);
+ if (str3)
+ strcat(s_, str3);
+ if (str4)
+ strcat(s_, str4);
+ if (str5)
+ strcat(s_, str5);
+}
+
+// Note: would not be needed if we could inherit from exception
+// It does not appear to be possible to inherit from exception
+// with the current Microsoft library (VC5.0).
+//
+static char *dupString(const char *s)
+{
+ char *r = new char[strlen(s)+1];
+ strcpy(r, s);
+ return (r);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbException::~DbException()
+{
+ if (what_)
+ delete [] what_;
+}
+
+DbException::DbException(int err)
+: err_(err)
+{
+ what_ = dupString(db_strerror(err));
+}
+
+DbException::DbException(const char *description)
+: err_(0)
+{
+ what_ = dupString(tmpString(description));
+}
+
+DbException::DbException(const char *prefix, int err)
+: err_(err)
+{
+ what_ = dupString(tmpString(prefix, ": ", db_strerror(err)));
+}
+
+DbException::DbException(const char *prefix1, const char *prefix2, int err)
+: err_(err)
+{
+ what_ = dupString(tmpString(prefix1, ": ", prefix2, ": ",
+ db_strerror(err)));
+}
+
+DbException::DbException(const DbException &that)
+: err_(that.err_)
+{
+ what_ = dupString(that.what_);
+}
+
+DbException &DbException::operator = (const DbException &that)
+{
+ if (this != &that) {
+ err_ = that.err_;
+ if (what_)
+ delete [] what_;
+ what_ = 0; // in case new throws exception
+ what_ = dupString(that.what_);
+ }
+ return (*this);
+}
+
+int DbException::get_errno() const
+{
+ return (err_);
+}
+
+const char *DbException::what() const
+{
+ return (what_);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbMemoryException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+static const char *memory_err_desc = "Dbt not large enough for available data";
+DbMemoryException::~DbMemoryException()
+{
+}
+
+DbMemoryException::DbMemoryException(Dbt *dbt)
+: DbException(memory_err_desc, ENOMEM)
+, dbt_(dbt)
+{
+}
+
+DbMemoryException::DbMemoryException(const char *description)
+: DbException(description, ENOMEM)
+, dbt_(0)
+{
+}
+
+DbMemoryException::DbMemoryException(const char *prefix, Dbt *dbt)
+: DbException(prefix, memory_err_desc, ENOMEM)
+, dbt_(dbt)
+{
+}
+
+DbMemoryException::DbMemoryException(const char *prefix1, const char *prefix2,
+ Dbt *dbt)
+: DbException(prefix1, prefix2, ENOMEM)
+, dbt_(dbt)
+{
+}
+
+DbMemoryException::DbMemoryException(const DbMemoryException &that)
+: DbException(that)
+, dbt_(that.dbt_)
+{
+}
+
+DbMemoryException
+&DbMemoryException::operator =(const DbMemoryException &that)
+{
+ if (this != &that) {
+ DbException::operator=(that);
+ dbt_ = that.dbt_;
+ }
+ return (*this);
+}
+
+Dbt *DbMemoryException::get_dbt() const
+{
+ return (dbt_);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbDeadlockException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbDeadlockException::~DbDeadlockException()
+{
+}
+
+DbDeadlockException::DbDeadlockException(const char *description)
+: DbException(description, DB_LOCK_DEADLOCK)
+{
+}
+
+DbDeadlockException::DbDeadlockException(const DbDeadlockException &that)
+: DbException(that)
+{
+}
+
+DbDeadlockException
+&DbDeadlockException::operator =(const DbDeadlockException &that)
+{
+ if (this != &that)
+ DbException::operator=(that);
+ return (*this);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbLockNotGrantedException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbLockNotGrantedException::~DbLockNotGrantedException()
+{
+ delete lock_;
+}
+
+DbLockNotGrantedException::DbLockNotGrantedException(const char *prefix,
+ db_lockop_t op, db_lockmode_t mode, const Dbt *obj, const DbLock lock,
+ int index)
+: DbException(prefix, DbEnv::strerror(DB_LOCK_NOTGRANTED),
+ DB_LOCK_NOTGRANTED)
+, op_(op)
+, mode_(mode)
+, obj_(obj)
+, index_(index)
+{
+ lock_ = new DbLock(lock);
+}
+
+DbLockNotGrantedException::DbLockNotGrantedException
+ (const DbLockNotGrantedException &that)
+: DbException(that)
+{
+ op_ = that.op_;
+ mode_ = that.mode_;
+ obj_ = that.obj_;
+ lock_ = new DbLock(*that.lock_);
+ index_ = that.index_;
+}
+
+DbLockNotGrantedException
+&DbLockNotGrantedException::operator =(const DbLockNotGrantedException &that)
+{
+ if (this != &that) {
+ DbException::operator=(that);
+ op_ = that.op_;
+ mode_ = that.mode_;
+ obj_ = that.obj_;
+ lock_ = new DbLock(*that.lock_);
+ index_ = that.index_;
+ }
+ return (*this);
+}
+
+db_lockop_t DbLockNotGrantedException::get_op() const
+{
+ return op_;
+}
+
+db_lockmode_t DbLockNotGrantedException::get_mode() const
+{
+ return mode_;
+}
+
+const Dbt* DbLockNotGrantedException::get_obj() const
+{
+ return obj_;
+}
+
+DbLock* DbLockNotGrantedException::get_lock() const
+{
+ return lock_;
+}
+
+int DbLockNotGrantedException::get_index() const
+{
+ return index_;
+}
+
+
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbRunRecoveryException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbRunRecoveryException::~DbRunRecoveryException()
+{
+}
+
+DbRunRecoveryException::DbRunRecoveryException(const char *description)
+: DbException(description, DB_RUNRECOVERY)
+{
+}
+
+DbRunRecoveryException::DbRunRecoveryException
+ (const DbRunRecoveryException &that)
+: DbException(that)
+{
+}
+
+DbRunRecoveryException
+&DbRunRecoveryException::operator =(const DbRunRecoveryException &that)
+{
+ if (this != &that)
+ DbException::operator=(that);
+ return (*this);
+}
diff --git a/storage/bdb/cxx/cxx_lock.cpp b/storage/bdb/cxx/cxx_lock.cpp
new file mode 100644
index 00000000000..446eba49e27
--- /dev/null
+++ b/storage/bdb/cxx/cxx_lock.cpp
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_lock.cpp,v 11.17 2002/03/27 04:31:16 bostic Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbLock //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbLock::DbLock(DB_LOCK value)
+: lock_(value)
+{
+}
+
+DbLock::DbLock()
+{
+ memset(&lock_, 0, sizeof(DB_LOCK));
+}
+
+DbLock::DbLock(const DbLock &that)
+: lock_(that.lock_)
+{
+}
+
+DbLock &DbLock::operator = (const DbLock &that)
+{
+ lock_ = that.lock_;
+ return (*this);
+}
diff --git a/storage/bdb/cxx/cxx_logc.cpp b/storage/bdb/cxx/cxx_logc.cpp
new file mode 100644
index 00000000000..d1fe83dd58b
--- /dev/null
+++ b/storage/bdb/cxx/cxx_logc.cpp
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_logc.cpp,v 11.8 2002/07/03 21:03:53 bostic Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// It's private, and should never be called,
+// but some compilers need it resolved
+//
+DbLogc::~DbLogc()
+{
+}
+
+// The name _flags prevents a name clash with __db_log_cursor::flags
+int DbLogc::close(u_int32_t _flags)
+{
+ DB_LOGC *logc = this;
+ int ret;
+
+ ret = logc->close(logc, _flags);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("DbLogc::close", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+// The name _flags prevents a name clash with __db_log_cursor::flags
+int DbLogc::get(DbLsn *lsn, Dbt *data, u_int32_t _flags)
+{
+ DB_LOGC *logc = this;
+ int ret;
+
+ ret = logc->get(logc, lsn, data, _flags);
+
+ if (!DB_RETOK_LGGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT("DbLogc::get", data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR("DbLogc::get", ret, ON_ERROR_UNKNOWN);
+ }
+
+ return (ret);
+}
diff --git a/storage/bdb/cxx/cxx_mpool.cpp b/storage/bdb/cxx/cxx_mpool.cpp
new file mode 100644
index 00000000000..3eb78d03ff4
--- /dev/null
+++ b/storage/bdb/cxx/cxx_mpool.cpp
@@ -0,0 +1,110 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_mpool.cpp,v 11.20 2002/07/03 21:03:53 bostic Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+
+// Helper macros for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(mpf, arg)")
+//
+#define DB_MPOOLFILE_METHOD(_name, _argspec, _arglist, _retok) \
+int DbMpoolFile::_name _argspec \
+{ \
+ int ret; \
+ DB_MPOOLFILE *mpf = unwrap(this); \
+ \
+ if (mpf == NULL) \
+ ret = EINVAL; \
+ else \
+ ret = mpf->_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("DbMpoolFile::"#_name, ret, ON_ERROR_UNKNOWN); \
+ return (ret); \
+}
+
+#define DB_MPOOLFILE_METHOD_VOID(_name, _argspec, _arglist) \
+void DbMpoolFile::_name _argspec \
+{ \
+ DB_MPOOLFILE *mpf = unwrap(this); \
+ \
+ mpf->_name _arglist; \
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbMpoolFile //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbMpoolFile::DbMpoolFile()
+: imp_(0)
+{
+}
+
+DbMpoolFile::~DbMpoolFile()
+{
+}
+
+int DbMpoolFile::close(u_int32_t flags)
+{
+ DB_MPOOLFILE *mpf = unwrap(this);
+ int ret;
+
+ if (mpf == NULL)
+ ret = EINVAL;
+ else
+ ret = mpf->close(mpf, flags);
+
+ imp_ = 0; // extra safety
+
+ // This may seem weird, but is legal as long as we don't access
+ // any data before returning.
+ delete this;
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("DbMpoolFile::close", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+DB_MPOOLFILE_METHOD(get, (db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep),
+ (mpf, pgnoaddr, flags, pagep), DB_RETOK_MPGET)
+DB_MPOOLFILE_METHOD_VOID(last_pgno, (db_pgno_t *pgnoaddr), (mpf, pgnoaddr))
+DB_MPOOLFILE_METHOD(open,
+ (const char *file, u_int32_t flags, int mode, size_t pagesize),
+ (mpf, file, flags, mode, pagesize), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(put, (void *pgaddr, u_int32_t flags),
+ (mpf, pgaddr, flags), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD_VOID(refcnt, (db_pgno_t *pgnoaddr), (mpf, pgnoaddr))
+DB_MPOOLFILE_METHOD(set, (void *pgaddr, u_int32_t flags),
+ (mpf, pgaddr, flags), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_clear_len, (u_int32_t len),
+ (mpf, len), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_fileid, (u_int8_t *fileid),
+ (mpf, fileid), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_ftype, (int ftype),
+ (mpf, ftype), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_lsn_offset, (int32_t offset),
+ (mpf, offset), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_pgcookie, (DBT *dbt),
+ (mpf, dbt), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD_VOID(set_unlink, (int ul), (mpf, ul))
+DB_MPOOLFILE_METHOD(sync, (),
+ (mpf), DB_RETOK_STD)
diff --git a/storage/bdb/cxx/cxx_txn.cpp b/storage/bdb/cxx/cxx_txn.cpp
new file mode 100644
index 00000000000..b04077c0f5b
--- /dev/null
+++ b/storage/bdb/cxx/cxx_txn.cpp
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_txn.cpp,v 11.27 2002/07/20 13:50:11 dda Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+
+// Helper macro for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(db, arg)")
+//
+#define DBTXN_METHOD(_name, _delete, _argspec, _arglist) \
+int DbTxn::_name _argspec \
+{ \
+ int ret; \
+ DB_TXN *txn = unwrap(this); \
+ \
+ ret = txn->_name _arglist; \
+ /* Weird, but safe if we don't access this again. */ \
+ if (_delete) \
+ delete this; \
+ if (!DB_RETOK_STD(ret)) \
+ DB_ERROR("DbTxn::" # _name, ret, ON_ERROR_UNKNOWN); \
+ return (ret); \
+}
+
+// private constructor, never called but needed by some C++ linkers
+DbTxn::DbTxn()
+: imp_(0)
+{
+}
+
+DbTxn::DbTxn(DB_TXN *txn)
+: imp_(wrap(txn))
+{
+ txn->api_internal = this;
+}
+
+DbTxn::~DbTxn()
+{
+}
+
+DBTXN_METHOD(abort, 1, (), (txn))
+DBTXN_METHOD(commit, 1, (u_int32_t flags), (txn, flags))
+DBTXN_METHOD(discard, 1, (u_int32_t flags), (txn, flags))
+
+u_int32_t DbTxn::id()
+{
+ DB_TXN *txn;
+
+ txn = unwrap(this);
+ return (txn->id(txn)); // no error
+}
+
+DBTXN_METHOD(prepare, 0, (u_int8_t *gid), (txn, gid))
+DBTXN_METHOD(set_timeout, 0, (db_timeout_t timeout, u_int32_t flags),
+ (txn, timeout, flags))
+
+// static method
+DbTxn *DbTxn::wrap_DB_TXN(DB_TXN *txn)
+{
+ DbTxn *wrapped_txn = get_DbTxn(txn);
+ if (wrapped_txn == NULL)
+ wrapped_txn = new DbTxn(txn);
+ return wrapped_txn;
+}
diff --git a/storage/bdb/db/crdel.src b/storage/bdb/db/crdel.src
new file mode 100644
index 00000000000..d89fa7a0382
--- /dev/null
+++ b/storage/bdb/db/crdel.src
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: crdel.src,v 11.24 2002/04/17 19:02:57 krinsky Exp $
+ */
+
+PREFIX __crdel
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * Metasub: log the creation of a subdatabase meta data page.
+ *
+ * fileid: identifies the file being acted upon.
+ * pgno: page number on which to write this meta-data page
+ * page: the actual meta-data page
+ * lsn: lsn of the page.
+ */
+BEGIN metasub 142
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+PGDBT page DBT s
+POINTER lsn DB_LSN * lu
+END
+
diff --git a/storage/bdb/db/crdel_rec.c b/storage/bdb/db/crdel_rec.c
new file mode 100644
index 00000000000..542a0c358dd
--- /dev/null
+++ b/storage/bdb/db/crdel_rec.c
@@ -0,0 +1,97 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: crdel_rec.c,v 11.64 2002/08/14 20:27:34 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+
+/*
+ * __crdel_metasub_recover --
+ * Recovery function for metasub.
+ *
+ * PUBLIC: int __crdel_metasub_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __crdel_metasub_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__crdel_metasub_print);
+ REC_INTRO(__crdel_metasub_read, 0);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+ }
+
+ modified = 0;
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+
+ if (cmp_p == 0 && DB_REDO(op)) {
+ memcpy(pagep, argp->page.data, argp->page.size);
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ } else if (DB_UNDO(op)) {
+ /*
+ * We want to undo this page creation. The page creation
+ * happened in two parts. First, we called __bam_new which
+ * was logged separately. Then we wrote the meta-data onto
+ * the page. So long as we restore the LSN, then the recovery
+ * for __bam_new will do everything else.
+ * Don't bother checking the lsn on the page. If we
+ * are rolling back the next thing is that this page
+ * will get freed. Opening the subdb will have reinitialized
+ * the page, but not the lsn.
+ */
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
diff --git a/storage/bdb/db/db.c b/storage/bdb/db/db.c
new file mode 100644
index 00000000000..986167d5ade
--- /dev/null
+++ b/storage/bdb/db/db.c
@@ -0,0 +1,1308 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db.c,v 11.246 2002/08/20 14:40:00 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __db_disassociate __P((DB *));
+#if CONFIG_TEST
+static void __db_makecopy __P((const char *, const char *));
+static int __db_testdocopy __P((DB_ENV *, const char *));
+static int __qam_testdocopy __P((DB *, const char *));
+#endif
+
+/*
+ * DB.C --
+ * This file contains the utility functions for the DBP layer.
+ */
+
+/*
+ * __db_master_open --
+ * Open up a handle on a master database.
+ *
+ * PUBLIC: int __db_master_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, int, DB **));
+ */
+int
+__db_master_open(subdbp, txn, name, flags, mode, dbpp)
+ DB *subdbp;
+ DB_TXN *txn;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ DB **dbpp;
+{
+ DB *dbp;
+ int ret;
+
+ /* Open up a handle on the main database. */
+ if ((ret = db_create(&dbp, subdbp->dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * It's always a btree.
+ * Run in the transaction we've created.
+ * Set the pagesize in case we're creating a new database.
+ * Flag that we're creating a database with subdatabases.
+ */
+ dbp->type = DB_BTREE;
+ dbp->pgsize = subdbp->pgsize;
+ F_SET(dbp, DB_AM_SUBDB);
+ F_SET(dbp, F_ISSET(subdbp,
+ DB_AM_RECOVER | DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM));
+
+ /*
+ * If there was a subdb specified, then we only want to apply
+ * DB_EXCL to the subdb, not the actual file. We only got here
+ * because there was a subdb specified.
+ */
+ LF_CLR(DB_EXCL);
+ LF_SET(DB_RDWRMASTER);
+ if ((ret = __db_dbopen(dbp, txn, name, NULL, flags, mode, PGNO_BASE_MD))
+ != 0)
+ goto err;
+
+ /*
+ * Verify that pagesize is the same on both.
+ * The items in dbp were now initialized from the meta
+ * page. The items in dbp were set in __db_dbopen
+ * when we either read or created the master file.
+ * Other items such as checksum and encryption are
+ * checked when we read the meta-page. So we do not
+ * check those here. However, if the meta-page caused
+ * chksumming to be turned on and it wasn't already, set
+ * it here.
+ */
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ F_SET(subdbp, DB_AM_CHKSUM);
+ if (subdbp->pgsize != 0 && dbp->pgsize != subdbp->pgsize) {
+ ret = EINVAL;
+ __db_err(dbp->dbenv,
+ "Different pagesize specified on existent file");
+ goto err;
+ }
+err:
+ if (ret != 0 && !F_ISSET(dbp, DB_AM_DISCARD))
+ __db_close_i(dbp, txn, 0);
+ else
+ *dbpp = dbp;
+ return (ret);
+}
+
+/*
+ * __db_master_update --
+ * Add/Open/Remove a subdatabase from a master database.
+ *
+ * PUBLIC: int __db_master_update __P((DB *, DB *, DB_TXN *, const char *,
+ * PUBLIC: DBTYPE, mu_action, const char *, u_int32_t));
+ */
+int
+__db_master_update(mdbp, sdbp, txn, subdb, type, action, newname, flags)
+ DB *mdbp, *sdbp;
+ DB_TXN *txn;
+ const char *subdb;
+ DBTYPE type;
+ mu_action action;
+ const char *newname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc, *ndbc;
+ DBT key, data, ndata;
+ PAGE *p;
+ db_pgno_t t_pgno;
+ int modify, ret, t_ret;
+
+ dbenv = mdbp->dbenv;
+ dbc = ndbc = NULL;
+ p = NULL;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Might we modify the master database? If so, we'll need to lock. */
+ modify = (action != MU_OPEN || LF_ISSET(DB_CREATE)) ? 1 : 0;
+
+ /*
+ * Open up a cursor. If this is CDB and we're creating the database,
+ * make it an update cursor.
+ */
+ if ((ret = mdbp->cursor(mdbp, txn, &dbc,
+ (CDB_LOCKING(dbenv) && modify) ? DB_WRITECURSOR : 0)) != 0)
+ goto err;
+
+ /*
+ * Point the cursor at the record.
+ *
+ * If we're removing or potentially creating an entry, lock the page
+ * with DB_RMW.
+ *
+ * We do multiple cursor operations with the cursor in some cases and
+ * subsequently access the data DBT information. Set DB_DBT_MALLOC so
+ * we don't risk modification of the data between our uses of it.
+ *
+ * !!!
+ * We don't include the name's nul termination in the database.
+ */
+ key.data = (void *)subdb;
+ key.size = (u_int32_t)strlen(subdb);
+ F_SET(&data, DB_DBT_MALLOC);
+
+ ret = dbc->c_get(dbc, &key, &data,
+ DB_SET | ((STD_LOCKING(dbc) && modify) ? DB_RMW : 0));
+
+ /*
+ * What we do next--whether or not we found a record for the
+ * specified subdatabase--depends on what the specified action is.
+ * Handle ret appropriately as the first statement of each case.
+ */
+ switch (action) {
+ case MU_REMOVE:
+ /*
+ * We should have found something if we're removing it. Note
+ * that in the common case where the DB we're asking to remove
+ * doesn't exist, we won't get this far; __db_subdb_remove
+ * will already have returned an error from __db_open.
+ */
+ if (ret != 0)
+ goto err;
+
+ /*
+ * Delete the subdatabase entry first; if this fails,
+ * we don't want to touch the actual subdb pages.
+ */
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+
+ /*
+ * We're handling actual data, not on-page meta-data,
+ * so it hasn't been converted to/from opposite
+ * endian architectures. Do it explicitly, now.
+ */
+ memcpy(&sdbp->meta_pgno, data.data, sizeof(db_pgno_t));
+ DB_NTOHL(&sdbp->meta_pgno);
+ if ((ret =
+ mdbp->mpf->get(mdbp->mpf, &sdbp->meta_pgno, 0, &p)) != 0)
+ goto err;
+
+ /* Free and put the page. */
+ if ((ret = __db_free(dbc, p)) != 0) {
+ p = NULL;
+ goto err;
+ }
+ p = NULL;
+ break;
+ case MU_RENAME:
+ /* We should have found something if we're renaming it. */
+ if (ret != 0)
+ goto err;
+
+ /*
+ * Before we rename, we need to make sure we're not
+ * overwriting another subdatabase, or else this operation
+ * won't be undoable. Open a second cursor and check
+ * for the existence of newname; it shouldn't appear under
+ * us since we hold the metadata lock.
+ */
+ if ((ret = mdbp->cursor(mdbp, txn, &ndbc, 0)) != 0)
+ goto err;
+ DB_ASSERT(newname != NULL);
+ key.data = (void *)newname;
+ key.size = (u_int32_t)strlen(newname);
+
+ /*
+ * We don't actually care what the meta page of the potentially-
+ * overwritten DB is; we just care about existence.
+ */
+ memset(&ndata, 0, sizeof(ndata));
+ F_SET(&ndata, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ if ((ret = ndbc->c_get(ndbc, &key, &ndata, DB_SET)) == 0) {
+ /* A subdb called newname exists. Bail. */
+ ret = EEXIST;
+ __db_err(dbenv, "rename: database %s exists", newname);
+ goto err;
+ } else if (ret != DB_NOTFOUND)
+ goto err;
+
+ /*
+ * Now do the put first; we don't want to lose our
+ * sole reference to the subdb. Use the second cursor
+ * so that the first one continues to point to the old record.
+ */
+ if ((ret = ndbc->c_put(ndbc, &key, &data, DB_KEYFIRST)) != 0)
+ goto err;
+ if ((ret = dbc->c_del(dbc, 0)) != 0) {
+ /*
+ * If the delete fails, try to delete the record
+ * we just put, in case we're not txn-protected.
+ */
+ (void)ndbc->c_del(ndbc, 0);
+ goto err;
+ }
+
+ break;
+ case MU_OPEN:
+ /*
+ * Get the subdatabase information. If it already exists,
+ * copy out the page number and we're done.
+ */
+ switch (ret) {
+ case 0:
+ if (LF_ISSET(DB_CREATE) && LF_ISSET(DB_EXCL)) {
+ ret = EEXIST;
+ goto err;
+ }
+ memcpy(&sdbp->meta_pgno, data.data, sizeof(db_pgno_t));
+ DB_NTOHL(&sdbp->meta_pgno);
+ goto done;
+ case DB_NOTFOUND:
+ if (LF_ISSET(DB_CREATE))
+ break;
+ /*
+ * No db_err, it is reasonable to remove a
+ * nonexistent db.
+ */
+ ret = ENOENT;
+ goto err;
+ default:
+ goto err;
+ }
+
+ /*
+ * We need to check against the master lorder here because
+ * we only want to check this if we are creating. In the
+ * case where we don't create we just want to inherit.
+ */
+ if (F_ISSET(mdbp, DB_AM_SWAP) != F_ISSET(sdbp, DB_AM_SWAP)) {
+ ret = EINVAL;
+ __db_err(mdbp->dbenv,
+ "Different lorder specified on existent file");
+ goto err;
+ }
+ /* Create a subdatabase. */
+ if ((ret = __db_new(dbc,
+ type == DB_HASH ? P_HASHMETA : P_BTREEMETA, &p)) != 0)
+ goto err;
+ sdbp->meta_pgno = PGNO(p);
+
+ /*
+ * XXX
+ * We're handling actual data, not on-page meta-data, so it
+ * hasn't been converted to/from opposite endian architectures.
+ * Do it explicitly, now.
+ */
+ t_pgno = PGNO(p);
+ DB_HTONL(&t_pgno);
+ memset(&ndata, 0, sizeof(ndata));
+ ndata.data = &t_pgno;
+ ndata.size = sizeof(db_pgno_t);
+ if ((ret = dbc->c_put(dbc, &key, &ndata, DB_KEYLAST)) != 0)
+ goto err;
+ F_SET(sdbp, DB_AM_CREATED);
+ break;
+ }
+
+err:
+done: /*
+ * If we allocated a page: if we're successful, mark the page dirty
+ * and return it to the cache, otherwise, discard/free it.
+ */
+ if (p != NULL) {
+ if (ret == 0) {
+ if ((t_ret =
+ mdbp->mpf->put(mdbp->mpf, p, DB_MPOOL_DIRTY)) != 0)
+ ret = t_ret;
+ /*
+ * Since we cannot close this file until after
+ * transaction commit, we need to sync the dirty
+ * pages, because we'll read these directly from
+ * disk to open.
+ */
+ if ((t_ret = mdbp->sync(mdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ } else
+ (void)mdbp->mpf->put(mdbp->mpf, p, 0);
+ }
+
+ /* Discard the cursor(s) and data. */
+ if (data.data != NULL)
+ __os_ufree(dbenv, data.data);
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ndbc != NULL && (t_ret = ndbc->c_close(ndbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_dbenv_setup --
+ * Set up the underlying environment during a db_open.
+ *
+ * PUBLIC: int __db_dbenv_setup __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, u_int32_t));
+ */
+int
+__db_dbenv_setup(dbp, txn, name, id, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ u_int32_t id;
+ u_int32_t flags;
+{
+ DB *ldbp;
+ DBT pgcookie;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ u_int32_t maxid;
+ int ftype, ret;
+
+ dbenv = dbp->dbenv;
+
+ /* If we don't yet have an environment, it's time to create it. */
+ if (!F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ /* Make sure we have at least DB_MINCACHE pages in our cache. */
+ if (dbenv->mp_gbytes == 0 &&
+ dbenv->mp_bytes < dbp->pgsize * DB_MINPAGECACHE &&
+ (ret = dbenv->set_cachesize(
+ dbenv, 0, dbp->pgsize * DB_MINPAGECACHE, 0)) != 0)
+ return (ret);
+
+ if ((ret = dbenv->open(dbenv, NULL, DB_CREATE |
+ DB_INIT_MPOOL | DB_PRIVATE | LF_ISSET(DB_THREAD), 0)) != 0)
+ return (ret);
+ }
+
+ /* Register DB's pgin/pgout functions. */
+ if ((ret = dbenv->memp_register(
+ dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0)
+ return (ret);
+
+ /* Create the DB_MPOOLFILE structure. */
+ if ((ret = dbenv->memp_fcreate(dbenv, &dbp->mpf, 0)) != 0)
+ return (ret);
+ mpf = dbp->mpf;
+
+ /* Set the database's cache priority if we've been given one. */
+ if (dbp->priority != 0 &&
+ (ret = mpf->set_priority(mpf, dbp->priority)) != 0)
+ return (ret);
+
+ /*
+ * Open a backing file in the memory pool.
+ *
+ * If we need to pre- or post-process a file's pages on I/O, set the
+ * file type. If it's a hash file, always call the pgin and pgout
+ * routines. This means that hash files can never be mapped into
+ * process memory. If it's a btree file and requires swapping, we
+ * need to page the file in and out. This has to be right -- we can't
+ * mmap files that are being paged in and out.
+ */
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ ftype = F_ISSET(dbp, DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM)
+ ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
+ (void)mpf->set_ftype(mpf, ftype);
+ (void)mpf->set_clear_len(mpf, (CRYPTO_ON(dbenv) ?
+ dbp->pgsize : DB_PAGE_DB_LEN));
+ break;
+ case DB_HASH:
+ (void)mpf->set_ftype(mpf, DB_FTYPE_SET);
+ (void)mpf->set_clear_len(mpf, (CRYPTO_ON(dbenv) ?
+ dbp->pgsize : DB_PAGE_DB_LEN));
+ break;
+ case DB_QUEUE:
+ ftype = F_ISSET(dbp, DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM)
+ ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
+ (void)mpf->set_ftype(mpf, ftype);
+ (void)mpf->set_clear_len(mpf, (CRYPTO_ON(dbenv) ?
+ dbp->pgsize : DB_PAGE_QUEUE_LEN));
+ break;
+ case DB_UNKNOWN:
+ /*
+ * If we're running in the verifier, our database might
+ * be corrupt and we might not know its type--but we may
+ * still want to be able to verify and salvage.
+ *
+ * If we can't identify the type, it's not going to be safe
+ * to call __db_pgin--we pretty much have to give up all
+ * hope of salvaging cross-endianness. Proceed anyway;
+ * at worst, the database will just appear more corrupt
+ * than it actually is, but at best, we may be able
+ * to salvage some data even with no metadata page.
+ */
+ if (F_ISSET(dbp, DB_AM_VERIFYING)) {
+ (void)mpf->set_ftype(mpf, DB_FTYPE_NOTSET);
+ (void)mpf->set_clear_len(mpf, DB_PAGE_DB_LEN);
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ return (
+ __db_unknown_type(dbenv, "__db_dbenv_setup", dbp->type));
+ }
+
+ (void)mpf->set_fileid(mpf, dbp->fileid);
+ (void)mpf->set_lsn_offset(mpf, 0);
+
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pginfo.type = dbp->type;
+ pgcookie.data = &pginfo;
+ pgcookie.size = sizeof(DB_PGINFO);
+ (void)mpf->set_pgcookie(mpf, &pgcookie);
+
+ if ((ret = mpf->open(mpf, name,
+ LF_ISSET(DB_RDONLY | DB_NOMMAP | DB_ODDFILESIZE | DB_TRUNCATE) |
+ (F_ISSET(dbenv, DB_ENV_DIRECT_DB) ? DB_DIRECT : 0),
+ 0, dbp->pgsize)) != 0)
+ return (ret);
+
+ /*
+ * We may need a per-thread mutex. Allocate it from the mpool
+ * region, there's supposed to be extra space there for that purpose.
+ */
+ if (LF_ISSET(DB_THREAD)) {
+ dbmp = dbenv->mp_handle;
+ if ((ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbp->mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ return (ret);
+ }
+
+ /*
+ * Set up a bookkeeping entry for this database in the log region,
+ * if such a region exists. Note that even if we're in recovery
+ * or a replication client, where we won't log registries, we'll
+ * still need an FNAME struct, so LOGGING_ON is the correct macro.
+ */
+ if (LOGGING_ON(dbenv) &&
+ (ret = __dbreg_setup(dbp, name, id)) != 0)
+ return (ret);
+
+ /*
+ * If we're actively logging and our caller isn't a recovery function
+ * that already did so, assign this dbp a log fileid.
+ */
+ if (DBENV_LOGGING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER) &&
+#if !defined(DEBUG_ROP)
+ !F_ISSET(dbp, DB_AM_RDONLY) &&
+#endif
+ (ret = __dbreg_new_id(dbp, txn)) != 0)
+ return (ret);
+
+ /*
+ * Insert ourselves into the DB_ENV's dblist. We allocate a
+ * unique ID to each {fileid, meta page number} pair, and to
+ * each temporary file (since they all have a zero fileid).
+ * This ID gives us something to use to tell which DB handles
+ * go with which databases in all the cursor adjustment
+ * routines, where we don't want to do a lot of ugly and
+ * expensive memcmps.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (maxid = 0, ldbp = LIST_FIRST(&dbenv->dblist);
+ ldbp != NULL; ldbp = LIST_NEXT(dbp, dblistlinks)) {
+ if (name != NULL &&
+ memcmp(ldbp->fileid, dbp->fileid, DB_FILE_ID_LEN) == 0 &&
+ ldbp->meta_pgno == dbp->meta_pgno)
+ break;
+ if (ldbp->adj_fileid > maxid)
+ maxid = ldbp->adj_fileid;
+ }
+
+ /*
+ * If ldbp is NULL, we didn't find a match, or we weren't
+ * really looking because name is NULL. Assign the dbp an
+ * adj_fileid one higher than the largest we found, and
+ * insert it at the head of the master dbp list.
+ *
+ * If ldbp is not NULL, it is a match for our dbp. Give dbp
+ * the same ID that ldbp has, and add it after ldbp so they're
+ * together in the list.
+ */
+ if (ldbp == NULL) {
+ dbp->adj_fileid = maxid + 1;
+ LIST_INSERT_HEAD(&dbenv->dblist, dbp, dblistlinks);
+ } else {
+ dbp->adj_fileid = ldbp->adj_fileid;
+ LIST_INSERT_AFTER(ldbp, dbp, dblistlinks);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ return (0);
+}
+
+/*
+ * __db_close --
+ * DB destructor.
+ *
+ * PUBLIC: int __db_close __P((DB *, u_int32_t));
+ */
+int
+__db_close(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments, but as a DB handle destructor, we can't fail. */
+ if (flags != 0 && flags != DB_NOSYNC)
+ (void)__db_ferr(dbenv, "DB->close", 0);
+
+ return (__db_close_i(dbp, NULL, flags));
+}
+
+/*
+ * __db_close_i --
+ * Internal DB destructor.
+ *
+ * PUBLIC: int __db_close_i __P((DB *, DB_TXN *, u_int32_t));
+ */
+int
+__db_close_i(dbp, txn, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+ ret = 0;
+
+ /*
+ * Validate arguments, but as a DB handle destructor, we can't fail.
+ *
+ * Check for consistent transaction usage -- ignore errors. Only
+ * internal callers specify transactions, so it's a serious problem
+ * if we get error messages.
+ */
+ if (txn != NULL)
+ (void)__db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0);
+
+ /* Refresh the structure and close any local environment. */
+ if ((t_ret = __db_refresh(dbp, txn, flags)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Call the access specific close function.
+ *
+ * !!!
+ * Because of where these functions are called in the DB handle close
+ * process, these routines can't do anything that would dirty pages or
+ * otherwise affect closing down the database. Specifically, we can't
+ * abort and recover any of the information they control.
+ */
+ if ((t_ret = __ham_db_close(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __bam_db_close(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __qam_db_close(dbp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ --dbenv->db_ref;
+ if (F_ISSET(dbenv, DB_ENV_DBLOCAL) && dbenv->db_ref == 0 &&
+ (t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Free the database handle. */
+ memset(dbp, CLEAR_BYTE, sizeof(*dbp));
+ __os_free(dbenv, dbp);
+
+ return (ret);
+}
+
+/*
+ * __db_refresh --
+ * Refresh the DB structure, releasing any allocated resources.
+ * This does most of the work of closing files now because refresh
+ * is what is used during abort processing (since we can't destroy
+ * the actual handle) and during abort processing, we may have a
+ * fully opened handle.
+ *
+ * PUBLIC: int __db_refresh __P((DB *, DB_TXN *, u_int32_t));
+ */
+int
+__db_refresh(dbp, txn, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t flags;
+{
+ DB *sdbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCKREQ lreq;
+ DB_MPOOL *dbmp;
+ int ret, t_ret;
+
+ ret = 0;
+
+ dbenv = dbp->dbenv;
+
+ /* If never opened, or not currently open, it's easy. */
+ if (!F_ISSET(dbp, DB_AM_OPEN_CALLED))
+ goto never_opened;
+
+ /*
+ * If we have any secondary indices, disassociate them from us.
+ * We don't bother with the mutex here; it only protects some
+ * of the ops that will make us core-dump mid-close anyway, and
+ * if you're trying to do something with a secondary *while* you're
+ * closing the primary, you deserve what you get. The disassociation
+ * is mostly done just so we can close primaries and secondaries in
+ * any order--but within one thread of control.
+ */
+ for (sdbp = LIST_FIRST(&dbp->s_secondaries);
+ sdbp != NULL; sdbp = LIST_NEXT(sdbp, s_links)) {
+ LIST_REMOVE(sdbp, s_links);
+ if ((t_ret = __db_disassociate(sdbp)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /*
+ * Sync the underlying access method. Do before closing the cursors
+ * because DB->sync allocates cursors in order to write Recno backing
+ * source text files.
+ */
+ if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
+ (t_ret = dbp->sync(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Go through the active cursors and call the cursor recycle routine,
+ * which resolves pending operations and moves the cursors onto the
+ * free list. Then, walk the free list and call the cursor destroy
+ * routine. Note that any failure on a close is considered "really
+ * bad" and we just break out of the loop and force forward.
+ */
+ while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ break;
+ }
+
+ while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ if ((t_ret = __db_c_destroy(dbc)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ break;
+ }
+
+ /*
+ * Close any outstanding join cursors. Join cursors destroy
+ * themselves on close and have no separate destroy routine.
+ */
+ while ((dbc = TAILQ_FIRST(&dbp->join_queue)) != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ break;
+ }
+
+ /*
+ * Sync the memory pool, even though we've already called DB->sync,
+ * because closing cursors can dirty pages by deleting items they
+ * referenced.
+ */
+ if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
+ (t_ret = dbp->mpf->sync(dbp->mpf)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Close any handle we've been holding since the open. */
+ if (dbp->saved_open_fhp != NULL &&
+ F_ISSET(dbp->saved_open_fhp, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(dbenv, dbp->saved_open_fhp)) != 0 &&
+ ret == 0)
+ ret = t_ret;
+
+never_opened:
+ /*
+ * We are not releasing the handle lock here because we're about
+ * to release all locks held by dbp->lid below. There are two
+ * ways that we can get in here with a handle_lock, but not a
+ * dbp->lid. The first is when our lid has been hijacked by a
+ * subdb. The second is when we are a Queue database in the midst
+ * of a rename. If the queue file hasn't actually been opened, we
+ * hijack the main dbp's locker id to do the open so we can get the
+ * extent files. In both cases, we needn't free the handle lock
+ * because it will be freed when the hijacked locker-id is freed.
+ */
+ DB_ASSERT(!LOCK_ISSET(dbp->handle_lock) ||
+ dbp->lid != DB_LOCK_INVALIDID ||
+ dbp->type == DB_QUEUE ||
+ F_ISSET(dbp, DB_AM_SUBDB));
+
+ if (dbp->lid != DB_LOCK_INVALIDID) {
+ /* We may have pending trade operations on this dbp. */
+ if (txn != NULL)
+ __txn_remlock(dbenv, txn, &dbp->handle_lock, dbp->lid);
+
+ /* We may be holding the handle lock; release it. */
+ lreq.op = DB_LOCK_PUT_ALL;
+ if ((t_ret = __lock_vec(dbenv,
+ dbp->lid, 0, &lreq, 1, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret =
+ dbenv->lock_id_free(dbenv, dbp->lid)) != 0 && ret == 0)
+ ret = t_ret;
+ dbp->lid = DB_LOCK_INVALIDID;
+ LOCK_INIT(dbp->handle_lock);
+ }
+
+ /* Discard the locker ID allocated as the fileid. */
+ if (F_ISSET(dbp, DB_AM_INMEM) &&
+ LOCKING_ON(dbenv) && (t_ret = dbenv->lock_id_free(
+ dbenv, *(u_int32_t *)dbp->fileid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ dbp->type = DB_UNKNOWN;
+
+ /* Discard the thread mutex. */
+ if (dbp->mutexp != NULL) {
+ dbmp = dbenv->mp_handle;
+ __db_mutex_free(dbenv, dbmp->reginfo, dbp->mutexp);
+ dbp->mutexp = NULL;
+ }
+
+ /* Discard any memory used to store returned data. */
+ if (dbp->my_rskey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rskey.data);
+ if (dbp->my_rkey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rkey.data);
+ if (dbp->my_rdata.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rdata.data);
+
+ /* For safety's sake; we may refresh twice. */
+ memset(&dbp->my_rskey, 0, sizeof(DBT));
+ memset(&dbp->my_rkey, 0, sizeof(DBT));
+ memset(&dbp->my_rdata, 0, sizeof(DBT));
+
+ /*
+ * Remove this DB handle from the DB_ENV's dblist, if it's been added.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ if (dbp->dblistlinks.le_prev != NULL)
+ LIST_REMOVE(dbp, dblistlinks);
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+ dbp->dblistlinks.le_prev = NULL;
+
+ /* Close the memory pool file handle. */
+ if (dbp->mpf != NULL) {
+ if ((t_ret = dbp->mpf->close(dbp->mpf,
+ F_ISSET(dbp, DB_AM_DISCARD) ? DB_MPOOL_DISCARD : 0)) != 0 &&
+ ret == 0)
+ ret = t_ret;
+ dbp->mpf = NULL;
+ }
+
+ if (LOGGING_ON(dbp->dbenv)) {
+ /*
+ * Discard the log file id, if any. We want to log the close
+ * if and only if this is not a recovery dbp.
+ */
+ if (F_ISSET(dbp, DB_AM_RECOVER))
+ (void)__dbreg_revoke_id(dbp, 0);
+ else
+ (void)__dbreg_close_id(dbp, txn);
+
+ /* Discard the log FNAME. */
+ (void)__dbreg_teardown(dbp);
+ }
+
+ /* Clear out fields that normally get set during open. */
+ memset(dbp->fileid, 0, sizeof(dbp->fileid));
+ dbp->adj_fileid = 0;
+ dbp->meta_pgno = 0;
+ dbp->cur_lid = DB_LOCK_INVALIDID;
+ dbp->associate_lid = DB_LOCK_INVALIDID;
+ dbp->cl_id = 0;
+
+ /*
+ * If we are being refreshed with a txn specified, then we need
+ * to make sure that we clear out the lock handle field, because
+ * releasing all the locks for this transaction will release this
+ * lock and we don't want close to stumble upon this handle and
+ * try to close it.
+ */
+ if (txn != NULL)
+ LOCK_INIT(dbp->handle_lock);
+
+ F_CLR(dbp, DB_AM_DBM_ERROR);
+ F_CLR(dbp, DB_AM_DISCARD);
+ F_CLR(dbp, DB_AM_INMEM);
+ F_CLR(dbp, DB_AM_RECOVER);
+ F_CLR(dbp, DB_AM_OPEN_CALLED);
+ F_CLR(dbp, DB_AM_RDONLY);
+ F_CLR(dbp, DB_AM_SWAP);
+
+ return (ret);
+}
+
+/*
+ * __db_log_page
+ * Log a meta-data or root page during a subdatabase create operation.
+ *
+ * PUBLIC: int __db_log_page __P((DB *, DB_TXN *, DB_LSN *, db_pgno_t, PAGE *));
+ */
+int
+__db_log_page(dbp, txn, lsn, pgno, page)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_LSN *lsn;
+ db_pgno_t pgno;
+ PAGE *page;
+{
+ DBT page_dbt;
+ DB_LSN new_lsn;
+ int ret;
+
+ if (!LOGGING_ON(dbp->dbenv) || txn == NULL)
+ return (0);
+
+ memset(&page_dbt, 0, sizeof(page_dbt));
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = page;
+
+ ret = __crdel_metasub_log(dbp, txn, &new_lsn, 0, pgno, &page_dbt, lsn);
+
+ if (ret == 0)
+ page->lsn = new_lsn;
+ return (ret);
+}
+
+/*
+ * __db_backup_name
+ * Create the backup file name for a given file.
+ *
+ * PUBLIC: int __db_backup_name __P((DB_ENV *,
+ * PUBLIC: const char *, DB_TXN *, char **));
+ */
+#undef BACKUP_PREFIX
+#define BACKUP_PREFIX "__db."
+
+#undef MAX_LSN_TO_TEXT
+#define MAX_LSN_TO_TEXT 17
+
+int
+__db_backup_name(dbenv, name, txn, backup)
+ DB_ENV *dbenv;
+ const char *name;
+ DB_TXN *txn;
+ char **backup;
+{
+ DB_LSN lsn;
+ size_t len;
+ int plen, ret;
+ char *p, *retp;
+
+ /*
+ * Create the name. Backup file names are in one of two forms:
+ *
+ * In a transactional env: __db.LSN(8).LSN(8)
+ * and
+ * in a non-transactional env: __db.FILENAME.
+ *
+ * If the transaction doesn't have a current LSN, we write
+ * a dummy log record to force it, so that we ensure that
+ * all tmp names are unique.
+ *
+ * In addition, the name passed may contain an env-relative path.
+ * In that case, put the __db. in the right place (in the last
+ * component of the pathname).
+ */
+ if (txn != NULL) {
+ if (IS_ZERO_LSN(txn->last_lsn)) {
+ /*
+ * Write dummy log record. The two choices for
+ * dummy log records are __db_noop_log and
+ * __db_debug_log; unfortunately __db_noop_log requires
+ * a valid dbp, and we aren't guaranteed to be able
+ * to pass one in here.
+ */
+ if ((ret = __db_debug_log(dbenv, txn, &lsn, 0,
+ NULL, 0, NULL, NULL, 0)) != 0)
+ return (ret);
+ } else
+ lsn = txn->last_lsn;
+ }
+
+ /*
+ * Part of the name may be a full path, so we need to make sure that
+ * we allocate enough space for it, even in the case where we don't
+ * use the entire filename for the backup name.
+ */
+ len = strlen(name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT;
+
+ if ((ret = __os_malloc(dbenv, len, &retp)) != 0)
+ return (ret);
+
+ /*
+ * There are four cases here:
+ * 1. simple path w/out transaction
+ * 2. simple path + transaction
+ * 3. multi-component path w/out transaction
+ * 4. multi-component path + transaction
+ */
+ if ((p = __db_rpath(name)) == NULL) {
+ if (txn == NULL) /* case 1 */
+ snprintf(retp, len, "%s%s.", BACKUP_PREFIX, name);
+ else /* case 2 */
+ snprintf(retp, len,
+ "%s%x.%x", BACKUP_PREFIX, lsn.file, lsn.offset);
+ } else {
+ plen = (int)(p - name) + 1;
+ p++;
+ if (txn == NULL) /* case 3 */
+ snprintf(retp, len,
+ "%.*s%s%s.", plen, name, BACKUP_PREFIX, p);
+ else /* case 4 */
+ snprintf(retp, len,
+ "%.*s%x.%x.", plen, name, lsn.file, lsn.offset);
+ }
+
+ *backup = retp;
+ return (0);
+}
+
+/*
+ * __dblist_get --
+ * Get the first element of dbenv->dblist with
+ * dbp->adj_fileid matching adjid.
+ *
+ * PUBLIC: DB *__dblist_get __P((DB_ENV *, u_int32_t));
+ */
+DB *
+__dblist_get(dbenv, adjid)
+ DB_ENV *dbenv;
+ u_int32_t adjid;
+{
+ DB *dbp;
+
+ for (dbp = LIST_FIRST(&dbenv->dblist);
+ dbp != NULL && dbp->adj_fileid != adjid;
+ dbp = LIST_NEXT(dbp, dblistlinks))
+ ;
+
+ return (dbp);
+}
+
+/*
+ * __db_disassociate --
+ * Destroy the association between a given secondary and its primary.
+ */
+static int
+__db_disassociate(sdbp)
+ DB *sdbp;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ ret = 0;
+
+ sdbp->s_callback = NULL;
+ sdbp->s_primary = NULL;
+ sdbp->get = sdbp->stored_get;
+ sdbp->close = sdbp->stored_close;
+
+ /*
+ * Complain, but proceed, if we have any active cursors. (We're in
+ * the middle of a close, so there's really no turning back.)
+ */
+ if (sdbp->s_refcnt != 1 ||
+ TAILQ_FIRST(&sdbp->active_queue) != NULL ||
+ TAILQ_FIRST(&sdbp->join_queue) != NULL) {
+ __db_err(sdbp->dbenv,
+ "Closing a primary DB while a secondary DB has active cursors is unsafe");
+ ret = EINVAL;
+ }
+ sdbp->s_refcnt = 0;
+
+ while ((dbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL)
+ if ((t_ret = __db_c_destroy(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ F_CLR(sdbp, DB_AM_SECONDARY);
+ return (ret);
+}
+
+#if CONFIG_TEST
+/*
+ * __db_testcopy
+ * Create a copy of all backup files and our "main" DB.
+ *
+ * PUBLIC: #if CONFIG_TEST
+ * PUBLIC: int __db_testcopy __P((DB_ENV *, DB *, const char *));
+ * PUBLIC: #endif
+ */
+int
+__db_testcopy(dbenv, dbp, name)
+ DB_ENV *dbenv;
+ DB *dbp;
+ const char *name;
+{
+ DB_MPOOLFILE *mpf;
+
+ DB_ASSERT(dbp != NULL || name != NULL);
+
+ if (name == NULL) {
+ mpf = dbp->mpf;
+ name = R_ADDR(mpf->dbmp->reginfo, mpf->mfp->path_off);
+ }
+
+ if (dbp != NULL && dbp->type == DB_QUEUE)
+ return (__qam_testdocopy(dbp, name));
+ else
+ return (__db_testdocopy(dbenv, name));
+}
+
+static int
+__qam_testdocopy(dbp, name)
+ DB *dbp;
+ const char *name;
+{
+ QUEUE_FILELIST *filelist, *fp;
+ char buf[256], *dir;
+ int ret;
+
+ filelist = NULL;
+ if ((ret = __db_testdocopy(dbp->dbenv, name)) != 0)
+ return (ret);
+ if (dbp->mpf != NULL &&
+ (ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ return (ret);
+
+ if (filelist == NULL)
+ return (0);
+ dir = ((QUEUE *)dbp->q_internal)->dir;
+ for (fp = filelist; fp->mpf != NULL; fp++) {
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, dir, PATH_SEPARATOR[0], name, fp->id);
+ if ((ret = __db_testdocopy(dbp->dbenv, buf)) != 0)
+ return (ret);
+ }
+
+ __os_free(dbp->dbenv, filelist);
+ return (0);
+}
+
+/*
+ * __db_testdocopy
+ * Create a copy of all backup files and our "main" DB.
+ *
+ */
+static int
+__db_testdocopy(dbenv, name)
+ DB_ENV *dbenv;
+ const char *name;
+{
+ size_t len;
+ int dircnt, i, ret;
+ char **namesp, *backup, *copy, *dir, *p, *real_name;
+ real_name = NULL;
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ copy = backup = NULL;
+ namesp = NULL;
+
+ /*
+ * Maximum size of file, including adding a ".afterop".
+ */
+ len = strlen(real_name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT + 9;
+
+ if ((ret = __os_malloc(dbenv, len, &copy)) != 0)
+ goto out;
+
+ if ((ret = __os_malloc(dbenv, len, &backup)) != 0)
+ goto out;
+
+ /*
+ * First copy the file itself.
+ */
+ snprintf(copy, len, "%s.afterop", real_name);
+ __db_makecopy(real_name, copy);
+
+ if ((ret = __os_strdup(dbenv, real_name, &dir)) != 0)
+ goto out;
+ __os_free(dbenv, real_name);
+ real_name = NULL;
+ /*
+ * Create the name. Backup file names are of the form:
+ *
+ * __db.name.0x[lsn-file].0x[lsn-offset]
+ *
+ * which guarantees uniqueness. We want to look for the
+ * backup name, followed by a '.0x' (so that if they have
+ * files named, say, 'a' and 'abc' we won't match 'abc' when
+ * looking for 'a'.
+ */
+ snprintf(backup, len, "%s%s.0x", BACKUP_PREFIX, name);
+
+ /*
+ * We need the directory path to do the __os_dirlist.
+ */
+ p = __db_rpath(dir);
+ if (p != NULL)
+ *p = '\0';
+ ret = __os_dirlist(dbenv, dir, &namesp, &dircnt);
+#if DIAGNOSTIC
+ /*
+ * XXX
+ * To get the memory guard code to work because it uses strlen and we
+ * just moved the end of the string somewhere sooner. This causes the
+ * guard code to fail because it looks at one byte past the end of the
+ * string.
+ */
+ *p = '/';
+#endif
+ __os_free(dbenv, dir);
+ if (ret != 0)
+ goto out;
+ for (i = 0; i < dircnt; i++) {
+ /*
+ * Need to check if it is a backup file for this.
+ * No idea what namesp[i] may be or how long, so
+ * must use strncmp and not memcmp. We don't want
+ * to use strcmp either because we are only matching
+ * the first part of the real file's name. We don't
+ * know its LSN's.
+ */
+ if (strncmp(namesp[i], backup, strlen(backup)) == 0) {
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ namesp[i], 0, NULL, &real_name)) != 0)
+ goto out;
+
+ /*
+ * This should not happen. Check that old
+ * .afterop files aren't around.
+ * If so, just move on.
+ */
+ if (strstr(real_name, ".afterop") != NULL) {
+ __os_free(dbenv, real_name);
+ real_name = NULL;
+ continue;
+ }
+ snprintf(copy, len, "%s.afterop", real_name);
+ __db_makecopy(real_name, copy);
+ __os_free(dbenv, real_name);
+ real_name = NULL;
+ }
+ }
+out:
+ if (backup != NULL)
+ __os_free(dbenv, backup);
+ if (copy != NULL)
+ __os_free(dbenv, copy);
+ if (namesp != NULL)
+ __os_dirfree(dbenv, namesp, dircnt);
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
+
+static void
+__db_makecopy(src, dest)
+ const char *src, *dest;
+{
+ DB_FH rfh, wfh;
+ size_t rcnt, wcnt;
+ char *buf;
+
+ memset(&rfh, 0, sizeof(rfh));
+ memset(&wfh, 0, sizeof(wfh));
+
+ if (__os_malloc(NULL, 1024, &buf) != 0)
+ return;
+
+ if (__os_open(NULL,
+ src, DB_OSO_RDONLY, __db_omode("rw----"), &rfh) != 0)
+ goto err;
+ if (__os_open(NULL, dest,
+ DB_OSO_CREATE | DB_OSO_TRUNC, __db_omode("rw----"), &wfh) != 0)
+ goto err;
+
+ for (;;)
+ if (__os_read(NULL, &rfh, buf, 1024, &rcnt) < 0 || rcnt == 0 ||
+ __os_write(NULL, &wfh, buf, rcnt, &wcnt) < 0)
+ break;
+
+err: __os_free(NULL, buf);
+ if (F_ISSET(&rfh, DB_FH_VALID))
+ __os_closehandle(NULL, &rfh);
+ if (F_ISSET(&wfh, DB_FH_VALID))
+ __os_closehandle(NULL, &wfh);
+}
+#endif
diff --git a/storage/bdb/db/db.src b/storage/bdb/db/db.src
new file mode 100644
index 00000000000..414321fcbbd
--- /dev/null
+++ b/storage/bdb/db/db.src
@@ -0,0 +1,195 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db.src,v 11.18 2002/04/17 19:02:58 krinsky Exp $
+ */
+
+PREFIX __db
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * addrem -- Add or remove an entry from a duplicate page.
+ *
+ * opcode: identifies if this is an add or delete.
+ * fileid: file identifier of the file being modified.
+ * pgno: duplicate page number.
+ * indx: location at which to insert or delete.
+ * nbytes: number of bytes added/removed to/from the page.
+ * hdr: header for the data item.
+ * dbt: data that is deleted or is to be added.
+ * pagelsn: former lsn of the page.
+ *
+ * If the hdr was NULL, then the dbt is a regular B_KEYDATA.
+ * If the dbt was NULL then the hdr is a complete item to be
+ * pasted on the page.
+ */
+BEGIN addrem 41
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG nbytes u_int32_t lu
+PGDBT hdr DBT s
+DBT dbt DBT s
+POINTER pagelsn DB_LSN * lu
+END
+
+/*
+ * big -- Handles addition and deletion of big key/data items.
+ *
+ * opcode: identifies get/put.
+ * fileid: file identifier of the file being modified.
+ * pgno: page onto which data is being added/removed.
+ * prev_pgno: the page before the one we are logging.
+ * next_pgno: the page after the one we are logging.
+ * dbt: data being written onto the page.
+ * pagelsn: former lsn of the orig_page.
+ * prevlsn: former lsn of the prev_pgno.
+ * nextlsn: former lsn of the next_pgno. This is not currently used, but
+ * may be used later if we actually do overwrites of big key/
+ * data items in place.
+ */
+BEGIN big 43
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+WRLOCKNZ prev_pgno db_pgno_t lu
+WRLOCKNZ next_pgno db_pgno_t lu
+DBT dbt DBT s
+POINTER pagelsn DB_LSN * lu
+POINTER prevlsn DB_LSN * lu
+POINTER nextlsn DB_LSN * lu
+END
+
+/*
+ * ovref -- Handles increment/decrement of overflow page reference count.
+ *
+ * fileid: identifies the file being modified.
+ * pgno: page number whose ref count is being incremented/decremented.
+ * adjust: the adjustment being made.
+ * lsn: the page's original lsn.
+ */
+BEGIN ovref 44
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+ARG adjust int32_t ld
+POINTER lsn DB_LSN * lu
+END
+
+/*
+ * relink -- Handles relinking around a page.
+ *
+ * opcode: indicates if this is an addpage or delete page
+ * pgno: the page being changed.
+ * lsn the page's original lsn.
+ * prev: the previous page.
+ * lsn_prev: the previous page's original lsn.
+ * next: the next page.
+ * lsn_next: the next page's original lsn.
+ */
+BEGIN relink 45
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER lsn DB_LSN * lu
+WRLOCKNZ prev db_pgno_t lu
+POINTER lsn_prev DB_LSN * lu
+WRLOCKNZ next db_pgno_t lu
+POINTER lsn_next DB_LSN * lu
+END
+
+/*
+ * Debug -- log an operation upon entering an access method.
+ * op: Operation (cursor, c_close, c_get, c_put, c_del,
+ * get, put, delete).
+ * fileid: identifies the file being acted upon.
+ * key: key parameter
+ * data: data parameter
+ * flags: flags parameter
+ */
+BEGIN debug 47
+DBT op DBT s
+ARG fileid int32_t ld
+DBT key DBT s
+DBT data DBT s
+ARG arg_flags u_int32_t lu
+END
+
+/*
+ * noop -- do nothing, but get an LSN.
+ */
+BEGIN noop 48
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER prevlsn DB_LSN * lu
+END
+
+/*
+ * pg_alloc: used to record allocating a new page.
+ *
+ * meta_lsn: the meta-data page's original lsn.
+ * meta_pgno the meta-data page number.
+ * page_lsn: the allocated page's original lsn.
+ * pgno: the page allocated.
+ * ptype: the type of the page allocated.
+ * next: the next page on the free list.
+ */
+BEGIN pg_alloc 49
+DB fileid int32_t ld
+POINTER meta_lsn DB_LSN * lu
+WRLOCK meta_pgno db_pgno_t lu
+POINTER page_lsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
+ARG ptype u_int32_t lu
+ARG next db_pgno_t lu
+END
+
+/*
+ * pg_free: used to record freeing a page.
+ *
+ * pgno: the page being freed.
+ * meta_lsn: the meta-data page's original lsn.
+ * meta_pgno: the meta-data page number.
+ * header: the header from the free'd page.
+ * next: the previous next pointer on the metadata page.
+ */
+BEGIN pg_free 50
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER meta_lsn DB_LSN * lu
+WRLOCK meta_pgno db_pgno_t lu
+PGDBT header DBT s
+ARG next db_pgno_t lu
+END
+
+/*
+ * cksum --
+ * This log record is written when we're unable to checksum a page,
+ * before returning DB_RUNRECOVERY. This log record causes normal
+ * recovery to itself return DB_RUNRECOVERY, as only catastrophic
+ * recovery can fix things.
+ */
+BEGIN cksum 51
+END
diff --git a/storage/bdb/db/db_am.c b/storage/bdb/db/db_am.c
new file mode 100644
index 00000000000..cf6ef18549b
--- /dev/null
+++ b/storage/bdb/db/db_am.c
@@ -0,0 +1,1271 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_am.c,v 11.96 2002/08/27 15:17:32 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+
+static int __db_append_primary __P((DBC *, DBT *, DBT *));
+static int __db_secondary_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+static int __db_secondary_close __P((DB *, u_int32_t));
+
+#ifdef DEBUG
+static int __db_cprint_item __P((DBC *));
+#endif
+
+/*
+ * __db_cursor --
+ * Allocate and return a cursor.
+ *
+ * PUBLIC: int __db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ */
+int
+__db_cursor(dbp, txn, dbcp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBC **dbcp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ db_lockmode_t mode;
+ u_int32_t op;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->cursor");
+
+ /* Validate arguments. */
+ if ((ret = __db_cursorchk(dbp, flags)) != 0)
+ return (ret);
+
+ /*
+ * Check for consistent transaction usage. For now, assume that
+ * this cursor might be used for read operations only (in which
+ * case it may not require a txn). We'll check more stringently
+ * in c_del and c_put. (Note that this all means that the
+ * read-op txn tests have to be a subset of the write-op ones.)
+ */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+ return (ret);
+
+ if ((ret = __db_icursor(dbp,
+ txn, dbp->type, PGNO_INVALID, 0, DB_LOCK_INVALIDID, dbcp)) != 0)
+ return (ret);
+ dbc = *dbcp;
+
+ /*
+ * If this is CDB, do all the locking in the interface, which is
+ * right here.
+ */
+ if (CDB_LOCKING(dbenv)) {
+ op = LF_ISSET(DB_OPFLAGS_MASK);
+ mode = (op == DB_WRITELOCK) ? DB_LOCK_WRITE :
+ ((op == DB_WRITECURSOR) ? DB_LOCK_IWRITE : DB_LOCK_READ);
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker, 0,
+ &dbc->lock_dbt, mode, &dbc->mylock)) != 0) {
+ (void)__db_c_close(dbc);
+ return (ret);
+ }
+ if (op == DB_WRITECURSOR)
+ F_SET(dbc, DBC_WRITECURSOR);
+ if (op == DB_WRITELOCK)
+ F_SET(dbc, DBC_WRITER);
+ }
+
+ if (LF_ISSET(DB_DIRTY_READ) ||
+ (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ)))
+ F_SET(dbc, DBC_DIRTY_READ);
+ return (0);
+}
+
/*
 * __db_icursor --
 *	Internal version of __db_cursor.  If dbcp is
 * non-NULL it is assumed to point to an area to
 * initialize as a cursor.
 *
 * Reuses a cursor of the right type from dbp's free queue when one is
 * available; otherwise allocates and fully initializes a new DBC,
 * including its locker ID and lock DBT.  In either case the cursor is
 * refreshed for this use and placed on dbp's active queue.
 *
 * PUBLIC: int __db_icursor
 * PUBLIC:     __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, u_int32_t, DBC **));
 */
int
__db_icursor(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
	DB *dbp;
	DB_TXN *txn;
	DBTYPE dbtype;
	db_pgno_t root;		/* Root page of the (sub)tree, or PGNO_INVALID. */
	int is_opd;		/* Non-zero: this is an off-page-duplicate cursor. */
	u_int32_t lockerid;	/* Caller-supplied locker, or DB_LOCK_INVALIDID. */
	DBC **dbcp;
{
	DBC *dbc, *adbc;
	DBC_INTERNAL *cp;
	DB_ENV *dbenv;
	int allocated, ret;

	dbenv = dbp->dbenv;
	allocated = 0;

	/*
	 * Take one from the free list if it's available.  Take only the
	 * right type.  With off page dups we may have different kinds
	 * of cursors on the queue for a single database.
	 */
	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
	for (dbc = TAILQ_FIRST(&dbp->free_queue);
	    dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
		if (dbtype == dbc->dbtype) {
			TAILQ_REMOVE(&dbp->free_queue, dbc, links);
			/*
			 * Clear every flag except DBC_OWN_LID, so a
			 * reused cursor keeps (and later frees) the
			 * locker ID it allocated.
			 */
			F_CLR(dbc, ~DBC_OWN_LID);
			break;
		}
	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);

	if (dbc == NULL) {
		if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(DBC), &dbc)) != 0)
			return (ret);
		allocated = 1;
		dbc->flags = 0;

		dbc->dbp = dbp;

		/* Set up locking information. */
		if (LOCKING_ON(dbenv)) {
			/*
			 * If we are not threaded, then there is no need to
			 * create new locker ids.  We know that no one else
			 * is running concurrently using this DB, so we can
			 * take a peek at any cursors on the active queue.
			 */
			if (!DB_IS_THREADED(dbp) &&
			    (adbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
				dbc->lid = adbc->lid;
			else {
				if ((ret =
				    dbenv->lock_id(dbenv, &dbc->lid)) != 0)
					goto err;
				F_SET(dbc, DBC_OWN_LID);
			}

			/*
			 * In CDB, secondary indices should share a lock file
			 * ID with the primary; otherwise we're susceptible to
			 * deadlocks.  We also use __db_icursor rather
			 * than sdbp->cursor to create secondary update
			 * cursors in c_put and c_del; these won't
			 * acquire a new lock.
			 *
			 * !!!
			 * Since this is in the one-time cursor allocation
			 * code, we need to be sure to destroy, not just
			 * close, all cursors in the secondary when we
			 * associate.
			 */
			if (CDB_LOCKING(dbp->dbenv) &&
			    F_ISSET(dbp, DB_AM_SECONDARY))
				memcpy(dbc->lock.fileid,
				    dbp->s_primary->fileid, DB_FILE_ID_LEN);
			else
				memcpy(dbc->lock.fileid,
				    dbp->fileid, DB_FILE_ID_LEN);

			if (CDB_LOCKING(dbenv)) {
				if (F_ISSET(dbenv, DB_ENV_CDB_ALLDB)) {
					/*
					 * If we are doing a single lock per
					 * environment, set up the global
					 * lock object just like we do to
					 * single thread creates.
					 */
					DB_ASSERT(sizeof(db_pgno_t) ==
					    sizeof(u_int32_t));
					dbc->lock_dbt.size = sizeof(u_int32_t);
					dbc->lock_dbt.data = &dbc->lock.pgno;
					dbc->lock.pgno = 0;
				} else {
					dbc->lock_dbt.size = DB_FILE_ID_LEN;
					dbc->lock_dbt.data = dbc->lock.fileid;
				}
			} else {
				dbc->lock.type = DB_PAGE_LOCK;
				dbc->lock_dbt.size = sizeof(dbc->lock);
				dbc->lock_dbt.data = &dbc->lock;
			}
		}
		/* Init the DBC internal structure. */
		switch (dbtype) {
		case DB_BTREE:
		case DB_RECNO:
			if ((ret = __bam_c_init(dbc, dbtype)) != 0)
				goto err;
			break;
		case DB_HASH:
			if ((ret = __ham_c_init(dbc)) != 0)
				goto err;
			break;
		case DB_QUEUE:
			if ((ret = __qam_c_init(dbc)) != 0)
				goto err;
			break;
		default:
			ret = __db_unknown_type(dbp->dbenv,
			    "__db_icursor", dbtype);
			goto err;
		}

		/* NOTE(review): dead store -- cp is reassigned below. */
		cp = dbc->internal;
	}

	/* Refresh the DBC structure. */
	dbc->dbtype = dbtype;
	RESET_RET_MEM(dbc);

	if ((dbc->txn = txn) == NULL) {
		/*
		 * There are certain cases in which we want to create a
		 * new cursor with a particular locker ID that is known
		 * to be the same as (and thus not conflict with) an
		 * open cursor.
		 *
		 * The most obvious case is cursor duplication; when we
		 * call DBC->c_dup or __db_c_idup, we want to use the original
		 * cursor's locker ID.
		 *
		 * Another case is when updating secondary indices.  Standard
		 * CDB locking would mean that we might block ourself: we need
		 * to open an update cursor in the secondary while an update
		 * cursor in the primary is open, and when the secondary and
		 * primary are subdatabases or we're using env-wide locking,
		 * this is disastrous.
		 *
		 * In these cases, our caller will pass a nonzero locker ID
		 * into this function.  Use this locker ID instead of dbc->lid
		 * as the locker ID for our new cursor.
		 */
		if (lockerid != DB_LOCK_INVALIDID)
			dbc->locker = lockerid;
		else
			dbc->locker = dbc->lid;
	} else {
		/* Transactional cursors lock on the txn's behalf. */
		dbc->locker = txn->txnid;
		txn->cursors++;
	}

	/*
	 * These fields change when we are used as a secondary index, so
	 * if the DB is a secondary, make sure they're set properly just
	 * in case we opened some cursors before we were associated.
	 *
	 * __db_c_get is used by all access methods, so this should be safe.
	 */
	if (F_ISSET(dbp, DB_AM_SECONDARY))
		dbc->c_get = __db_c_secondary_get;

	if (is_opd)
		F_SET(dbc, DBC_OPD);
	if (F_ISSET(dbp, DB_AM_RECOVER))
		F_SET(dbc, DBC_RECOVER);
	if (F_ISSET(dbp, DB_AM_COMPENSATE))
		F_SET(dbc, DBC_COMPENSATE);

	/* Refresh the DBC internal structure. */
	cp = dbc->internal;
	cp->opd = NULL;

	cp->indx = 0;
	cp->page = NULL;
	cp->pgno = PGNO_INVALID;
	cp->root = root;

	switch (dbtype) {
	case DB_BTREE:
	case DB_RECNO:
		if ((ret = __bam_c_refresh(dbc)) != 0)
			goto err;
		break;
	case DB_HASH:
	case DB_QUEUE:
		break;
	default:
		ret = __db_unknown_type(dbp->dbenv, "__db_icursor", dbp->type);
		goto err;
	}

	/* Put the cursor on the active queue and hand it back. */
	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
	TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links);
	F_SET(dbc, DBC_ACTIVE);
	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);

	*dbcp = dbc;
	return (0);

	/*
	 * NOTE(review): on error a freshly allocated cursor is freed, but
	 * one reused from the free queue is neither destroyed nor returned
	 * to the queue -- presumably acceptable at close time; confirm.
	 */
err:	if (allocated)
		__os_free(dbp->dbenv, dbc);
	return (ret);
}
+
+#ifdef DEBUG
+/*
+ * __db_cprint --
+ * Display the cursor active and free queues.
+ *
+ * PUBLIC: int __db_cprint __P((DB *));
+ */
+int
+__db_cprint(dbp)
+ DB *dbp;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ fprintf(stderr, "Active queue:\n");
+ for (dbc = TAILQ_FIRST(&dbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if ((t_ret = __db_cprint_item(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ fprintf(stderr, "Free queue:\n");
+ for (dbc = TAILQ_FIRST(&dbp->free_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if ((t_ret = __db_cprint_item(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+
+ return (ret);
+}
+
+static
+int __db_cprint_item(dbc)
+ DBC *dbc;
+{
+ static const FN fn[] = {
+ { DBC_ACTIVE, "active" },
+ { DBC_COMPENSATE, "compensate" },
+ { DBC_OPD, "off-page-dup" },
+ { DBC_RECOVER, "recover" },
+ { DBC_RMW, "read-modify-write" },
+ { DBC_TRANSIENT, "transient" },
+ { DBC_WRITECURSOR, "write cursor" },
+ { DBC_WRITEDUP, "internally dup'ed write cursor" },
+ { DBC_WRITER, "short-term write cursor" },
+ { 0, NULL }
+ };
+ DB *dbp;
+ DBC_INTERNAL *cp;
+ const char *s;
+
+ dbp = dbc->dbp;
+ cp = dbc->internal;
+
+ s = __db_dbtype_to_string(dbc->dbtype);
+ if (strcmp(s, "UNKNOWN TYPE") == 0) {
+ DB_ASSERT(0);
+ return (1);
+ }
+ fprintf(stderr, "%s/%#0lx: opd: %#0lx\n",
+ s, P_TO_ULONG(dbc), P_TO_ULONG(cp->opd));
+
+ fprintf(stderr, "\ttxn: %#0lx lid: %lu locker: %lu\n",
+ P_TO_ULONG(dbc->txn), (u_long)dbc->lid, (u_long)dbc->locker);
+
+ fprintf(stderr, "\troot: %lu page/index: %lu/%lu",
+ (u_long)cp->root, (u_long)cp->pgno, (u_long)cp->indx);
+
+ __db_prflags(dbc->flags, fn, stderr);
+ fprintf(stderr, "\n");
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ __bam_cprint(dbc);
+ break;
+ case DB_HASH:
+ __ham_cprint(dbc);
+ break;
+ default:
+ break;
+ }
+ return (0);
+}
+#endif /* DEBUG */
+
+/*
+ * db_fd --
+ * Return a file descriptor for flock'ing.
+ *
+ * PUBLIC: int __db_fd __P((DB *, int *));
+ */
+int
+__db_fd(dbp, fdp)
+ DB *dbp;
+ int *fdp;
+{
+ DB_FH *fhp;
+ int ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->fd");
+
+ /*
+ * XXX
+ * Truly spectacular layering violation.
+ */
+ if ((ret = __mp_xxx_fh(dbp->mpf, &fhp)) != 0)
+ return (ret);
+
+ if (F_ISSET(fhp, DB_FH_VALID)) {
+ *fdp = fhp->fd;
+ return (0);
+ } else {
+ *fdp = -1;
+ __db_err(dbp->dbenv, "DB does not have a valid file handle");
+ return (ENOENT);
+ }
+}
+
+/*
+ * __db_get --
+ * Return a key/data pair.
+ *
+ * PUBLIC: int __db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_get(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ int mode, ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->get");
+
+ if ((ret = __db_getchk(dbp, key, data, flags)) != 0)
+ return (ret);
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+ return (ret);
+
+ mode = 0;
+ if (LF_ISSET(DB_DIRTY_READ)) {
+ mode = DB_DIRTY_READ;
+ LF_CLR(DB_DIRTY_READ);
+ }
+ else if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ mode = DB_WRITELOCK;
+ if ((ret = dbp->cursor(dbp, txn, &dbc, mode)) != 0)
+ return (ret);
+
+ DEBUG_LREAD(dbc, txn, "__db_get", key, NULL, flags);
+
+ /*
+ * The DBC_TRANSIENT flag indicates that we're just doing a
+ * single operation with this cursor, and that in case of
+ * error we don't need to restore it to its old position--we're
+ * going to close it right away. Thus, we can perform the get
+ * without duplicating the cursor, saving some cycles in this
+ * common case.
+ *
+ * SET_RET_MEM indicates that if key and/or data have no DBT
+ * flags set and DB manages the returned-data memory, that memory
+ * will belong to this handle, not to the underlying cursor.
+ */
+ F_SET(dbc, DBC_TRANSIENT);
+ SET_RET_MEM(dbc, dbp);
+
+ if (LF_ISSET(~(DB_RMW | DB_MULTIPLE)) == 0)
+ LF_SET(DB_SET);
+ ret = dbc->c_get(dbc, key, data, flags);
+
+ if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_put --
+ * Store a key/data pair.
+ *
+ * PUBLIC: int __db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_put(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DBT tdata;
+ DB_ENV *dbenv;
+ int ret, t_ret, txn_local;
+
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->put");
+
+ /* Validate arguments. */
+ if ((ret = __db_putchk(dbp, key, data,
+ flags, F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))) != 0)
+ return (ret);
+
+ /* Create local transaction as necessary. */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ LF_CLR(DB_AUTO_COMMIT);
+ }
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ goto err;
+
+ DEBUG_LWRITE(dbc, txn, "db_put", key, data, flags);
+
+ SET_RET_MEM(dbc, dbp);
+
+ /*
+ * See the comment in __db_get().
+ *
+ * Note that the c_get in the DB_NOOVERWRITE case is safe to
+ * do with this flag set; if it errors in any way other than
+ * DB_NOTFOUND, we're going to close the cursor without doing
+ * anything else, and if it returns DB_NOTFOUND then it's safe
+ * to do a c_put(DB_KEYLAST) even if an access method moved the
+ * cursor, since that's not position-dependent.
+ */
+ F_SET(dbc, DBC_TRANSIENT);
+
+ switch (flags) {
+ case DB_APPEND:
+ /*
+ * If there is an append callback, the value stored in
+ * data->data may be replaced and then freed. To avoid
+ * passing a freed pointer back to the user, just operate
+ * on a copy of the data DBT.
+ */
+ tdata = *data;
+
+ /*
+ * Append isn't a normal put operation; call the appropriate
+ * access method's append function.
+ */
+ switch (dbp->type) {
+ case DB_QUEUE:
+ if ((ret = __qam_append(dbc, key, &tdata)) != 0)
+ goto err;
+ break;
+ case DB_RECNO:
+ if ((ret = __ram_append(dbc, key, &tdata)) != 0)
+ goto err;
+ break;
+ default:
+ /* The interface should prevent this. */
+ DB_ASSERT(0);
+ ret = __db_ferr(dbenv, "__db_put", flags);
+ goto err;
+ }
+
+ /*
+ * Secondary indices: since we've returned zero from
+ * an append function, we've just put a record, and done
+ * so outside __db_c_put. We know we're not a secondary--
+ * the interface prevents puts on them--but we may be a
+ * primary. If so, update our secondary indices
+ * appropriately.
+ */
+ DB_ASSERT(!F_ISSET(dbp, DB_AM_SECONDARY));
+
+ if (LIST_FIRST(&dbp->s_secondaries) != NULL)
+ ret = __db_append_primary(dbc, key, &tdata);
+
+ /*
+ * The append callback, if one exists, may have allocated
+ * a new tdata.data buffer. If so, free it.
+ */
+ FREE_IF_NEEDED(dbp, &tdata);
+
+ /* No need for a cursor put; we're done. */
+ goto err;
+ case DB_NOOVERWRITE:
+ flags = 0;
+ /*
+ * Set DB_DBT_USERMEM, this might be a threaded application and
+ * the flags checking will catch us. We don't want the actual
+ * data, so request a partial of length 0.
+ */
+ memset(&tdata, 0, sizeof(tdata));
+ F_SET(&tdata, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ /*
+ * If we're doing page-level locking, set the read-modify-write
+ * flag, we're going to overwrite immediately.
+ */
+ if ((ret = dbc->c_get(dbc, key, &tdata,
+ DB_SET | (STD_LOCKING(dbc) ? DB_RMW : 0))) == 0)
+ ret = DB_KEYEXIST;
+ else if (ret == DB_NOTFOUND || ret == DB_KEYEMPTY)
+ ret = 0;
+ break;
+ default:
+ /* Fall through to normal cursor put. */
+ break;
+ }
+ /*
+ * flags is 0 here either for a plain put or after a successful
+ * DB_NOOVERWRITE existence check; DB_KEYLAST is the ordinary put.
+ */
+ if (ret == 0)
+ ret = dbc->c_put(dbc,
+ key, data, flags == 0 ? DB_KEYLAST : flags);
+
+err: /* Close the cursor. */
+ if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ /* A failed abort is unrecoverable: panic the env. */
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_delete --
+ * Delete the items referenced by a key.
+ *
+ * PUBLIC: int __db_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ */
+int
+__db_delete(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DBT data, lkey;
+ DB_ENV *dbenv;
+ u_int32_t f_init, f_next;
+ int ret, t_ret, txn_local;
+
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->del");
+
+ /* Check for invalid flags. */
+ if ((ret = __db_delchk(dbp, key, flags)) != 0)
+ return (ret);
+
+ /* Create local transaction as necessary. */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ LF_CLR(DB_AUTO_COMMIT);
+ }
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ goto err;
+
+ DEBUG_LWRITE(dbc, txn, "db_delete", key, NULL, flags);
+
+ /*
+ * Walk a cursor through the key/data pairs, deleting as we go. Set
+ * the DB_DBT_USERMEM flag, as this might be a threaded application
+ * and the flags checking will catch us. We don't actually want the
+ * keys or data, so request a partial of length 0.
+ */
+ memset(&lkey, 0, sizeof(lkey));
+ F_SET(&lkey, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ /*
+ * If locking (and we haven't already acquired CDB locks), set the
+ * read-modify-write flag.
+ */
+ f_init = DB_SET;
+ f_next = DB_NEXT_DUP;
+ if (STD_LOCKING(dbc)) {
+ f_init |= DB_RMW;
+ f_next |= DB_RMW;
+ }
+
+ /* Walk through the set of key/data pairs, deleting as we go. */
+ if ((ret = dbc->c_get(dbc, key, &data, f_init)) != 0)
+ goto err;
+
+ /*
+ * Hash permits an optimization in DB->del: since on-page
+ * duplicates are stored in a single HKEYDATA structure, it's
+ * possible to delete an entire set of them at once, and as
+ * the HKEYDATA has to be rebuilt and re-put each time it
+ * changes, this is much faster than deleting the duplicates
+ * one by one. Thus, if we're not pointing at an off-page
+ * duplicate set, and we're not using secondary indices (in
+ * which case we'd have to examine the items one by one anyway),
+ * let hash do this "quick delete".
+ *
+ * !!!
+ * Note that this is the only application-executed delete call in
+ * Berkeley DB that does not go through the __db_c_del function.
+ * If anything other than the delete itself (like a secondary index
+ * update) has to happen there in a particular situation, the
+ * conditions here should be modified not to call __ham_quick_delete.
+ * The ordinary AM-independent alternative will work just fine with
+ * a hash; it'll just be slower.
+ */
+ if (dbp->type == DB_HASH) {
+ if (LIST_FIRST(&dbp->s_secondaries) == NULL &&
+ !F_ISSET(dbp, DB_AM_SECONDARY) &&
+ dbc->internal->opd == NULL) {
+ ret = __ham_quick_delete(dbc);
+ goto err;
+ }
+ }
+
+ /* Delete the current item, then any remaining duplicates of the key. */
+ for (;;) {
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+ if ((ret = dbc->c_get(dbc, &lkey, &data, f_next)) != 0) {
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ break;
+ }
+ goto err;
+ }
+ }
+
+err: /* Discard the cursor. */
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ /* A failed abort is unrecoverable: panic the env. */
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_sync --
+ * Flush the database cache.
+ *
+ * PUBLIC: int __db_sync __P((DB *, u_int32_t));
+ */
+int
+__db_sync(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->sync");
+
+ if ((ret = __db_syncchk(dbp, flags)) != 0)
+ return (ret);
+
+ /* Read-only trees never need to be sync'd. */
+ if (F_ISSET(dbp, DB_AM_RDONLY))
+ return (0);
+
+ /* If it's a Recno tree, write the backing source text file. */
+ if (dbp->type == DB_RECNO)
+ ret = __ram_writeback(dbp);
+
+ /* If the tree was never backed by a database file, we're done. */
+ if (F_ISSET(dbp, DB_AM_INMEM))
+ return (0);
+
+ /* Flush any dirty pages from the cache to the backing file. */
+ /* An earlier Recno writeback error takes precedence over sync's. */
+ if ((t_ret = dbp->mpf->sync(dbp->mpf)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_associate --
+ * Associate another database as a secondary index to this one.
+ *
+ * PUBLIC: int __db_associate __P((DB *, DB_TXN *, DB *,
+ * PUBLIC: int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+ */
+int
+__db_associate(dbp, txn, sdbp, callback, flags)
+ DB *dbp, *sdbp;
+ DB_TXN *txn;
+ int (*callback) __P((DB *, const DBT *, const DBT *, DBT *));
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *pdbc, *sdbc;
+ DBT skey, key, data;
+ int build, ret, t_ret, txn_local;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ txn_local = 0;
+ pdbc = NULL;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ memset(&skey, 0, sizeof(DBT));
+
+ if ((ret = __db_associatechk(dbp, sdbp, callback, flags)) != 0)
+ return (ret);
+
+ /*
+ * Create a local transaction as necessary, check for consistent
+ * transaction usage, and, if we have no transaction but do have
+ * locking on, acquire a locker id for the handle lock acquisition.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ /*
+ * Check that if an open transaction is in progress, we're in it,
+ * for other common transaction errors, and for concurrent associates.
+ */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
+ return (ret);
+
+ sdbp->s_callback = callback;
+ sdbp->s_primary = dbp;
+
+ sdbp->stored_get = sdbp->get;
+ sdbp->get = __db_secondary_get;
+
+ sdbp->stored_close = sdbp->close;
+ sdbp->close = __db_secondary_close;
+
+ /*
+ * Secondary cursors may have the primary's lock file ID, so we
+ * need to make sure that no older cursors are lying around
+ * when we make the transition.
+ */
+ if (TAILQ_FIRST(&sdbp->active_queue) != NULL ||
+ TAILQ_FIRST(&sdbp->join_queue) != NULL) {
+ __db_err(dbenv,
+ "Databases may not become secondary indices while cursors are open");
+ ret = EINVAL;
+ goto err;
+ }
+ /* Destroy cached free-list cursors that may carry stale locker IDs. */
+ while ((sdbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL)
+ if ((ret = __db_c_destroy(sdbc)) != 0)
+ goto err;
+
+ F_SET(sdbp, DB_AM_SECONDARY);
+
+ /*
+ * Check to see if the secondary is empty--and thus if we should
+ * build it--before we link it in and risk making it show up in
+ * other threads.
+ */
+ build = 0;
+ if (LF_ISSET(DB_CREATE)) {
+ if ((ret = sdbp->cursor(sdbp, txn, &sdbc, 0)) != 0)
+ goto err;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ /*
+ * We don't care about key or data; we're just doing
+ * an existence check.
+ */
+ F_SET(&key, DB_DBT_PARTIAL | DB_DBT_USERMEM);
+ F_SET(&data, DB_DBT_PARTIAL | DB_DBT_USERMEM);
+ if ((ret = sdbc->c_real_get(sdbc, &key, &data,
+ (STD_LOCKING(sdbc) ? DB_RMW : 0) |
+ DB_FIRST)) == DB_NOTFOUND) {
+ build = 1;
+ ret = 0;
+ }
+
+ /*
+ * Secondary cursors have special refcounting close
+ * methods. Be careful.
+ */
+ if ((t_ret = __db_c_close(sdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ }
+
+ /*
+ * Add the secondary to the list on the primary. Do it here
+ * so that we see any updates that occur while we're walking
+ * the primary.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ /* See __db_s_next for an explanation of secondary refcounting. */
+ DB_ASSERT(sdbp->s_refcnt == 0);
+ sdbp->s_refcnt = 1;
+ LIST_INSERT_HEAD(&dbp->s_secondaries, sdbp, s_links);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ if (build) {
+ /*
+ * We loop through the primary, putting each item we
+ * find into the new secondary.
+ *
+ * If we're using CDB, opening these two cursors puts us
+ * in a bit of a locking tangle: CDB locks are done on the
+ * primary, so that we stay deadlock-free, but that means
+ * that updating the secondary while we have a read cursor
+ * open on the primary will self-block. To get around this,
+ * we force the primary cursor to use the same locker ID
+ * as the secondary, so they won't conflict. This should
+ * be harmless even if we're not using CDB.
+ */
+ if ((ret = sdbp->cursor(sdbp, txn, &sdbc,
+ CDB_LOCKING(sdbp->dbenv) ? DB_WRITECURSOR : 0)) != 0)
+ goto err;
+ if ((ret = __db_icursor(dbp,
+ txn, dbp->type, PGNO_INVALID, 0, sdbc->locker, &pdbc)) != 0)
+ goto err;
+
+ /* Lock out other threads, now that we have a locker ID. */
+ dbp->associate_lid = sdbc->locker;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ while ((ret = pdbc->c_get(pdbc, &key, &data, DB_NEXT)) == 0) {
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = callback(sdbp, &key, &data, &skey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ continue;
+ else
+ goto err;
+ }
+ if ((ret = sdbc->c_put(sdbc,
+ &skey, &key, DB_UPDATE_SECONDARY)) != 0) {
+ FREE_IF_NEEDED(sdbp, &skey);
+ goto err;
+ }
+
+ FREE_IF_NEEDED(sdbp, &skey);
+ }
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ if ((ret = sdbc->c_close(sdbc)) != 0)
+ goto err;
+ }
+
+err: if (pdbc != NULL && (t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ dbp->associate_lid = DB_LOCK_INVALIDID;
+
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ /* A failed abort is unrecoverable: panic the env. */
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_pget --
+ * Return a primary key/data pair given a secondary key.
+ *
+ * PUBLIC: int __db_pget __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_pget(dbp, txn, skey, pkey, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *skey, *pkey, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->pget");
+
+ if ((ret = __db_pgetchk(dbp, skey, pkey, data, flags)) != 0)
+ return (ret);
+
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+ SET_RET_MEM(dbc, dbp);
+
+ /*
+ * The underlying cursor pget will fill in a default DBT for null
+ * pkeys, and use the cursor's returned-key memory internally to
+ * store any intermediate primary keys. However, we've just set
+ * the returned-key memory to the DB handle's key memory, which
+ * is unsafe to use if the DB handle is threaded. If the pkey
+ * argument is NULL, use the DBC-owned returned-key memory
+ * instead; it'll go away when we close the cursor before we
+ * return, but in this case that's just fine, as we're not
+ * returning the primary key.
+ */
+ if (pkey == NULL)
+ dbc->rkey = &dbc->my_rkey;
+
+ DEBUG_LREAD(dbc, txn, "__db_pget", skey, NULL, flags);
+
+ /*
+ * The cursor is just a perfectly ordinary secondary database
+ * cursor. Call its c_pget() method to do the dirty work.
+ */
+ /* A bare get or DB_RMW-only call defaults to DB_SET positioning. */
+ if (flags == 0 || flags == DB_RMW)
+ flags |= DB_SET;
+ ret = dbc->c_pget(dbc, skey, pkey, data, flags);
+
+ if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_secondary_get --
+ * This wrapper function for DB->pget() is the DB->get() function
+ * on a database which has been made into a secondary index.
+ */
+static int
+__db_secondary_get(sdbp, txn, skey, data, flags)
+ DB *sdbp;
+ DB_TXN *txn;
+ DBT *skey, *data;
+ u_int32_t flags;
+{
+
+ /* Delegate to pget; a NULL pkey suppresses the primary-key return. */
+ DB_ASSERT(F_ISSET(sdbp, DB_AM_SECONDARY));
+ return (sdbp->pget(sdbp, txn, skey, NULL, data, flags));
+}
+
+/*
+ * __db_secondary_close --
+ * Wrapper function for DB->close() which we use on secondaries to
+ * manage refcounting and make sure we don't close them underneath
+ * a primary that is updating.
+ */
+static int
+__db_secondary_close(sdbp, flags)
+ DB *sdbp;
+ u_int32_t flags;
+{
+ DB *primary;
+ int doclose;
+
+ doclose = 0;
+ primary = sdbp->s_primary;
+
+ MUTEX_THREAD_LOCK(primary->dbenv, primary->mutexp);
+ /*
+ * Check the refcount--if it was at 1 when we were called, no
+ * thread is currently updating this secondary through the primary,
+ * so it's safe to close it for real.
+ *
+ * If it's not safe to do the close now, we do nothing; the
+ * database will actually be closed when the refcount is decremented,
+ * which can happen in either __db_s_next or __db_s_done.
+ */
+ DB_ASSERT(sdbp->s_refcnt != 0);
+ if (--sdbp->s_refcnt == 0) {
+ /* Last reference: unlink from the primary's secondary list. */
+ LIST_REMOVE(sdbp, s_links);
+ /* We don't want to call close while the mutex is held. */
+ doclose = 1;
+ }
+ MUTEX_THREAD_UNLOCK(primary->dbenv, primary->mutexp);
+
+ /*
+ * sdbp->close is this function; call the real one explicitly if
+ * need be.
+ */
+ return (doclose ? __db_close(sdbp, flags) : 0);
+}
+
+/*
+ * __db_append_primary --
+ * Perform the secondary index updates necessary to put(DB_APPEND)
+ * a record to a primary database.
+ */
+static int
+__db_append_primary(dbc, key, data)
+ DBC *dbc;
+ DBT *key, *data;
+{
+ DB *dbp, *sdbp;
+ DBC *sdbc, *pdbc;
+ DBT oldpkey, pkey, pdata, skey;
+ int cmp, ret, t_ret;
+
+ dbp = dbc->dbp;
+ sdbp = NULL;
+ ret = 0;
+
+ /*
+ * Worrying about partial appends seems a little like worrying
+ * about Linear A character encodings. But we support those
+ * too if your application understands them.
+ */
+ pdbc = NULL;
+ if (F_ISSET(data, DB_DBT_PARTIAL) || F_ISSET(key, DB_DBT_PARTIAL)) {
+ /*
+ * The dbc we were passed is all set to pass things
+ * back to the user; we can't safely do a call on it.
+ * Dup the cursor, grab the real data item (we don't
+ * care what the key is--we've been passed it directly),
+ * and use that instead of the data DBT we were passed.
+ *
+ * Note that we can get away with this simple get because
+ * an appended item is by definition new, and the
+ * correctly-constructed full data item from this partial
+ * put is on the page waiting for us.
+ */
+ if ((ret = __db_c_idup(dbc, &pdbc, DB_POSITIONI)) != 0)
+ return (ret);
+ memset(&pkey, 0, sizeof(DBT));
+ memset(&pdata, 0, sizeof(DBT));
+
+ if ((ret = pdbc->c_get(pdbc, &pkey, &pdata, DB_CURRENT)) != 0)
+ goto err;
+
+ key = &pkey;
+ data = &pdata;
+ }
+
+ /*
+ * Loop through the secondary indices, putting a new item in
+ * each that points to the appended item.
+ *
+ * This is much like the loop in "step 3" in __db_c_put, so
+ * I'm not commenting heavily here; it was unclean to excerpt
+ * just that section into a common function, but the basic
+ * overview is the same here.
+ */
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp, key, data, &skey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ continue;
+ else
+ goto err;
+ }
+
+ if ((ret = __db_icursor(sdbp, dbc->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc->locker, &sdbc)) != 0) {
+ FREE_IF_NEEDED(sdbp, &skey);
+ goto err;
+ }
+ /* Under CDB this new cursor must be able to write immediately. */
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * Since we know we have a new primary key, it can't be a
+ * duplicate duplicate in the secondary. It can be a
+ * duplicate in a secondary that doesn't support duplicates,
+ * however, so we need to be careful to avoid an overwrite
+ * (which would corrupt our index).
+ */
+ if (!F_ISSET(sdbp, DB_AM_DUP)) {
+ memset(&oldpkey, 0, sizeof(DBT));
+ F_SET(&oldpkey, DB_DBT_MALLOC);
+ ret = sdbc->c_real_get(sdbc, &skey, &oldpkey,
+ DB_SET | (STD_LOCKING(dbc) ? DB_RMW : 0));
+ if (ret == 0) {
+ cmp = __bam_defcmp(sdbp, &oldpkey, key);
+ /*
+ * XXX
+ * This needs to use the right free function
+ * as soon as this is possible.
+ */
+ __os_ufree(sdbp->dbenv,
+ oldpkey.data);
+ if (cmp != 0) {
+ __db_err(sdbp->dbenv, "%s%s",
+ "Append results in a non-unique secondary key in",
+ " an index not configured to support duplicates");
+ ret = EINVAL;
+ goto err1;
+ }
+ } else if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
+ goto err1;
+ }
+
+ /* Install the secondary key pointing at the appended record. */
+ ret = sdbc->c_put(sdbc, &skey, key, DB_UPDATE_SECONDARY);
+
+err1: FREE_IF_NEEDED(sdbp, &skey);
+
+ if ((t_ret = sdbc->c_close(sdbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (pdbc != NULL && (t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
diff --git a/storage/bdb/db/db_cam.c b/storage/bdb/db/db_cam.c
new file mode 100644
index 00000000000..4de3467d4aa
--- /dev/null
+++ b/storage/bdb/db/db_cam.c
@@ -0,0 +1,2286 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_cam.c,v 11.114 2002/09/03 15:44:46 krinsky Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+/* Forward declarations for file-local helpers. */
+static int __db_buildpartial __P((DB *, DBT *, DBT *, DBT *));
+static int __db_c_cleanup __P((DBC *, DBC *, int));
+static int __db_c_del_secondary __P((DBC *));
+static int __db_c_pget_recno __P((DBC *, DBT *, DBT *, u_int32_t));
+static int __db_wrlock_err __P((DB_ENV *));
+
+/*
+ * CDB_LOCKING_INIT --
+ * Under Concurrent Data Store, verify the cursor may write and,
+ * for write cursors, upgrade the IWRITE lock to a full write lock.
+ * NOTE(review): expects a local variable "ret" in the calling scope.
+ */
+#define CDB_LOCKING_INIT(dbp, dbc) \
+ /* \
+ * If we are running CDB, this had better be either a write \
+ * cursor or an immediate writer. If it's a regular writer, \
+ * that means we have an IWRITE lock and we need to upgrade \
+ * it to a write lock. \
+ */ \
+ if (CDB_LOCKING((dbp)->dbenv)) { \
+ if (!F_ISSET(dbc, DBC_WRITECURSOR | DBC_WRITER)) \
+ return (__db_wrlock_err(dbp->dbenv)); \
+ \
+ if (F_ISSET(dbc, DBC_WRITECURSOR) && \
+ (ret = (dbp)->dbenv->lock_get((dbp)->dbenv, \
+ (dbc)->locker, DB_LOCK_UPGRADE, &(dbc)->lock_dbt, \
+ DB_LOCK_WRITE, &(dbc)->mylock)) != 0) \
+ return (ret); \
+ }
+/*
+ * CDB_LOCKING_DONE --
+ * Undo CDB_LOCKING_INIT's upgrade: downgrade the write lock back
+ * to an intent-to-write (IWRITE) lock.
+ */
+#define CDB_LOCKING_DONE(dbp, dbc) \
+ /* Release the upgraded lock. */ \
+ if (F_ISSET(dbc, DBC_WRITECURSOR)) \
+ (void)__lock_downgrade( \
+ (dbp)->dbenv, &(dbc)->mylock, DB_LOCK_IWRITE, 0);
+/*
+ * Copy the lock info from one cursor to another, so that locking
+ * in CDB can be done in the context of an internally-duplicated
+ * or off-page-duplicate cursor.
+ */
+#define CDB_LOCKING_COPY(dbp, dbc_o, dbc_n) \
+ if (CDB_LOCKING((dbp)->dbenv) && \
+ F_ISSET((dbc_o), DBC_WRITECURSOR | DBC_WRITEDUP)) { \
+ memcpy(&(dbc_n)->mylock, &(dbc_o)->mylock, \
+ sizeof((dbc_o)->mylock)); \
+ /* This lock isn't ours to put--just discard it on close. */ \
+ F_SET((dbc_n), DBC_WRITEDUP); \
+ }
+
+/*
+ * __db_c_close --
+ * Close the cursor.
+ *
+ * PUBLIC: int __db_c_close __P((DBC *));
+ */
+int
+__db_c_close(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBC *opd;
+ DBC_INTERNAL *cp;
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ ret = 0;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * If the cursor is already closed we have a serious problem, and we
+ * assume that the cursor isn't on the active queue. Don't do any of
+ * the remaining cursor close processing.
+ */
+ if (!F_ISSET(dbc, DBC_ACTIVE)) {
+ if (dbp != NULL)
+ __db_err(dbenv, "Closing already-closed cursor");
+
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ cp = dbc->internal;
+ opd = cp->opd;
+
+ /*
+ * Remove the cursor(s) from the active queue. We may be closing two
+ * cursors at once here, a top-level one and a lower-level, off-page
+ * duplicate one. The acess-method specific cursor close routine must
+ * close both of them in a single call.
+ *
+ * !!!
+ * Cursors must be removed from the active queue before calling the
+ * access specific cursor close routine, btree depends on having that
+ * order of operations.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ if (opd != NULL) {
+ F_CLR(opd, DBC_ACTIVE);
+ TAILQ_REMOVE(&dbp->active_queue, opd, links);
+ }
+ F_CLR(dbc, DBC_ACTIVE);
+ TAILQ_REMOVE(&dbp->active_queue, dbc, links);
+
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ /* Call the access specific cursor close routine. */
+ if ((t_ret =
+ dbc->c_am_close(dbc, PGNO_INVALID, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Release the lock after calling the access method specific close
+ * routine, a Btree cursor may have had pending deletes.
+ */
+ if (CDB_LOCKING(dbenv)) {
+ /*
+ * If DBC_WRITEDUP is set, the cursor is an internally
+ * duplicated write cursor and the lock isn't ours to put.
+ *
+ * Also, be sure not to free anything if mylock.off is
+ * INVALID; in some cases, such as idup'ed read cursors
+ * and secondary update cursors, a cursor in a CDB
+ * environment may not have a lock at all.
+ */
+ if (!F_ISSET(dbc, DBC_WRITEDUP) && LOCK_ISSET(dbc->mylock)) {
+ if ((t_ret = dbenv->lock_put(
+ dbenv, &dbc->mylock)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /* For safety's sake, since this is going on the free queue. */
+ memset(&dbc->mylock, 0, sizeof(dbc->mylock));
+ F_CLR(dbc, DBC_WRITEDUP);
+ }
+
+ if (dbc->txn != NULL)
+ dbc->txn->cursors--;
+
+ /* Move the cursor(s) to the free queue. */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ if (opd != NULL) {
+ /* The off-page duplicate cursor held its own txn reference. */
+ if (dbc->txn != NULL)
+ dbc->txn->cursors--;
+ TAILQ_INSERT_TAIL(&dbp->free_queue, opd, links);
+ opd = NULL;
+ }
+ TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __db_c_destroy --
+ * Destroy the cursor, called after DBC->c_close.
+ *
+ * PUBLIC: int __db_c_destroy __P((DBC *));
+ */
+int
+__db_c_destroy(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ /* Remove the cursor from the free queue. */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ /* Free up allocated memory. */
+ if (dbc->my_rskey.data != NULL)
+ __os_free(dbenv, dbc->my_rskey.data);
+ if (dbc->my_rkey.data != NULL)
+ __os_free(dbenv, dbc->my_rkey.data);
+ if (dbc->my_rdata.data != NULL)
+ __os_free(dbenv, dbc->my_rdata.data);
+
+ /* Call the access specific cursor destroy routine. */
+ ret = dbc->c_am_destroy == NULL ? 0 : dbc->c_am_destroy(dbc);
+
+ /*
+ * Release the cursor's lock id, but only if this cursor allocated
+ * it itself (DBC_OWN_LID).
+ */
+ if (LOCKING_ON(dbenv) &&
+ F_ISSET(dbc, DBC_OWN_LID) &&
+ (t_ret = dbenv->lock_id_free(dbenv, dbc->lid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ __os_free(dbenv, dbc);
+
+ return (ret);
+}
+
+/*
+ * __db_c_count --
+ * Return a count of duplicate data items.
+ *
+ * PUBLIC: int __db_c_count __P((DBC *, db_recno_t *, u_int32_t));
+ */
+int
+__db_c_count(dbc, recnop, flags)
+ DBC *dbc;
+ db_recno_t *recnop;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are not duplicated and will not be cleaned up on return.
+ * So, pages/locks that the cursor references must be resolved by the
+ * underlying functions.
+ */
+ dbp = dbc->dbp;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_ccountchk(dbp, flags, IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ switch (dbc->dbtype) {
+ case DB_QUEUE:
+ case DB_RECNO:
+ /* Queue/Recno don't support duplicates; count is always 1. */
+ *recnop = 1;
+ break;
+ case DB_HASH:
+ if (dbc->internal->opd == NULL) {
+ if ((ret = __ham_c_count(dbc, recnop)) != 0)
+ return (ret);
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_BTREE:
+ if ((ret = __bam_c_count(dbc, recnop)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_unknown_type(dbp->dbenv,
+ "__db_c_count", dbp->type));
+ }
+ return (0);
+}
+
+/*
+ * __db_c_del --
+ * Delete using a cursor.
+ *
+ * PUBLIC: int __db_c_del __P((DBC *, u_int32_t));
+ */
+int
+__db_c_del(dbc, flags)
+ DBC *dbc;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *opd;
+ int ret;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are not duplicated and will not be cleaned up on return.
+ * So, pages/locks that the cursor references must be resolved by the
+ * underlying functions.
+ */
+ dbp = dbc->dbp;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_cdelchk(dbp, flags, IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, dbc->txn, "db_c_del", NULL, NULL, flags);
+
+ CDB_LOCKING_INIT(dbp, dbc);
+
+ /*
+ * If we're a secondary index, and DB_UPDATE_SECONDARY isn't set
+ * (which it only is if we're being called from a primary update),
+ * then we need to call through to the primary and delete the item.
+ *
+ * Note that this will delete the current item; we don't need to
+ * delete it ourselves as well, so we can just goto done.
+ */
+ if (flags != DB_UPDATE_SECONDARY && F_ISSET(dbp, DB_AM_SECONDARY)) {
+ ret = __db_c_del_secondary(dbc);
+ goto done;
+ }
+
+ /*
+ * If we are a primary and have secondary indices, go through
+ * and delete any secondary keys that point at the current record.
+ */
+ if (LIST_FIRST(&dbp->s_secondaries) != NULL &&
+ (ret = __db_c_del_primary(dbc)) != 0)
+ goto done;
+
+ /*
+ * Off-page duplicate trees are locked in the primary tree, that is,
+ * we acquire a write lock in the primary tree and no locks in the
+ * off-page dup tree. If the del operation is done in an off-page
+ * duplicate tree, call the primary cursor's upgrade routine first.
+ */
+ opd = dbc->internal->opd;
+ if (opd == NULL)
+ ret = dbc->c_am_del(dbc);
+ else
+ if ((ret = dbc->c_am_writelock(dbc)) == 0)
+ ret = opd->c_am_del(opd);
+
+ /* Downgrade any CDB lock upgraded in CDB_LOCKING_INIT. */
+done: CDB_LOCKING_DONE(dbp, dbc);
+
+ return (ret);
+}
+
+/*
+ * __db_c_dup --
+ * Duplicate a cursor
+ *
+ * PUBLIC: int __db_c_dup __P((DBC *, DBC **, u_int32_t));
+ */
+int
+__db_c_dup(dbc_orig, dbcp, flags)
+ DBC *dbc_orig;
+ DBC **dbcp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBC *dbc_n, *dbc_nopd;
+ int ret;
+
+ dbp = dbc_orig->dbp;
+ dbenv = dbp->dbenv;
+ dbc_n = dbc_nopd = NULL;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /*
+ * We can never have two write cursors open in CDB, so do not
+ * allow duplication of a write cursor.
+ */
+ if (flags != DB_POSITIONI &&
+ F_ISSET(dbc_orig, DBC_WRITER | DBC_WRITECURSOR)) {
+ __db_err(dbenv, "Cannot duplicate writeable cursor");
+ return (EINVAL);
+ }
+
+ /* Allocate a new cursor and initialize it. */
+ if ((ret = __db_c_idup(dbc_orig, &dbc_n, flags)) != 0)
+ goto err;
+ *dbcp = dbc_n;
+
+ /*
+ * If we're in CDB, and this isn't an internal duplication (in which
+ * case we're explicitly overriding CDB locking), the duplicated
+ * cursor needs its own read lock. (We know it's not a write cursor
+ * because we wouldn't have made it this far; you can't dup them.)
+ */
+ if (CDB_LOCKING(dbenv) && flags != DB_POSITIONI) {
+ DB_ASSERT(!F_ISSET(dbc_orig, DBC_WRITER | DBC_WRITECURSOR));
+
+ if ((ret = dbenv->lock_get(dbenv, dbc_n->locker, 0,
+ &dbc_n->lock_dbt, DB_LOCK_READ, &dbc_n->mylock)) != 0) {
+ (void)__db_c_close(dbc_n);
+ return (ret);
+ }
+ }
+
+ /*
+ * If the cursor references an off-page duplicate tree, allocate a
+ * new cursor for that tree and initialize it.
+ */
+ if (dbc_orig->internal->opd != NULL) {
+ if ((ret =
+ __db_c_idup(dbc_orig->internal->opd, &dbc_nopd, flags)) != 0)
+ goto err;
+ dbc_n->internal->opd = dbc_nopd;
+ }
+
+ /* Copy the dirty read flag to the new cursor. */
+ F_SET(dbc_n, F_ISSET(dbc_orig, DBC_DIRTY_READ));
+ return (0);
+
+ /* Error: close any cursors allocated before the failure. */
+err: if (dbc_n != NULL)
+ (void)dbc_n->c_close(dbc_n);
+ if (dbc_nopd != NULL)
+ (void)dbc_nopd->c_close(dbc_nopd);
+
+ return (ret);
+}
+
+/*
+ * __db_c_idup --
+ * Internal version of __db_c_dup.
+ *
+ * PUBLIC: int __db_c_idup __P((DBC *, DBC **, u_int32_t));
+ */
+int
+__db_c_idup(dbc_orig, dbcp, flags)
+ DBC *dbc_orig, **dbcp;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *dbc_n;
+ DBC_INTERNAL *int_n, *int_orig;
+ int ret;
+
+ dbp = dbc_orig->dbp;
+ dbc_n = *dbcp;
+ /*
+ * NOTE(review): dbc_n is immediately overwritten by __db_icursor
+ * below; the assignment above appears to be a redundant store.
+ */
+
+ if ((ret = __db_icursor(dbp, dbc_orig->txn, dbc_orig->dbtype,
+ dbc_orig->internal->root, F_ISSET(dbc_orig, DBC_OPD),
+ dbc_orig->locker, &dbc_n)) != 0)
+ return (ret);
+
+ /* If the user wants the cursor positioned, do it here. */
+ if (flags == DB_POSITION || flags == DB_POSITIONI) {
+ int_n = dbc_n->internal;
+ int_orig = dbc_orig->internal;
+
+ dbc_n->flags |= dbc_orig->flags & ~DBC_OWN_LID;
+
+ int_n->indx = int_orig->indx;
+ int_n->pgno = int_orig->pgno;
+ int_n->root = int_orig->root;
+ int_n->lock_mode = int_orig->lock_mode;
+
+ switch (dbc_orig->dbtype) {
+ case DB_QUEUE:
+ if ((ret = __qam_c_dup(dbc_orig, dbc_n)) != 0)
+ goto err;
+ break;
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_c_dup(dbc_orig, dbc_n)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_c_dup(dbc_orig, dbc_n)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(dbp->dbenv,
+ "__db_c_idup", dbc_orig->dbtype);
+ goto err;
+ }
+ }
+
+ /* Now take care of duping the CDB information. */
+ CDB_LOCKING_COPY(dbp, dbc_orig, dbc_n);
+
+ /* Copy the dirty read flag to the new cursor. */
+ F_SET(dbc_n, F_ISSET(dbc_orig, DBC_DIRTY_READ));
+
+ *dbcp = dbc_n;
+ return (0);
+
+err: (void)dbc_n->c_close(dbc_n);
+ return (ret);
+}
+
+/*
+ * __db_c_newopd --
+ * Create a new off-page duplicate cursor.
+ *
+ * dbc_parent is the primary-tree cursor the new OPD cursor hangs off;
+ * root is the root page of the off-page duplicate tree; oldopd is the
+ * previous OPD cursor (if any), which is closed once the new cursor
+ * exists. *dbcp always receives a usable cursor pointer: the new one
+ * on success, the old one on failure.
+ *
+ * PUBLIC: int __db_c_newopd __P((DBC *, db_pgno_t, DBC *, DBC **));
+ */
+int
+__db_c_newopd(dbc_parent, root, oldopd, dbcp)
+ DBC *dbc_parent;
+ db_pgno_t root;
+ DBC *oldopd;
+ DBC **dbcp;
+{
+ DB *dbp;
+ DBC *opd;
+ DBTYPE dbtype;
+ int ret;
+
+ dbp = dbc_parent->dbp;
+ /* Use a Btree when a duplicate comparison function is set, else Recno. */
+ dbtype = (dbp->dup_compare == NULL) ? DB_RECNO : DB_BTREE;
+
+ /*
+ * On failure, we want to default to returning the old off-page dup
+ * cursor, if any; our caller can't be left with a dangling pointer
+ * to a freed cursor. On error the only allowable behavior is to
+ * close the cursor (and the old OPD cursor it in turn points to), so
+ * this should be safe.
+ */
+ *dbcp = oldopd;
+
+ if ((ret = __db_icursor(dbp,
+ dbc_parent->txn, dbtype, root, 1, dbc_parent->locker, &opd)) != 0)
+ return (ret);
+
+ /* !!!
+ * If the parent is a DBC_WRITER, this won't copy anything. That's
+ * not actually a problem--we only need lock information in an
+ * off-page dup cursor in order to upgrade at cursor close time
+ * if we've done a delete, but WRITERs don't need to upgrade.
+ */
+ CDB_LOCKING_COPY(dbp, dbc_parent, opd);
+
+ *dbcp = opd;
+
+ /*
+ * Check to see if we already have an off-page dup cursor that we've
+ * passed in. If we do, close it. It'd be nice to use it again
+ * if it's a cursor belonging to the right tree, but if we're doing
+ * a cursor-relative operation this might not be safe, so for now
+ * we'll take the easy way out and always close and reopen.
+ *
+ * Note that under no circumstances do we want to close the old
+ * cursor without returning a valid new one; we don't want to
+ * leave the main cursor in our caller with a non-NULL pointer
+ * to a freed off-page dup cursor.
+ */
+ if (oldopd != NULL && (ret = oldopd->c_close(oldopd)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __db_c_get --
+ * Get using a cursor.
+ *
+ * Routes the get to the off-page duplicate (OPD) cursor when one is
+ * open and the operation applies to it; otherwise duplicates the main
+ * cursor, performs the get on the duplicate, and finally copies out
+ * whichever of key/data the caller still needs.
+ *
+ * PUBLIC: int __db_c_get __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_get(dbc_arg, key, data, flags)
+ DBC *dbc_arg;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DBC *dbc, *dbc_n, *opd;
+ DBC_INTERNAL *cp, *cp_n;
+ DB_MPOOLFILE *mpf;
+ db_pgno_t pgno;
+ u_int32_t multi, tmp_dirty, tmp_flags, tmp_rmw;
+ u_int8_t type;
+ int ret, t_ret;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are duplicated cursors. On return, any referenced pages
+ * will be discarded, and, if the cursor is not intended to be used
+ * again, the close function will be called. So, pages/locks that
+ * the cursor references do not need to be resolved by the underlying
+ * functions.
+ */
+ dbp = dbc_arg->dbp;
+ mpf = dbp->mpf;
+ dbc_n = NULL;
+ opd = NULL;
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret =
+ __db_cgetchk(dbp, key, data, flags, IS_INITIALIZED(dbc_arg))) != 0)
+ return (ret);
+
+ /* Clear OR'd in additional bits so we can check for flag equality. */
+ tmp_rmw = LF_ISSET(DB_RMW);
+ LF_CLR(DB_RMW);
+
+ tmp_dirty = LF_ISSET(DB_DIRTY_READ);
+ LF_CLR(DB_DIRTY_READ);
+
+ multi = LF_ISSET(DB_MULTIPLE|DB_MULTIPLE_KEY);
+ LF_CLR(DB_MULTIPLE|DB_MULTIPLE_KEY);
+
+ DEBUG_LREAD(dbc_arg, dbc_arg->txn, "db_c_get",
+ flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags);
+
+ /*
+ * Return a cursor's record number. It has nothing to do with the
+ * cursor get code except that it was put into the interface.
+ */
+ if (flags == DB_GET_RECNO) {
+ if (tmp_rmw)
+ F_SET(dbc_arg, DBC_RMW);
+ if (tmp_dirty)
+ F_SET(dbc_arg, DBC_DIRTY_READ);
+ ret = __bam_c_rget(dbc_arg, data);
+ if (tmp_rmw)
+ F_CLR(dbc_arg, DBC_RMW);
+ if (tmp_dirty)
+ F_CLR(dbc_arg, DBC_DIRTY_READ);
+ return (ret);
+ }
+
+ /* Set up CDB locking for queue consume operations. */
+ if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ CDB_LOCKING_INIT(dbp, dbc_arg);
+
+ /*
+ * If we have an off-page duplicates cursor, and the operation applies
+ * to it, perform the operation. Duplicate the cursor and call the
+ * underlying function.
+ *
+ * Off-page duplicate trees are locked in the primary tree, that is,
+ * we acquire a write lock in the primary tree and no locks in the
+ * off-page dup tree. If the DB_RMW flag was specified and the get
+ * operation is done in an off-page duplicate tree, call the primary
+ * cursor's upgrade routine first.
+ */
+ cp = dbc_arg->internal;
+ if (cp->opd != NULL &&
+ (flags == DB_CURRENT || flags == DB_GET_BOTHC ||
+ flags == DB_NEXT || flags == DB_NEXT_DUP || flags == DB_PREV)) {
+ if (tmp_rmw && (ret = dbc_arg->c_am_writelock(dbc_arg)) != 0)
+ return (ret);
+ if ((ret = __db_c_idup(cp->opd, &opd, DB_POSITIONI)) != 0)
+ return (ret);
+
+ switch (ret =
+ opd->c_am_get(opd, key, data, flags, NULL)) {
+ case 0:
+ goto done;
+ case DB_NOTFOUND:
+ /*
+ * Translate DB_NOTFOUND failures for the DB_NEXT and
+ * DB_PREV operations into a subsequent operation on
+ * the parent cursor.
+ */
+ if (flags == DB_NEXT || flags == DB_PREV) {
+ if ((ret = opd->c_close(opd)) != 0)
+ goto err;
+ opd = NULL;
+ break;
+ }
+ goto err;
+ default:
+ goto err;
+ }
+ }
+
+ /*
+ * Perform an operation on the main cursor. Duplicate the cursor,
+ * upgrade the lock as required, and call the underlying function.
+ */
+ switch (flags) {
+ case DB_CURRENT:
+ case DB_GET_BOTHC:
+ case DB_NEXT:
+ case DB_NEXT_DUP:
+ case DB_NEXT_NODUP:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ tmp_flags = DB_POSITIONI;
+ break;
+ default:
+ tmp_flags = 0;
+ break;
+ }
+
+ if (tmp_dirty)
+ F_SET(dbc_arg, DBC_DIRTY_READ);
+
+ /*
+ * If this cursor is going to be closed immediately, we don't
+ * need to take precautions to clean it up on error.
+ */
+ if (F_ISSET(dbc_arg, DBC_TRANSIENT))
+ dbc_n = dbc_arg;
+ else {
+ ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags);
+ if (tmp_dirty)
+ F_CLR(dbc_arg, DBC_DIRTY_READ);
+
+ if (ret != 0)
+ goto err;
+ COPY_RET_MEM(dbc_arg, dbc_n);
+ }
+
+ if (tmp_rmw)
+ F_SET(dbc_n, DBC_RMW);
+
+ /* Propagate any bulk-get flags to the cursor doing the work. */
+ switch (multi) {
+ case DB_MULTIPLE:
+ F_SET(dbc_n, DBC_MULTIPLE);
+ break;
+ case DB_MULTIPLE_KEY:
+ F_SET(dbc_n, DBC_MULTIPLE_KEY);
+ break;
+ case DB_MULTIPLE | DB_MULTIPLE_KEY:
+ F_SET(dbc_n, DBC_MULTIPLE|DBC_MULTIPLE_KEY);
+ break;
+ case 0:
+ break;
+ }
+
+ /* c_am_get sets pgno when the located item has an off-page dup tree. */
+ pgno = PGNO_INVALID;
+ ret = dbc_n->c_am_get(dbc_n, key, data, flags, &pgno);
+ if (tmp_rmw)
+ F_CLR(dbc_n, DBC_RMW);
+ if (tmp_dirty)
+ F_CLR(dbc_arg, DBC_DIRTY_READ);
+ F_CLR(dbc_n, DBC_MULTIPLE|DBC_MULTIPLE_KEY);
+ if (ret != 0)
+ goto err;
+
+ cp_n = dbc_n->internal;
+
+ /*
+ * We may be referencing a new off-page duplicates tree. Acquire
+ * a new cursor and call the underlying function.
+ */
+ if (pgno != PGNO_INVALID) {
+ if ((ret = __db_c_newopd(dbc_arg,
+ pgno, cp_n->opd, &cp_n->opd)) != 0)
+ goto err;
+
+ switch (flags) {
+ case DB_FIRST:
+ case DB_NEXT:
+ case DB_NEXT_NODUP:
+ case DB_SET:
+ case DB_SET_RECNO:
+ case DB_SET_RANGE:
+ tmp_flags = DB_FIRST;
+ break;
+ case DB_LAST:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ tmp_flags = DB_LAST;
+ break;
+ case DB_GET_BOTH:
+ case DB_GET_BOTHC:
+ case DB_GET_BOTH_RANGE:
+ tmp_flags = flags;
+ break;
+ default:
+ ret =
+ __db_unknown_flag(dbp->dbenv, "__db_c_get", flags);
+ goto err;
+ }
+ if ((ret = cp_n->opd->c_am_get(
+ cp_n->opd, key, data, tmp_flags, NULL)) != 0)
+ goto err;
+ }
+
+done: /*
+ * Return a key/data item. The only exception is that we don't return
+ * a key if the user already gave us one, that is, if the DB_SET flag
+ * was set. The DB_SET flag is necessary. In a Btree, the user's key
+ * doesn't have to be the same as the key stored the tree, depending on
+ * the magic performed by the comparison function. As we may not have
+ * done any key-oriented operation here, the page reference may not be
+ * valid. Fill it in as necessary. We don't have to worry about any
+ * locks, the cursor must already be holding appropriate locks.
+ *
+ * XXX
+ * If not a Btree and DB_SET_RANGE is set, we shouldn't return a key
+ * either, should we?
+ */
+ cp_n = dbc_n == NULL ? dbc_arg->internal : dbc_n->internal;
+ if (!F_ISSET(key, DB_DBT_ISSET)) {
+ if (cp_n->page == NULL && (ret =
+ mpf->get(mpf, &cp_n->pgno, 0, &cp_n->page)) != 0)
+ goto err;
+
+ if ((ret = __db_ret(dbp, cp_n->page, cp_n->indx,
+ key, &dbc_arg->rkey->data, &dbc_arg->rkey->ulen)) != 0)
+ goto err;
+ }
+ if (multi != 0) {
+ /*
+ * Even if fetching from the OPD cursor we need a duplicate
+ * primary cursor if we are going after multiple keys.
+ */
+ if (dbc_n == NULL) {
+ /*
+ * Non-"_KEY" DB_MULTIPLE doesn't move the main cursor,
+ * so it's safe to just use dbc_arg, unless dbc_arg
+ * has an open OPD cursor whose state might need to
+ * be preserved.
+ */
+ if ((!(multi & DB_MULTIPLE_KEY) &&
+ dbc_arg->internal->opd == NULL) ||
+ F_ISSET(dbc_arg, DBC_TRANSIENT))
+ dbc_n = dbc_arg;
+ else {
+ if ((ret = __db_c_idup(dbc_arg,
+ &dbc_n, DB_POSITIONI)) != 0)
+ goto err;
+ if ((ret = dbc_n->c_am_get(dbc_n,
+ key, data, DB_CURRENT, &pgno)) != 0)
+ goto err;
+ }
+ cp_n = dbc_n->internal;
+ }
+
+ /*
+ * If opd is set then we dupped the opd that we came in with.
+ * When we return we may have a new opd if we went to another
+ * key.
+ */
+ if (opd != NULL) {
+ DB_ASSERT(cp_n->opd == NULL);
+ cp_n->opd = opd;
+ opd = NULL;
+ }
+
+ /*
+ * Bulk get doesn't use __db_retcopy, so data.size won't
+ * get set up unless there is an error. Assume success
+ * here. This is the only call to c_am_bulk, and it avoids
+ * setting it exactly the same everywhere. If we have an
+ * ENOMEM error, it'll get overwritten with the needed value.
+ */
+ data->size = data->ulen;
+ ret = dbc_n->c_am_bulk(dbc_n, data, flags | multi);
+ } else if (!F_ISSET(data, DB_DBT_ISSET)) {
+ /* Fetch the datum from whichever cursor did the work. */
+ dbc = opd != NULL ? opd : cp_n->opd != NULL ? cp_n->opd : dbc_n;
+ type = TYPE(dbc->internal->page);
+ ret = __db_ret(dbp, dbc->internal->page, dbc->internal->indx +
+ (type == P_LBTREE || type == P_HASH ? O_INDX : 0),
+ data, &dbc_arg->rdata->data, &dbc_arg->rdata->ulen);
+ }
+
+err: /* Don't pass DB_DBT_ISSET back to application level, error or no. */
+ F_CLR(key, DB_DBT_ISSET);
+ F_CLR(data, DB_DBT_ISSET);
+
+ /* Cleanup and cursor resolution. */
+ if (opd != NULL) {
+ if ((t_ret = __db_c_cleanup(
+ dbc_arg->internal->opd, opd, ret)) != 0 && ret == 0)
+ ret = t_ret;
+
+ }
+
+ if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ CDB_LOCKING_DONE(dbp, dbc_arg);
+ return (ret);
+}
+
+/*
+ * __db_c_put --
+ * Put using a cursor.
+ *
+ * When the database is a primary with associated secondary indices,
+ * first runs the five-step secondary update protocol described below
+ * (steps 1-5), then performs the "real" put, routing it through the
+ * off-page duplicate cursor when one is open and the flag applies.
+ *
+ * PUBLIC: int __db_c_put __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_put(dbc_arg, key, data, flags)
+ DBC *dbc_arg;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB *dbp, *sdbp;
+ DBC *dbc_n, *oldopd, *opd, *sdbc, *pdbc;
+ DBT olddata, oldpkey, oldskey, newdata, pkey, save_skey, skey, temp;
+ db_pgno_t pgno;
+ int cmp, have_oldrec, ispartial, nodel, re_pad, ret, rmw, t_ret;
+ u_int32_t re_len, size, tmp_flags;
+
+ /*
+ * Cursor Cleanup Note:
+ * All of the cursors passed to the underlying access methods by this
+ * routine are duplicated cursors. On return, any referenced pages
+ * will be discarded, and, if the cursor is not intended to be used
+ * again, the close function will be called. So, pages/locks that
+ * the cursor references do not need to be resolved by the underlying
+ * functions.
+ */
+ dbp = dbc_arg->dbp;
+ sdbp = NULL;
+ pdbc = dbc_n = NULL;
+ memset(&newdata, 0, sizeof(DBT));
+
+ PANIC_CHECK(dbp->dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret = __db_cputchk(dbp,
+ key, data, flags, IS_INITIALIZED(dbc_arg))) != 0)
+ return (ret);
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, dbc_arg->txn, dbc_arg->locker, 0)) != 0)
+ return (ret);
+
+ /*
+ * Putting to secondary indices is forbidden; when we need
+ * to internally update one, we'll call this with a private
+ * synonym for DB_KEYLAST, DB_UPDATE_SECONDARY, which does
+ * the right thing but won't return an error from cputchk().
+ */
+ if (flags == DB_UPDATE_SECONDARY)
+ flags = DB_KEYLAST;
+
+ DEBUG_LWRITE(dbc_arg, dbc_arg->txn, "db_c_put",
+ flags == DB_KEYFIRST || flags == DB_KEYLAST ||
+ flags == DB_NODUPDATA ? key : NULL, data, flags);
+
+ CDB_LOCKING_INIT(dbp, dbc_arg);
+
+ /*
+ * Check to see if we are a primary and have secondary indices.
+ * If we are not, we save ourselves a good bit of trouble and
+ * just skip to the "normal" put.
+ */
+ if (LIST_FIRST(&dbp->s_secondaries) == NULL)
+ goto skip_s_update;
+
+ /*
+ * We have at least one secondary which we may need to update.
+ *
+ * There is a rather vile locking issue here. Secondary gets
+ * will always involve acquiring a read lock in the secondary,
+ * then acquiring a read lock in the primary. Ideally, we
+ * would likewise perform puts by updating all the secondaries
+ * first, then doing the actual put in the primary, to avoid
+ * deadlock (since having multiple threads doing secondary
+ * gets and puts simultaneously is probably a common case).
+ *
+ * However, if this put is a put-overwrite--and we have no way to
+ * tell in advance whether it will be--we may need to delete
+ * an outdated secondary key. In order to find that old
+ * secondary key, we need to get the record we're overwriting,
+ * before we overwrite it.
+ *
+ * (XXX: It would be nice to avoid this extra get, and have the
+ * underlying put routines somehow pass us the old record
+ * since they need to traverse the tree anyway. I'm saving
+ * this optimization for later, as it's a lot of work, and it
+ * would be hard to fit into this locking paradigm anyway.)
+ *
+ * The simple thing to do would be to go get the old record before
+ * we do anything else. Unfortunately, though, doing so would
+ * violate our "secondary, then primary" lock acquisition
+ * ordering--even in the common case where no old primary record
+ * exists, we'll still acquire and keep a lock on the page where
+ * we're about to do the primary insert.
+ *
+ * To get around this, we do the following gyrations, which
+ * hopefully solve this problem in the common case:
+ *
+ * 1) If this is a c_put(DB_CURRENT), go ahead and get the
+ * old record. We already hold the lock on this page in
+ * the primary, so no harm done, and we'll need the primary
+ * key (which we weren't passed in this case) to do any
+ * secondary puts anyway.
+ *
+ * 2) If we're doing a partial put, we need to perform the
+ * get on the primary key right away, since we don't have
+ * the whole datum that the secondary key is based on.
+ * We may also need to pad out the record if the primary
+ * has a fixed record length.
+ *
+ * 3) Loop through the secondary indices, putting into each a
+ * new secondary key that corresponds to the new record.
+ *
+ * 4) If we haven't done so in (1) or (2), get the old primary
+ * key/data pair. If one does not exist--the common case--we're
+ * done with secondary indices, and can go straight on to the
+ * primary put.
+ *
+ * 5) If we do have an old primary key/data pair, however, we need
+ * to loop through all the secondaries a second time and delete
+ * the old secondary in each.
+ */
+ memset(&pkey, 0, sizeof(DBT));
+ memset(&olddata, 0, sizeof(DBT));
+ have_oldrec = nodel = 0;
+
+ /*
+ * Primary indices can't have duplicates, so only DB_CURRENT,
+ * DB_KEYFIRST, and DB_KEYLAST make any sense. Other flags
+ * should have been caught by the checking routine, but
+ * add a sprinkling of paranoia.
+ */
+ DB_ASSERT(flags == DB_CURRENT ||
+ flags == DB_KEYFIRST || flags == DB_KEYLAST);
+
+ /*
+ * We'll want to use DB_RMW in a few places, but it's only legal
+ * when locking is on.
+ */
+ rmw = STD_LOCKING(dbc_arg) ? DB_RMW : 0;
+
+ if (flags == DB_CURRENT) { /* Step 1. */
+ /*
+ * This is safe to do on the cursor we already have;
+ * error or no, it won't move.
+ *
+ * We use DB_RMW for all of these gets because we'll be
+ * writing soon enough in the "normal" put code. In
+ * transactional databases we'll hold those write locks
+ * even if we close the cursor we're reading with.
+ */
+ ret = dbc_arg->c_get(dbc_arg,
+ &pkey, &olddata, rmw | DB_CURRENT);
+ if (ret == DB_KEYEMPTY) {
+ nodel = 1; /*
+ * We know we don't need a delete
+ * in the secondary.
+ */
+ have_oldrec = 1; /* We've looked for the old record. */
+ } else if (ret != 0)
+ goto err;
+ else
+ have_oldrec = 1;
+
+ } else {
+ /* So we can just use &pkey everywhere instead of key. */
+ pkey.data = key->data;
+ pkey.size = key->size;
+ }
+
+ /*
+ * Check for partial puts (step 2).
+ */
+ if (F_ISSET(data, DB_DBT_PARTIAL)) {
+ if (!have_oldrec && !nodel) {
+ /*
+ * We're going to have to search the tree for the
+ * specified key. Dup a cursor (so we have the same
+ * locking info) and do a c_get.
+ */
+ if ((ret = __db_c_idup(dbc_arg, &pdbc, 0)) != 0)
+ goto err;
+
+ /* We should have gotten DB_CURRENT in step 1. */
+ DB_ASSERT(flags != DB_CURRENT);
+
+ ret = pdbc->c_get(pdbc,
+ &pkey, &olddata, rmw | DB_SET);
+ if (ret == DB_KEYEMPTY || ret == DB_NOTFOUND) {
+ nodel = 1;
+ ret = 0;
+ }
+ if ((t_ret = pdbc->c_close(pdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+
+ have_oldrec = 1;
+ }
+
+ /*
+ * Now build the new datum from olddata and the partial
+ * data we were given.
+ */
+ if ((ret =
+ __db_buildpartial(dbp, &olddata, data, &newdata)) != 0)
+ goto err;
+ ispartial = 1;
+ } else
+ ispartial = 0;
+
+ /*
+ * Handle fixed-length records. If the primary database has
+ * fixed-length records, we need to pad out the datum before
+ * we pass it into the callback function; we always index the
+ * "real" record.
+ */
+ if ((dbp->type == DB_RECNO && F_ISSET(dbp, DB_AM_FIXEDLEN)) ||
+ (dbp->type == DB_QUEUE)) {
+ if (dbp->type == DB_QUEUE) {
+ re_len = ((QUEUE *)dbp->q_internal)->re_len;
+ re_pad = ((QUEUE *)dbp->q_internal)->re_pad;
+ } else {
+ re_len = ((BTREE *)dbp->bt_internal)->re_len;
+ re_pad = ((BTREE *)dbp->bt_internal)->re_pad;
+ }
+
+ size = ispartial ? newdata.size : data->size;
+ if (size > re_len) {
+ __db_err(dbp->dbenv,
+ "Length improper for fixed length record %lu",
+ (u_long)size);
+ ret = EINVAL;
+ goto err;
+ } else if (size < re_len) {
+ /*
+ * If we're not doing a partial put, copy
+ * data->data into newdata.data, then pad out
+ * newdata.data.
+ *
+ * If we're doing a partial put, the data
+ * we want are already in newdata.data; we
+ * just need to pad.
+ *
+ * Either way, realloc is safe.
+ */
+ if ((ret = __os_realloc(dbp->dbenv, re_len,
+ &newdata.data)) != 0)
+ goto err;
+ if (!ispartial)
+ memcpy(newdata.data, data->data, size);
+ memset((u_int8_t *)newdata.data + size, re_pad,
+ re_len - size);
+ newdata.size = re_len;
+ ispartial = 1;
+ }
+ }
+
+ /*
+ * Loop through the secondaries. (Step 3.)
+ *
+ * Note that __db_s_first and __db_s_next will take care of
+ * thread-locking and refcounting issues.
+ */
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ /*
+ * Call the callback for this secondary, to get the
+ * appropriate secondary key.
+ */
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp,
+ &pkey, ispartial ? &newdata : data, &skey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ /*
+ * The callback returned a null value--don't
+ * put this key in the secondary. Just
+ * move on to the next one--we'll handle
+ * any necessary deletes in step 5.
+ */
+ continue;
+ else
+ goto err;
+ }
+
+ /*
+ * Save the DBT we just got back from the callback function
+ * off; we want to pass its value into c_get functions
+ * that may stomp on a buffer the callback function
+ * allocated.
+ */
+ memset(&save_skey, 0, sizeof(DBT)); /* Paranoia. */
+ save_skey = skey;
+
+ /*
+ * Open a cursor in this secondary.
+ *
+ * Use the same locker ID as our primary cursor, so that
+ * we're guaranteed that the locks don't conflict (e.g. in CDB
+ * or if we're subdatabases that share and want to lock a
+ * metadata page).
+ */
+ if ((ret = __db_icursor(sdbp, dbc_arg->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc_arg->locker, &sdbc)) != 0)
+ goto err;
+
+ /*
+ * If we're in CDB, updates will fail since the new cursor
+ * isn't a writer. However, we hold the WRITE lock in the
+ * primary and will for as long as our new cursor lasts,
+ * and the primary and secondary share a lock file ID,
+ * so it's safe to consider this a WRITER. The close
+ * routine won't try to put anything because we don't
+ * really have a lock.
+ */
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * There are three cases here--
+ * 1) The secondary supports sorted duplicates.
+ * If we attempt to put a secondary/primary pair
+ * that already exists, that's a duplicate duplicate,
+ * and c_put will return DB_KEYEXIST (see __db_duperr).
+ * This will leave us with exactly one copy of the
+ * secondary/primary pair, and this is just right--we'll
+ * avoid deleting it later, as the old and new secondaries
+ * will match (since the old secondary is the dup dup
+ * that's already there).
+ * 2) The secondary supports duplicates, but they're not
+ * sorted. We need to avoid putting a duplicate
+ * duplicate, because the matching old and new secondaries
+ * will prevent us from deleting anything and we'll
+ * wind up with two secondary records that point to the
+ * same primary key. Do a c_get(DB_GET_BOTH); if
+ * that returns 0, skip the put.
+ * 3) The secondary doesn't support duplicates at all.
+ * In this case, secondary keys must be unique; if
+ * another primary key already exists for this
+ * secondary key, we have to either overwrite it or
+ * not put this one, and in either case we've
+ * corrupted the secondary index. Do a c_get(DB_SET).
+ * If the secondary/primary pair already exists, do
+ * nothing; if the secondary exists with a different
+ * primary, return an error; and if the secondary
+ * does not exist, put it.
+ */
+ if (!F_ISSET(sdbp, DB_AM_DUP)) {
+ /* Case 3. */
+ memset(&oldpkey, 0, sizeof(DBT));
+ F_SET(&oldpkey, DB_DBT_MALLOC);
+ ret = sdbc->c_real_get(sdbc,
+ &skey, &oldpkey, rmw | DB_SET);
+ if (ret == 0) {
+ cmp = __bam_defcmp(sdbp, &oldpkey, &pkey);
+ __os_ufree(sdbp->dbenv, oldpkey.data);
+ if (cmp != 0) {
+ __db_err(sdbp->dbenv, "%s%s",
+ "Put results in a non-unique secondary key in an ",
+ "index not configured to support duplicates");
+ ret = EINVAL;
+ goto skipput;
+ }
+ } else if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
+ goto skipput;
+ } else if (!F_ISSET(sdbp, DB_AM_DUPSORT))
+ /* Case 2. */
+ if ((ret = sdbc->c_real_get(sdbc,
+ &skey, &pkey, rmw | DB_GET_BOTH)) == 0)
+ goto skipput;
+
+ ret = sdbc->c_put(sdbc, &skey, &pkey, DB_UPDATE_SECONDARY);
+
+ /*
+ * We don't know yet whether this was a put-overwrite that
+ * in fact changed nothing. If it was, we may get DB_KEYEXIST.
+ * This is not an error.
+ */
+ if (ret == DB_KEYEXIST)
+ ret = 0;
+
+ /* Free the secondary key if the callback allocated it. */
+skipput: FREE_IF_NEEDED(sdbp, &save_skey)
+
+ if ((t_ret = sdbc->c_close(sdbc)) != 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ goto err;
+ }
+ if (ret != 0)
+ goto err;
+
+ /* If still necessary, go get the old primary key/data. (Step 4.) */
+ if (!have_oldrec) {
+ /* See the comments in step 2. This is real familiar. */
+ if ((ret = __db_c_idup(dbc_arg, &pdbc, 0)) != 0)
+ goto err;
+ DB_ASSERT(flags != DB_CURRENT);
+ pkey.data = key->data;
+ pkey.size = key->size;
+ ret = pdbc->c_get(pdbc, &pkey, &olddata, rmw | DB_SET);
+ if (ret == DB_KEYEMPTY || ret == DB_NOTFOUND) {
+ nodel = 1;
+ ret = 0;
+ }
+ if ((t_ret = pdbc->c_close(pdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ have_oldrec = 1;
+ }
+
+ /*
+ * If we don't follow this goto, we do in fact have an old record
+ * we may need to go delete. (Step 5).
+ */
+ if (nodel)
+ goto skip_s_update;
+
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ /*
+ * Call the callback for this secondary to get the
+ * old secondary key.
+ */
+ memset(&oldskey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp,
+ &pkey, &olddata, &oldskey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ /*
+ * The callback returned a null value--there's
+ * nothing to delete. Go on to the next
+ * secondary.
+ */
+ continue;
+ else
+ goto err;
+ }
+ if ((ret = sdbp->s_callback(sdbp,
+ &pkey, ispartial ? &newdata : data, &skey)) != 0 &&
+ ret != DB_DONOTINDEX)
+ goto err;
+
+ /*
+ * If there is no new secondary key, or if the old secondary
+ * key is different from the new secondary key, then
+ * we need to delete the old one.
+ *
+ * Note that bt_compare is (and must be) set no matter
+ * what access method we're in.
+ */
+ sdbc = NULL;
+ if (ret == DB_DONOTINDEX ||
+ ((BTREE *)sdbp->bt_internal)->bt_compare(sdbp,
+ &oldskey, &skey) != 0) {
+ if ((ret = __db_icursor(sdbp, dbc_arg->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc_arg->locker, &sdbc)) != 0)
+ goto err;
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * Don't let c_get(DB_GET_BOTH) stomp on
+ * any secondary key value that the callback
+ * function may have allocated. Use a temp
+ * DBT instead.
+ */
+ memset(&temp, 0, sizeof(DBT));
+ temp.data = oldskey.data;
+ temp.size = oldskey.size;
+ if ((ret = sdbc->c_real_get(sdbc,
+ &temp, &pkey, rmw | DB_GET_BOTH)) == 0)
+ ret = sdbc->c_del(sdbc, DB_UPDATE_SECONDARY);
+ }
+
+ FREE_IF_NEEDED(sdbp, &skey);
+ FREE_IF_NEEDED(sdbp, &oldskey);
+ if (sdbc != NULL && (t_ret = sdbc->c_close(sdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Secondary index updates are now done. On to the "real" stuff. */
+
+skip_s_update:
+ /*
+ * If we have an off-page duplicates cursor, and the operation applies
+ * to it, perform the operation. Duplicate the cursor and call the
+ * underlying function.
+ *
+ * Off-page duplicate trees are locked in the primary tree, that is,
+ * we acquire a write lock in the primary tree and no locks in the
+ * off-page dup tree. If the put operation is done in an off-page
+ * duplicate tree, call the primary cursor's upgrade routine first.
+ */
+ if (dbc_arg->internal->opd != NULL &&
+ (flags == DB_AFTER || flags == DB_BEFORE || flags == DB_CURRENT)) {
+ /*
+ * A special case for hash off-page duplicates. Hash doesn't
+ * support (and is documented not to support) put operations
+ * relative to a cursor which references an already deleted
+ * item. For consistency, apply the same criteria to off-page
+ * duplicates as well.
+ */
+ if (dbc_arg->dbtype == DB_HASH && F_ISSET(
+ ((BTREE_CURSOR *)(dbc_arg->internal->opd->internal)),
+ C_DELETED)) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+
+ if ((ret = dbc_arg->c_am_writelock(dbc_arg)) != 0)
+ return (ret);
+ if ((ret = __db_c_dup(dbc_arg, &dbc_n, DB_POSITIONI)) != 0)
+ goto err;
+ opd = dbc_n->internal->opd;
+ if ((ret = opd->c_am_put(
+ opd, key, data, flags, NULL)) != 0)
+ goto err;
+ goto done;
+ }
+
+ /*
+ * Perform an operation on the main cursor. Duplicate the cursor,
+ * and call the underlying function.
+ *
+ * XXX: MARGO
+ *
+ tmp_flags = flags == DB_AFTER ||
+ flags == DB_BEFORE || flags == DB_CURRENT ? DB_POSITIONI : 0;
+ */
+ tmp_flags = DB_POSITIONI;
+
+ /*
+ * If this cursor is going to be closed immediately, we don't
+ * need to take precautions to clean it up on error.
+ */
+ if (F_ISSET(dbc_arg, DBC_TRANSIENT))
+ dbc_n = dbc_arg;
+ else if ((ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags)) != 0)
+ goto err;
+
+ /* c_am_put sets pgno when the put created an off-page dup tree. */
+ pgno = PGNO_INVALID;
+ if ((ret = dbc_n->c_am_put(dbc_n, key, data, flags, &pgno)) != 0)
+ goto err;
+
+ /*
+ * We may be referencing a new off-page duplicates tree. Acquire
+ * a new cursor and call the underlying function.
+ */
+ if (pgno != PGNO_INVALID) {
+ oldopd = dbc_n->internal->opd;
+ if ((ret = __db_c_newopd(dbc_arg, pgno, oldopd, &opd)) != 0) {
+ dbc_n->internal->opd = opd;
+ goto err;
+ }
+
+ dbc_n->internal->opd = opd;
+
+ if ((ret = opd->c_am_put(
+ opd, key, data, flags, NULL)) != 0)
+ goto err;
+ }
+
+done:
+err: /* Cleanup and cursor resolution. */
+ if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* If newdata was used, free its buffer. */
+ if (newdata.data != NULL)
+ __os_free(dbp->dbenv, newdata.data);
+
+ CDB_LOCKING_DONE(dbp, dbc_arg);
+
+ if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0)
+ return (t_ret);
+
+ return (ret);
+}
+
+/*
+ * __db_duperr()
+ * Error message: we don't currently support sorted duplicate duplicates.
+ *
+ * Emits the "sorted duplicates" diagnostic when appropriate and always
+ * returns DB_KEYEXIST to the caller.
+ * PUBLIC: int __db_duperr __P((DB *, u_int32_t));
+ */
+int
+__db_duperr(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int quiet;
+
+ /*
+ * Stay quiet when we hit this while updating a secondary index:
+ * there's no clean way to pass DB_NODUPDATA in along with
+ * DB_UPDATE_SECONDARY, and a dup dup can occur there in a normal,
+ * non-error course of events.
+ *
+ * !!!
+ * If and when we ever permit duplicate duplicates in sorted-dup
+ * databases, we need to either change the secondary index code
+ * to check for dup dups, or we need to maintain the implicit
+ * "DB_NODUPDATA" behavior for databases with DB_AM_SECONDARY set.
+ */
+ quiet = flags == DB_NODUPDATA || F_ISSET(dbp, DB_AM_SECONDARY);
+ if (!quiet)
+ __db_err(dbp->dbenv,
+ "Duplicate data items are not supported with sorted data");
+ return (DB_KEYEXIST);
+}
+
+/*
+ * __db_c_cleanup --
+ * Clean up duplicate cursors.
+ *
+ * dbc is the application's cursor; dbc_n is the duplicated cursor the
+ * operation actually ran on (may be NULL, or the same cursor as dbc);
+ * failed is nonzero when the operation failed, in which case the
+ * original cursor's state is left untouched. Releases any pages the
+ * cursors (and their off-page dup cursors) hold, swaps the internal
+ * state into the application cursor on success, and closes dbc_n.
+ */
+static int
+__db_c_cleanup(dbc, dbc_n, failed)
+ DBC *dbc, *dbc_n;
+ int failed;
+{
+ DB *dbp;
+ DBC *opd;
+ DBC_INTERNAL *internal;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ internal = dbc->internal;
+ ret = 0;
+
+ /* Discard any pages we're holding. */
+ if (internal->page != NULL) {
+ if ((t_ret = mpf->put(mpf, internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ internal->page = NULL;
+ }
+ opd = internal->opd;
+ if (opd != NULL && opd->internal->page != NULL) {
+ if ((t_ret =
+ mpf->put(mpf, opd->internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ opd->internal->page = NULL;
+ }
+
+ /*
+ * If dbc_n is NULL, there's no internal cursor swapping to be done
+ * and no dbc_n to close--we probably did the entire operation on an
+ * offpage duplicate cursor. Just return.
+ *
+ * If dbc and dbc_n are the same, we're either inside a DB->{put/get}
+ * operation, and as an optimization we performed the operation on
+ * the main cursor rather than on a duplicated one, or we're in a
+ * bulk get that can't have moved the cursor (DB_MULTIPLE with the
+ * initial c_get operation on an off-page dup cursor). Just
+ * return--either we know we didn't move the cursor, or we're going
+ * to close it before we return to application code, so we're sure
+ * not to visibly violate the "cursor stays put on error" rule.
+ */
+ if (dbc_n == NULL || dbc == dbc_n)
+ return (ret);
+
+ /* Release the duplicated cursor's pages as well. */
+ if (dbc_n->internal->page != NULL) {
+ if ((t_ret =
+ mpf->put(mpf, dbc_n->internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ dbc_n->internal->page = NULL;
+ }
+ opd = dbc_n->internal->opd;
+ if (opd != NULL && opd->internal->page != NULL) {
+ if ((t_ret =
+ mpf->put(mpf, opd->internal->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ opd->internal->page = NULL;
+ }
+
+ /*
+ * If we didn't fail before entering this routine or just now when
+ * freeing pages, swap the interesting contents of the old and new
+ * cursors.
+ */
+ if (!failed && ret == 0) {
+ dbc->internal = dbc_n->internal;
+ dbc_n->internal = internal;
+ }
+
+ /*
+ * Close the cursor we don't care about anymore. The close can fail,
+ * but we only expect DB_LOCK_DEADLOCK failures. This violates our
+ * "the cursor is unchanged on error" semantics, but since all you can
+ * do with a DB_LOCK_DEADLOCK failure is close the cursor, I believe
+ * that's OK.
+ *
+ * XXX
+ * There's no way to recover from failure to close the old cursor.
+ * All we can do is move to the new position and return an error.
+ *
+ * XXX
+ * We might want to consider adding a flag to the cursor, so that any
+ * subsequent operations other than close just return an error?
+ */
+ if ((t_ret = dbc_n->c_close(dbc_n)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_c_secondary_get --
+ * This wrapper function for DBC->c_pget() is the DBC->c_get() function
+ * for a secondary index cursor.
+ *
+ * PUBLIC: int __db_c_secondary_get __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_secondary_get(dbc, skey, data, flags)
+ DBC *dbc;
+ DBT *skey, *data;
+ u_int32_t flags;
+{
+
+ DB_ASSERT(F_ISSET(dbc->dbp, DB_AM_SECONDARY));
+ /*
+ * Pass a NULL pkey: __db_c_pget substitutes an internal DBT for it,
+ * so the caller sees only the secondary key and the primary's data.
+ */
+ return (dbc->c_pget(dbc, skey, NULL, data, flags));
+}
+
+/*
+ * __db_c_pget --
+ * Get a primary key/data pair through a secondary index.
+ *
+ * PUBLIC: int __db_c_pget __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_pget(dbc, skey, pkey, data, flags)
+ DBC *dbc;
+ DBT *skey, *pkey, *data;
+ u_int32_t flags;
+{
+ DB *pdbp, *sdbp;
+ DBC *pdbc;
+ DBT *save_rdata, nullpkey;
+ int pkeymalloc, ret, save_pkey_flags, t_ret;
+
+ sdbp = dbc->dbp;
+ pdbp = sdbp->s_primary;
+ pkeymalloc = t_ret = 0;
+
+ /* Fail fast on a panicked environment or inconsistent DBTs/flags. */
+ PANIC_CHECK(sdbp->dbenv);
+ if ((ret = __db_cpgetchk(sdbp,
+ skey, pkey, data, flags, IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ /*
+ * The challenging part of this function is getting the behavior
+ * right for all the various permutations of DBT flags. The
+ * next several blocks handle the various cases we need to
+ * deal with specially.
+ */
+
+ /*
+ * We may be called with a NULL pkey argument, if we've been
+ * wrapped by a 2-DBT get call. If so, we need to use our
+ * own DBT.
+ */
+ if (pkey == NULL) {
+ memset(&nullpkey, 0, sizeof(DBT));
+ pkey = &nullpkey;
+ }
+
+ /*
+ * DB_GET_RECNO is a special case, because we're interested not in
+ * the primary key/data pair, but rather in the primary's record
+ * number.
+ */
+ if ((flags & DB_OPFLAGS_MASK) == DB_GET_RECNO)
+ return (__db_c_pget_recno(dbc, pkey, data, flags));
+
+ /*
+ * If the DBTs we've been passed don't have any of the
+ * user-specified memory management flags set, we want to make sure
+ * we return values using the DBTs dbc->rskey, dbc->rkey, and
+ * dbc->rdata, respectively.
+ *
+ * There are two tricky aspects to this: first, we need to pass
+ * skey and pkey *in* to the initial c_get on the secondary key,
+ * since either or both may be looked at by it (depending on the
+ * get flag). Second, we must not use a normal DB->get call
+ * on the secondary, even though that's what we want to accomplish,
+ * because the DB handle may be free-threaded. Instead,
+ * we open a cursor, then take steps to ensure that we actually use
+ * the rkey/rdata from the *secondary* cursor.
+ *
+ * We accomplish all this by passing in the DBTs we started out
+ * with to the c_get, but having swapped the contents of rskey and
+ * rkey, respectively, into rkey and rdata; __db_ret will treat
+ * them like the normal key/data pair in a c_get call, and will
+ * realloc them as need be (this is "step 1"). Then, for "step 2",
+ * we swap back rskey/rkey/rdata to normal, and do a get on the primary
+ * with the secondary dbc appointed as the owner of the returned-data
+ * memory.
+ *
+ * Note that in step 2, we copy the flags field in case we need to
+ * pass down a DB_DBT_PARTIAL or other flag that is compatible with
+ * letting DB do the memory management.
+ */
+ /* Step 1. */
+ save_rdata = dbc->rdata;
+ dbc->rdata = dbc->rkey;
+ dbc->rkey = dbc->rskey;
+
+ /*
+ * It is correct, though slightly sick, to attempt a partial get
+ * of a primary key. However, if we do so here, we'll never find the
+ * primary record; clear the DB_DBT_PARTIAL field of pkey just
+ * for the duration of the next call.
+ */
+ save_pkey_flags = pkey->flags;
+ F_CLR(pkey, DB_DBT_PARTIAL);
+
+ /*
+ * Now we can go ahead with the meat of this call. First, get the
+ * primary key from the secondary index. (What exactly we get depends
+ * on the flags, but the underlying cursor get will take care of the
+ * dirty work.)
+ */
+ if ((ret = dbc->c_real_get(dbc, skey, pkey, flags)) != 0) {
+ /* Restore rskey/rkey/rdata and return. */
+ pkey->flags = save_pkey_flags;
+ dbc->rskey = dbc->rkey;
+ dbc->rkey = dbc->rdata;
+ dbc->rdata = save_rdata;
+ goto err;
+ }
+
+ /* Restore pkey's flags in case we stomped the PARTIAL flag. */
+ pkey->flags = save_pkey_flags;
+
+ /*
+ * Restore the cursor's rskey, rkey, and rdata DBTs. If DB
+ * is handling the memory management, we now have newly
+ * reallocated buffers and ulens in rkey and rdata which we want
+ * to put in rskey and rkey. save_rdata contains the old value
+ * of dbc->rdata.
+ */
+ dbc->rskey = dbc->rkey;
+ dbc->rkey = dbc->rdata;
+ dbc->rdata = save_rdata;
+
+ /*
+ * Now we're ready for "step 2". If either or both of pkey and
+ * data do not have memory management flags set--that is, if DB is
+ * managing their memory--we need to swap around the rkey/rdata
+ * structures so that we don't wind up trying to use memory managed
+ * by the primary database cursor, which we'll close before we return.
+ *
+ * !!!
+ * If you're carefully following the bouncing ball, you'll note
+ * that in the DB-managed case, the buffer hanging off of pkey is
+ * the same as dbc->rkey->data. This is just fine; we may well
+ * realloc and stomp on it when we return, if we're going a
+ * DB_GET_BOTH and need to return a different partial or key
+ * (depending on the comparison function), but this is safe.
+ *
+ * !!!
+ * We need to use __db_icursor here rather than simply calling
+ * pdbp->cursor, because otherwise, if we're in CDB, we'll
+ * allocate a new locker ID and leave ourselves open to deadlocks.
+ * (Even though we're only acquiring read locks, we'll still block
+ * if there are any waiters.)
+ */
+ if ((ret = __db_icursor(pdbp,
+ dbc->txn, pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0)
+ goto err;
+
+ /*
+ * We're about to use pkey a second time. If DB_DBT_MALLOC
+ * is set on it, we'll leak the memory we allocated the first time.
+ * Thus, set DB_DBT_REALLOC instead so that we reuse that memory
+ * instead of leaking it.
+ *
+ * !!!
+ * This assumes that the user must always specify a compatible
+ * realloc function if a malloc function is specified. I think
+ * this is a reasonable requirement.
+ */
+ if (F_ISSET(pkey, DB_DBT_MALLOC)) {
+ F_CLR(pkey, DB_DBT_MALLOC);
+ F_SET(pkey, DB_DBT_REALLOC);
+ pkeymalloc = 1;
+ }
+
+ /*
+ * Do the actual get. Set DBC_TRANSIENT since we don't care
+ * about preserving the position on error, and it's faster.
+ * SET_RET_MEM so that the secondary DBC owns any returned-data
+ * memory.
+ */
+ F_SET(pdbc, DBC_TRANSIENT);
+ SET_RET_MEM(pdbc, dbc);
+ ret = pdbc->c_get(pdbc, pkey, data, DB_SET);
+
+ /*
+ * If the item wasn't found in the primary, this is a bug;
+ * our secondary has somehow gotten corrupted, and contains
+ * elements that don't correspond to anything in the primary.
+ * Complain.
+ */
+ if (ret == DB_NOTFOUND)
+ ret = __db_secondary_corrupt(pdbp);
+
+ /* Now close the primary cursor. */
+ t_ret = pdbc->c_close(pdbc);
+
+err: if (pkeymalloc) {
+ /*
+ * If pkey had a MALLOC flag, we need to restore it;
+ * otherwise, if the user frees the buffer but reuses
+ * the DBT without NULL'ing its data field or changing
+ * the flags, we may drop core.
+ */
+ F_CLR(pkey, DB_DBT_REALLOC);
+ F_SET(pkey, DB_DBT_MALLOC);
+ }
+ /*
+ * If closing the primary cursor failed, that error takes
+ * precedence; otherwise return the result of the gets.
+ */
+ return (t_ret == 0 ? ret : t_ret);
+}
+
+/*
+ * __db_c_pget_recno --
+ * Perform a DB_GET_RECNO c_pget on a secondary index. Returns
+ * the secondary's record number in the pkey field and the primary's
+ * in the data field.
+ */
+static int
+__db_c_pget_recno(sdbc, pkey, data, flags)
+ DBC *sdbc;
+ DBT *pkey, *data;
+ u_int32_t flags;
+{
+ DB *pdbp, *sdbp;
+ DB_ENV *dbenv;
+ DBC *pdbc;
+ DBT discardme, primary_key;
+ db_recno_t oob;
+ u_int32_t rmw;
+ int ret, t_ret;
+
+ sdbp = sdbc->dbp;
+ pdbp = sdbp->s_primary;
+ dbenv = sdbp->dbenv;
+ pdbc = NULL;
+ ret = t_ret = 0;
+ /* pdbc stays NULL unless we open the primary cursor below. */
+
+ /* Carry the caller's DB_RMW setting through to the underlying gets. */
+ rmw = LF_ISSET(DB_RMW);
+
+ memset(&discardme, 0, sizeof(DBT));
+ F_SET(&discardme, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ oob = RECNO_OOB;
+
+ /*
+ * If the primary is an rbtree, we want its record number, whether
+ * or not the secondary is one too. Fetch the recno into "data".
+ *
+ * If it's not an rbtree, return RECNO_OOB in "data".
+ */
+ if (F_ISSET(pdbp, DB_AM_RECNUM)) {
+ /*
+ * Get the primary key, so we can find the record number
+ * in the primary. (We're uninterested in the secondary key.)
+ */
+ memset(&primary_key, 0, sizeof(DBT));
+ F_SET(&primary_key, DB_DBT_MALLOC);
+ if ((ret = sdbc->c_real_get(sdbc,
+ &discardme, &primary_key, rmw | DB_CURRENT)) != 0)
+ return (ret);
+
+ /*
+ * Open a cursor on the primary, set it to the right record,
+ * and fetch its recno into "data".
+ *
+ * (See __db_c_pget for a comment on the use of __db_icursor.)
+ *
+ * SET_RET_MEM so that the secondary DBC owns any returned-data
+ * memory.
+ */
+ if ((ret = __db_icursor(pdbp, sdbc->txn,
+ pdbp->type, PGNO_INVALID, 0, sdbc->locker, &pdbc)) != 0)
+ goto perr;
+ SET_RET_MEM(pdbc, sdbc);
+ if ((ret = pdbc->c_get(pdbc,
+ &primary_key, &discardme, rmw | DB_SET)) != 0)
+ goto perr;
+
+ ret = pdbc->c_get(pdbc, &discardme, data, rmw | DB_GET_RECNO);
+
+perr: __os_ufree(sdbp->dbenv, primary_key.data);
+ if (pdbc != NULL &&
+ (t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ return (ret);
+ } else if ((ret = __db_retcopy(dbenv, data, &oob,
+ sizeof(oob), &sdbc->rkey->data, &sdbc->rkey->ulen)) != 0)
+ return (ret);
+
+ /*
+ * If the secondary is an rbtree, we want its record number, whether
+ * or not the primary is one too. Fetch the recno into "pkey".
+ *
+ * If it's not an rbtree, return RECNO_OOB in "pkey".
+ */
+ if (F_ISSET(sdbp, DB_AM_RECNUM))
+ return (sdbc->c_real_get(sdbc, &discardme, pkey, flags));
+ else
+ return (__db_retcopy(dbenv, pkey, &oob,
+ sizeof(oob), &sdbc->rdata->data, &sdbc->rdata->ulen));
+}
+
+/*
+ * __db_wrlock_err -- do not have a write lock.
+ *
+ * Report an attempted write through a read-only cursor via the
+ * environment's error channel and return EPERM to the caller.
+ */
+static int
+__db_wrlock_err(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "Write attempted on read-only cursor");
+ return (EPERM);
+}
+
+/*
+ * __db_c_del_secondary --
+ * Perform a delete operation on a secondary index: call through
+ * to the primary and delete the primary record that this record
+ * points to.
+ *
+ * Note that deleting the primary record will call c_del on all
+ * the secondaries, including this one; thus, it is not necessary
+ * to execute both this function and an actual delete.
+ *
+ */
+static int
+__db_c_del_secondary(dbc)
+ DBC *dbc;
+{
+ DB *pdbp;
+ DBC *pdbc;
+ DBT skey, pkey;
+ int ret, t_ret;
+
+ memset(&skey, 0, sizeof(DBT));
+ memset(&pkey, 0, sizeof(DBT));
+
+ /*
+ * Get the current item that we're pointing at.
+ * We don't actually care about the secondary key, just
+ * the primary.
+ */
+ F_SET(&skey, DB_DBT_PARTIAL | DB_DBT_USERMEM);
+ if ((ret = dbc->c_real_get(dbc,
+ &skey, &pkey, DB_CURRENT)) != 0)
+ return (ret);
+
+ /*
+ * Create a cursor on the primary with our locker ID,
+ * so that when it calls back, we don't conflict.
+ *
+ * We create a cursor explicitly because there's no
+ * way to specify the same locker ID if we're using
+ * locking but not transactions if we use the DB->del
+ * interface. This shouldn't be any less efficient
+ * anyway.
+ */
+ pdbp = dbc->dbp->s_primary;
+ if ((ret = __db_icursor(pdbp, dbc->txn,
+ pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0)
+ return (ret);
+
+ /*
+ * See comment in __db_c_put--if we're in CDB,
+ * we already hold the locks we need, and we need to flag
+ * the cursor as a WRITER so we don't run into errors
+ * when we try to delete.
+ */
+ if (CDB_LOCKING(pdbp->dbenv)) {
+ DB_ASSERT(pdbc->mylock.off == LOCK_INVALID);
+ F_SET(pdbc, DBC_WRITER);
+ }
+
+ /*
+ * Set the new cursor to the correct primary key. Then
+ * delete it. We don't really care about the datum;
+ * just reuse our skey DBT.
+ *
+ * If the primary get returns DB_NOTFOUND, something is amiss--
+ * every record in the secondary should correspond to some record
+ * in the primary.
+ */
+ if ((ret = pdbc->c_get(pdbc, &pkey, &skey,
+ (STD_LOCKING(dbc) ? DB_RMW : 0) | DB_SET)) == 0)
+ ret = pdbc->c_del(pdbc, 0);
+ else if (ret == DB_NOTFOUND)
+ ret = __db_secondary_corrupt(pdbp);
+
+ /*
+ * Close the primary cursor. Report a close failure only if no
+ * earlier error occurred: the previous test ("ret != 0") both
+ * dropped close errors on an otherwise-successful delete and
+ * overwrote a real delete error with the close error. This now
+ * matches the "&& ret == 0" convention used throughout this file.
+ */
+ if ((t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_c_del_primary --
+ * Perform a delete operation on a primary index. Loop through
+ * all the secondary indices which correspond to this primary
+ * database, and delete any secondary keys that point at the current
+ * record.
+ *
+ * PUBLIC: int __db_c_del_primary __P((DBC *));
+ */
+int
+__db_c_del_primary(dbc)
+ DBC *dbc;
+{
+ DB *dbp, *sdbp;
+ DBC *sdbc;
+ DBT data, pkey, skey, temp;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+
+ /*
+ * If we're called at all, we have at least one secondary.
+ * (Unfortunately, we can't assert this without grabbing the mutex.)
+ * Get the current record so that we can construct appropriate
+ * secondary keys as needed.
+ */
+ memset(&pkey, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ if ((ret = dbc->c_get(dbc, &pkey, &data, DB_CURRENT)) != 0)
+ return (ret);
+
+ /* Walk the secondaries; __db_s_first/__db_s_next refcount each. */
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ /*
+ * Get the secondary key for this secondary and the current
+ * item.
+ */
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp, &pkey, &data, &skey)) != 0) {
+ /*
+ * If the current item isn't in this index, we
+ * have no work to do. Proceed.
+ */
+ if (ret == DB_DONOTINDEX)
+ continue;
+
+ /* We had a substantive error. Bail. */
+ FREE_IF_NEEDED(sdbp, &skey);
+ goto done;
+ }
+
+ /* Open a secondary cursor. */
+ if ((ret = __db_icursor(sdbp, dbc->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc->locker, &sdbc)) != 0)
+ goto done;
+ /* See comment above and in __db_c_put. */
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * Set the secondary cursor to the appropriate item.
+ * Delete it.
+ *
+ * We want to use DB_RMW if locking is on; it's only
+ * legal then, though.
+ *
+ * !!!
+ * Don't stomp on any callback-allocated buffer in skey
+ * when we do a c_get(DB_GET_BOTH); use a temp DBT instead.
+ */
+ memset(&temp, 0, sizeof(DBT));
+ temp.data = skey.data;
+ temp.size = skey.size;
+ if ((ret = sdbc->c_real_get(sdbc, &temp, &pkey,
+ (STD_LOCKING(dbc) ? DB_RMW : 0) | DB_GET_BOTH)) == 0)
+ ret = sdbc->c_del(sdbc, DB_UPDATE_SECONDARY);
+
+ FREE_IF_NEEDED(sdbp, &skey);
+
+ if ((t_ret = sdbc->c_close(sdbc)) != 0 || ret != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ goto done;
+ }
+ }
+
+ /* Drop our reference on the secondary we stopped at, if any. */
+done: if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0)
+ return (t_ret);
+ return (ret);
+}
+
+/*
+ * __db_s_first --
+ * Get the first secondary, if any are present, from the primary.
+ *
+ * The returned handle's refcount has been bumped; the caller must
+ * release it via __db_s_next or __db_s_done.
+ *
+ * PUBLIC: DB *__db_s_first __P((DB *));
+ */
+DB *
+__db_s_first(pdbp)
+ DB *pdbp;
+{
+ DB *sdbp;
+
+ MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+ sdbp = LIST_FIRST(&pdbp->s_secondaries);
+
+ /* See __db_s_next. */
+ if (sdbp != NULL)
+ sdbp->s_refcnt++;
+ MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+
+ return (sdbp);
+}
+
+/*
+ * __db_s_next --
+ * Get the next secondary in the list.
+ *
+ * Drops the reference on *sdbpp (closing the handle if we held the
+ * last reference) and returns the next secondary, refcounted,
+ * through *sdbpp; NULL means the end of the list.
+ *
+ * PUBLIC: int __db_s_next __P((DB **));
+ */
+int
+__db_s_next(sdbpp)
+ DB **sdbpp;
+{
+ DB *sdbp, *pdbp, *closeme;
+ int ret;
+
+ /*
+ * Secondary indices are kept in a linked list, s_secondaries,
+ * off each primary DB handle. If a primary is free-threaded,
+ * this list may only be traversed or modified while the primary's
+ * thread mutex is held.
+ *
+ * The tricky part is that we don't want to hold the thread mutex
+ * across the full set of secondary puts necessary for each primary
+ * put, or we'll wind up essentially single-threading all the puts
+ * to the handle; the secondary puts will each take about as
+ * long as the primary does, and may require I/O. So we instead
+ * hold the thread mutex only long enough to follow one link to the
+ * next secondary, and then we release it before performing the
+ * actual secondary put.
+ *
+ * The only danger here is that we might legitimately close a
+ * secondary index in one thread while another thread is performing
+ * a put and trying to update that same secondary index. To
+ * prevent this from happening, we refcount the secondary handles.
+ * If close is called on a secondary index handle while we're putting
+ * to it, it won't really be closed--the refcount will simply drop,
+ * and we'll be responsible for closing it here.
+ */
+ sdbp = *sdbpp;
+ pdbp = sdbp->s_primary;
+ closeme = NULL;
+
+ MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+ DB_ASSERT(sdbp->s_refcnt != 0);
+ if (--sdbp->s_refcnt == 0) {
+ LIST_REMOVE(sdbp, s_links);
+ closeme = sdbp;
+ }
+ sdbp = LIST_NEXT(sdbp, s_links);
+ if (sdbp != NULL)
+ sdbp->s_refcnt++;
+ MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+
+ *sdbpp = sdbp;
+
+ /*
+ * closeme->close() is a wrapper; call __db_close explicitly.
+ */
+ ret = closeme != NULL ? __db_close(closeme, 0) : 0;
+ return (ret);
+}
+
+/*
+ * __db_s_done --
+ * Properly decrement the refcount on a secondary database handle we're
+ * using, without calling __db_s_next.
+ *
+ * Closes the handle if ours was the last outstanding reference.
+ *
+ * PUBLIC: int __db_s_done __P((DB *));
+ */
+int
+__db_s_done(sdbp)
+ DB *sdbp;
+{
+ DB *pdbp;
+ int doclose;
+
+ pdbp = sdbp->s_primary;
+ doclose = 0;
+
+ MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+ DB_ASSERT(sdbp->s_refcnt != 0);
+ if (--sdbp->s_refcnt == 0) {
+ LIST_REMOVE(sdbp, s_links);
+ doclose = 1;
+ }
+ MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+
+ return (doclose ? __db_close(sdbp, 0) : 0);
+}
+
+/*
+ * __db_buildpartial --
+ * Build the record that will result after a partial put is applied to
+ * an existing record.
+ *
+ * oldrec is the existing record; partial is the DB_DBT_PARTIAL DBT
+ * describing the put. newrec is filled in here: newrec->data is
+ * allocated with __os_malloc and must be freed by the caller.
+ *
+ * This should probably be merged with __bam_build, but that requires
+ * a little trickery if we plan to keep the overflow-record optimization
+ * in that function.
+ */
+static int
+__db_buildpartial(dbp, oldrec, partial, newrec)
+ DB *dbp;
+ DBT *oldrec, *partial, *newrec;
+{
+ int ret;
+ u_int8_t *buf;
+ u_int32_t len, nbytes;
+
+ DB_ASSERT(F_ISSET(partial, DB_DBT_PARTIAL));
+
+ memset(newrec, 0, sizeof(DBT));
+
+ nbytes = __db_partsize(oldrec->size, partial);
+ newrec->size = nbytes;
+
+ if ((ret = __os_malloc(dbp->dbenv, nbytes, &buf)) != 0)
+ return (ret);
+ newrec->data = buf;
+
+ /* Nul or pad out the buffer, for any part that isn't specified. */
+ memset(buf,
+ F_ISSET(dbp, DB_AM_FIXEDLEN) ? ((BTREE *)dbp->bt_internal)->re_pad :
+ 0, nbytes);
+
+ /* Copy in any leading data from the original record. */
+ memcpy(buf, oldrec->data,
+ partial->doff > oldrec->size ? oldrec->size : partial->doff);
+
+ /* Copy the data from partial. */
+ memcpy(buf + partial->doff, partial->data, partial->size);
+
+ /* Copy any trailing data from the original record. */
+ len = partial->doff + partial->dlen;
+ if (oldrec->size > len)
+ memcpy(buf + partial->doff + partial->size,
+ (u_int8_t *)oldrec->data + len, oldrec->size - len);
+
+ return (0);
+}
+
+/*
+ * __db_partsize --
+ * Given the number of bytes in an existing record and a DBT that
+ * is about to be partial-put, calculate the size of the record
+ * after the put.
+ *
+ * nbytes is the current record size; data is the DB_DBT_PARTIAL DBT
+ * (its doff, dlen and size fields are the only ones consulted).
+ *
+ * This code is called from __bam_partsize.
+ *
+ * PUBLIC: u_int32_t __db_partsize __P((u_int32_t, DBT *));
+ */
+u_int32_t
+__db_partsize(nbytes, data)
+ u_int32_t nbytes;
+ DBT *data;
+{
+
+ /*
+ * There are really two cases here:
+ *
+ * Case 1: We are replacing some bytes that do not exist (i.e., they
+ * are past the end of the record). In this case the number of bytes
+ * we are replacing is irrelevant and all we care about is how many
+ * bytes we are going to add from offset. So, the new record length
+ * is going to be the size of the new bytes (size) plus wherever those
+ * new bytes begin (doff).
+ *
+ * Case 2: All the bytes we are replacing exist. Therefore, the new
+ * size is the oldsize (nbytes) minus the bytes we are replacing (dlen)
+ * plus the bytes we are adding (size).
+ */
+ if (nbytes < data->doff + data->dlen) /* Case 1 */
+ return (data->doff + data->size);
+
+ return (nbytes + data->size - data->dlen); /* Case 2 */
+}
diff --git a/storage/bdb/db/db_conv.c b/storage/bdb/db/db_conv.c
new file mode 100644
index 00000000000..f731c82d85e
--- /dev/null
+++ b/storage/bdb/db/db_conv.c
@@ -0,0 +1,550 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_conv.c,v 11.38 2002/08/15 03:00:13 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+/*
+ * __db_pgin --
+ * Primary page-swap routine.
+ *
+ * Order of operations: verify the checksum, then decrypt, then hand
+ * the page to the access-method-specific pgin routine for conversion.
+ *
+ * PUBLIC: int __db_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__db_pgin(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB dummydb, *dbp;
+ DB_PGINFO *pginfo;
+ DB_CIPHER *db_cipher;
+ DB_LSN not_used;
+ PAGE *pagep;
+ size_t pg_off, pg_len, sum_len;
+ int is_hmac, ret;
+ u_int8_t *chksum, *iv;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ pagep = (PAGE *)pp;
+
+ ret = is_hmac = 0;
+ chksum = iv = NULL;
+ /* dummydb is a throwaway DB carrying only the caller's flags. */
+ memset(&dummydb, 0, sizeof(DB));
+ dbp = &dummydb;
+ dummydb.flags = pginfo->flags;
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * If checksumming is set on the meta-page, we must set
+ * it in the dbp.
+ */
+ if (FLD_ISSET(((DBMETA *)pp)->metaflags, DBMETA_CHKSUM))
+ F_SET(dbp, DB_AM_CHKSUM);
+ if (((DBMETA *)pp)->encrypt_alg != 0 ||
+ F_ISSET(dbp, DB_AM_ENCRYPT))
+ is_hmac = 1;
+ /*
+ * !!!
+ * For all meta pages it is required that the chksum
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ chksum = ((BTMETA *)pp)->chksum;
+ sum_len = DBMETASIZE;
+ break;
+ case P_INVALID:
+ /*
+ * We assume that we've read a file hole if we have
+ * a zero LSN, zero page number and P_INVALID. Otherwise
+ * we have an invalid page that might contain real data.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)) && pagep->pgno == PGNO_INVALID) {
+ sum_len = 0;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ chksum = P_CHKSUM(dbp, pagep);
+ sum_len = pginfo->db_pagesize;
+ /*
+ * If we are reading in a non-meta page, then if we have
+ * a db_cipher then we are using hmac.
+ */
+ is_hmac = CRYPTO_ON(dbenv) ? 1 : 0;
+ break;
+ }
+
+ /*
+ * We expect a checksum error if there was a configuration problem.
+ * If there is no configuration problem and we don't get a match,
+ * it's fatal: panic the system.
+ */
+ if (F_ISSET(dbp, DB_AM_CHKSUM) && sum_len != 0)
+ switch (ret = __db_check_chksum(
+ dbenv, db_cipher, chksum, pp, sum_len, is_hmac)) {
+ case 0:
+ break;
+ case -1:
+ if (DBENV_LOGGING(dbenv))
+ __db_cksum_log(
+ dbenv, NULL, &not_used, DB_FLUSH);
+ __db_err(dbenv,
+ "checksum error: catastrophic recovery required");
+ return (__db_panic(dbenv, DB_RUNRECOVERY));
+ default:
+ return (ret);
+ }
+
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ DB_ASSERT(db_cipher != NULL);
+ DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM));
+
+ pg_off = P_OVERHEAD(dbp);
+ DB_ASSERT(db_cipher->adj_size(pg_off) == 0);
+
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * !!!
+ * For all meta pages it is required that the iv
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ iv = ((BTMETA *)pp)->iv;
+ pg_len = DBMETASIZE;
+ break;
+ case P_INVALID:
+ if (IS_ZERO_LSN(LSN(pagep)) &&
+ pagep->pgno == PGNO_INVALID) {
+ pg_len = 0;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ iv = P_IV(dbp, pagep);
+ pg_len = pginfo->db_pagesize;
+ break;
+ }
+ /* Decrypt everything beyond the per-page header. */
+ if (pg_len != 0 && (ret = db_cipher->decrypt(dbenv,
+ db_cipher->data, iv, ((u_int8_t *)pagep) + pg_off,
+ pg_len - pg_off)) != 0)
+ return (ret);
+ }
+ switch (pagep->type) {
+ case P_INVALID:
+ if (pginfo->type == DB_QUEUE)
+ return (__qam_pgin_out(dbenv, pg, pp, cookie));
+ else
+ return (__ham_pgin(dbenv, dbp, pg, pp, cookie));
+ case P_HASH:
+ case P_HASHMETA:
+ return (__ham_pgin(dbenv, dbp, pg, pp, cookie));
+ case P_BTREEMETA:
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ case P_OVERFLOW:
+ return (__bam_pgin(dbenv, dbp, pg, pp, cookie));
+ case P_QAMMETA:
+ case P_QAMDATA:
+ return (__qam_pgin_out(dbenv, pg, pp, cookie));
+ default:
+ break;
+ }
+ return (__db_pgfmt(dbenv, pg));
+}
+
+/*
+ * __db_pgout --
+ * Primary page-swap routine.
+ *
+ * PUBLIC: int __db_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__db_pgout(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB dummydb, *dbp;
+ DB_CIPHER *db_cipher;
+ DB_PGINFO *pginfo;
+ PAGE *pagep;
+ size_t pg_off, pg_len, sum_len;
+ int ret;
+ u_int8_t *chksum, *iv, *key;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ pagep = (PAGE *)pp;
+
+ chksum = iv = key = NULL;
+ memset(&dummydb, 0, sizeof(DB));
+ dbp = &dummydb;
+ dummydb.flags = pginfo->flags;
+ ret = 0;
+ /*
+ * Convert the page to its on-disk form first; encryption and
+ * checksumming below operate on the converted image. This is
+ * the reverse of the order used by __db_pgin.
+ */
+ switch (pagep->type) {
+ case P_INVALID:
+ if (pginfo->type == DB_QUEUE)
+ ret = __qam_pgin_out(dbenv, pg, pp, cookie);
+ else
+ ret = __ham_pgout(dbenv, dbp, pg, pp, cookie);
+ break;
+ case P_HASH:
+ case P_HASHMETA:
+ ret = __ham_pgout(dbenv, dbp, pg, pp, cookie);
+ break;
+ case P_BTREEMETA:
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ case P_OVERFLOW:
+ ret = __bam_pgout(dbenv, dbp, pg, pp, cookie);
+ break;
+ case P_QAMMETA:
+ case P_QAMDATA:
+ ret = __qam_pgin_out(dbenv, pg, pp, cookie);
+ break;
+ default:
+ return (__db_pgfmt(dbenv, pg));
+ }
+ if (ret)
+ return (ret);
+
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+
+ DB_ASSERT(db_cipher != NULL);
+ DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM));
+
+ pg_off = P_OVERHEAD(dbp);
+ DB_ASSERT(db_cipher->adj_size(pg_off) == 0);
+
+ key = db_cipher->mac_key;
+
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * !!!
+ * For all meta pages it is required that the iv
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ iv = ((BTMETA *)pp)->iv;
+ pg_len = DBMETASIZE;
+ break;
+ default:
+ iv = P_IV(dbp, pagep);
+ pg_len = pginfo->db_pagesize;
+ break;
+ }
+ if ((ret = db_cipher->encrypt(dbenv, db_cipher->data,
+ iv, ((u_int8_t *)pagep) + pg_off, pg_len - pg_off)) != 0)
+ return (ret);
+ }
+ if (F_ISSET(dbp, DB_AM_CHKSUM)) {
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * !!!
+ * For all meta pages it is required that the chksum
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ chksum = ((BTMETA *)pp)->chksum;
+ sum_len = DBMETASIZE;
+ break;
+ default:
+ chksum = P_CHKSUM(dbp, pagep);
+ sum_len = pginfo->db_pagesize;
+ break;
+ }
+ __db_chksum(pp, sum_len, key, chksum);
+ }
+ return (0);
+}
+
+/*
+ * __db_metaswap --
+ * Byteswap the common part of the meta-data page.
+ *
+ * NOTE(review): the swap order presumably mirrors the common meta-page
+ * (DBMETA) field layout -- verify against dbinc/db_page.h if that
+ * structure ever changes.
+ *
+ * PUBLIC: void __db_metaswap __P((PAGE *));
+ */
+void
+__db_metaswap(pg)
+ PAGE *pg;
+{
+ u_int8_t *p;
+
+ p = (u_int8_t *)pg;
+
+ /* Swap the meta-data information. */
+ SWAP32(p); /* lsn.file */
+ SWAP32(p); /* lsn.offset */
+ SWAP32(p); /* pgno */
+ SWAP32(p); /* magic */
+ SWAP32(p); /* version */
+ SWAP32(p); /* pagesize */
+ p += 4; /* unused, page type, unused, unused */
+ SWAP32(p); /* free */
+ SWAP32(p); /* alloc_lsn part 1 */
+ SWAP32(p); /* alloc_lsn part 2 */
+ SWAP32(p); /* cached key count */
+ SWAP32(p); /* cached record count */
+ SWAP32(p); /* flags */
+}
+
+/*
+ * __db_byteswap --
+ * Byteswap a page.
+ *
+ * PUBLIC: int __db_byteswap
+ * PUBLIC: __P((DB_ENV *, DB *, db_pgno_t, PAGE *, size_t, int));
+ */
+int
+__db_byteswap(dbenv, dbp, pg, h, pagesize, pgin)
+ DB_ENV *dbenv;
+ DB *dbp;
+ db_pgno_t pg;
+ PAGE *h;
+ size_t pagesize;
+ int pgin;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ RINTERNAL *ri;
+ db_indx_t i, *inp, len, tmp;
+ u_int8_t *p, *end;
+
+ COMPQUIET(pg, 0);
+
+ inp = P_INP(dbp, h);
+ if (pgin) {
+ M_32_SWAP(h->lsn.file);
+ M_32_SWAP(h->lsn.offset);
+ M_32_SWAP(h->pgno);
+ M_32_SWAP(h->prev_pgno);
+ M_32_SWAP(h->next_pgno);
+ M_16_SWAP(h->entries);
+ M_16_SWAP(h->hf_offset);
+ }
+
+ /*
+ * For pgin, each inp entry is swapped just before the item it
+ * points to is interpreted; for pgout, entries are converted only
+ * after their items have been swapped, and the page header is
+ * converted last (see the end of this function).
+ */
+ switch (h->type) {
+ case P_HASH:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(inp[i]);
+
+ switch (HPAGE_TYPE(dbp, h, i)) {
+ case H_KEYDATA:
+ break;
+ case H_DUPLICATE:
+ len = LEN_HKEYDATA(dbp, h, pagesize, i);
+ p = HKEYDATA_DATA(P_ENTRY(dbp, h, i));
+ for (end = p + len; p < end;) {
+ if (pgin) {
+ P_16_SWAP(p);
+ memcpy(&tmp,
+ p, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+ } else {
+ memcpy(&tmp,
+ p, sizeof(db_indx_t));
+ SWAP16(p);
+ }
+ p += tmp;
+ SWAP16(p);
+ }
+ break;
+ case H_OFFDUP:
+ p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i));
+ SWAP32(p); /* pgno */
+ break;
+ case H_OFFPAGE:
+ p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i));
+ SWAP32(p); /* pgno */
+ SWAP32(p); /* tlen */
+ break;
+ }
+
+ }
+
+ /*
+ * The offsets in the inp array are used to determine
+ * the size of entries on a page; therefore they
+ * cannot be converted until we've done all the
+ * entries.
+ */
+ if (!pgin)
+ for (i = 0; i < NUM_ENT(h); i++)
+ M_16_SWAP(inp[i]);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(inp[i]);
+
+ /*
+ * In the case of on-page duplicates, key information
+ * should only be swapped once.
+ */
+ if (h->type == P_LBTREE && i > 1) {
+ if (pgin) {
+ if (inp[i] == inp[i - 2])
+ continue;
+ } else {
+ M_16_SWAP(inp[i]);
+ if (inp[i] == inp[i - 2])
+ continue;
+ M_16_SWAP(inp[i]);
+ }
+ }
+
+ bk = GET_BKEYDATA(dbp, h, i);
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ M_16_SWAP(bk->len);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ bo = (BOVERFLOW *)bk;
+ M_32_SWAP(bo->pgno);
+ M_32_SWAP(bo->tlen);
+ break;
+ }
+
+ if (!pgin)
+ M_16_SWAP(inp[i]);
+ }
+ break;
+ case P_IBTREE:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(inp[i]);
+
+ bi = GET_BINTERNAL(dbp, h, i);
+ M_16_SWAP(bi->len);
+ M_32_SWAP(bi->pgno);
+ M_32_SWAP(bi->nrecs);
+
+ switch (B_TYPE(bi->type)) {
+ case B_KEYDATA:
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ bo = (BOVERFLOW *)bi->data;
+ M_32_SWAP(bo->pgno);
+ M_32_SWAP(bo->tlen);
+ break;
+ }
+
+ if (!pgin)
+ M_16_SWAP(inp[i]);
+ }
+ break;
+ case P_IRECNO:
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if (pgin)
+ M_16_SWAP(inp[i]);
+
+ ri = GET_RINTERNAL(dbp, h, i);
+ M_32_SWAP(ri->pgno);
+ M_32_SWAP(ri->nrecs);
+
+ if (!pgin)
+ M_16_SWAP(inp[i]);
+ }
+ break;
+ case P_OVERFLOW:
+ case P_INVALID:
+ /* Nothing to do. */
+ break;
+ default:
+ return (__db_pgfmt(dbenv, pg));
+ }
+
+ if (!pgin) {
+ /* Swap the header information. */
+ M_32_SWAP(h->lsn.file);
+ M_32_SWAP(h->lsn.offset);
+ M_32_SWAP(h->pgno);
+ M_32_SWAP(h->prev_pgno);
+ M_32_SWAP(h->next_pgno);
+ M_16_SWAP(h->entries);
+ M_16_SWAP(h->hf_offset);
+ }
+ return (0);
+}
diff --git a/storage/bdb/db/db_dispatch.c b/storage/bdb/db/db_dispatch.c
new file mode 100644
index 00000000000..2cf29ec2f33
--- /dev/null
+++ b/storage/bdb/db/db_dispatch.c
@@ -0,0 +1,1404 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_dispatch.c,v 11.121 2002/09/07 17:36:31 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/fop.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __db_limbo_fix __P((DB *,
+ DB_TXN *, DB_TXNLIST *, db_pgno_t *, DBMETA *));
+static int __db_limbo_bucket __P((DB_ENV *, DB_TXN *, DB_TXNLIST *));
+static int __db_limbo_move __P((DB_ENV *, DB_TXN *, DB_TXN *, DB_TXNLIST *));
+static int __db_lock_move __P((DB_ENV *,
+ u_int8_t *, db_pgno_t, db_lockmode_t, DB_TXN *, DB_TXN *));
+static int __db_default_getpgnos __P((DB_ENV *, DB_LSN *lsnp, void *));
+static int __db_txnlist_find_internal __P((DB_ENV *, void *, db_txnlist_type,
+ u_int32_t, u_int8_t [DB_FILE_ID_LEN], DB_TXNLIST **, int));
+static int __db_txnlist_pgnoadd __P((DB_ENV *, DB_TXNHEAD *,
+ int32_t, u_int8_t [DB_FILE_ID_LEN], char *, db_pgno_t));
+
+/*
+ * __db_dispatch --
+ *
+ * This is the transaction dispatch function used by the db access methods.
+ * It is designed to handle the record format used by all the access
+ * methods (the one automatically generated by the db_{h,log,read}.sh
+ * scripts in the tools directory). An application using a different
+ * recovery paradigm will supply a different dispatch function to txn_open.
+ *
+ * PUBLIC: int __db_dispatch __P((DB_ENV *,
+ * PUBLIC: int (**)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)),
+ * PUBLIC: size_t, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info)
+ DB_ENV *dbenv; /* The environment. */
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize; /* Size of the dtab. */
+ DBT *db; /* The log record upon which to dispatch. */
+ DB_LSN *lsnp; /* The lsn of the record being dispatched. */
+ db_recops redo; /* Redo this op (or undo it). */
+ void *info;
+{
+ DB_LSN prev_lsn;
+ u_int32_t rectype, txnid;
+ int make_call, ret;
+
+ memcpy(&rectype, db->data, sizeof(rectype));
+ memcpy(&txnid, (u_int8_t *)db->data + sizeof(rectype), sizeof(txnid));
+ make_call = ret = 0;
+
+ /* If we don't have a dispatch table, it's hard to dispatch. */
+ DB_ASSERT(dtab != NULL);
+
+ /*
+ * If we find a record that is in the user's number space and they
+ * have specified a recovery routine, let them handle it. If they
+ * didn't specify a recovery routine, then we expect that they've
+ * followed all our rules and registered new recovery functions.
+ */
+ switch (redo) {
+ case DB_TXN_ABORT:
+ case DB_TXN_APPLY:
+ case DB_TXN_PRINT:
+ make_call = 1;
+ break;
+ case DB_TXN_OPENFILES:
+ /*
+ * We collect all the transactions that have
+ * "begin" records, those with no previous LSN,
+ * so that we do not abort partial transactions.
+ * These are known to be undone, otherwise the
+ * log would not have been freeable.
+ */
+ memcpy(&prev_lsn, (u_int8_t *)db->data +
+ sizeof(rectype) + sizeof(txnid), sizeof(prev_lsn));
+ if (txnid != 0 && prev_lsn.file == 0 && (ret =
+ __db_txnlist_add(dbenv, info, txnid, TXN_OK, NULL)) != 0)
+ return (ret);
+
+ /* FALLTHROUGH */
+ case DB_TXN_POPENFILES:
+ if (rectype == DB___dbreg_register ||
+ rectype == DB___txn_ckp || rectype == DB___txn_recycle)
+ return (dtab[rectype](dbenv, db, lsnp, redo, info));
+ break;
+ case DB_TXN_BACKWARD_ROLL:
+ /*
+ * Running full recovery in the backward pass. If we've
+ * seen this txnid before and added to it our commit list,
+ * then we do nothing during this pass, unless this is a child
+ * commit record, in which case we need to process it. If
+ * we've never seen it, then we call the appropriate recovery
+ * routine.
+ *
+ * We need to always undo DB___db_noop records, so that we
+ * properly handle any aborts before the file was closed.
+ */
+ switch(rectype) {
+ case DB___txn_regop:
+ case DB___txn_recycle:
+ case DB___txn_ckp:
+ case DB___db_noop:
+ case DB___fop_file_remove:
+ case DB___txn_child:
+ make_call = 1;
+ break;
+
+ case DB___dbreg_register:
+ if (txnid == 0) {
+ make_call = 1;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ if (txnid != 0 && (ret =
+ __db_txnlist_find(dbenv,
+ info, txnid)) != TXN_COMMIT && ret != TXN_IGNORE) {
+ /*
+ * If not found then, this is an incomplete
+ * abort.
+ */
+ if (ret == TXN_NOTFOUND)
+ return (__db_txnlist_add(dbenv,
+ info, txnid, TXN_IGNORE, lsnp));
+ make_call = 1;
+ if (ret == TXN_OK &&
+ (ret = __db_txnlist_update(dbenv,
+ info, txnid,
+ rectype == DB___txn_xa_regop ?
+ TXN_PREPARE : TXN_ABORT, NULL)) != 0)
+ return (ret);
+ }
+ }
+ break;
+ case DB_TXN_FORWARD_ROLL:
+ /*
+ * In the forward pass, if we haven't seen the transaction,
+ * do nothing, else recover it.
+ *
+ * We need to always redo DB___db_noop records, so that we
+ * properly handle any commits after the file was closed.
+ */
+ switch(rectype) {
+ case DB___txn_recycle:
+ case DB___txn_ckp:
+ case DB___db_noop:
+ make_call = 1;
+ break;
+
+ default:
+ if (txnid != 0 && (ret = __db_txnlist_find(dbenv,
+ info, txnid)) == TXN_COMMIT)
+ make_call = 1;
+ else if (ret != TXN_IGNORE &&
+ (rectype == DB___ham_metagroup ||
+ rectype == DB___ham_groupalloc ||
+ rectype == DB___db_pg_alloc)) {
+ /*
+ * Because we cannot undo file extensions
+ * all allocation records must be reprocessed
+ * during rollforward in case the file was
+ * just created. It may not have been
+ * present during the backward pass.
+ */
+ make_call = 1;
+ redo = DB_TXN_BACKWARD_ALLOC;
+ } else if (rectype == DB___dbreg_register) {
+ /*
+ * This may be a transaction dbreg_register.
+ * If it is, we only make the call on a COMMIT,
+ * which we checked above. If it's not, then we
+ * should always make the call, because we need
+ * the file open information.
+ */
+ if (txnid == 0)
+ make_call = 1;
+ }
+ }
+ break;
+ case DB_TXN_GETPGNOS:
+ /*
+ * If this is one of DB's own log records, we simply
+ * dispatch.
+ */
+ if (rectype < DB_user_BEGIN) {
+ make_call = 1;
+ break;
+ }
+
+ /*
+ * If we're still here, this is a custom record in an
+ * application that's doing app-specific logging. Such a
+ * record doesn't have a getpgno function for the user
+ * dispatch function to call--the getpgnos functions return
+ * which pages replication needs to lock using the TXN_RECS
+ * structure, which is private and not something we want to
+ * document.
+ *
+ * Thus, we leave any necessary locking for the app's
+ * recovery function to do during the upcoming
+ * DB_TXN_APPLY. Fill in default getpgnos info (we need
+ * a stub entry for every log record that will get
+ * DB_TXN_APPLY'd) and return success.
+ */
+ return (__db_default_getpgnos(dbenv, lsnp, info));
+ default:
+ return (__db_unknown_flag(dbenv, "__db_dispatch", redo));
+ }
+ /*
+ * The switch statement uses ret to receive the return value of
+ * __db_txnlist_find, which returns a large number of different
+ * statuses, none of which we will be returning. For safety,
+ * let's reset this here in case we ever do a "return(ret)"
+ * below in the future.
+ */
+ ret = 0;
+
+ if (make_call) {
+ if (rectype >= DB_user_BEGIN && dbenv->app_dispatch != NULL)
+ return (dbenv->app_dispatch(dbenv, db, lsnp, redo));
+ else {
+ /*
+ * The size of the dtab table argument is the same as
+ * the standard table, use the standard table's size
+ * as our sanity check.
+ */
+ if (rectype > dtabsize || dtab[rectype] == NULL) {
+ __db_err(dbenv,
+ "Illegal record type %lu in log",
+ (u_long)rectype);
+ return (EINVAL);
+ }
+ return (dtab[rectype](dbenv, db, lsnp, redo, info));
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * __db_add_recovery --
+ *
+ * PUBLIC: int __db_add_recovery __P((DB_ENV *,
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *,
+ * PUBLIC: int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
+ */
+int
+__db_add_recovery(dbenv, dtab, dtabsize, func, ndx)
+ DB_ENV *dbenv;
+ int (***dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsize;
+ int (*func) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ u_int32_t ndx;
+{
+ size_t i, nsize;
+ int ret;
+
+ /* Check if we have to grow the table. */
+ if (ndx >= *dtabsize) {
+ nsize = ndx + 40;
+ if ((ret =
+ __os_realloc(dbenv, nsize * sizeof((*dtab)[0]), dtab)) != 0)
+ return (ret);
+ for (i = *dtabsize; i < nsize; ++i)
+ (*dtab)[i] = NULL;
+ *dtabsize = nsize;
+ }
+
+ (*dtab)[ndx] = func;
+ return (0);
+}
+
+/*
+ * __db_txnlist_init --
+ * Initialize transaction linked list.
+ *
+ * PUBLIC: int __db_txnlist_init __P((DB_ENV *,
+ * PUBLIC: u_int32_t, u_int32_t, DB_LSN *, void *));
+ */
+int
+__db_txnlist_init(dbenv, low_txn, hi_txn, trunc_lsn, retp)
+ DB_ENV *dbenv;
+ u_int32_t low_txn, hi_txn;
+ DB_LSN *trunc_lsn;
+ void *retp;
+{
+ DB_TXNHEAD *headp;
+ u_int32_t tmp;
+ int ret, size;
+
+ /*
+ * Size a hash table.
+ * If low is zero then we are being called during rollback
+ * and we need only one slot.
+ * Hi may be lower than low if we have recycled txnids.
+ * The numbers here are guesses about txn density, we can afford
+ * to look at a few entries in each slot.
+ */
+ if (low_txn == 0)
+ size = 1;
+ else {
+ if (hi_txn < low_txn) {
+ tmp = hi_txn;
+ hi_txn = low_txn;
+ low_txn = tmp;
+ }
+ tmp = hi_txn - low_txn;
+ /* See if we wrapped around. */
+ if (tmp > (TXN_MAXIMUM - TXN_MINIMUM) / 2)
+ tmp = (low_txn - TXN_MINIMUM) + (TXN_MAXIMUM - hi_txn);
+ size = tmp / 5;
+ if (size < 100)
+ size = 100;
+ }
+ if ((ret = __os_malloc(dbenv,
+ sizeof(DB_TXNHEAD) + size * sizeof(headp->head), &headp)) != 0)
+ return (ret);
+
+ memset(headp, 0, sizeof(DB_TXNHEAD) + size * sizeof(headp->head));
+ headp->maxid = hi_txn;
+ headp->generation = 0;
+ headp->nslots = size;
+ headp->gen_alloc = 8;
+ if ((ret = __os_malloc(dbenv, headp->gen_alloc *
+ sizeof(headp->gen_array[0]), &headp->gen_array)) != 0) {
+ __os_free(dbenv, headp);
+ return (ret);
+ }
+ headp->gen_array[0].generation = 0;
+ headp->gen_array[0].txn_min = TXN_MINIMUM;
+ headp->gen_array[0].txn_max = TXN_MAXIMUM;
+ if (trunc_lsn != NULL)
+ headp->trunc_lsn = *trunc_lsn;
+ else
+ ZERO_LSN(headp->trunc_lsn);
+ ZERO_LSN(headp->maxlsn);
+ ZERO_LSN(headp->ckplsn);
+
+ *(void **)retp = headp;
+ return (0);
+}
+
+/*
+ * __db_txnlist_add --
+ * Add an element to our transaction linked list.
+ *
+ * PUBLIC: int __db_txnlist_add __P((DB_ENV *,
+ * PUBLIC: void *, u_int32_t, int32_t, DB_LSN *));
+ */
+int
+__db_txnlist_add(dbenv, listp, txnid, status, lsn)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+ int32_t status;
+ DB_LSN *lsn;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
+ return (ret);
+
+ hp = (DB_TXNHEAD *)listp;
+ LIST_INSERT_HEAD(&hp->head[DB_TXNLIST_MASK(hp, txnid)], elp, links);
+
+ elp->type = TXNLIST_TXNID;
+ elp->u.t.txnid = txnid;
+ elp->u.t.status = status;
+ elp->u.t.generation = hp->generation;
+ if (txnid > hp->maxid)
+ hp->maxid = txnid;
+ if (lsn != NULL && IS_ZERO_LSN(hp->maxlsn) && status == TXN_COMMIT)
+ hp->maxlsn = *lsn;
+
+ DB_ASSERT(lsn == NULL ||
+ status != TXN_COMMIT || log_compare(&hp->maxlsn, lsn) >= 0);
+
+ return (0);
+}
+
+/*
+ * __db_txnlist_remove --
+ * Remove an element from our transaction linked list.
+ *
+ * PUBLIC: int __db_txnlist_remove __P((DB_ENV *, void *, u_int32_t));
+ */
+int
+__db_txnlist_remove(dbenv, listp, txnid)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+{
+ DB_TXNLIST *entry;
+
+ return (__db_txnlist_find_internal(dbenv,
+ listp, TXNLIST_TXNID, txnid,
+ NULL, &entry, 1) == TXN_NOTFOUND ? TXN_NOTFOUND : TXN_OK);
+}
+
+/*
+ * __db_txnlist_ckp --
+ * Used to record the maximum checkpoint that will be retained
+ * after recovery. Typically this is simply the max checkpoint, but
+ * if we are doing client replication recovery or timestamp-based
+ * recovery, we are going to virtually truncate the log and we need
+ * to retain the last checkpoint before the truncation point.
+ *
+ * PUBLIC: void __db_txnlist_ckp __P((DB_ENV *, void *, DB_LSN *));
+ */
+void
+__db_txnlist_ckp(dbenv, listp, ckp_lsn)
+ DB_ENV *dbenv;
+ void *listp;
+ DB_LSN *ckp_lsn;
+{
+ DB_TXNHEAD *hp;
+
+ COMPQUIET(dbenv, NULL);
+
+ hp = (DB_TXNHEAD *)listp;
+
+ if (IS_ZERO_LSN(hp->ckplsn) && !IS_ZERO_LSN(hp->maxlsn) &&
+ log_compare(&hp->maxlsn, ckp_lsn) >= 0)
+ hp->ckplsn = *ckp_lsn;
+}
+
+/*
+ * __db_txnlist_end --
+ * Discard transaction linked list. Print out any error messages
+ * for deleted files.
+ *
+ * PUBLIC: void __db_txnlist_end __P((DB_ENV *, void *));
+ */
+void
+__db_txnlist_end(dbenv, listp)
+ DB_ENV *dbenv;
+ void *listp;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+ int i;
+
+ if ((hp = (DB_TXNHEAD *)listp) == NULL)
+ return;
+
+ for (i = 0; i < hp->nslots; i++)
+ while (hp != NULL && (p = LIST_FIRST(&hp->head[i])) != NULL) {
+ LIST_REMOVE(p, links);
+ switch (p->type) {
+ case TXNLIST_LSN:
+ __os_free(dbenv, p->u.l.lsn_array);
+ break;
+ default:
+ /*
+ * Possibly an incomplete DB_TXNLIST; just
+ * free it.
+ */
+ break;
+ }
+ __os_free(dbenv, p);
+ }
+
+ if (hp->gen_array != NULL)
+ __os_free(dbenv, hp->gen_array);
+ __os_free(dbenv, listp);
+}
+
+/*
+ * __db_txnlist_find --
+ * Checks to see if a txnid with the current generation is in the
+ * txnid list. This returns TXN_NOTFOUND if the item isn't in the
+ * list otherwise it returns (like __db_txnlist_find_internal)
+ * the status of the transaction. A txnid of 0 means the record
+ * was generated while not in a transaction.
+ *
+ * PUBLIC: int __db_txnlist_find __P((DB_ENV *, void *, u_int32_t));
+ */
+int
+__db_txnlist_find(dbenv, listp, txnid)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+{
+ DB_TXNLIST *entry;
+
+ if (txnid == 0)
+ return (TXN_NOTFOUND);
+ return (__db_txnlist_find_internal(dbenv, listp,
+ TXNLIST_TXNID, txnid, NULL, &entry, 0));
+}
+
+/*
+ * __db_txnlist_update --
+ * Change the status of an existing transaction entry.
+ * Returns TXN_NOTFOUND if no such entry exists.
+ *
+ * PUBLIC: int __db_txnlist_update __P((DB_ENV *,
+ * PUBLIC: void *, u_int32_t, u_int32_t, DB_LSN *));
+ */
+int
+__db_txnlist_update(dbenv, listp, txnid, status, lsn)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+ u_int32_t status;
+ DB_LSN *lsn;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+ int ret;
+
+ if (txnid == 0)
+ return (TXN_NOTFOUND);
+ hp = (DB_TXNHEAD *)listp;
+ ret = __db_txnlist_find_internal(dbenv,
+ listp, TXNLIST_TXNID, txnid, NULL, &elp, 0);
+
+ if (ret == TXN_NOTFOUND)
+ return (ret);
+ elp->u.t.status = status;
+
+ if (lsn != NULL && IS_ZERO_LSN(hp->maxlsn) && status == TXN_COMMIT)
+ hp->maxlsn = *lsn;
+
+ return (ret);
+}
+
+/*
+ * __db_txnlist_find_internal --
+ * Find an entry on the transaction list. If the entry is not there or
+ * the list pointer is not initialized we return TXN_NOTFOUND. If the
+ * item is found, we return the status. Currently we always call this
+ * with an initialized list pointer but checking for NULL keeps it general.
+ */
+static int
+__db_txnlist_find_internal(dbenv, listp, type, txnid, uid, txnlistp, delete)
+ DB_ENV *dbenv;
+ void *listp;
+ db_txnlist_type type;
+ u_int32_t txnid;
+ u_int8_t uid[DB_FILE_ID_LEN];
+ DB_TXNLIST **txnlistp;
+ int delete;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *p;
+ int32_t generation;
+ u_int32_t hash;
+ struct __db_headlink *head;
+ int i, ret;
+
+ if ((hp = (DB_TXNHEAD *)listp) == NULL)
+ return (TXN_NOTFOUND);
+
+ switch (type) {
+ case TXNLIST_TXNID:
+ hash = txnid;
+ /* Find the most recent generation containing this ID */
+ for (i = 0; i <= hp->generation; i++)
+ /* The range may wrap around the end. */
+ if (hp->gen_array[i].txn_min <
+ hp->gen_array[i].txn_max ?
+ (txnid >= hp->gen_array[i].txn_min &&
+ txnid <= hp->gen_array[i].txn_max) :
+ (txnid >= hp->gen_array[i].txn_min ||
+ txnid <= hp->gen_array[i].txn_max))
+ break;
+ DB_ASSERT(i <= hp->generation);
+ generation = hp->gen_array[i].generation;
+ break;
+ case TXNLIST_PGNO:
+ memcpy(&hash, uid, sizeof(hash));
+ generation = 0;
+ break;
+ default:
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ head = &hp->head[DB_TXNLIST_MASK(hp, hash)];
+
+ for (p = LIST_FIRST(head); p != NULL; p = LIST_NEXT(p, links)) {
+ if (p->type != type)
+ continue;
+ switch (type) {
+ case TXNLIST_TXNID:
+ if (p->u.t.txnid != txnid ||
+ generation != p->u.t.generation)
+ continue;
+ ret = p->u.t.status;
+ break;
+
+ case TXNLIST_PGNO:
+ if (memcmp(uid, p->u.p.uid, DB_FILE_ID_LEN) != 0)
+ continue;
+
+ ret = 0;
+ break;
+ default:
+ DB_ASSERT(0);
+ ret = EINVAL;
+ }
+ if (delete == 1) {
+ LIST_REMOVE(p, links);
+ __os_free(dbenv, p);
+ } else if (p != LIST_FIRST(head)) {
+ /* Move it to head of list. */
+ LIST_REMOVE(p, links);
+ LIST_INSERT_HEAD(head, p, links);
+ }
+ *txnlistp = p;
+ return (ret);
+ }
+
+ return (TXN_NOTFOUND);
+}
+
+/*
+ * __db_txnlist_gen --
+ * Change the current generation number.
+ *
+ * PUBLIC: int __db_txnlist_gen __P((DB_ENV *,
+ * PUBLIC: void *, int, u_int32_t, u_int32_t));
+ */
+int
+__db_txnlist_gen(dbenv, listp, incr, min, max)
+ DB_ENV *dbenv;
+ void *listp;
+ int incr;
+ u_int32_t min, max;
+{
+ DB_TXNHEAD *hp;
+ int ret;
+
+ /*
+ * During recovery generation numbers keep track of "restart"
+ * checkpoints and recycle records. Restart checkpoints occur
+ * whenever we take a checkpoint and there are no outstanding
+ * transactions. When that happens, we can reset transaction IDs
+ * back to TXNID_MINIMUM. Currently we only do the reset
+ * at the end of recovery. Recycle records occur when txnids
+ * are exhausted during runtime. A free range of ids is identified
+ * and logged. This code maintains a stack of ranges. A txnid
+ * is given the generation number of the first range it falls into
+ * in the stack.
+ */
+ hp = (DB_TXNHEAD *)listp;
+ hp->generation += incr;
+ if (incr < 0)
+ memmove(hp->gen_array, &hp->gen_array[1],
+ (hp->generation + 1) * sizeof(hp->gen_array[0]));
+ else {
+ if (hp->generation >= hp->gen_alloc) {
+ hp->gen_alloc *= 2;
+ if ((ret = __os_realloc(dbenv, hp->gen_alloc *
+ sizeof(hp->gen_array[0]), &hp->gen_array)) != 0)
+ return (ret);
+ }
+ memmove(&hp->gen_array[1], &hp->gen_array[0],
+ hp->generation * sizeof(hp->gen_array[0]));
+ hp->gen_array[0].generation = hp->generation;
+ hp->gen_array[0].txn_min = min;
+ hp->gen_array[0].txn_max = max;
+ }
+ return (0);
+}
+
+#define TXN_BUBBLE(AP, MAX) { \
+ int __j; \
+ DB_LSN __tmp; \
+ \
+ for (__j = 0; __j < MAX - 1; __j++) \
+ if (log_compare(&AP[__j], &AP[__j + 1]) < 0) { \
+ __tmp = AP[__j]; \
+ AP[__j] = AP[__j + 1]; \
+ AP[__j + 1] = __tmp; \
+ } \
+}
+
+/*
+ * __db_txnlist_lsnadd --
+ * Add to or re-sort the transaction list lsn entry. Note that since this
+ * is used during an abort, the __txn_undo code calls into the "recovery"
+ * subsystem explicitly, and there is only a single TXNLIST_LSN entry on
+ * the list.
+ *
+ * PUBLIC: int __db_txnlist_lsnadd __P((DB_ENV *, void *, DB_LSN *, u_int32_t));
+ */
+int
+__db_txnlist_lsnadd(dbenv, listp, lsnp, flags)
+ DB_ENV *dbenv;
+ void *listp;
+ DB_LSN *lsnp;
+ u_int32_t flags;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+ int i, ret;
+
+ hp = (DB_TXNHEAD *)listp;
+
+ for (elp = LIST_FIRST(&hp->head[0]);
+ elp != NULL; elp = LIST_NEXT(elp, links))
+ if (elp->type == TXNLIST_LSN)
+ break;
+
+ if (elp == NULL)
+ return (DB_SURPRISE_KID);
+
+ if (LF_ISSET(TXNLIST_NEW)) {
+ if (elp->u.l.ntxns >= elp->u.l.maxn) {
+ if ((ret = __os_realloc(dbenv,
+ 2 * elp->u.l.maxn * sizeof(DB_LSN),
+ &elp->u.l.lsn_array)) != 0)
+ return (ret);
+ elp->u.l.maxn *= 2;
+ }
+ elp->u.l.lsn_array[elp->u.l.ntxns++] = *lsnp;
+ } else
+ /* Simply replace the 0th element. */
+ elp->u.l.lsn_array[0] = *lsnp;
+
+ /*
+ * If we just added a new entry, there may be NULL entries, so we
+ * have to do a complete bubble sort, not just trickle a changed entry
+ * around.
+ */
+ for (i = 0; i < (!LF_ISSET(TXNLIST_NEW) ? 1 : elp->u.l.ntxns); i++)
+ TXN_BUBBLE(elp->u.l.lsn_array, elp->u.l.ntxns);
+
+ *lsnp = elp->u.l.lsn_array[0];
+
+ return (0);
+}
+
+/*
+ * __db_txnlist_lsninit --
+ * Initialize a transaction list with an lsn array entry.
+ *
+ * PUBLIC: int __db_txnlist_lsninit __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *));
+ */
+int
+__db_txnlist_lsninit(dbenv, hp, lsnp)
+ DB_ENV *dbenv;
+ DB_TXNHEAD *hp;
+ DB_LSN *lsnp;
+{
+ DB_TXNLIST *elp;
+ int ret;
+
+ elp = NULL;
+
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
+ goto err;
+ LIST_INSERT_HEAD(&hp->head[0], elp, links);
+
+ if ((ret = __os_malloc(dbenv,
+ 12 * sizeof(DB_LSN), &elp->u.l.lsn_array)) != 0)
+ goto err;
+ elp->type = TXNLIST_LSN;
+ elp->u.l.maxn = 12;
+ elp->u.l.ntxns = 1;
+ elp->u.l.lsn_array[0] = *lsnp;
+
+ return (0);
+
+err: __db_txnlist_end(dbenv, hp);
+ return (ret);
+}
+
+/*
+ * __db_add_limbo -- add pages to the limbo list.
+ * Get the file information and call pgnoadd for each page.
+ *
+ * PUBLIC: int __db_add_limbo __P((DB_ENV *,
+ * PUBLIC: void *, int32_t, db_pgno_t, int32_t));
+ */
+int
+__db_add_limbo(dbenv, info, fileid, pgno, count)
+ DB_ENV *dbenv;
+ void *info;
+ int32_t fileid;
+ db_pgno_t pgno;
+ int32_t count;
+{
+ DB_LOG *dblp;
+ FNAME *fnp;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ if ((ret = __dbreg_id_to_fname(dblp, fileid, 0, &fnp)) != 0)
+ return (ret);
+
+ do {
+ if ((ret =
+ __db_txnlist_pgnoadd(dbenv, info, fileid, fnp->ufid,
+ R_ADDR(&dblp->reginfo, fnp->name_off), pgno)) != 0)
+ return (ret);
+ pgno++;
+ } while (--count != 0);
+
+ return (0);
+}
+
+/*
+ * __db_do_the_limbo -- move pages from limbo to free.
+ *
+ * Limbo processing is what ensures that we correctly handle and
+ * recover from page allocations. During recovery, for each database,
+ * we process each in-question allocation, link them into the free list
+ * and then write out the new meta-data page that contains the pointer
+ * to the new beginning of the free list. On an abort, we use our
+ * standard __db_free mechanism in a compensating transaction which logs
+ * the specific modifications to the free list.
+ *
+ * If we run out of log space during an abort, then we can't write the
+ * compensating transaction, so we abandon the idea of a compensating
+ * transaction, and go back to processing how we do during recovery.
+ * The reason that this is not the norm is that it's expensive: it requires
+ * that we flush any database with an in-question allocation. Thus if
+ * a compensating transaction fails, we never try to restart it.
+ *
+ * Since files may be open and closed within transactions (in particular,
+ * the master database for subdatabases), we must be prepared to open
+ * files during this process. If there is a compensating transaction, we
+ * can open the files in that transaction. If this was an abort and there
+ * is no compensating transaction, then we've got to perform these opens
+ * in the context of the aborting transaction so that we do not deadlock.
+ * During recovery, there's no locking, so this isn't an issue.
+ *
+ * What you want to keep in mind when reading this is that there are two
+ * algorithms going on here: ctxn == NULL, then we're either in recovery
+ * or our compensating transaction has failed and we're doing the
+ * "create list and write meta-data page" algorithm. Otherwise, we're in
+ * an abort and doing the "use compensating transaction" algorithm.
+ *
+ * PUBLIC: int __db_do_the_limbo __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB_TXN *, DB_TXNHEAD *));
+ */
+int
+__db_do_the_limbo(dbenv, ptxn, txn, hp)
+ DB_ENV *dbenv;
+ DB_TXN *ptxn, *txn;
+ DB_TXNHEAD *hp;
+{
+ DB_TXNLIST *elp;
+ int h, ret;
+
+ ret = 0;
+ /*
+ * The slots correspond to hash buckets. We've hashed the
+ * fileids into hash buckets and need to pick up all affected
+ * files. (There will only be a single slot for an abort.)
+ */
+ for (h = 0; h < hp->nslots; h++) {
+ if ((elp = LIST_FIRST(&hp->head[h])) == NULL)
+ continue;
+ if (ptxn != NULL) {
+ if ((ret =
+ __db_limbo_move(dbenv, ptxn, txn, elp)) != 0)
+ goto err;
+ } else if ((ret = __db_limbo_bucket(dbenv, txn, elp)) != 0)
+ goto err;
+ }
+
+err: if (ret != 0) {
+ __db_err(dbenv, "Fatal error in abort of an allocation");
+ ret = __db_panic(dbenv, ret);
+ }
+
+ return (ret);
+}
+
+/* Limbo support routines. */
+
+/*
+ * __db_lock_move --
+ * Move a lock from child to parent.
+ */
+static int
+__db_lock_move(dbenv, fileid, pgno, mode, ptxn, txn)
+ DB_ENV *dbenv;
+ u_int8_t *fileid;
+ db_pgno_t pgno;
+ db_lockmode_t mode;
+ DB_TXN *ptxn, *txn;
+{
+ DBT lock_dbt;
+ DB_LOCK lock;
+ DB_LOCK_ILOCK lock_obj;
+ DB_LOCKREQ req;
+ int ret;
+
+ lock_obj.pgno = pgno;
+ memcpy(lock_obj.fileid, fileid, DB_FILE_ID_LEN);
+ lock_obj.type = DB_PAGE_LOCK;
+
+ memset(&lock_dbt, 0, sizeof(lock_dbt));
+ lock_dbt.data = &lock_obj;
+ lock_dbt.size = sizeof(lock_obj);
+
+ if ((ret = dbenv->lock_get(dbenv,
+ txn->txnid, 0, &lock_dbt, mode, &lock)) == 0) {
+ memset(&req, 0, sizeof(req));
+ req.lock = lock;
+ req.op = DB_LOCK_TRADE;
+
+ ret = dbenv->lock_vec(dbenv, ptxn->txnid, 0, &req, 1, NULL);
+ }
+ return (ret);
+}
+
+/*
+ * __db_limbo_move
+ * Move just the metapage lock to the parent.
+ */
+static int
+__db_limbo_move(dbenv, ptxn, txn, elp)
+ DB_ENV *dbenv;
+ DB_TXN *ptxn, *txn;
+ DB_TXNLIST *elp;
+{
+ int ret;
+
+ for (; elp != NULL; elp = LIST_NEXT(elp, links)) {
+ if (elp->type != TXNLIST_PGNO || elp->u.p.locked == 1)
+ continue;
+ if ((ret = __db_lock_move(dbenv, elp->u.p.uid,
+ PGNO_BASE_MD, DB_LOCK_WRITE, ptxn, txn)) != 0)
+ return (ret);
+ elp->u.p.locked = 1;
+ }
+
+ return (0);
+}
+/*
+ * __db_limbo_bucket
+ * Perform limbo processing for a single hash bucket in the txnlist.
+ * txn is the transaction aborting in the case of an abort and ctxn is the
+ * compensating transaction.
+ */
+
+#define T_RESTORED(txn) ((txn) != NULL && F_ISSET(txn, TXN_RESTORED))
+static int
+__db_limbo_bucket(dbenv, txn, elp)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_TXNLIST *elp;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ DBMETA *meta;
+ DB_TXN *ctxn, *t;
+ db_pgno_t last_pgno, pgno;
+ int dbp_created, in_retry, ret, t_ret;
+
+ ctxn = NULL;
+ in_retry = 0;
+ meta = NULL;
+ mpf = NULL;
+ ret = 0;
+ for (; elp != NULL; elp = LIST_NEXT(elp, links)) {
+ if (elp->type != TXNLIST_PGNO)
+ continue;
+retry: dbp_created = 0;
+
+ /*
+ * Pick the transaction in which to potentially
+ * log compensations.
+ */
+ if (!in_retry && !IS_RECOVERING(dbenv) && !T_RESTORED(txn)
+ && (ret = __txn_compensate_begin(dbenv, &ctxn)) != 0)
+ return (ret);
+
+ /*
+ * Either use the compensating transaction or
+ * the one passed in, which will be null if recovering.
+ */
+ t = ctxn == NULL ? txn : ctxn;
+
+ /* First try to get a dbp by fileid. */
+ ret = __dbreg_id_to_db(dbenv, t, &dbp, elp->u.p.fileid, 0);
+
+ /*
+ * File is being destroyed. No need to worry about
+ * dealing with recovery of allocations.
+ */
+ if (ret == DB_DELETED ||
+ (ret == 0 && F_ISSET(dbp, DB_AM_DISCARD)))
+ goto next;
+
+ if (ret != 0) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+ /*
+ * This tells the system not to lock, which is always
+ * OK, whether this is an abort or recovery.
+ */
+ F_SET(dbp, DB_AM_COMPENSATE);
+ dbp_created = 1;
+
+ /* It is ok if the file is no longer there. */
+ dbp->type = DB_UNKNOWN;
+ ret = __db_dbopen(dbp, t, elp->u.p.fname, NULL,
+ DB_ODDFILESIZE, __db_omode("rw----"), PGNO_BASE_MD);
+ if (ret == ENOENT)
+ goto next;
+ }
+
+ /*
+ * Verify that we are opening the same file that we were
+ * referring to when we wrote this log record.
+ */
+ if (memcmp(elp->u.p.uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
+ goto next;
+
+ mpf = dbp->mpf;
+ last_pgno = PGNO_INVALID;
+
+ if (ctxn == NULL) {
+ pgno = PGNO_BASE_MD;
+ if ((ret =
+ mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ last_pgno = meta->free;
+ }
+
+ ret = __db_limbo_fix(dbp, ctxn, elp, &last_pgno, meta);
+ /*
+ * If we were doing compensating transactions, then we are
+ * going to hope this error was due to running out of space.
+ * We'll change modes (into the sync the file mode) and keep
+ * trying. If we weren't doing compensating transactions,
+ * then this is a real error and we're sunk.
+ */
+ if (ret != 0) {
+ if (ret == DB_RUNRECOVERY || ctxn == NULL)
+ goto err;
+ in_retry = 1;
+ goto retry;
+ }
+
+ if (ctxn != NULL) {
+ ret = ctxn->commit(ctxn, DB_TXN_NOSYNC);
+ ctxn = NULL;
+ if (ret != 0)
+ goto retry;
+ goto next;
+ }
+
+ /*
+ * This is where we handle the case where we're explicitly
+ * putting together a free list. We need to decide whether
+ * we have to write the meta-data page, and if we do, then
+ * we need to sync it as well.
+ */
+ if (last_pgno == meta->free) {
+ /* No change to page; just put the page back. */
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ meta = NULL;
+ } else {
+ /*
+ * These changes are unlogged so we cannot have the
+ * metapage pointing at pages that are not on disk.
+ * Therefore, we flush the new free list, then update
+ * the metapage. We have to put the meta-data page
+ * first so that it isn't pinned when we try to sync.
+ */
+ if (!IS_RECOVERING(dbenv) && !T_RESTORED(txn))
+ __db_err(dbenv, "Flushing free list to disk");
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ meta = NULL;
+ dbp->sync(dbp, 0);
+ pgno = PGNO_BASE_MD;
+ if ((ret =
+ mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ meta->free = last_pgno;
+ if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ meta = NULL;
+ }
+
+next:
+ /*
+ * If we get here, either we have processed the list
+ * or the db file has been deleted or could not be opened.
+ */
+ if (ctxn != NULL &&
+ (t_ret = ctxn->abort(ctxn)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (dbp_created &&
+ (t_ret = __db_close_i(dbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ dbp = NULL;
+ __os_free(dbenv, elp->u.p.fname);
+ __os_free(dbenv, elp->u.p.pgno_array);
+ if (ret == ENOENT)
+ ret = 0;
+ else if (ret != 0)
+ goto err;
+ }
+
+err: if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+}
+
+/*
+ * __db_limbo_fix --
+ *	Process a single limbo entry which describes all the page allocations
+ * for a single file.
+ *
+ * If ctxn is NULL we are linking the pages directly into an on-disk free
+ * list headed at *lastp (fatal-recovery mode); otherwise each page is
+ * freed under the compensating transaction ctxn.
+ */
+static int
+__db_limbo_fix(dbp, ctxn, elp, lastp, meta)
+	DB *dbp;
+	DB_TXN *ctxn;		/* Compensating transaction, or NULL. */
+	DB_TXNLIST *elp;	/* Limbo entry listing this file's pages. */
+	db_pgno_t *lastp;	/* In/out: head of the free list being built. */
+	DBMETA *meta;
+{
+	DBC *dbc;
+	DB_MPOOLFILE *mpf;
+	PAGE *freep, *pagep;
+	db_pgno_t next, pgno;
+	int i, put_page, ret, t_ret;
+
+	/*
+	 * Loop through the entries for this txnlist element and
+	 * either link them into the free list or write a compensating
+	 * record for each.
+	 */
+	put_page = 0;
+	ret = 0;
+	mpf = dbp->mpf;
+	dbc = NULL;
+
+	for (i = 0; i < elp->u.p.nentries; i++) {
+		pgno = elp->u.p.pgno_array[i];
+
+		if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+			goto err;
+		put_page = 1;
+
+		/* A zero LSN means the allocation never reached disk. */
+		if (IS_ZERO_LSN(LSN(pagep))) {
+			if (ctxn == NULL) {
+				/*
+				 * If this is a fatal recovery which
+				 * spans a previous crash this page may
+				 * be on the free list already.
+				 */
+				for (next = *lastp; next != 0; ) {
+					if (next == pgno)
+						break;
+					if ((ret = mpf->get(mpf,
+					    &next, 0, &freep)) != 0)
+						goto err;
+					next = NEXT_PGNO(freep);
+					if ((ret =
+					    mpf->put(mpf, freep, 0)) != 0)
+						goto err;
+				}
+
+				/* Not yet on the list: link it at the head. */
+				if (next != pgno) {
+					P_INIT(pagep, dbp->pgsize, pgno,
+					    PGNO_INVALID, *lastp, 0, P_INVALID);
+					LSN(pagep) = LSN(meta);
+					*lastp = pgno;
+				}
+			} else {
+				P_INIT(pagep, dbp->pgsize, pgno,
+				    PGNO_INVALID, *lastp, 0, P_INVALID);
+				if (dbc == NULL && (ret =
+				    dbp->cursor(dbp, ctxn, &dbc, 0)) != 0)
+					goto err;
+				/*
+				 * If the dbp is compensating (because we
+				 * opened it), the dbc will automatically be
+				 * marked compensating, but in case we didn't
+				 * do the open, we have to mark it explicitly.
+				 */
+				F_SET(dbc, DBC_COMPENSATE);
+				/* __db_free consumes the page reference. */
+				ret = __db_free(dbc, pagep);
+				put_page = 0;
+				/*
+				 * On any error, we hope that the error was
+				 * caused due to running out of space, and we
+				 * switch modes, doing the processing where we
+				 * sync out files instead of doing compensating
+				 * transactions.  If this was a real error and
+				 * not out of space, we assume that some other
+				 * call will fail real soon.
+				 */
+				if (ret != 0) {
+					/* Assume that this is out of space. */
+					(void)dbc->c_close(dbc);
+					dbc = NULL;
+					goto err;
+				}
+			}
+		}
+
+		if (put_page == 1) {
+			ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY);
+			put_page = 0;
+		}
+		if (ret != 0)
+			goto err;
+	}
+
+err:	if (put_page &&
+	    (t_ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+		ret = t_ret;
+	if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+		ret = t_ret;
+	return (ret);
+}
+
+#define DB_TXNLIST_MAX_PGNO 8 /* A nice even number. */
+
+/*
+ * __db_txnlist_pgnoadd --
+ *	Find the txnlist entry for a file and add this pgno, or add the list
+ * entry for the file and then add the pgno.
+ *
+ * On error the entire transaction list is discarded via __db_txnlist_end.
+ */
+static int
+__db_txnlist_pgnoadd(dbenv, hp, fileid, uid, fname, pgno)
+	DB_ENV *dbenv;
+	DB_TXNHEAD *hp;
+	int32_t fileid;
+	u_int8_t uid[DB_FILE_ID_LEN];
+	char *fname;
+	db_pgno_t pgno;
+{
+	DB_TXNLIST *elp;
+	u_int32_t hash;
+	int len, ret;
+
+	elp = NULL;
+
+	if (__db_txnlist_find_internal(dbenv, hp,
+	    TXNLIST_PGNO, 0, uid, &elp, 0) != 0) {
+		/*
+		 * First page seen for this file: create a TXNLIST_PGNO
+		 * entry, hashed on the leading bytes of the file's
+		 * unique ID.
+		 */
+		if ((ret =
+		    __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
+			goto err;
+		memcpy(&hash, uid, sizeof(hash));
+		LIST_INSERT_HEAD(
+		    &hp->head[DB_TXNLIST_MASK(hp, hash)], elp, links);
+		elp->u.p.fileid = fileid;
+		memcpy(elp->u.p.uid, uid, DB_FILE_ID_LEN);
+
+		len = (int)strlen(fname) + 1;
+		if ((ret = __os_malloc(dbenv, len, &elp->u.p.fname)) != 0)
+			goto err;
+		memcpy(elp->u.p.fname, fname, len);
+
+		elp->u.p.maxentry = 0;
+		elp->u.p.locked = 0;
+		elp->type = TXNLIST_PGNO;
+		/*
+		 * Use DB_TXNLIST_MAX_PGNO for the initial allocation
+		 * rather than a hard-coded 8 so the array size and the
+		 * maxentry bookkeeping below cannot silently diverge.
+		 */
+		if ((ret = __os_malloc(dbenv, DB_TXNLIST_MAX_PGNO *
+		    sizeof(db_pgno_t), &elp->u.p.pgno_array)) != 0)
+			goto err;
+		elp->u.p.maxentry = DB_TXNLIST_MAX_PGNO;
+		elp->u.p.nentries = 0;
+	} else if (elp->u.p.nentries == elp->u.p.maxentry) {
+		/* Array is full; double its capacity. */
+		elp->u.p.maxentry <<= 1;
+		if ((ret = __os_realloc(dbenv, elp->u.p.maxentry *
+		    sizeof(db_pgno_t), &elp->u.p.pgno_array)) != 0)
+			goto err;
+	}
+
+	elp->u.p.pgno_array[elp->u.p.nentries++] = pgno;
+
+	return (0);
+
+err:	__db_txnlist_end(dbenv, hp);
+	return (ret);
+}
+
+/*
+ * __db_default_getpgnos --
+ *	Fill in default getpgnos information for an application-specific
+ * log record: a single no-lock entry tagged with the record's LSN.
+ */
+static int
+__db_default_getpgnos(dbenv, lsnp, summary)
+	DB_ENV *dbenv;
+	DB_LSN *lsnp;
+	void *summary;
+{
+	TXN_RECS *recs;
+	int ret;
+
+	recs = (TXN_RECS *)summary;
+
+	/* Make sure the array has room for one more entry. */
+	if ((ret = __rep_check_alloc(dbenv, recs, 1)) != 0)
+		return (ret);
+
+	memset(&recs->array[recs->npages].pgdesc, 0,
+	    sizeof(recs->array[recs->npages].pgdesc));
+	recs->array[recs->npages].flags = LSN_PAGE_NOLOCK;
+	recs->array[recs->npages].lsn = *lsnp;
+	recs->array[recs->npages].fid = DB_LOGFILEID_INVALID;
+	recs->npages++;
+
+	return (0);
+}
+
+#ifdef DEBUG
+/*
+ * __db_txnlist_print --
+ *	Print out the transaction list.
+ *
+ * PUBLIC: void __db_txnlist_print __P((void *));
+ */
+void
+__db_txnlist_print(listp)
+	void *listp;
+{
+	DB_TXNHEAD *hp;
+	DB_TXNLIST *elp;
+	int slot;
+	char *stats[] = { "ok", "commit", "prepare", "abort", "notfound",
+	    "ignore", "expected", "unexpected" };
+
+	hp = (DB_TXNHEAD *)listp;
+
+	printf("Maxid: %lu Generation: %lu\n",
+	    (u_long)hp->maxid, (u_long)hp->generation);
+	/* Walk every hash bucket and dump each entry we recognize. */
+	for (slot = 0; slot < hp->nslots; slot++)
+		for (elp = LIST_FIRST(&hp->head[slot]);
+		    elp != NULL; elp = LIST_NEXT(elp, links)) {
+			if (elp->type != TXNLIST_TXNID) {
+				printf("Unrecognized type: %d\n", elp->type);
+				continue;
+			}
+			printf("TXNID: %lx(%lu): %s\n",
+			    (u_long)elp->u.t.txnid,
+			    (u_long)elp->u.t.generation,
+			    stats[elp->u.t.status]);
+		}
+}
+#endif
diff --git a/storage/bdb/db/db_dup.c b/storage/bdb/db/db_dup.c
new file mode 100644
index 00000000000..2d33d79153f
--- /dev/null
+++ b/storage/bdb/db/db_dup.c
@@ -0,0 +1,281 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_dup.c,v 11.32 2002/08/08 03:57:47 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __db_ditem --
+ *	Remove an item from a page.
+ *
+ * PUBLIC: int __db_ditem __P((DBC *, PAGE *, u_int32_t, u_int32_t));
+ */
+int
+__db_ditem(dbc, pagep, indx, nbytes)
+	DBC *dbc;
+	PAGE *pagep;
+	u_int32_t indx, nbytes;	/* item's index slot; its on-page size */
+{
+	DB *dbp;
+	DBT ldbt;
+	db_indx_t cnt, *inp, offset;
+	int ret;
+	u_int8_t *from;
+
+	dbp = dbc->dbp;
+	/* Log the item being deleted before the page is modified. */
+	if (DBC_LOGGING(dbc)) {
+		ldbt.data = P_ENTRY(dbp, pagep, indx);
+		ldbt.size = nbytes;
+		if ((ret = __db_addrem_log(dbp, dbc->txn,
+		    &LSN(pagep), 0, DB_REM_DUP, PGNO(pagep),
+		    (u_int32_t)indx, nbytes, &ldbt, NULL, &LSN(pagep))) != 0)
+			return (ret);
+	} else
+		LSN_NOT_LOGGED(LSN(pagep));
+
+	/*
+	 * If there's only a single item on the page, we don't have to
+	 * work hard.
+	 */
+	if (NUM_ENT(pagep) == 1) {
+		NUM_ENT(pagep) = 0;
+		HOFFSET(pagep) = dbp->pgsize;
+		return (0);
+	}
+
+	inp = P_INP(dbp, pagep);
+	/*
+	 * Pack the remaining key/data items at the end of the page.  Use
+	 * memmove(3), the regions may overlap.
+	 */
+	from = (u_int8_t *)pagep + HOFFSET(pagep);
+	DB_ASSERT((int)inp[indx] - HOFFSET(pagep) >= 0);
+	memmove(from + nbytes, from, inp[indx] - HOFFSET(pagep));
+	HOFFSET(pagep) += nbytes;
+
+	/* Adjust the indices' offsets: anything below the hole moved up. */
+	offset = inp[indx];
+	for (cnt = 0; cnt < NUM_ENT(pagep); ++cnt)
+		if (inp[cnt] < offset)
+			inp[cnt] += nbytes;
+
+	/* Shift the indices down, closing the deleted slot. */
+	--NUM_ENT(pagep);
+	if (indx != NUM_ENT(pagep))
+		memmove(&inp[indx], &inp[indx + 1],
+		    sizeof(db_indx_t) * (NUM_ENT(pagep) - indx));
+
+	return (0);
+}
+
+/*
+ * __db_pitem --
+ *	Put an item on a page.
+ *
+ * PUBLIC: int __db_pitem
+ * PUBLIC:     __P((DBC *, PAGE *, u_int32_t, u_int32_t, DBT *, DBT *));
+ */
+int
+__db_pitem(dbc, pagep, indx, nbytes, hdr, data)
+	DBC *dbc;
+	PAGE *pagep;
+	u_int32_t indx;
+	u_int32_t nbytes;	/* total on-page size of hdr + data */
+	DBT *hdr, *data;
+{
+	DB *dbp;
+	BKEYDATA bk;
+	DBT thdr;
+	db_indx_t *inp;
+	int ret;
+	u_int8_t *p;
+
+	dbp = dbc->dbp;
+	/* The caller is responsible for ensuring the item fits. */
+	if (nbytes > P_FREESPACE(dbp, pagep)) {
+		DB_ASSERT(nbytes <= P_FREESPACE(dbp, pagep));
+		return (EINVAL);
+	}
+	/*
+	 * Put a single item onto a page.  The logic figuring out where to
+	 * insert and whether it fits is handled in the caller.  All we do
+	 * here is manage the page shuffling.  We cheat a little bit in that
+	 * we don't want to copy the dbt on a normal put twice.  If hdr is
+	 * NULL, we create a BKEYDATA structure on the page, otherwise, just
+	 * copy the caller's information onto the page.
+	 *
+	 * This routine is also used to put entries onto the page where the
+	 * entry is pre-built, e.g., during recovery.  In this case, the hdr
+	 * will point to the entry, and the data argument will be NULL.
+	 *
+	 * !!!
+	 * There's a tremendous potential for off-by-one errors here, since
+	 * the passed in header sizes must be adjusted for the structure's
+	 * placeholder for the trailing variable-length data field.
+	 */
+	if (DBC_LOGGING(dbc)) {
+		if ((ret = __db_addrem_log(dbp, dbc->txn,
+		    &LSN(pagep), 0, DB_ADD_DUP, PGNO(pagep),
+		    (u_int32_t)indx, nbytes, hdr, data, &LSN(pagep))) != 0)
+			return (ret);
+	} else
+		LSN_NOT_LOGGED(LSN(pagep));
+
+	if (hdr == NULL) {
+		/* Build a BKEYDATA header describing the data item. */
+		B_TSET(bk.type, B_KEYDATA, 0);
+		bk.len = data == NULL ? 0 : data->size;
+
+		thdr.data = &bk;
+		thdr.size = SSZA(BKEYDATA, data);
+		hdr = &thdr;
+	}
+	inp = P_INP(dbp, pagep);
+
+	/* Adjust the index table, then put the item on the page. */
+	if (indx != NUM_ENT(pagep))
+		memmove(&inp[indx + 1], &inp[indx],
+		    sizeof(db_indx_t) * (NUM_ENT(pagep) - indx));
+	HOFFSET(pagep) -= nbytes;
+	inp[indx] = HOFFSET(pagep);
+	++NUM_ENT(pagep);
+
+	p = P_ENTRY(dbp, pagep, indx);
+	memcpy(p, hdr->data, hdr->size);
+	if (data != NULL)
+		memcpy(p + hdr->size, data->data, data->size);
+
+	return (0);
+}
+
+/*
+ * __db_relink --
+ *	Relink around a deleted page.
+ *
+ * PUBLIC: int __db_relink __P((DBC *, u_int32_t, PAGE *, PAGE **, int));
+ */
+int
+__db_relink(dbc, add_rem, pagep, new_next, needlock)
+	DBC *dbc;
+	u_int32_t add_rem;	/* DB_ADD_PAGE or DB_REM_PAGE */
+	PAGE *pagep, **new_next;
+	int needlock;		/* Acquire write locks on the neighbors? */
+{
+	DB *dbp;
+	PAGE *np, *pp;
+	DB_LOCK npl, ppl;
+	DB_LSN *nlsnp, *plsnp, ret_lsn;
+	DB_MPOOLFILE *mpf;
+	int ret;
+
+	dbp = dbc->dbp;
+	np = pp = NULL;
+	LOCK_INIT(npl);
+	LOCK_INIT(ppl);
+	nlsnp = plsnp = NULL;
+	mpf = dbp->mpf;
+	ret = 0;
+
+	/*
+	 * Retrieve and lock the one/two pages.  For a remove, we may need
+	 * two pages (the before and after).  For an add, we only need one
+	 * because, the split took care of the prev.
+	 */
+	if (pagep->next_pgno != PGNO_INVALID) {
+		if (needlock && (ret = __db_lget(dbc,
+		    0, pagep->next_pgno, DB_LOCK_WRITE, 0, &npl)) != 0)
+			goto err;
+		if ((ret = mpf->get(mpf, &pagep->next_pgno, 0, &np)) != 0) {
+			__db_pgerr(dbp, pagep->next_pgno, ret);
+			goto err;
+		}
+		nlsnp = &np->lsn;
+	}
+	if (add_rem == DB_REM_PAGE && pagep->prev_pgno != PGNO_INVALID) {
+		if (needlock && (ret = __db_lget(dbc,
+		    0, pagep->prev_pgno, DB_LOCK_WRITE, 0, &ppl)) != 0)
+			goto err;
+		if ((ret = mpf->get(mpf, &pagep->prev_pgno, 0, &pp)) != 0) {
+			/*
+			 * Bug fix: report the page we actually failed to
+			 * get -- the previous page, not the next one.
+			 */
+			__db_pgerr(dbp, pagep->prev_pgno, ret);
+			goto err;
+		}
+		plsnp = &pp->lsn;
+	}
+
+	/* Log the change. */
+	if (DBC_LOGGING(dbc)) {
+		if ((ret = __db_relink_log(dbp, dbc->txn, &ret_lsn, 0, add_rem,
+		    pagep->pgno, &pagep->lsn, pagep->prev_pgno, plsnp,
+		    pagep->next_pgno, nlsnp)) != 0)
+			goto err;
+	} else
+		LSN_NOT_LOGGED(ret_lsn);
+	if (np != NULL)
+		np->lsn = ret_lsn;
+	if (pp != NULL)
+		pp->lsn = ret_lsn;
+	if (add_rem == DB_REM_PAGE)
+		pagep->lsn = ret_lsn;
+
+	/*
+	 * Modify and release the two pages.
+	 *
+	 * !!!
+	 * The parameter new_next gets set to the page following the page we
+	 * are removing.  If there is no following page, then new_next gets
+	 * set to NULL.
+	 */
+	if (np != NULL) {
+		if (add_rem == DB_ADD_PAGE)
+			np->prev_pgno = pagep->pgno;
+		else
+			np->prev_pgno = pagep->prev_pgno;
+		if (new_next == NULL)
+			ret = mpf->put(mpf, np, DB_MPOOL_DIRTY);
+		else {
+			/* Hand the pinned next page back to the caller. */
+			*new_next = np;
+			ret = mpf->set(mpf, np, DB_MPOOL_DIRTY);
+		}
+		if (ret != 0)
+			goto err;
+		if (needlock)
+			(void)__TLPUT(dbc, npl);
+	} else if (new_next != NULL)
+		*new_next = NULL;
+
+	if (pp != NULL) {
+		pp->next_pgno = pagep->next_pgno;
+		if ((ret = mpf->put(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+			goto err;
+		if (needlock)
+			(void)__TLPUT(dbc, ppl);
+	}
+	return (0);
+
+err:	if (np != NULL)
+		(void)mpf->put(mpf, np, 0);
+	if (needlock)
+		(void)__TLPUT(dbc, npl);
+	if (pp != NULL)
+		(void)mpf->put(mpf, pp, 0);
+	if (needlock)
+		(void)__TLPUT(dbc, ppl);
+	return (ret);
+}
diff --git a/storage/bdb/db/db_iface.c b/storage/bdb/db/db_iface.c
new file mode 100644
index 00000000000..b518c3b14b2
--- /dev/null
+++ b/storage/bdb/db/db_iface.c
@@ -0,0 +1,983 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_iface.c,v 11.77 2002/08/08 03:57:47 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+static int __db_curinval __P((const DB_ENV *));
+static int __db_fnl __P((const DB_ENV *, const char *));
+static int __db_rdonly __P((const DB_ENV *, const char *));
+static int __dbt_ferr __P((const DB *, const char *, const DBT *, int));
+
+/*
+ * A database should be required to be readonly if it's been explicitly
+ * specified as such or if we're a client in a replicated environment and
+ * we don't have the special "client-writer" designation.
+ */
+#define IS_READONLY(dbp) \
+ (F_ISSET(dbp, DB_AM_RDONLY) || \
+ (F_ISSET((dbp)->dbenv, DB_ENV_REP_CLIENT) && \
+ !F_ISSET((dbp), DB_AM_CL_WRITER)))
+
+/*
+ * __db_cursorchk --
+ *	Common cursor argument checking routine.
+ *
+ * PUBLIC: int __db_cursorchk __P((const DB *, u_int32_t));
+ */
+int
+__db_cursorchk(dbp, flags)
+	const DB *dbp;
+	u_int32_t flags;
+{
+	/* DB_DIRTY_READ is the only valid bit-flag and requires locking. */
+	if (LF_ISSET(DB_DIRTY_READ)) {
+		if (!LOCKING_ON(dbp->dbenv))
+			return (__db_fnl(dbp->dbenv, "DB->cursor"));
+		LF_CLR(DB_DIRTY_READ);
+	}
+
+	/* What remains must be exactly one of the legal function flags. */
+	if (flags == 0)
+		return (0);
+
+	if (flags == DB_WRITECURSOR) {
+		/* Write cursors exist only for CDB, on writable handles. */
+		if (IS_READONLY(dbp))
+			return (__db_rdonly(dbp->dbenv, "DB->cursor"));
+		if (!CDB_LOCKING(dbp->dbenv))
+			return (__db_ferr(dbp->dbenv, "DB->cursor", 0));
+		return (0);
+	}
+
+	if (flags == DB_WRITELOCK) {
+		if (IS_READONLY(dbp))
+			return (__db_rdonly(dbp->dbenv, "DB->cursor"));
+		return (0);
+	}
+
+	return (__db_ferr(dbp->dbenv, "DB->cursor", 0));
+}
+
+/*
+ * __db_ccountchk --
+ *	Common cursor count argument checking routine.
+ *
+ * PUBLIC: int __db_ccountchk __P((const DB *, u_int32_t, int));
+ */
+int
+__db_ccountchk(dbp, flags, isvalid)
+	const DB *dbp;
+	u_int32_t flags;
+	int isvalid;
+{
+	/* c_count accepts no flags at all. */
+	if (flags != 0)
+		return (__db_ferr(dbp->dbenv, "DBcursor->c_count", 0));
+
+	/*
+	 * The cursor must be initialized, return EINVAL for an invalid
+	 * cursor, otherwise 0.
+	 */
+	return (isvalid ? 0 : __db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_cdelchk --
+ *	Common cursor delete argument checking routine.
+ *
+ * PUBLIC: int __db_cdelchk __P((const DB *, u_int32_t, int));
+ */
+int
+__db_cdelchk(dbp, flags, isvalid)
+	const DB *dbp;
+	u_int32_t flags;
+	int isvalid;
+{
+	/* Deletes are never legal on a read-only database. */
+	if (IS_READONLY(dbp))
+		return (__db_rdonly(dbp->dbenv, "c_del"));
+
+	/*
+	 * The only legal flags are none at all, or the internal
+	 * DB_UPDATE_SECONDARY, which is valid only on secondary indices.
+	 */
+	if (flags == DB_UPDATE_SECONDARY) {
+		DB_ASSERT(F_ISSET(dbp, DB_AM_SECONDARY));
+	} else if (flags != 0)
+		return (__db_ferr(dbp->dbenv, "DBcursor->c_del", 0));
+
+	/*
+	 * The cursor must be initialized, return EINVAL for an invalid
+	 * cursor, otherwise 0.
+	 */
+	return (isvalid ? 0 : __db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_cgetchk --
+ *	Common cursor get argument checking routine.
+ *
+ * PUBLIC: int __db_cgetchk __P((const DB *, DBT *, DBT *, u_int32_t, int));
+ */
+int
+__db_cgetchk(dbp, key, data, flags, isvalid)
+	const DB *dbp;
+	DBT *key, *data;
+	u_int32_t flags;
+	int isvalid;		/* non-zero if the cursor is positioned */
+{
+	int dirty, multi, ret;
+
+	/*
+	 * Check for read-modify-write validity.  DB_RMW doesn't make sense
+	 * with CDB cursors since if you're going to write the cursor, you
+	 * had to create it with DB_WRITECURSOR.  Regardless, we check for
+	 * LOCKING_ON and not STD_LOCKING, as we don't want to disallow it.
+	 * If this changes, confirm that DB does not itself set the DB_RMW
+	 * flag in a path where CDB may have been configured.
+	 */
+	dirty = 0;
+	if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+		if (!LOCKING_ON(dbp->dbenv))
+			return (__db_fnl(dbp->dbenv, "DBcursor->c_get"));
+		if (LF_ISSET(DB_DIRTY_READ))
+			dirty = 1;
+		LF_CLR(DB_DIRTY_READ | DB_RMW);
+	}
+
+	/* DB_MULTIPLE and DB_MULTIPLE_KEY are mutually exclusive. */
+	multi = 0;
+	if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+		multi = 1;
+		if (LF_ISSET(DB_MULTIPLE) && LF_ISSET(DB_MULTIPLE_KEY))
+			goto multi_err;
+		LF_CLR(DB_MULTIPLE | DB_MULTIPLE_KEY);
+	}
+
+	/* Check for invalid function flags. */
+	switch (flags) {
+	case DB_CONSUME:
+	case DB_CONSUME_WAIT:
+		if (dirty) {
+			__db_err(dbp->dbenv,
+    "DB_DIRTY_READ is not supported with DB_CONSUME or DB_CONSUME_WAIT");
+			return (EINVAL);
+		}
+		if (dbp->type != DB_QUEUE)
+			goto err;
+		break;
+	case DB_CURRENT:
+	case DB_FIRST:
+	case DB_GET_BOTH:
+	case DB_GET_BOTH_RANGE:
+	case DB_NEXT:
+	case DB_NEXT_DUP:
+	case DB_NEXT_NODUP:
+	case DB_SET:
+	case DB_SET_RANGE:
+		break;
+	case DB_LAST:
+	case DB_PREV:
+	case DB_PREV_NODUP:
+		/* Bulk gets only move forward. */
+		if (multi)
+multi_err:		return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 1));
+		break;
+	case DB_GET_BOTHC:
+		if (dbp->type == DB_QUEUE)
+			goto err;
+		break;
+	case DB_GET_RECNO:
+		/*
+		 * The one situation in which this might be legal with a
+		 * non-RECNUM dbp is if dbp is a secondary and its primary is
+		 * DB_AM_RECNUM.
+		 */
+		if (!F_ISSET(dbp, DB_AM_RECNUM) &&
+		    (!F_ISSET(dbp, DB_AM_SECONDARY) ||
+		    !F_ISSET(dbp->s_primary, DB_AM_RECNUM)))
+			goto err;
+		break;
+	case DB_SET_RECNO:
+		if (!F_ISSET(dbp, DB_AM_RECNUM))
+			goto err;
+		break;
+	default:
+err:		return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 0));
+	}
+
+	/* Check for invalid key/data flags. */
+	if ((ret = __dbt_ferr(dbp, "key", key, 0)) != 0)
+		return (ret);
+	if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
+		return (ret);
+
+	/* Bulk gets fill a caller-supplied buffer, whole DBTs only. */
+	if (multi && !F_ISSET(data, DB_DBT_USERMEM)) {
+		__db_err(dbp->dbenv,
+		    "DB_MULTIPLE(_KEY) requires that DB_DBT_USERMEM be set");
+		return (EINVAL);
+	}
+	if (multi &&
+	    (F_ISSET(key, DB_DBT_PARTIAL) || F_ISSET(data, DB_DBT_PARTIAL))) {
+		__db_err(dbp->dbenv,
+		    "DB_DBT_PARTIAL forbidden with DB_MULTIPLE(_KEY)");
+		return (EINVAL);
+	}
+
+	/*
+	 * The cursor must be initialized for DB_CURRENT, DB_GET_RECNO and
+	 * DB_NEXT_DUP.  Return EINVAL for an invalid cursor, otherwise 0.
+	 */
+	if (isvalid || (flags != DB_CURRENT &&
+	    flags != DB_GET_RECNO && flags != DB_NEXT_DUP))
+		return (0);
+
+	return (__db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_cputchk --
+ *	Common cursor put argument checking routine.
+ *
+ * PUBLIC: int __db_cputchk __P((const DB *,
+ * PUBLIC:    const DBT *, DBT *, u_int32_t, int));
+ */
+int
+__db_cputchk(dbp, key, data, flags, isvalid)
+	const DB *dbp;
+	const DBT *key;
+	DBT *data;
+	u_int32_t flags;
+	int isvalid;		/* non-zero if the cursor is positioned */
+{
+	int key_flags, ret;
+
+	/* key_flags: does this operation actually use the key DBT? */
+	key_flags = 0;
+
+	/* Check for changes to a read-only tree. */
+	if (IS_READONLY(dbp))
+		return (__db_rdonly(dbp->dbenv, "c_put"));
+
+	/* Check for puts on a secondary. */
+	if (F_ISSET(dbp, DB_AM_SECONDARY)) {
+		/* Internal secondary updates come in as DB_UPDATE_SECONDARY. */
+		if (flags == DB_UPDATE_SECONDARY)
+			flags = DB_KEYLAST;
+		else {
+			__db_err(dbp->dbenv,
+			    "DBcursor->c_put forbidden on secondary indices");
+			return (EINVAL);
+		}
+	}
+
+	/* Check for invalid function flags. */
+	switch (flags) {
+	case DB_AFTER:
+	case DB_BEFORE:
+		switch (dbp->type) {
+		case DB_BTREE:
+		case DB_HASH:		/* Only with unsorted duplicates. */
+			if (!F_ISSET(dbp, DB_AM_DUP))
+				goto err;
+			if (dbp->dup_compare != NULL)
+				goto err;
+			break;
+		case DB_QUEUE:		/* Not permitted. */
+			goto err;
+		case DB_RECNO:		/* Only with mutable record numbers. */
+			if (!F_ISSET(dbp, DB_AM_RENUMBER))
+				goto err;
+			key_flags = 1;
+			break;
+		default:
+			goto err;
+		}
+		break;
+	case DB_CURRENT:
+		/*
+		 * If there is a comparison function, doing a DB_CURRENT
+		 * must not change the part of the data item that is used
+		 * for the comparison.
+		 */
+		break;
+	case DB_NODUPDATA:
+		if (!F_ISSET(dbp, DB_AM_DUPSORT))
+			goto err;
+		/* FALLTHROUGH */
+	case DB_KEYFIRST:
+	case DB_KEYLAST:
+		key_flags = 1;
+		break;
+	default:
+err:		return (__db_ferr(dbp->dbenv, "DBcursor->c_put", 0));
+	}
+
+	/* Check for invalid key/data flags. */
+	if (key_flags && (ret = __dbt_ferr(dbp, "key", key, 0)) != 0)
+		return (ret);
+	if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
+		return (ret);
+
+	/*
+	 * The cursor must be initialized for anything other than DB_KEYFIRST
+	 * and DB_KEYLAST, return EINVAL for an invalid cursor, otherwise 0.
+	 */
+	if (isvalid || flags == DB_KEYFIRST ||
+	    flags == DB_KEYLAST || flags == DB_NODUPDATA)
+		return (0);
+
+	return (__db_curinval(dbp->dbenv));
+}
+
+/*
+ * __db_pgetchk --
+ *	DB->pget flag check.
+ *
+ * PUBLIC: int __db_pgetchk __P((const DB *, const DBT *, DBT *, DBT *,
+ * PUBLIC:     u_int32_t));
+ */
+int
+__db_pgetchk(dbp, skey, pkey, data, flags)
+	const DB *dbp;
+	const DBT *skey;
+	DBT *pkey, *data;
+	u_int32_t flags;
+{
+	int ret;
+	u_int32_t save_flags;
+
+	/*
+	 * Keep the caller's original flag word; __db_getchk below does
+	 * its own DB_RMW/DB_DIRTY_READ processing on the unmodified value.
+	 */
+	save_flags = flags;
+
+	if (!F_ISSET(dbp, DB_AM_SECONDARY)) {
+		__db_err(dbp->dbenv,
+		    "DB->pget may only be used on secondary indices");
+		return (EINVAL);
+	}
+
+	if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+		__db_err(dbp->dbenv,
+	"DB_MULTIPLE and DB_MULTIPLE_KEY may not be used on secondary indices");
+		return (EINVAL);
+	}
+
+	/* DB_CONSUME makes no sense on a secondary index. */
+	LF_CLR(DB_RMW);
+	switch (flags) {
+	case DB_CONSUME:
+	case DB_CONSUME_WAIT:
+		return (__db_ferr(dbp->dbenv, "DB->pget", 0));
+	default:
+		/* __db_getchk will catch the rest. */
+		break;
+	}
+
+	/*
+	 * We allow the pkey field to be NULL, so that we can make the
+	 * two-DBT get calls into wrappers for the three-DBT ones.
+	 */
+	if (pkey != NULL &&
+	    (ret = __dbt_ferr(dbp, "primary key", pkey, 1)) != 0)
+		return (ret);
+
+	/* But the pkey field can't be NULL if we're doing a DB_GET_BOTH. */
+	if (pkey == NULL && flags == DB_GET_BOTH) {
+		__db_err(dbp->dbenv,
+		    "DB_GET_BOTH on a secondary index requires a primary key");
+		return (EINVAL);
+	}
+
+	return (__db_getchk(dbp, skey, data, save_flags));
+}
+
+/*
+ * __db_cpgetchk --
+ *	Secondary-index cursor get argument checking routine.
+ *
+ * PUBLIC: int __db_cpgetchk __P((const DB *,
+ * PUBLIC:     DBT *, DBT *, DBT *, u_int32_t, int));
+ */
+int
+__db_cpgetchk(dbp, skey, pkey, data, flags, isvalid)
+	const DB *dbp;
+	DBT *skey, *pkey, *data;
+	u_int32_t flags;
+	int isvalid;		/* non-zero if the cursor is positioned */
+{
+	int ret;
+	u_int32_t save_flags;
+
+	/*
+	 * Keep the caller's original flag word; __db_cgetchk below does
+	 * its own DB_RMW/DB_DIRTY_READ processing on the unmodified value.
+	 */
+	save_flags = flags;
+
+	if (!F_ISSET(dbp, DB_AM_SECONDARY)) {
+		__db_err(dbp->dbenv,
+		    "DBcursor->c_pget may only be used on secondary indices");
+		return (EINVAL);
+	}
+
+	if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+		__db_err(dbp->dbenv,
+	"DB_MULTIPLE and DB_MULTIPLE_KEY may not be used on secondary indices");
+		return (EINVAL);
+	}
+
+	LF_CLR(DB_RMW);
+	switch (flags) {
+	case DB_CONSUME:
+	case DB_CONSUME_WAIT:
+		/* DB_CONSUME makes no sense on a secondary index. */
+		return (__db_ferr(dbp->dbenv, "DBcursor->c_pget", 0));
+	case DB_GET_BOTH:
+		/* DB_GET_BOTH is "get both the primary and the secondary". */
+		if (pkey == NULL) {
+			__db_err(dbp->dbenv,
+		    "DB_GET_BOTH requires both a secondary and a primary key");
+			return (EINVAL);
+		}
+		break;
+	default:
+		/* __db_cgetchk will catch the rest. */
+		break;
+	}
+
+	/*
+	 * We allow the pkey field to be NULL, so that we can make the
+	 * two-DBT get calls into wrappers for the three-DBT ones.
+	 */
+	if (pkey != NULL &&
+	    (ret = __dbt_ferr(dbp, "primary key", pkey, 0)) != 0)
+		return (ret);
+
+	/*
+	 * But the pkey field can't be NULL if we're doing a DB_GET_BOTH.
+	 * NOTE(review): this check appears unreachable -- the DB_GET_BOTH
+	 * case in the switch above already rejected a NULL pkey; confirm
+	 * before removing.
+	 */
+	if (pkey == NULL && flags == DB_GET_BOTH) {
+		__db_err(dbp->dbenv,
+		    "DB_GET_BOTH on a secondary index requires a primary key");
+		return (EINVAL);
+	}
+
+	return (__db_cgetchk(dbp, skey, data, save_flags, isvalid));
+}
+
+/*
+ * __db_delchk --
+ *	Common delete argument checking routine.
+ *
+ * PUBLIC: int __db_delchk __P((const DB *, DBT *, u_int32_t));
+ */
+int
+__db_delchk(dbp, key, flags)
+	const DB *dbp;
+	DBT *key;
+	u_int32_t flags;
+{
+	COMPQUIET(key, NULL);
+
+	/* Deletes are never legal on a read-only database. */
+	if (IS_READONLY(dbp))
+		return (__db_rdonly(dbp->dbenv, "delete"));
+
+	/* DB_AUTO_COMMIT is the only legal function flag. */
+	LF_CLR(DB_AUTO_COMMIT);
+	if (flags != 0)
+		return (__db_ferr(dbp->dbenv, "DB->del", 0));
+
+	return (0);
+}
+
+/*
+ * __db_getchk --
+ *	Common get argument checking routine.
+ *
+ * PUBLIC: int __db_getchk __P((const DB *, const DBT *, DBT *, u_int32_t));
+ */
+int
+__db_getchk(dbp, key, data, flags)
+	const DB *dbp;
+	const DBT *key;
+	DBT *data;
+	u_int32_t flags;
+{
+	int dirty, multi, ret;
+
+	/*
+	 * Check for read-modify-write validity.  DB_RMW doesn't make sense
+	 * with CDB cursors since if you're going to write the cursor, you
+	 * had to create it with DB_WRITECURSOR.  Regardless, we check for
+	 * LOCKING_ON and not STD_LOCKING, as we don't want to disallow it.
+	 * If this changes, confirm that DB does not itself set the DB_RMW
+	 * flag in a path where CDB may have been configured.
+	 */
+	dirty = 0;
+	if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+		if (!LOCKING_ON(dbp->dbenv))
+			return (__db_fnl(dbp->dbenv, "DB->get"));
+		if (LF_ISSET(DB_DIRTY_READ))
+			dirty = 1;
+		LF_CLR(DB_DIRTY_READ | DB_RMW);
+	}
+
+	/* Only DB_MULTIPLE is legal here; DB_MULTIPLE_KEY is cursor-only. */
+	multi = 0;
+	if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+		if (LF_ISSET(DB_MULTIPLE_KEY))
+			goto multi_err;
+		multi = LF_ISSET(DB_MULTIPLE) ? 1 : 0;
+		LF_CLR(DB_MULTIPLE);
+	}
+
+	/* Check for invalid function flags. */
+	switch (flags) {
+	case 0:
+	case DB_GET_BOTH:
+		break;
+	case DB_SET_RECNO:
+		if (!F_ISSET(dbp, DB_AM_RECNUM))
+			goto err;
+		break;
+	case DB_CONSUME:
+	case DB_CONSUME_WAIT:
+		if (dirty) {
+			__db_err(dbp->dbenv,
+    "DB_DIRTY_READ is not supported with DB_CONSUME or DB_CONSUME_WAIT");
+			return (EINVAL);
+		}
+		if (multi)
+multi_err:		return (__db_ferr(dbp->dbenv, "DB->get", 1));
+		if (dbp->type == DB_QUEUE)
+			break;
+		/* FALLTHROUGH */
+	default:
+err:		return (__db_ferr(dbp->dbenv, "DB->get", 0));
+	}
+
+	/*
+	 * Check for invalid key/data flags.
+	 *
+	 * XXX: Dave Krinsky
+	 * Remember to modify this when we fix the flag-returning problem.
+	 */
+	if ((ret = __dbt_ferr(dbp, "key", key, flags == DB_SET_RECNO)) != 0)
+		return (ret);
+	if ((ret = __dbt_ferr(dbp, "data", data, 1)) != 0)
+		return (ret);
+
+	/* Bulk gets fill a caller-supplied buffer, whole DBTs only. */
+	if (multi && !F_ISSET(data, DB_DBT_USERMEM)) {
+		__db_err(dbp->dbenv,
+		    "DB_MULTIPLE requires that DB_DBT_USERMEM be set");
+		return (EINVAL);
+	}
+	if (multi &&
+	    (F_ISSET(key, DB_DBT_PARTIAL) || F_ISSET(data, DB_DBT_PARTIAL))) {
+		__db_err(dbp->dbenv,
+		    "DB_DBT_PARTIAL forbidden with DB_MULTIPLE(_KEY)");
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+/*
+ * __db_joinchk --
+ *	Common join argument checking routine.
+ *
+ * PUBLIC: int __db_joinchk __P((const DB *, DBC * const *, u_int32_t));
+ */
+int
+__db_joinchk(dbp, curslist, flags)
+	const DB *dbp;
+	DBC * const *curslist;
+	u_int32_t flags;
+{
+	DB_TXN *first_txn;
+	int cnt;
+
+	/* DB_JOIN_NOSORT is the only legal flag. */
+	if (flags != 0 && flags != DB_JOIN_NOSORT)
+		return (__db_ferr(dbp->dbenv, "DB->join", 0));
+
+	if (curslist == NULL || curslist[0] == NULL) {
+		__db_err(dbp->dbenv,
+	    "At least one secondary cursor must be specified to DB->join");
+		return (EINVAL);
+	}
+
+	/* Every cursor in the NULL-terminated list must share one txn. */
+	first_txn = curslist[0]->txn;
+	for (cnt = 1; curslist[cnt] != NULL; cnt++)
+		if (curslist[cnt]->txn != first_txn) {
+			__db_err(dbp->dbenv,
+		    "All secondary cursors must share the same transaction");
+			return (EINVAL);
+		}
+
+	return (0);
+}
+
+/*
+ * __db_joingetchk --
+ *	Common join_get argument checking routine.
+ *
+ * PUBLIC: int __db_joingetchk __P((const DB *, DBT *, u_int32_t));
+ */
+int
+__db_joingetchk(dbp, key, flags)
+	const DB *dbp;
+	DBT *key;
+	u_int32_t flags;
+{
+	/* DB_DIRTY_READ and DB_RMW are bit flags requiring locking. */
+	if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+		if (!LOCKING_ON(dbp->dbenv))
+			return (__db_fnl(dbp->dbenv, "DBcursor->c_get"));
+		LF_CLR(DB_DIRTY_READ | DB_RMW);
+	}
+
+	/* DB_JOIN_ITEM is the only legal function flag. */
+	if (flags != 0 && flags != DB_JOIN_ITEM)
+		return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 0));
+
+	/*
+	 * A partial get of the key of a join cursor don't make much sense;
+	 * the entire key is necessary to query the primary database
+	 * and find the datum, and so regardless of the size of the key
+	 * it would not be a performance improvement.  Since it would require
+	 * special handling, we simply disallow it.
+	 *
+	 * A partial get of the data, however, potentially makes sense (if
+	 * all possible data are a predictable large structure, for instance)
+	 * and causes us no headaches, so we permit it.
+	 */
+	if (F_ISSET(key, DB_DBT_PARTIAL)) {
+		__db_err(dbp->dbenv,
+		    "DB_DBT_PARTIAL may not be set on key during join_get");
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+/*
+ * __db_putchk --
+ *	Common put argument checking routine.
+ *
+ * PUBLIC: int __db_putchk
+ * PUBLIC:    __P((const DB *, DBT *, const DBT *, u_int32_t, int));
+ */
+int
+__db_putchk(dbp, key, data, flags, isdup)
+	const DB *dbp;
+	DBT *key;
+	const DBT *data;
+	u_int32_t flags;
+	int isdup;		/* non-zero if duplicates are configured */
+{
+	int ret, returnkey;
+
+	/* returnkey: DB_APPEND hands the allocated record number back. */
+	returnkey = 0;
+
+	/* Check for changes to a read-only tree. */
+	if (IS_READONLY(dbp))
+		return (__db_rdonly(dbp->dbenv, "put"));
+
+	/* Check for puts on a secondary. */
+	if (F_ISSET(dbp, DB_AM_SECONDARY)) {
+		__db_err(dbp->dbenv, "DB->put forbidden on secondary indices");
+		return (EINVAL);
+	}
+
+	/* Check for invalid function flags. */
+	LF_CLR(DB_AUTO_COMMIT);
+	switch (flags) {
+	case 0:
+	case DB_NOOVERWRITE:
+		break;
+	case DB_APPEND:
+		if (dbp->type != DB_RECNO && dbp->type != DB_QUEUE)
+			goto err;
+		returnkey = 1;
+		break;
+	case DB_NODUPDATA:
+		if (F_ISSET(dbp, DB_AM_DUPSORT))
+			break;
+		/* FALLTHROUGH */
+	default:
+err:		return (__db_ferr(dbp->dbenv, "DB->put", 0));
+	}
+
+	/* Check for invalid key/data flags. */
+	if ((ret = __dbt_ferr(dbp, "key", key, returnkey)) != 0)
+		return (ret);
+	if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
+		return (ret);
+
+	/* Check for partial puts in the presence of duplicates. */
+	if (isdup && F_ISSET(data, DB_DBT_PARTIAL)) {
+		__db_err(dbp->dbenv,
+"a partial put in the presence of duplicates requires a cursor operation");
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+/*
+ * __db_statchk --
+ *	Common stat argument checking routine.
+ *
+ * PUBLIC: int __db_statchk __P((const DB *, u_int32_t));
+ */
+int
+__db_statchk(dbp, flags)
+	const DB *dbp;
+	u_int32_t flags;
+{
+	/* Check for invalid function flags. */
+	if (flags == 0 || flags == DB_FAST_STAT ||
+	    flags == DB_CACHED_COUNTS)	/* Deprecated and undocumented. */
+		return (0);
+
+	/*
+	 * DB_RECORDCOUNT (deprecated and undocumented) is legal only for
+	 * Recno databases and for Btrees maintaining record numbers.
+	 */
+	if (flags == DB_RECORDCOUNT &&
+	    (dbp->type == DB_RECNO ||
+	    (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM))))
+		return (0);
+
+	return (__db_ferr(dbp->dbenv, "DB->stat", 0));
+}
+
+/*
+ * __db_syncchk --
+ *	Common sync argument checking routine.
+ *
+ * PUBLIC: int __db_syncchk __P((const DB *, u_int32_t));
+ */
+int
+__db_syncchk(dbp, flags)
+	const DB *dbp;
+	u_int32_t flags;
+{
+	/* DB->sync accepts no flags at all. */
+	if (flags != 0)
+		return (__db_ferr(dbp->dbenv, "DB->sync", 0));
+
+	return (0);
+}
+
+/*
+ * __dbt_ferr --
+ *	Check a DBT for flag errors.
+ */
+static int
+__dbt_ferr(dbp, name, dbt, check_thread)
+	const DB *dbp;
+	const char *name;
+	const DBT *dbt;
+	int check_thread;
+{
+	DB_ENV *dbenv;
+	u_int32_t alloc_flags;
+	int ret;
+
+	dbenv = dbp->dbenv;
+
+	/*
+	 * Check for invalid DBT flags.  We allow any of the flags to be
+	 * specified to any DB or DBcursor call so that applications can
+	 * set DB_DBT_MALLOC when retrieving a data item from a secondary
+	 * database and then specify that same DBT as a key to a primary
+	 * database, without having to clear flags.
+	 */
+	if ((ret = __db_fchk(dbenv, name, dbt->flags, DB_DBT_APPMALLOC |
+	    DB_DBT_MALLOC | DB_DBT_DUPOK | DB_DBT_REALLOC | DB_DBT_USERMEM |
+	    DB_DBT_PARTIAL)) != 0)
+		return (ret);
+
+	/* At most one memory-management flag may be set at a time. */
+	alloc_flags = F_ISSET(dbt,
+	    DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM);
+	if (alloc_flags != 0 && alloc_flags != DB_DBT_MALLOC &&
+	    alloc_flags != DB_DBT_REALLOC && alloc_flags != DB_DBT_USERMEM)
+		return (__db_ferr(dbenv, name, 1));
+
+	/* Threaded handles must be told how to manage returned memory. */
+	if (check_thread && DB_IS_THREADED(dbp) && alloc_flags == 0) {
+		__db_err(dbenv,
+		    "DB_THREAD mandates memory allocation flag on DBT %s",
+		    name);
+		return (EINVAL);
+	}
+	return (0);
+}
+
+/*
+ * __db_rdonly --
+ *	Common readonly message.
+ *
+ * Reports an attempted write to a read-only handle; always returns EACCES.
+ */
+static int
+__db_rdonly(dbenv, name)
+	const DB_ENV *dbenv;
+	const char *name;	/* Name of the rejected operation. */
+{
+	__db_err(dbenv, "%s: attempt to modify a read-only tree", name);
+	return (EACCES);
+}
+
+/*
+ * __db_fnl --
+ *	Common flag-needs-locking message.
+ *
+ * Reports DB_DIRTY_READ/DB_RMW use without locking; always returns EINVAL.
+ */
+static int
+__db_fnl(dbenv, name)
+	const DB_ENV *dbenv;
+	const char *name;	/* Name of the rejected operation. */
+{
+	__db_err(dbenv,
+	    "%s: the DB_DIRTY_READ and DB_RMW flags require locking", name);
+	return (EINVAL);
+}
+
+/*
+ * __db_curinval
+ *	Report that a cursor is in an invalid state.
+ *
+ * Always returns EINVAL.
+ */
+static int
+__db_curinval(dbenv)
+	const DB_ENV *dbenv;
+{
+	__db_err(dbenv,
+	    "Cursor position must be set before performing this operation");
+	return (EINVAL);
+}
+
+/*
+ * __db_secondary_corrupt --
+ *	Report that a secondary index appears corrupt, as it has a record
+ * that does not correspond to a record in the primary.
+ *
+ * Always returns DB_SECONDARY_BAD.
+ *
+ * PUBLIC: int __db_secondary_corrupt __P((DB *));
+ */
+int
+__db_secondary_corrupt(dbp)
+	DB *dbp;
+{
+
+	__db_err(dbp->dbenv,
+	    "Secondary index corrupt: item in secondary not found in primary");
+	return (DB_SECONDARY_BAD);
+}
+
+/*
+ * __db_associatechk --
+ *	Argument checking routine for DB->associate().
+ *
+ * The guard clauses below are ordered; the first violated condition
+ * determines which error message the application sees.
+ *
+ * PUBLIC: int __db_associatechk __P((DB *, DB *,
+ * PUBLIC:     int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+ */
+int
+__db_associatechk(dbp, sdbp, callback, flags)
+	DB *dbp, *sdbp;		/* Primary and secondary handles. */
+	int (*callback) __P((DB *, const DBT *, const DBT *, DBT *));
+	u_int32_t flags;
+{
+	DB_ENV *dbenv;
+
+	dbenv = dbp->dbenv;
+
+	if (F_ISSET(sdbp, DB_AM_SECONDARY)) {
+		__db_err(dbenv,
+		    "Secondary index handles may not be re-associated");
+		return (EINVAL);
+	}
+	if (F_ISSET(dbp, DB_AM_SECONDARY)) {
+		__db_err(dbenv,
+		    "Secondary indices may not be used as primary databases");
+		return (EINVAL);
+	}
+	if (F_ISSET(dbp, DB_AM_DUP)) {
+		__db_err(dbenv,
+		    "Primary databases may not be configured with duplicates");
+		return (EINVAL);
+	}
+	if (F_ISSET(dbp, DB_AM_RENUMBER)) {
+		__db_err(dbenv,
+	    "Renumbering recno databases may not be used as primary databases");
+		return (EINVAL);
+	}
+	/* A NULL callback means the secondary can never need updating. */
+	if (callback == NULL &&
+	    (!F_ISSET(dbp, DB_AM_RDONLY) || !F_ISSET(sdbp, DB_AM_RDONLY))) {
+		__db_err(dbenv,
+    "Callback function may be NULL only when database handles are read-only");
+		return (EINVAL);
+	}
+
+	return (__db_fchk(dbenv,
+	    "DB->associate", flags, DB_CREATE | DB_AUTO_COMMIT));
+}
+
+/*
+ * __db_txn_auto --
+ *	Handle DB_AUTO_COMMIT initialization: begin a transaction on the
+ * caller's behalf and hand it back through txnidp.
+ *
+ * PUBLIC: int __db_txn_auto __P((DB *, DB_TXN **));
+ */
+int
+__db_txn_auto(dbp, txnidp)
+	DB *dbp;
+	DB_TXN **txnidp;
+{
+	DB_ENV *env;
+
+	env = dbp->dbenv;
+
+	/* An explicit transaction and DB_AUTO_COMMIT are incompatible. */
+	if (*txnidp != NULL) {
+		__db_err(env,
+    "DB_AUTO_COMMIT may not be specified along with a transaction handle");
+		return (EINVAL);
+	}
+
+	/* Auto-commit requires a transactional environment. */
+	if (!TXN_ON(env)) {
+		__db_err(env,
+    "DB_AUTO_COMMIT may not be specified in non-transactional environment");
+		return (EINVAL);
+	}
+
+	return (env->txn_begin(env, NULL, txnidp, 0));
+}
diff --git a/storage/bdb/db/db_join.c b/storage/bdb/db/db_join.c
new file mode 100644
index 00000000000..6281b1a8383
--- /dev/null
+++ b/storage/bdb/db/db_join.c
@@ -0,0 +1,822 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_join.c,v 11.55 2002/08/08 03:57:47 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_join.h"
+#include "dbinc/btree.h"
+
+static int __db_join_close __P((DBC *));
+static int __db_join_cmp __P((const void *, const void *));
+static int __db_join_del __P((DBC *, u_int32_t));
+static int __db_join_get __P((DBC *, DBT *, DBT *, u_int32_t));
+static int __db_join_getnext __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
+static int __db_join_primget __P((DB *,
+ DB_TXN *, u_int32_t, DBT *, DBT *, u_int32_t));
+static int __db_join_put __P((DBC *, DBT *, DBT *, u_int32_t));
+
+/*
+ * Check to see if the Nth secondary cursor of join cursor jc is pointing
+ * to a sorted duplicate set.
+ */
+#define SORTED_SET(jc, n) ((jc)->j_curslist[(n)]->dbp->dup_compare != NULL)
+
+/*
+ * This is the duplicate-assisted join functionality. Right now we're
+ * going to write it such that we return one item at a time, although
+ * I think we may need to optimize it to return them all at once.
+ * It should be easier to get it working this way, and I believe that
+ * changing it should be fairly straightforward.
+ *
+ * We optimize the join by sorting cursors from smallest to largest
+ * cardinality. In most cases, this is indeed optimal. However, if
+ * a cursor with large cardinality has very few data in common with the
+ * first cursor, it is possible that the join will be made faster by
+ * putting it earlier in the cursor list. Since we have no way to detect
+ * cases like this, we simply provide a flag, DB_JOIN_NOSORT, which retains
+ * the sort order specified by the caller, who may know more about the
+ * structure of the data.
+ *
+ * The first cursor moves sequentially through the duplicate set while
+ * the others search explicitly for the duplicate in question.
+ *
+ */
+
+/*
+ * __db_join --
+ * This is the interface to the duplicate-assisted join functionality.
+ * In the same way that cursors mark a position in a database, a cursor
+ * can mark a position in a join. While most cursors are created by the
+ * cursor method of a DB, join cursors are created through an explicit
+ * call to DB->join.
+ *
+ * The curslist is an array of existing, initialized cursors and primary
+ * is the DB of the primary file. The data item that joins all the
+ * cursors in the curslist is used as the key into the primary and that
+ * key and data are returned. When no more items are left in the join
+ * set, the c_next operation off the join cursor will return DB_NOTFOUND.
+ *
+ * PUBLIC: int __db_join __P((DB *, DBC **, DBC **, u_int32_t));
+ */
+int
+__db_join(primary, curslist, dbcp, flags)
+    DB *primary;
+    DBC **curslist, **dbcp;
+    u_int32_t flags;
+{
+    DB_ENV *dbenv;
+    DBC *dbc;
+    JOIN_CURSOR *jc;
+    int ret;
+    u_int32_t i;
+    size_t ncurs, nslots;
+
+    COMPQUIET(nslots, 0);
+
+    PANIC_CHECK(primary->dbenv);
+
+    /* Validate the cursor list and flags before allocating anything. */
+    if ((ret = __db_joinchk(primary, curslist, flags)) != 0)
+        return (ret);
+
+    dbc = NULL;
+    jc = NULL;
+    dbenv = primary->dbenv;
+
+    /* The join cursor is a DBC shell plus a JOIN_CURSOR payload. */
+    if ((ret = __os_calloc(dbenv, 1, sizeof(DBC), &dbc)) != 0)
+        goto err;
+
+    if ((ret = __os_calloc(dbenv,
+        1, sizeof(JOIN_CURSOR), &jc)) != 0)
+        goto err;
+
+    /* Initial 256-byte key buffer; __db_join_get doubles it on ENOMEM. */
+    if ((ret = __os_malloc(dbenv, 256, &jc->j_key.data)) != 0)
+        goto err;
+    jc->j_key.ulen = 256;
+    F_SET(&jc->j_key, DB_DBT_USERMEM);
+
+    F_SET(&jc->j_rdata, DB_DBT_REALLOC);
+
+    /* Count the callers' cursors by walking to the NULL terminator. */
+    for (jc->j_curslist = curslist;
+        *jc->j_curslist != NULL; jc->j_curslist++)
+        ;
+
+    /*
+     * The number of cursor slots we allocate is one greater than
+     * the number of cursors involved in the join, because the
+     * list is NULL-terminated.
+     */
+    ncurs = jc->j_curslist - curslist;
+    nslots = ncurs + 1;
+
+    /*
+     * !!! -- A note on the various lists hanging off jc.
+     *
+     * j_curslist is the initial NULL-terminated list of cursors passed
+     * into __db_join.  The original cursors are not modified; pristine
+     * copies are required because, in databases with unsorted dups, we
+     * must reset all of the secondary cursors after the first each
+     * time the first one is incremented, or else we will lose data
+     * which happen to be sorted differently in two different cursors.
+     *
+     * j_workcurs is where we put those copies that we're planning to
+     * work with.  They're lazily c_dup'ed from j_curslist as we need
+     * them, and closed when the join cursor is closed or when we need
+     * to reset them to their original values (in which case we just
+     * c_dup afresh).
+     *
+     * j_fdupcurs is an array of cursors which point to the first
+     * duplicate in the duplicate set that contains the data value
+     * we're currently interested in.  We need this to make
+     * __db_join_get correctly return duplicate duplicates; i.e., if a
+     * given data value occurs twice in the set belonging to cursor #2,
+     * and thrice in the set belonging to cursor #3, and once in all
+     * the other cursors, successive calls to __db_join_get need to
+     * return that data item six times.  To make this happen, each time
+     * cursor N is allowed to advance to a new datum, all cursors M
+     * such that M > N have to be reset to the first duplicate with
+     * that datum, so __db_join_get will return all the dup-dups again.
+     * We could just reset them to the original cursor from j_curslist,
+     * but that would be a bit slower in the unsorted case and a LOT
+     * slower in the sorted one.
+     *
+     * j_exhausted is a list of boolean values which represent
+     * whether or not their corresponding cursors are "exhausted",
+     * i.e. whether the datum under the corresponding cursor has
+     * been found not to exist in any unreturned combinations of
+     * later secondary cursors, in which case they are ready to be
+     * incremented.
+     */
+
+    /* We don't want to free regions whose callocs have failed. */
+    jc->j_curslist = NULL;
+    jc->j_workcurs = NULL;
+    jc->j_fdupcurs = NULL;
+    jc->j_exhausted = NULL;
+
+    if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *),
+        &jc->j_curslist)) != 0)
+        goto err;
+    if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *),
+        &jc->j_workcurs)) != 0)
+        goto err;
+    if ((ret = __os_calloc(dbenv, nslots, sizeof(DBC *),
+        &jc->j_fdupcurs)) != 0)
+        goto err;
+    if ((ret = __os_calloc(dbenv, nslots, sizeof(u_int8_t),
+        &jc->j_exhausted)) != 0)
+        goto err;
+    /* Copy the callers' cursors; work/fdup slots start out lazy/empty. */
+    for (i = 0; curslist[i] != NULL; i++) {
+        jc->j_curslist[i] = curslist[i];
+        jc->j_workcurs[i] = NULL;
+        jc->j_fdupcurs[i] = NULL;
+        jc->j_exhausted[i] = 0;
+    }
+    jc->j_ncurs = (u_int32_t)ncurs;
+
+    /*
+     * If DB_JOIN_NOSORT is not set, optimize secondary cursors by
+     * sorting in order of increasing cardinality.
+     */
+    if (!LF_ISSET(DB_JOIN_NOSORT))
+        qsort(jc->j_curslist, ncurs, sizeof(DBC *), __db_join_cmp);
+
+    /*
+     * We never need to reset the 0th cursor, so there's no
+     * solid reason to use workcurs[0] rather than curslist[0] in
+     * join_get.  Nonetheless, it feels cleaner to do it for symmetry,
+     * and this is the most logical place to copy it.
+     *
+     * !!!
+     * There's no need to close the new cursor if we goto err only
+     * because this is the last thing that can fail.  Modifier of this
+     * function beware!
+     */
+    if ((ret = jc->j_curslist[0]->c_dup(jc->j_curslist[0], jc->j_workcurs,
+        DB_POSITIONI)) != 0)
+        goto err;
+
+    /* Wire up the join cursor's method table and owning primary. */
+    dbc->c_close = __db_join_close;
+    dbc->c_del = __db_join_del;
+    dbc->c_get = __db_join_get;
+    dbc->c_put = __db_join_put;
+    dbc->internal = (DBC_INTERNAL *) jc;
+    dbc->dbp = primary;
+    jc->j_primary = primary;
+
+    *dbcp = dbc;
+
+    /* Register with the primary so DB->close can find and close us. */
+    MUTEX_THREAD_LOCK(dbenv, primary->mutexp);
+    TAILQ_INSERT_TAIL(&primary->join_queue, dbc, links);
+    MUTEX_THREAD_UNLOCK(dbenv, primary->mutexp);
+
+    return (0);
+
+err:    if (jc != NULL) {
+        if (jc->j_curslist != NULL)
+            __os_free(dbenv, jc->j_curslist);
+        if (jc->j_workcurs != NULL) {
+            /*
+             * NOTE(review): j_workcurs[0] would be a DBC
+             * obtained from c_dup, so __os_free rather than
+             * c_close looks suspect -- but per the comment
+             * above, c_dup is the last call that can fail,
+             * so this slot should still be NULL whenever we
+             * get here.  Confirm if the function is modified.
+             */
+            if (jc->j_workcurs[0] != NULL)
+                __os_free(dbenv, jc->j_workcurs[0]);
+            __os_free(dbenv, jc->j_workcurs);
+        }
+        if (jc->j_fdupcurs != NULL)
+            __os_free(dbenv, jc->j_fdupcurs);
+        if (jc->j_exhausted != NULL)
+            __os_free(dbenv, jc->j_exhausted);
+        __os_free(dbenv, jc);
+    }
+    if (dbc != NULL)
+        __os_free(dbenv, dbc);
+    return (ret);
+}
+
+static int
+__db_join_put(dbc, key, data, flags)
+    DBC *dbc;
+    DBT *key;
+    DBT *data;
+    u_int32_t flags;
+{
+    /*
+     * Join cursors are read-only; writing through one is always an
+     * error.  Check for a panicked environment, then refuse.
+     */
+    PANIC_CHECK(dbc->dbp->dbenv);
+
+    COMPQUIET(key, NULL);
+    COMPQUIET(data, NULL);
+    COMPQUIET(flags, 0);
+
+    return (EINVAL);
+}
+
+static int
+__db_join_del(dbc, flags)
+    DBC *dbc;
+    u_int32_t flags;
+{
+    /*
+     * Deletion through a join cursor is not supported; the cursor is
+     * read-only by construction.  Check for panic, then refuse.
+     */
+    PANIC_CHECK(dbc->dbp->dbenv);
+
+    COMPQUIET(flags, 0);
+
+    return (EINVAL);
+}
+
+static int
+__db_join_get(dbc, key_arg, data_arg, flags)
+    DBC *dbc;
+    DBT *key_arg, *data_arg;
+    u_int32_t flags;
+{
+    DBT *key_n, key_n_mem;
+    DB *dbp;
+    DBC *cp;
+    JOIN_CURSOR *jc;
+    int db_manage_data, ret;
+    u_int32_t i, j, operation, opmods;
+
+    dbp = dbc->dbp;
+    jc = (JOIN_CURSOR *)dbc->internal;
+
+    PANIC_CHECK(dbp->dbenv);
+
+    operation = LF_ISSET(DB_OPFLAGS_MASK);
+
+    /* !!!
+     * If the set of flags here changes, check that __db_join_primget
+     * is updated to handle them properly.
+     */
+    opmods = LF_ISSET(DB_RMW | DB_DIRTY_READ);
+
+    if ((ret = __db_joingetchk(dbp, key_arg, flags)) != 0)
+        return (ret);
+
+    /*
+     * Since we are fetching the key as a datum in the secondary indices,
+     * we must be careful of caller-specified DB_DBT_* memory
+     * management flags.  If necessary, use a stack-allocated DBT;
+     * we'll appropriately copy and/or allocate the data later.
+     */
+    if (F_ISSET(key_arg, DB_DBT_USERMEM) ||
+        F_ISSET(key_arg, DB_DBT_MALLOC)) {
+        /* We just use the default buffer; no need to go malloc. */
+        key_n = &key_n_mem;
+        memset(key_n, 0, sizeof(DBT));
+    } else {
+        /*
+         * Either DB_DBT_REALLOC or the default buffer will work
+         * fine if we have to reuse it, as we do.
+         */
+        key_n = key_arg;
+    }
+
+    /*
+     * If our last attempt to do a get on the primary key failed,
+     * short-circuit the join and try again with the same key.
+     */
+    if (F_ISSET(jc, JOIN_RETRY))
+        goto samekey;
+    F_CLR(jc, JOIN_RETRY);
+
+retry:  ret = jc->j_workcurs[0]->c_real_get(jc->j_workcurs[0],
+        &jc->j_key, key_n,
+        opmods | (jc->j_exhausted[0] ? DB_NEXT_DUP : DB_CURRENT));
+
+    if (ret == ENOMEM) {
+        /* Key buffer too small; double it and retry the get. */
+        jc->j_key.ulen <<= 1;
+        if ((ret = __os_realloc(dbp->dbenv,
+            jc->j_key.ulen, &jc->j_key.data)) != 0)
+            goto mem_err;
+        goto retry;
+    }
+
+    /*
+     * If ret == DB_NOTFOUND, we're out of elements of the first
+     * secondary cursor.  This is how we finally finish the join
+     * if all goes well.
+     */
+    if (ret != 0)
+        goto err;
+
+    /*
+     * If jc->j_exhausted[0] == 1, we've just advanced the first cursor,
+     * and we're going to want to advance all the cursors that point to
+     * the first member of a duplicate duplicate set (j_fdupcurs[1..N]).
+     * Close all the cursors in j_fdupcurs; we'll reopen them the
+     * first time through the upcoming loop.
+     */
+    for (i = 1; i < jc->j_ncurs; i++) {
+        if (jc->j_fdupcurs[i] != NULL &&
+            (ret = jc->j_fdupcurs[i]->c_close(jc->j_fdupcurs[i])) != 0)
+            goto err;
+        jc->j_fdupcurs[i] = NULL;
+    }
+
+    /*
+     * If jc->j_curslist[1] == NULL, we have only one cursor in the join.
+     * Thus, we can safely increment that one cursor on each call
+     * to __db_join_get, and we signal this by setting jc->j_exhausted[0]
+     * right away.
+     *
+     * Otherwise, reset jc->j_exhausted[0] to 0, so that we don't
+     * increment it until we know we're ready to.
+     */
+    if (jc->j_curslist[1] == NULL)
+        jc->j_exhausted[0] = 1;
+    else
+        jc->j_exhausted[0] = 0;
+
+    /* We have the first element; now look for it in the other cursors. */
+    for (i = 1; i < jc->j_ncurs; i++) {
+        DB_ASSERT(jc->j_curslist[i] != NULL);
+        if (jc->j_workcurs[i] == NULL)
+            /* If this is NULL, we need to dup curslist into it. */
+            if ((ret = jc->j_curslist[i]->c_dup(
+                jc->j_curslist[i], jc->j_workcurs + i,
+                DB_POSITIONI)) != 0)
+                goto err;
+
+retry2:     cp = jc->j_workcurs[i];
+
+        if ((ret = __db_join_getnext(cp, &jc->j_key, key_n,
+            jc->j_exhausted[i], opmods)) == DB_NOTFOUND) {
+            /*
+             * jc->j_workcurs[i] has no more of the datum we're
+             * interested in.  Go back one cursor and get
+             * a new dup.  We can't just move to a new
+             * element of the outer relation, because that way
+             * we might miss duplicate duplicates in cursor i-1.
+             *
+             * If this takes us back to the first cursor,
+             * -then- we can move to a new element of the outer
+             * relation.
+             */
+            --i;
+            jc->j_exhausted[i] = 1;
+
+            if (i == 0) {
+                for (j = 1; jc->j_workcurs[j] != NULL; j++) {
+                    /*
+                     * We're moving to a new element of
+                     * the first secondary cursor.  If
+                     * that cursor is sorted, then any
+                     * other sorted cursors can be safely
+                     * reset to the first duplicate
+                     * duplicate in the current set if we
+                     * have a pointer to it (we can't just
+                     * leave them be, or we'll miss
+                     * duplicate duplicates in the outer
+                     * relation).
+                     *
+                     * If the first cursor is unsorted, or
+                     * if cursor j is unsorted, we can
+                     * make no assumptions about what
+                     * we're looking for next or where it
+                     * will be, so we reset to the very
+                     * beginning (setting workcurs NULL
+                     * will achieve this next go-round).
+                     *
+                     * XXX: This is likely to break
+                     * horribly if any two cursors are
+                     * both sorted, but have different
+                     * specified sort functions.  For,
+                     * now, we dismiss this as pathology
+                     * and let strange things happen--we
+                     * can't make rope childproof.
+                     */
+                    if ((ret = jc->j_workcurs[j]->c_close(
+                        jc->j_workcurs[j])) != 0)
+                        goto err;
+                    if (!SORTED_SET(jc, 0) ||
+                        !SORTED_SET(jc, j) ||
+                        jc->j_fdupcurs[j] == NULL)
+                        /*
+                         * Unsafe conditions;
+                         * reset fully.
+                         */
+                        jc->j_workcurs[j] = NULL;
+                    else
+                        /*
+                         * Partial reset suffices.
+                         * Bug fix: capture c_dup's
+                         * return in ret so a failure
+                         * here isn't masked by the
+                         * stale DB_NOTFOUND left from
+                         * __db_join_getnext above.
+                         */
+                        if ((ret =
+                            jc->j_fdupcurs[j]->c_dup(
+                            jc->j_fdupcurs[j],
+                            &jc->j_workcurs[j],
+                            DB_POSITIONI)) != 0)
+                            goto err;
+                    jc->j_exhausted[j] = 0;
+                }
+                goto retry;
+                /* NOTREACHED */
+            }
+
+            /*
+             * We're about to advance the cursor and need to
+             * reset all of the workcurs[j] where j>i, so that
+             * we don't miss any duplicate duplicates.
+             *
+             * Bug fix: the previous code cleared workcurs[j]
+             * even after a successful c_dup from fdupcurs[j],
+             * leaking the duplicated cursor and discarding its
+             * position.  Only clear the slot when there is no
+             * first-dup cursor to reposition from.
+             */
+            for (j = i + 1;
+                jc->j_workcurs[j] != NULL;
+                j++) {
+                if ((ret = jc->j_workcurs[j]->c_close(
+                    jc->j_workcurs[j])) != 0)
+                    goto err;
+                jc->j_exhausted[j] = 0;
+                if (jc->j_fdupcurs[j] == NULL)
+                    jc->j_workcurs[j] = NULL;
+                else if ((ret = jc->j_fdupcurs[j]->c_dup(
+                    jc->j_fdupcurs[j], &jc->j_workcurs[j],
+                    DB_POSITIONI)) != 0)
+                    goto err;
+            }
+            goto retry2;
+            /* NOTREACHED */
+        }
+
+        if (ret == ENOMEM) {
+            /* Key buffer too small; double it and retry. */
+            jc->j_key.ulen <<= 1;
+            if ((ret = __os_realloc(dbp->dbenv, jc->j_key.ulen,
+                &jc->j_key.data)) != 0) {
+mem_err:            __db_err(dbp->dbenv,
+                    "Allocation failed for join key, len = %lu",
+                    (u_long)jc->j_key.ulen);
+                goto err;
+            }
+            goto retry2;
+        }
+
+        if (ret != 0)
+            goto err;
+
+        /*
+         * If we made it this far, we've found a matching
+         * datum in cursor i.  Mark the current cursor
+         * unexhausted, so we don't miss any duplicate
+         * duplicates the next go-round--unless this is the
+         * very last cursor, in which case there are none to
+         * miss, and we'll need that exhausted flag to finally
+         * get a DB_NOTFOUND and move on to the next datum in
+         * the outermost cursor.
+         */
+        if (i + 1 != jc->j_ncurs)
+            jc->j_exhausted[i] = 0;
+        else
+            jc->j_exhausted[i] = 1;
+
+        /*
+         * If jc->j_fdupcurs[i] is NULL and the ith cursor's dups are
+         * sorted, then we're here for the first time since advancing
+         * cursor 0, and we have a new datum of interest.
+         * jc->j_workcurs[i] points to the beginning of a set of
+         * duplicate duplicates; store this into jc->j_fdupcurs[i].
+         */
+        if (SORTED_SET(jc, i) && jc->j_fdupcurs[i] == NULL && (ret =
+            cp->c_dup(cp, &jc->j_fdupcurs[i], DB_POSITIONI)) != 0)
+            goto err;
+
+    }
+
+err:    if (ret != 0)
+        return (ret);
+
+    if (0) {
+samekey:    /*
+         * Get the key we tried and failed to return last time;
+         * it should be the current datum of all the secondary cursors.
+         */
+        if ((ret = jc->j_workcurs[0]->c_real_get(jc->j_workcurs[0],
+            &jc->j_key, key_n, DB_CURRENT | opmods)) != 0)
+            return (ret);
+        F_CLR(jc, JOIN_RETRY);
+    }
+
+    /*
+     * ret == 0; we have a key to return.
+     *
+     * If DB_DBT_USERMEM or DB_DBT_MALLOC is set, we need to copy the key
+     * back into the dbt we were given for the key; call __db_retcopy.
+     * Otherwise, assert that we do not need to copy anything and proceed.
+     */
+    DB_ASSERT(F_ISSET(
+        key_arg, DB_DBT_USERMEM | DB_DBT_MALLOC) || key_n == key_arg);
+
+    if (F_ISSET(key_arg, DB_DBT_USERMEM | DB_DBT_MALLOC) &&
+        (ret = __db_retcopy(dbp->dbenv,
+        key_arg, key_n->data, key_n->size, NULL, NULL)) != 0) {
+        /*
+         * The retcopy failed, most commonly because we have a user
+         * buffer for the key which is too small.  Set things up to
+         * retry next time, and return.
+         */
+        F_SET(jc, JOIN_RETRY);
+        return (ret);
+    }
+
+    /*
+     * If DB_JOIN_ITEM is set, we return it; otherwise we do the lookup
+     * in the primary and then return.
+     *
+     * Note that we use key_arg here; it is safe (and appropriate)
+     * to do so.
+     */
+    if (operation == DB_JOIN_ITEM)
+        return (0);
+
+    /*
+     * If data_arg->flags == 0--that is, if DB is managing the
+     * data DBT's memory--it's not safe to just pass the DBT
+     * through to the primary get call, since we don't want that
+     * memory to belong to the primary DB handle (and if the primary
+     * is free-threaded, it can't anyway).
+     *
+     * Instead, use memory that is managed by the join cursor, in
+     * jc->j_rdata.
+     */
+    if (!F_ISSET(data_arg, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM))
+        db_manage_data = 1;
+    else
+        db_manage_data = 0;
+    if ((ret = __db_join_primget(jc->j_primary,
+        jc->j_curslist[0]->txn, jc->j_curslist[0]->locker, key_arg,
+        db_manage_data ? &jc->j_rdata : data_arg, opmods)) != 0) {
+        if (ret == DB_NOTFOUND)
+            /*
+             * If ret == DB_NOTFOUND, the primary and secondary
+             * are out of sync; every item in each secondary
+             * should correspond to something in the primary,
+             * or we shouldn't have done the join this way.
+             * Wail.
+             */
+            ret = __db_secondary_corrupt(jc->j_primary);
+        else
+            /*
+             * The get on the primary failed for some other
+             * reason, most commonly because we're using a user
+             * buffer that's not big enough.  Flag our failure
+             * so we can return the same key next time.
+             */
+            F_SET(jc, JOIN_RETRY);
+    }
+    if (db_manage_data && ret == 0) {
+        data_arg->data = jc->j_rdata.data;
+        data_arg->size = jc->j_rdata.size;
+    }
+
+    return (ret);
+}
+
+static int
+__db_join_close(dbc)
+    DBC *dbc;
+{
+    DB *dbp;
+    DB_ENV *dbenv;
+    DBC *scratch;
+    JOIN_CURSOR *jc;
+    int ret, t_ret;
+    u_int32_t i;
+
+    jc = (JOIN_CURSOR *)dbc->internal;
+    dbp = dbc->dbp;
+    dbenv = dbp->dbenv;
+    ret = 0;
+
+    /*
+     * Unlink ourselves from the primary's list of active join cursors
+     * first.  This must precede anything that can fail and return, or
+     * else __db_close may loop indefinitely over a cursor that never
+     * goes away.
+     */
+    MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+    TAILQ_REMOVE(&dbp->join_queue, dbc, links);
+    MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+    PANIC_CHECK(dbenv);
+
+    /*
+     * Close whatever scratch cursors are open.  There may not be as
+     * many outstanding as there are cursors in curslist, but we close
+     * whatever's there.  A failed close doesn't stop us: we keep going
+     * and report the last error seen, since the caller can't do
+     * anything useful with these db-internal cursors anyway.
+     */
+    for (i = 0; i < jc->j_ncurs; i++) {
+        if ((scratch = jc->j_workcurs[i]) != NULL &&
+            (t_ret = scratch->c_close(scratch)) != 0)
+            ret = t_ret;
+        if ((scratch = jc->j_fdupcurs[i]) != NULL &&
+            (t_ret = scratch->c_close(scratch)) != 0)
+            ret = t_ret;
+    }
+
+    /* Release everything the join cursor owns. */
+    __os_free(dbenv, jc->j_exhausted);
+    __os_free(dbenv, jc->j_curslist);
+    __os_free(dbenv, jc->j_workcurs);
+    __os_free(dbenv, jc->j_fdupcurs);
+    __os_free(dbenv, jc->j_key.data);
+    if (jc->j_rdata.data != NULL)
+        __os_ufree(dbenv, jc->j_rdata.data);
+    __os_free(dbenv, jc);
+    __os_free(dbenv, dbc);
+
+    return (ret);
+}
+
+/*
+ * __db_join_getnext --
+ * This function replaces the DBC_CONTINUE and DBC_KEYSET
+ * functionality inside the various cursor get routines.
+ *
+ * If exhausted == 0, we're not done with the current datum;
+ * return it if it matches "matching", otherwise search
+ * using DB_GET_BOTHC (which is faster than iteratively doing
+ * DB_NEXT_DUP) forward until we find one that does.
+ *
+ * If exhausted == 1, we are done with the current datum, so just
+ * leap forward to searching NEXT_DUPs.
+ *
+ * If no matching datum exists, returns DB_NOTFOUND, else 0.
+ */
+static int
+__db_join_getnext(dbc, key, data, exhausted, opmods)
+    DBC *dbc;
+    DBT *key, *data;
+    u_int32_t exhausted, opmods;
+{
+    int ret, cmp;
+    DB *dbp;
+    DBT ldata;
+    int (*func) __P((DB *, const DBT *, const DBT *));
+
+    dbp = dbc->dbp;
+    func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+
+    switch (exhausted) {
+    case 0:
+        /*
+         * We don't want to step on data->data; use a new
+         * DBT and malloc so we don't step on dbc's rdata memory.
+         */
+        memset(&ldata, 0, sizeof(DBT));
+        F_SET(&ldata, DB_DBT_MALLOC);
+        if ((ret = dbc->c_real_get(dbc,
+            key, &ldata, opmods | DB_CURRENT)) != 0)
+            break;
+        cmp = func(dbp, data, &ldata);
+        if (cmp == 0) {
+            /*
+             * We have to return the real data value.  Copy
+             * it into data, then free the buffer we malloc'ed
+             * above.
+             *
+             * Bug fix: free ldata.data even when __db_retcopy
+             * fails; the old code returned early on error and
+             * leaked the DB_DBT_MALLOC'ed buffer.
+             */
+            ret = __db_retcopy(dbp->dbenv, data, ldata.data,
+                ldata.size, &data->data, &data->size);
+            __os_ufree(dbp->dbenv, ldata.data);
+            return (ret);
+        }
+
+        /*
+         * Didn't match--we want to fall through and search future
+         * dups.  We just forget about ldata and free
+         * its buffer--data contains the value we're searching for.
+         */
+        __os_ufree(dbp->dbenv, ldata.data);
+        /* FALLTHROUGH */
+    case 1:
+        /* Done with the current datum; leap ahead via DB_GET_BOTHC. */
+        ret = dbc->c_real_get(dbc, key, data, opmods | DB_GET_BOTHC);
+        break;
+    default:
+        ret = EINVAL;
+        break;
+    }
+
+    return (ret);
+}
+
+/*
+ * __db_join_cmp --
+ * Comparison function for sorting DBCs in cardinality order.
+ */
+static int
+__db_join_cmp(a, b)
+    const void *a, *b;
+{
+    DBC *dbca, *dbcb;
+    db_recno_t counta, countb;
+
+    /* In case c_count fails, pretend cursors are equal. */
+    counta = countb = 0;
+
+    dbca = *((DBC * const *)a);
+    dbcb = *((DBC * const *)b);
+
+    if (dbca->c_count(dbca, &counta, 0) != 0 ||
+        dbcb->c_count(dbcb, &countb, 0) != 0)
+        return (0);
+
+    /*
+     * Bug fix: compare explicitly rather than returning
+     * counta - countb.  db_recno_t is unsigned, so the subtraction
+     * wraps around and is then truncated to int, which can yield the
+     * wrong sign for large cardinalities and break qsort's ordering.
+     */
+    return (counta < countb ? -1 : (counta > countb ? 1 : 0));
+}
+
+/*
+ * __db_join_primget --
+ * Perform a DB->get in the primary, being careful not to use a new
+ * locker ID if we're doing CDB locking.
+ */
+static int
+__db_join_primget(dbp, txn, lockerid, key, data, flags)
+    DB *dbp;
+    DB_TXN *txn;
+    u_int32_t lockerid;
+    DBT *key, *data;
+    u_int32_t flags;
+{
+    DBC *dbc;
+    int dirty, ret, rmw, t_ret;
+
+    /*
+     * Split out the only two flags we understand -- the same two
+     * __db_join_get copies into "opmods": DB_RMW applies to the c_get
+     * call, DB_DIRTY_READ to the cursor itself.  Anything else
+     * arriving here is a DB bug.
+     */
+    rmw = LF_ISSET(DB_RMW);
+    dirty = LF_ISSET(DB_DIRTY_READ);
+    LF_CLR(DB_RMW | DB_DIRTY_READ);
+    DB_ASSERT(flags == 0);
+
+    /*
+     * Create the cursor with the caller's locker ID so that, under
+     * CDB locking, we don't allocate a fresh one.
+     */
+    if ((ret = __db_icursor(dbp,
+        txn, dbp->type, PGNO_INVALID, 0, lockerid, &dbc)) != 0)
+        return (ret);
+
+    if (dirty || (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ)))
+        F_SET(dbc, DBC_DIRTY_READ);
+    F_SET(dbc, DBC_TRANSIENT);
+
+    /*
+     * This shouldn't be necessary, thanks to the fact that join
+     * cursors swap in their own DB_DBT_REALLOC'ed buffers, but just
+     * for form's sake, we mirror what __db_get does.
+     */
+    SET_RET_MEM(dbc, dbp);
+
+    ret = dbc->c_get(dbc, key, data, DB_SET | rmw);
+
+    /* Always close the cursor; the get's error takes precedence. */
+    if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+        ret = t_ret;
+
+    return (ret);
+}
diff --git a/storage/bdb/db/db_meta.c b/storage/bdb/db/db_meta.c
new file mode 100644
index 00000000000..015ef5c8fc7
--- /dev/null
+++ b/storage/bdb/db/db_meta.c
@@ -0,0 +1,452 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_meta.c,v 11.61 2002/08/08 03:57:48 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/db_am.h"
+
+static void __db_init_meta __P((void *, u_int32_t, db_pgno_t, u_int32_t));
+
+/*
+ * __db_init_meta --
+ * Helper function for __db_new that initializes the important fields in
+ * a meta-data page (used instead of P_INIT). We need to make sure that we
+ * retain the page number and LSN of the existing page.
+ */
+static void
+__db_init_meta(p, pgsize, pgno, pgtype)
+    void *p;
+    u_int32_t pgsize;
+    db_pgno_t pgno;
+    u_int32_t pgtype;
+{
+    DBMETA *meta;
+    DB_LSN lsn_hold;
+
+    /*
+     * Clear the meta-data header but preserve the page's LSN, then
+     * restore the caller-supplied page number: unlike P_INIT, this
+     * keeps the existing page's identity intact for recovery.
+     */
+    meta = (DBMETA *)p;
+    lsn_hold = meta->lsn;
+    memset(meta, 0, sizeof(DBMETA));
+    meta->lsn = lsn_hold;
+
+    meta->pagesize = pgsize;
+    meta->pgno = pgno;
+    meta->type = (u_int8_t)pgtype;
+}
+
+/*
+ * __db_new --
+ * Get a new page, preferably from the freelist.
+ *
+ * PUBLIC: int __db_new __P((DBC *, u_int32_t, PAGE **));
+ */
+int
+__db_new(dbc, type, pagepp)
+    DBC *dbc;
+    u_int32_t type;
+    PAGE **pagepp;
+{
+    DBMETA *meta;
+    DB *dbp;
+    DB_LOCK metalock;
+    DB_LSN lsn;
+    DB_MPOOLFILE *mpf;
+    PAGE *h;
+    db_pgno_t pgno, newnext;
+    int meta_flags, extend, ret;
+
+    meta = NULL;
+    meta_flags = 0;
+    dbp = dbc->dbp;
+    mpf = dbp->mpf;
+    h = NULL;
+    newnext = PGNO_INVALID;
+
+    /* Write-lock and fetch the metadata page, which owns the freelist. */
+    pgno = PGNO_BASE_MD;
+    if ((ret = __db_lget(dbc,
+        LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+        goto err;
+    if ((ret = mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+        goto err;
+    if (meta->free == PGNO_INVALID) {
+        /* Freelist is empty: extend the file by one page. */
+        pgno = meta->last_pgno + 1;
+        ZERO_LSN(lsn);
+        extend = 1;
+    } else {
+        pgno = meta->free;
+        if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+            goto err;
+
+        /*
+         * We want to take the first page off the free list and
+         * then set meta->free to that page's next_pgno, but
+         * we need to log the change first.
+         */
+        newnext = h->next_pgno;
+        lsn = h->lsn;
+        extend = 0;
+    }
+
+    /*
+     * Log the allocation before fetching the new page.  If we
+     * don't have room in the log then we don't want to tell
+     * mpool to extend the file.
+     */
+    if (DBC_LOGGING(dbc)) {
+        if ((ret = __db_pg_alloc_log(dbp, dbc->txn, &LSN(meta), 0,
+            &LSN(meta), PGNO_BASE_MD, &lsn, pgno,
+            (u_int32_t)type, newnext)) != 0)
+            goto err;
+    } else
+        LSN_NOT_LOGGED(LSN(meta));
+
+    meta_flags = DB_MPOOL_DIRTY;
+    meta->free = newnext;
+
+    if (extend == 1) {
+        /* DB_MPOOL_NEW creates the page at the end of the file. */
+        meta->last_pgno++;
+        if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_NEW, &h)) != 0)
+            goto err;
+        ZERO_LSN(h->lsn);
+        h->pgno = pgno;
+        DB_ASSERT(pgno == meta->last_pgno);
+    }
+    LSN(h) = LSN(meta);
+
+    DB_ASSERT(TYPE(h) == P_INVALID);
+
+    /*
+     * Belt and braces for builds where DB_ASSERT compiles away: an
+     * allocated page of any other type means the freelist is corrupt
+     * and we cannot safely continue.
+     */
+    if (TYPE(h) != P_INVALID)
+        return (__db_panic(dbp->dbenv, EINVAL));
+
+    /*
+     * Bug fix: NULL out meta after discarding it.  If the dirty-read
+     * lock acquisition below fails, we jump to err, which would
+     * otherwise put the metadata page a second time.
+     */
+    (void)mpf->put(mpf, (PAGE *)meta, DB_MPOOL_DIRTY);
+    meta = NULL;
+    (void)__TLPUT(dbc, metalock);
+
+    /* Metadata page types keep their pgno/LSN; everything else P_INITs. */
+    switch (type) {
+    case P_BTREEMETA:
+    case P_HASHMETA:
+    case P_QAMMETA:
+        __db_init_meta(h, dbp->pgsize, h->pgno, type);
+        break;
+    default:
+        P_INIT(h, dbp->pgsize,
+            h->pgno, PGNO_INVALID, PGNO_INVALID, 0, type);
+        break;
+    }
+
+    /*
+     * If dirty reads are enabled and we are in a transaction, we could
+     * abort this allocation after the page(s) pointing to this
+     * one have their locks downgraded.  This would permit dirty readers
+     * to access this page which is ok, but they must be off the
+     * page when we abort.  This will also prevent updates happening
+     * to this page until we commit.
+     *
+     * Note the metalock handle is reused for the new page's lock; the
+     * metadata lock was already released by __TLPUT above.
+     */
+    if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && dbc->txn != NULL) {
+        if ((ret = __db_lget(dbc, 0,
+            h->pgno, DB_LOCK_WWRITE, 0, &metalock)) != 0)
+            goto err;
+    }
+    *pagepp = h;
+    return (0);
+
+err:    if (h != NULL)
+        (void)mpf->put(mpf, h, 0);
+    if (meta != NULL)
+        (void)mpf->put(mpf, meta, meta_flags);
+    (void)__TLPUT(dbc, metalock);
+    return (ret);
+}
+
+/*
+ * __db_free --
+ * Add a page to the head of the freelist.
+ *
+ * PUBLIC: int __db_free __P((DBC *, PAGE *));
+ */
+int
+__db_free(dbc, h)
+    DBC *dbc;
+    PAGE *h;
+{
+    DBMETA *meta;
+    DB *dbp;
+    DBT ldbt;
+    DB_LOCK metalock;
+    DB_MPOOLFILE *mpf;
+    db_pgno_t pgno;
+    u_int32_t dirty_flag;
+    int ret, t_ret;
+
+    dbp = dbc->dbp;
+    mpf = dbp->mpf;
+
+    /*
+     * Retrieve the metadata page and insert the page at the head of
+     * the free list.  If either the lock get or page get routines
+     * fail, then we need to put the page with which we were called
+     * back because our caller assumes we take care of it.
+     */
+    dirty_flag = 0;
+    pgno = PGNO_BASE_MD;
+    if ((ret = __db_lget(dbc,
+        LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+        goto err;
+    if ((ret = mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0) {
+        (void)__TLPUT(dbc, metalock);
+        goto err;
+    }
+
+    /* Freeing the current freelist head twice would corrupt the list. */
+    DB_ASSERT(h->pgno != meta->free);
+    /* Log the change. */
+    if (DBC_LOGGING(dbc)) {
+        /* The log record carries the page header as an opaque DBT. */
+        memset(&ldbt, 0, sizeof(ldbt));
+        ldbt.data = h;
+        ldbt.size = P_OVERHEAD(dbp);
+        if ((ret = __db_pg_free_log(dbp,
+            dbc->txn, &LSN(meta), 0, h->pgno,
+            &LSN(meta), PGNO_BASE_MD, &ldbt, meta->free)) != 0) {
+            (void)mpf->put(mpf, (PAGE *)meta, 0);
+            (void)__TLPUT(dbc, metalock);
+            goto err;
+        }
+    } else
+        LSN_NOT_LOGGED(LSN(meta));
+    LSN(h) = LSN(meta);
+
+    /* Reinitialize h as an invalid page whose next link is the old head. */
+    P_INIT(h, dbp->pgsize, h->pgno, PGNO_INVALID, meta->free, 0, P_INVALID);
+
+    meta->free = h->pgno;
+
+    /* Discard the metadata page. */
+    if ((t_ret =
+        mpf->put(mpf, (PAGE *)meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+        ret = t_ret;
+    if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0)
+        ret = t_ret;
+
+    /* Discard the caller's page reference. */
+    dirty_flag = DB_MPOOL_DIRTY;
+err:    if ((t_ret = mpf->put(mpf, h, dirty_flag)) != 0 && ret == 0)
+        ret = t_ret;
+
+    /*
+     * XXX
+     * We have to unlock the caller's page in the caller!
+     */
+    return (ret);
+}
+
+#ifdef DEBUG
+/*
+ * __db_lprint --
+ * Print out the list of locks currently held by a cursor.
+ *
+ * PUBLIC: int __db_lprint __P((DBC *));
+ */
+int
+__db_lprint(dbc)
+    DBC *dbc;
+{
+    DB *dbp;
+    DB_LOCKREQ request;
+
+    dbp = dbc->dbp;
+
+    /* Without a lock subsystem there is nothing to dump. */
+    if (LOCKING_ON(dbp->dbenv)) {
+        request.op = DB_LOCK_DUMP;
+        dbp->dbenv->lock_vec(dbp->dbenv,
+            dbc->locker, 0, &request, 1, NULL);
+    }
+
+    return (0);
+}
+#endif
+
+/*
+ * Implement the rules for transactional locking.  We can release the previous
+ * lock if we are not in a transaction or COUPLE_ALWAYS is specified (used in
+ * record locking).  If we are doing dirty reads then we can release read locks
+ * and downgrade write locks.
+ */
+#define DB_PUT_ACTION(dbc, action, lockp) \
+ (((action == LCK_COUPLE || action == LCK_COUPLE_ALWAYS) && \
+ LOCK_ISSET(*lockp)) ? \
+ (dbc->txn == NULL || action == LCK_COUPLE_ALWAYS || \
+ (F_ISSET(dbc, DBC_DIRTY_READ) && \
+ (lockp)->mode == DB_LOCK_DIRTY)) ? LCK_COUPLE : \
+ (F_ISSET((dbc)->dbp, DB_AM_DIRTY) && \
+ (lockp)->mode == DB_LOCK_WRITE) ? LCK_DOWNGRADE : 0 : 0)
+
+/*
+ * __db_lget --
+ * The standard lock get call.
+ *
+ * PUBLIC: int __db_lget __P((DBC *,
+ * PUBLIC: int, db_pgno_t, db_lockmode_t, u_int32_t, DB_LOCK *));
+ */
+int
+__db_lget(dbc, action, pgno, mode, lkflags, lockp)
+    DBC *dbc;
+    int action;
+    db_pgno_t pgno;
+    db_lockmode_t mode;
+    u_int32_t lkflags;
+    DB_LOCK *lockp;
+{
+    DB *dbp;
+    DB_ENV *dbenv;
+    DB_LOCKREQ couple[2], *reqp;
+    DB_TXN *txn;
+    int has_timeout, ret;
+
+    dbp = dbc->dbp;
+    dbenv = dbp->dbenv;
+    txn = dbc->txn;
+
+    /*
+     * We do not always check if we're configured for locking before
+     * calling __db_lget to acquire the lock.  In the no-op cases
+     * (CDB locking, locking off, compensating/recovery/off-page
+     * duplicate cursors) hand back an initialized-but-unset lock.
+     */
+    if (CDB_LOCKING(dbenv) ||
+        !LOCKING_ON(dbenv) || F_ISSET(dbc, DBC_COMPENSATE) ||
+        (F_ISSET(dbc, DBC_RECOVER) &&
+        (action != LCK_ROLLBACK || F_ISSET(dbenv, DB_ENV_REP_CLIENT))) ||
+        (action != LCK_ALWAYS && F_ISSET(dbc, DBC_OPD))) {
+        LOCK_INIT(*lockp);
+        return (0);
+    }
+
+    /* Fill in the cursor's reusable lock object for this page. */
+    dbc->lock.pgno = pgno;
+    if (lkflags & DB_LOCK_RECORD)
+        dbc->lock.type = DB_RECORD_LOCK;
+    else
+        dbc->lock.type = DB_PAGE_LOCK;
+    lkflags &= ~DB_LOCK_RECORD;
+
+    /*
+     * If the transaction enclosing this cursor has DB_LOCK_NOWAIT set,
+     * pass that along to the lock call.
+     */
+    if (DB_NONBLOCK(dbc))
+        lkflags |= DB_LOCK_NOWAIT;
+
+    /* Dirty-read cursors take dirty locks in place of read locks. */
+    if (F_ISSET(dbc, DBC_DIRTY_READ) && mode == DB_LOCK_READ)
+        mode = DB_LOCK_DIRTY;
+
+    has_timeout = txn != NULL && F_ISSET(txn, TXN_LOCKTIMEOUT);
+
+    switch (DB_PUT_ACTION(dbc, action, lockp)) {
+    case LCK_COUPLE:
+        /*
+         * Lock coupling: acquire the new lock and release the old
+         * one in a single lock_vec call.
+         */
+lck_couple: couple[0].op = has_timeout? DB_LOCK_GET_TIMEOUT : DB_LOCK_GET;
+        couple[0].obj = &dbc->lock_dbt;
+        couple[0].mode = mode;
+        if (action == LCK_COUPLE_ALWAYS)
+            action = LCK_COUPLE;
+        UMRW_SET(couple[0].timeout);
+        if (has_timeout)
+            couple[0].timeout = txn->lock_timeout;
+        if (action == LCK_COUPLE) {
+            couple[1].op = DB_LOCK_PUT;
+            couple[1].lock = *lockp;
+        }
+
+        ret = dbenv->lock_vec(dbenv, dbc->locker,
+            lkflags, couple, action == LCK_COUPLE ? 2 : 1, &reqp);
+        /*
+         * The get succeeded if everything worked, or if the vector
+         * failed only on the trailing put request.
+         */
+        if (ret == 0 || reqp == &couple[1])
+            *lockp = couple[0].lock;
+        break;
+    case LCK_DOWNGRADE:
+        if ((ret = dbenv->lock_downgrade(
+            dbenv, lockp, DB_LOCK_WWRITE, 0)) != 0)
+            return (ret);
+        /* FALL THROUGH */
+    default:
+        /* Timeouts are only expressible through the vector interface. */
+        if (has_timeout)
+            goto lck_couple;
+        ret = dbenv->lock_get(dbenv,
+            dbc->locker, lkflags, &dbc->lock_dbt, mode, lockp);
+        break;
+    }
+
+    return (ret);
+}
+
+/*
+ * __db_lput --
+ * The standard lock put call.
+ *
+ * PUBLIC: int __db_lput __P((DBC *, DB_LOCK *));
+ */
+int
+__db_lput(dbc, lockp)
+    DBC *dbc;
+    DB_LOCK *lockp;
+{
+    DB_ENV *dbenv;
+    int action, ret;
+
+    dbenv = dbc->dbp->dbenv;
+
+    /*
+     * Apply the transactional put rules: release the lock when we're
+     * allowed to, downgrade write locks for dirty readers, and
+     * otherwise hold the lock until the transaction resolves.
+     */
+    action = DB_PUT_ACTION(dbc, LCK_COUPLE, lockp);
+    if (action == LCK_COUPLE)
+        ret = dbenv->lock_put(dbenv, lockp);
+    else if (action == LCK_DOWNGRADE)
+        ret = __lock_downgrade(dbenv, lockp, DB_LOCK_WWRITE, 0);
+    else
+        ret = 0;
+
+    return (ret);
+}
diff --git a/storage/bdb/db/db_method.c b/storage/bdb/db/db_method.c
new file mode 100644
index 00000000000..14712180df0
--- /dev/null
+++ b/storage/bdb/db/db_method.c
@@ -0,0 +1,691 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_method.c,v 11.78 2002/07/02 19:26:55 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
+#include "dbinc/xa.h"
+#include "dbinc_auto/xa_ext.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __db_get_byteswapped __P((DB *, int *));
+static int __db_get_type __P((DB *, DBTYPE *dbtype));
+static int __db_init __P((DB *, u_int32_t));
+static int __db_key_range
+ __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+static int __db_set_alloc __P((DB *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+static int __db_set_append_recno __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+static int __db_set_cachesize __P((DB *, u_int32_t, u_int32_t, int));
+static int __db_set_cache_priority __P((DB *, DB_CACHE_PRIORITY));
+static int __db_set_dup_compare
+ __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+static int __db_set_encrypt __P((DB *, const char *, u_int32_t));
+static int __db_set_feedback __P((DB *, void (*)(DB *, int, int)));
+static int __db_set_flags __P((DB *, u_int32_t));
+static int __db_set_pagesize __P((DB *, u_int32_t));
+static int __db_set_paniccall __P((DB *, void (*)(DB_ENV *, int)));
+static void __db_set_errcall __P((DB *, void (*)(const char *, char *)));
+static void __db_set_errfile __P((DB *, FILE *));
+static void __db_set_errpfx __P((DB *, const char *));
+static int __db_stat_fail __P((DB *, void *, u_int32_t));
+static void __dbh_err __P((DB *, int, const char *, ...));
+static void __dbh_errx __P((DB *, const char *, ...));
+
+#ifdef HAVE_RPC
+static int __dbcl_init __P((DB *, DB_ENV *, u_int32_t));
+#endif
+
+/*
+ * db_create --
+ * DB constructor.
+ *
+ * EXTERN: int db_create __P((DB **, DB_ENV *, u_int32_t));
+ */
+int
+db_create(dbpp, dbenv, flags)
+ DB **dbpp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret;
+
+ /* Check for invalid function flags. */
+ switch (flags) {
+ case 0:
+ break;
+ case DB_XA_CREATE:
+ if (dbenv != NULL) {
+ __db_err(dbenv,
+ "XA applications may not specify an environment to db_create");
+ return (EINVAL);
+ }
+
+ /*
+ * If it's an XA database, open it within the XA environment,
+ * taken from the global list of environments. (When the XA
+ * transaction manager called our xa_start() routine the
+ * "current" environment was moved to the start of the list.
+ */
+ dbenv = TAILQ_FIRST(&DB_GLOBAL(db_envq));
+ break;
+ default:
+ return (__db_ferr(dbenv, "db_create", 0));
+ }
+
+ /* Allocate the DB. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(*dbp), &dbp)) != 0)
+ return (ret);
+#ifdef HAVE_RPC
+ if (dbenv != NULL && RPC_ON(dbenv))
+ ret = __dbcl_init(dbp, dbenv, flags);
+ else
+#endif
+ ret = __db_init(dbp, flags);
+ if (ret != 0) {
+ __os_free(dbenv, dbp);
+ return (ret);
+ }
+
+ /* If we don't have an environment yet, allocate a local one. */
+ if (dbenv == NULL) {
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ __os_free(dbenv, dbp);
+ return (ret);
+ }
+ F_SET(dbenv, DB_ENV_DBLOCAL);
+ }
+ ++dbenv->db_ref;
+
+ dbp->dbenv = dbenv;
+
+ *dbpp = dbp;
+ return (0);
+}
+
+/*
+ * __db_init --
+ * Initialize a DB structure.
+ */
+static int
+__db_init(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int ret;
+
+ dbp->lid = DB_LOCK_INVALIDID;
+ LOCK_INIT(dbp->handle_lock);
+
+ TAILQ_INIT(&dbp->free_queue);
+ TAILQ_INIT(&dbp->active_queue);
+ TAILQ_INIT(&dbp->join_queue);
+ LIST_INIT(&dbp->s_secondaries);
+
+ FLD_SET(dbp->am_ok,
+ DB_OK_BTREE | DB_OK_HASH | DB_OK_QUEUE | DB_OK_RECNO);
+
+ dbp->associate = __db_associate;
+ dbp->close = __db_close;
+ dbp->cursor = __db_cursor;
+ dbp->del = __db_delete;
+ dbp->err = __dbh_err;
+ dbp->errx = __dbh_errx;
+ dbp->fd = __db_fd;
+ dbp->get = __db_get;
+ dbp->get_byteswapped = __db_get_byteswapped;
+ dbp->get_type = __db_get_type;
+ dbp->join = __db_join;
+ dbp->key_range = __db_key_range;
+ dbp->open = __db_open;
+ dbp->pget = __db_pget;
+ dbp->put = __db_put;
+ dbp->remove = __db_remove;
+ dbp->rename = __db_rename;
+ dbp->truncate = __db_truncate;
+ dbp->set_alloc = __db_set_alloc;
+ dbp->set_append_recno = __db_set_append_recno;
+ dbp->set_cachesize = __db_set_cachesize;
+ dbp->set_cache_priority = __db_set_cache_priority;
+ dbp->set_dup_compare = __db_set_dup_compare;
+ dbp->set_encrypt = __db_set_encrypt;
+ dbp->set_errcall = __db_set_errcall;
+ dbp->set_errfile = __db_set_errfile;
+ dbp->set_errpfx = __db_set_errpfx;
+ dbp->set_feedback = __db_set_feedback;
+ dbp->set_flags = __db_set_flags;
+ dbp->set_lorder = __db_set_lorder;
+ dbp->set_pagesize = __db_set_pagesize;
+ dbp->set_paniccall = __db_set_paniccall;
+ dbp->stat = __db_stat_fail;
+ dbp->sync = __db_sync;
+ dbp->upgrade = __db_upgrade;
+ dbp->verify = __db_verify;
+
+ /* Access method specific. */
+ if ((ret = __bam_db_create(dbp)) != 0)
+ return (ret);
+ if ((ret = __ham_db_create(dbp)) != 0)
+ return (ret);
+ if ((ret = __qam_db_create(dbp)) != 0)
+ return (ret);
+
+ /*
+ * XA specific: must be last, as we replace methods set by the
+ * access methods.
+ */
+ if (LF_ISSET(DB_XA_CREATE) && (ret = __db_xa_create(dbp)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __dbh_am_chk --
+ * Error if an unreasonable method is called.
+ *
+ * PUBLIC: int __dbh_am_chk __P((DB *, u_int32_t));
+ */
+int
+__dbh_am_chk(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ /*
+ * We start out allowing any access methods to be called, and as the
+ * application calls the methods the options become restricted. The
+ * idea is to quit as soon as an illegal method combination is called.
+ */
+ if ((LF_ISSET(DB_OK_BTREE) && FLD_ISSET(dbp->am_ok, DB_OK_BTREE)) ||
+ (LF_ISSET(DB_OK_HASH) && FLD_ISSET(dbp->am_ok, DB_OK_HASH)) ||
+ (LF_ISSET(DB_OK_QUEUE) && FLD_ISSET(dbp->am_ok, DB_OK_QUEUE)) ||
+ (LF_ISSET(DB_OK_RECNO) && FLD_ISSET(dbp->am_ok, DB_OK_RECNO))) {
+ FLD_CLR(dbp->am_ok, ~flags);
+ return (0);
+ }
+
+ __db_err(dbp->dbenv,
+ "call implies an access method which is inconsistent with previous calls");
+ return (EINVAL);
+}
+
+/*
+ * __dbh_err --
+ * Error message, including the standard error string.
+ */
+static void
+#ifdef __STDC__
+__dbh_err(DB *dbp, int error, const char *fmt, ...)
+#else
+__dbh_err(dbp, error, fmt, va_alist)
+ DB *dbp;
+ int error;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbp->dbenv, error, 1, 1, fmt);
+}
+
+/*
+ * __dbh_errx --
+ * Error message.
+ */
+static void
+#ifdef __STDC__
+__dbh_errx(DB *dbp, const char *fmt, ...)
+#else
+__dbh_errx(dbp, fmt, va_alist)
+ DB *dbp;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbp->dbenv, 0, 0, 1, fmt);
+}
+
+/*
+ * __db_get_byteswapped --
+ * Return if database requires byte swapping.
+ */
+static int
+__db_get_byteswapped(dbp, isswapped)
+ DB *dbp;
+ int *isswapped;
+{
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "get_byteswapped");
+
+ *isswapped = F_ISSET(dbp, DB_AM_SWAP) ? 1 : 0;
+ return (0);
+}
+
+/*
+ * __db_get_type --
+ * Return type of underlying database.
+ */
+static int
+__db_get_type(dbp, dbtype)
+ DB *dbp;
+ DBTYPE *dbtype;
+{
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "get_type");
+
+ *dbtype = dbp->type;
+ return (0);
+}
+
+/*
+ * __db_key_range --
+ * Return proportion of keys above and below given key.
+ */
+static int
+__db_key_range(dbp, txn, key, kr, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ DB_KEY_RANGE *kr;
+ u_int32_t flags;
+{
+ COMPQUIET(txn, NULL);
+ COMPQUIET(key, NULL);
+ COMPQUIET(kr, NULL);
+ COMPQUIET(flags, 0);
+
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "key_range");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE);
+
+ return (EINVAL);
+}
+
+/*
+ * __db_set_append_recno --
+ * Set record number append routine.
+ */
+static int
+__db_set_append_recno(dbp, func)
+ DB *dbp;
+ int (*func) __P((DB *, DBT *, db_recno_t));
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_append_recno");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE | DB_OK_RECNO);
+
+ dbp->db_append_recno = func;
+
+ return (0);
+}
+
+/*
+ * __db_set_cachesize --
+ * Set underlying cache size.
+ */
+static int
+__db_set_cachesize(dbp, cache_gbytes, cache_bytes, ncache)
+ DB *dbp;
+ u_int32_t cache_gbytes, cache_bytes;
+ int ncache;
+{
+ DB_ILLEGAL_IN_ENV(dbp, "set_cachesize");
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_cachesize");
+
+ return (dbp->dbenv->set_cachesize(
+ dbp->dbenv, cache_gbytes, cache_bytes, ncache));
+}
+
+/*
+ * __db_set_cache_priority --
+ * Set cache priority for pages from this file.
+ */
+static int
+__db_set_cache_priority(dbp, priority)
+ DB *dbp;
+ DB_CACHE_PRIORITY priority;
+{
+ /*
+ * If an underlying DB_MPOOLFILE exists, call it. Otherwise, save
+ * the information away until DB->open is called.
+ */
+ if (dbp->mpf == NULL) {
+ dbp->priority = priority;
+ return (0);
+ }
+ return (dbp->mpf->set_priority(dbp->mpf, priority));
+}
+
+/*
+ * __db_set_dup_compare --
+ * Set duplicate comparison routine.
+ */
+static int
+__db_set_dup_compare(dbp, func)
+ DB *dbp;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+{
+ int ret;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "dup_compare");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH);
+
+ if ((ret = dbp->set_flags(dbp, DB_DUPSORT)) != 0)
+ return (ret);
+
+ dbp->dup_compare = func;
+
+ return (0);
+}
+
+/*
+ * __db_set_encrypt --
+ * Set database passwd.
+ */
+static int
+__db_set_encrypt(dbp, passwd, flags)
+ DB *dbp;
+ const char *passwd;
+ u_int32_t flags;
+{
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ DB_ILLEGAL_IN_ENV(dbp, "set_encrypt");
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_encrypt");
+
+ if ((ret = dbp->dbenv->set_encrypt(dbp->dbenv, passwd, flags)) != 0)
+ return (ret);
+
+ /*
+ * In a real env, this gets initialized with the region. In a local
+ * env, we must do it here.
+ */
+ db_cipher = (DB_CIPHER *)dbp->dbenv->crypto_handle;
+ if (!F_ISSET(db_cipher, CIPHER_ANY) &&
+ (ret = db_cipher->init(dbp->dbenv, db_cipher)) != 0)
+ return (ret);
+
+ return (dbp->set_flags(dbp, DB_ENCRYPT));
+}
+
+static void
+__db_set_errcall(dbp, errcall)
+ DB *dbp;
+ void (*errcall) __P((const char *, char *));
+{
+ dbp->dbenv->set_errcall(dbp->dbenv, errcall);
+}
+
+static void
+__db_set_errfile(dbp, errfile)
+ DB *dbp;
+ FILE *errfile;
+{
+ dbp->dbenv->set_errfile(dbp->dbenv, errfile);
+}
+
+static void
+__db_set_errpfx(dbp, errpfx)
+ DB *dbp;
+ const char *errpfx;
+{
+ dbp->dbenv->set_errpfx(dbp->dbenv, errpfx);
+}
+
+static int
+__db_set_feedback(dbp, feedback)
+ DB *dbp;
+ void (*feedback) __P((DB *, int, int));
+{
+ dbp->db_feedback = feedback;
+ return (0);
+}
+
+static int
+__db_set_flags(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int ret;
+
+ /*
+ * !!!
+ * The hash access method only takes two flags: DB_DUP and DB_DUPSORT.
+ * The Btree access method uses them for the same purposes, and so we
+ * resolve them there.
+ *
+ * The queue access method takes no flags.
+ */
+ if (LF_ISSET(DB_ENCRYPT)) {
+ if (!CRYPTO_ON(dbp->dbenv)) {
+ __db_err(dbp->dbenv,
+ "Database environment not configured for encryption");
+ return (EINVAL);
+ }
+ F_SET(dbp, DB_AM_ENCRYPT);
+ F_SET(dbp, DB_AM_CHKSUM);
+ LF_CLR(DB_ENCRYPT);
+ }
+ if (LF_ISSET(DB_CHKSUM_SHA1)) {
+ F_SET(dbp, DB_AM_CHKSUM);
+ LF_CLR(DB_CHKSUM_SHA1);
+ }
+
+ if ((ret = __bam_set_flags(dbp, &flags)) != 0)
+ return (ret);
+ if ((ret = __ram_set_flags(dbp, &flags)) != 0)
+ return (ret);
+
+ return (flags == 0 ? 0 : __db_ferr(dbp->dbenv, "DB->set_flags", 0));
+}
+
+/*
+ * __db_set_lorder --
+ * Set whether lorder is swapped or not.
+ *
+ * PUBLIC: int __db_set_lorder __P((DB *, int));
+ */
+int
+__db_set_lorder(dbp, db_lorder)
+ DB *dbp;
+ int db_lorder;
+{
+ int ret;
+
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_lorder");
+
+ /* Flag if the specified byte order requires swapping. */
+ switch (ret = __db_byteorder(dbp->dbenv, db_lorder)) {
+ case 0:
+ F_CLR(dbp, DB_AM_SWAP);
+ break;
+ case DB_SWAPBYTES:
+ F_SET(dbp, DB_AM_SWAP);
+ break;
+ default:
+ return (ret);
+ /* NOTREACHED */
+ }
+ return (0);
+}
+
+static int
+__db_set_alloc(dbp, mal_func, real_func, free_func)
+ DB *dbp;
+ void *(*mal_func) __P((size_t));
+ void *(*real_func) __P((void *, size_t));
+ void (*free_func) __P((void *));
+{
+ DB_ILLEGAL_IN_ENV(dbp, "set_alloc");
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_alloc");
+
+ return (dbp->dbenv->set_alloc(dbp->dbenv,
+ mal_func, real_func, free_func));
+}
+
+static int
+__db_set_pagesize(dbp, db_pagesize)
+ DB *dbp;
+ u_int32_t db_pagesize;
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_pagesize");
+
+ if (db_pagesize < DB_MIN_PGSIZE) {
+ __db_err(dbp->dbenv, "page sizes may not be smaller than %lu",
+ (u_long)DB_MIN_PGSIZE);
+ return (EINVAL);
+ }
+ if (db_pagesize > DB_MAX_PGSIZE) {
+ __db_err(dbp->dbenv, "page sizes may not be larger than %lu",
+ (u_long)DB_MAX_PGSIZE);
+ return (EINVAL);
+ }
+
+ /*
+ * We don't want anything that's not a power-of-2, as we rely on that
+ * for alignment of various types on the pages.
+ */
+ if (!POWER_OF_TWO(db_pagesize)) {
+ __db_err(dbp->dbenv, "page sizes must be a power-of-2");
+ return (EINVAL);
+ }
+
+ /*
+ * XXX
+ * Should we be checking for a page size that's not a multiple of 512,
+ * so that we never try and write less than a disk sector?
+ */
+ dbp->pgsize = db_pagesize;
+
+ return (0);
+}
+
+static int
+__db_set_paniccall(dbp, paniccall)
+ DB *dbp;
+ void (*paniccall) __P((DB_ENV *, int));
+{
+ return (dbp->dbenv->set_paniccall(dbp->dbenv, paniccall));
+}
+
+static int
+__db_stat_fail(dbp, sp, flags)
+ DB *dbp;
+ void *sp;
+ u_int32_t flags;
+{
+ COMPQUIET(sp, NULL);
+ COMPQUIET(flags, 0);
+
+ /*
+ * DB->stat isn't initialized until the actual DB->open call,
+ * but we don't want to core dump.
+ */
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ /* NOTREACHED */
+ return (EINVAL);
+}
+
+#ifdef HAVE_RPC
+/*
+ * __dbcl_init --
+ * Initialize a DB structure on the server.
+ */
+static int
+__dbcl_init(dbp, dbenv, flags)
+ DB *dbp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ TAILQ_INIT(&dbp->free_queue);
+ TAILQ_INIT(&dbp->active_queue);
+ /* !!!
+ * Note that we don't need to initialize the join_queue; it's
+ * not used in RPC clients. See the comment in __dbcl_db_join_ret().
+ */
+
+ dbp->associate = __dbcl_db_associate;
+ dbp->close = __dbcl_db_close;
+ dbp->cursor = __dbcl_db_cursor;
+ dbp->del = __dbcl_db_del;
+ dbp->err = __dbh_err;
+ dbp->errx = __dbh_errx;
+ dbp->fd = __dbcl_db_fd;
+ dbp->get = __dbcl_db_get;
+ dbp->get_byteswapped = __db_get_byteswapped;
+ dbp->get_type = __db_get_type;
+ dbp->join = __dbcl_db_join;
+ dbp->key_range = __dbcl_db_key_range;
+ dbp->open = __dbcl_db_open_wrap;
+ dbp->pget = __dbcl_db_pget;
+ dbp->put = __dbcl_db_put;
+ dbp->remove = __dbcl_db_remove;
+ dbp->rename = __dbcl_db_rename;
+ dbp->set_alloc = __dbcl_db_alloc;
+ dbp->set_append_recno = __dbcl_db_set_append_recno;
+ dbp->set_cachesize = __dbcl_db_cachesize;
+ dbp->set_cache_priority = __dbcl_db_cache_priority;
+ dbp->set_dup_compare = __dbcl_db_dup_compare;
+ dbp->set_encrypt = __dbcl_db_encrypt;
+ dbp->set_errcall = __db_set_errcall;
+ dbp->set_errfile = __db_set_errfile;
+ dbp->set_errpfx = __db_set_errpfx;
+ dbp->set_feedback = __dbcl_db_feedback;
+ dbp->set_flags = __dbcl_db_flags;
+ dbp->set_lorder = __dbcl_db_lorder;
+ dbp->set_pagesize = __dbcl_db_pagesize;
+ dbp->set_paniccall = __dbcl_db_panic;
+ dbp->stat = __dbcl_db_stat;
+ dbp->sync = __dbcl_db_sync;
+ dbp->truncate = __dbcl_db_truncate;
+ dbp->upgrade = __dbcl_db_upgrade;
+ dbp->verify = __dbcl_db_verify;
+
+ /*
+ * Set all the method specific functions to client funcs as well.
+ */
+ dbp->set_bt_compare = __dbcl_db_bt_compare;
+ dbp->set_bt_maxkey = __dbcl_db_bt_maxkey;
+ dbp->set_bt_minkey = __dbcl_db_bt_minkey;
+ dbp->set_bt_prefix = __dbcl_db_bt_prefix;
+ dbp->set_h_ffactor = __dbcl_db_h_ffactor;
+ dbp->set_h_hash = __dbcl_db_h_hash;
+ dbp->set_h_nelem = __dbcl_db_h_nelem;
+ dbp->set_q_extentsize = __dbcl_db_extentsize;
+ dbp->set_re_delim = __dbcl_db_re_delim;
+ dbp->set_re_len = __dbcl_db_re_len;
+ dbp->set_re_pad = __dbcl_db_re_pad;
+ dbp->set_re_source = __dbcl_db_re_source;
+
+ return (__dbcl_db_create(dbp, dbenv, flags));
+}
+#endif
diff --git a/storage/bdb/db/db_open.c b/storage/bdb/db/db_open.c
new file mode 100644
index 00000000000..8352525361f
--- /dev/null
+++ b/storage/bdb/db/db_open.c
@@ -0,0 +1,703 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_open.c,v 11.215 2002/08/15 15:27:52 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __db_openchk __P((DB *,
+ DB_TXN *, const char *, const char *, DBTYPE, u_int32_t));
+
+/*
+ * __db_open --
+ * Main library interface to the DB access methods.
+ *
+ * PUBLIC: int __db_open __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, DBTYPE, u_int32_t, int));
+ */
+int
+__db_open(dbp, txn, name, subdb, type, flags, mode)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ DB_ENV *dbenv;
+ int remove_master, remove_me, ret, t_ret, txn_local;
+
+ dbenv = dbp->dbenv;
+ remove_me = remove_master = txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __db_openchk(dbp, txn, name, subdb, type, flags)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ /*
+ * If the environment was configured with threads, the DB handle
+ * must also be free-threaded, so we force the DB_THREAD flag on.
+ * (See SR #2033 for why this is a requirement--recovery needs
+ * to be able to grab a dbp using __db_fileid_to_dbp, and it has
+ * no way of knowing which dbp goes with which thread, so whichever
+ * one it finds has to be usable in any of them.)
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ LF_SET(DB_THREAD);
+
+ /* Convert any DB->open flags. */
+ if (LF_ISSET(DB_RDONLY))
+ F_SET(dbp, DB_AM_RDONLY);
+ if (LF_ISSET(DB_DIRTY_READ))
+ F_SET(dbp, DB_AM_DIRTY);
+
+ /* Fill in the type. */
+ dbp->type = type;
+
+ /*
+ * If we're opening a subdatabase, we have to open (and potentially
+ * create) the main database, and then get (and potentially store)
+ * our base page number in that database. Then, we can finally open
+ * the subdatabase.
+ */
+ if ((ret = __db_dbopen(
+ dbp, txn, name, subdb, flags, mode, PGNO_BASE_MD)) != 0)
+ goto err;
+
+ /*
+ * You can open the database that describes the subdatabases in the
+ * rest of the file read-only. The content of each key's data is
+ * unspecified and applications should never be adding new records
+ * or updating existing records. However, during recovery, we need
+ * to open these databases R/W so we can redo/undo changes in them.
+ * Likewise, we need to open master databases read/write during
+ * rename and remove so we can be sure they're fully sync'ed, so
+ * we provide an override flag for the purpose.
+ */
+ if (subdb == NULL && !IS_RECOVERING(dbenv) && !LF_ISSET(DB_RDONLY) &&
+ !LF_ISSET(DB_RDWRMASTER) && F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "files containing multiple databases may only be opened read-only");
+ ret = EINVAL;
+ goto err;
+ }
+
+err: /* If we were successful, don't discard the file on close. */
+ if (ret == 0)
+		/* Success: clear discard/created flags so close keeps the file. */
+ F_CLR(dbp, DB_AM_DISCARD | DB_AM_CREATED | DB_AM_CREATED_MSTR);
+ else {
+ /*
+ * If we are not transactional, we need to remove the
+ * databases/subdatabases. If we are transactional, then
+ * the abort of the child transaction should take care of
+ * cleaning them up.
+ */
+ remove_me = txn == NULL && F_ISSET(dbp, DB_AM_CREATED);
+ remove_master = txn == NULL && F_ISSET(dbp, DB_AM_CREATED_MSTR);
+
+ /*
+ * If we had an error, it may have happened before or after
+ * we actually logged the open. If it happened before, then
+ * abort won't know anything about it and won't close or
+ * refresh the dbp, so we need to do it explicitly.
+ */
+ (void)__db_refresh(dbp, txn, DB_NOSYNC);
+ }
+
+ /* Remove anyone we created. */
+ if (remove_master || (subdb == NULL && remove_me))
+ /* Remove file. */
+ (void)dbenv->dbremove(dbenv, txn, name, NULL, 0);
+ else if (remove_me)
+ /* Remove subdatabase. */
+ (void)dbenv->dbremove(dbenv, txn, name, subdb, 0);
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_dbopen --
+ * Open a database. This routine gets called in three different ways.
+ * 1. It can be called to open a file/database. In this case, subdb will
+ * be NULL and meta_pgno will be PGNO_BASE_MD.
+ * 2. It can be called to open a subdatabase during normal operation. In
+ * this case, name and subname will both be non-NULL and meta_pgno will
+ * be PGNO_BASE_MD (also PGNO_INVALID).
+ * 3. It can be called during recovery to open a subdatabase in which case
+ * name will be non-NULL, subname may be NULL and meta_pgno will be
+ * a valid pgno (i.e., not PGNO_BASE_MD).
+ *
+ * PUBLIC: int __db_dbopen __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, u_int32_t, int, db_pgno_t));
+ */
+int
+__db_dbopen(dbp, txn, name, subdb, flags, mode, meta_pgno)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ u_int32_t flags;
+ int mode;
+ db_pgno_t meta_pgno;
+{
+ DB_ENV *dbenv;
+ int ret;
+ u_int32_t id;
+
+ dbenv = dbp->dbenv;
+ id = TXN_INVALID;
+ if (txn != NULL)
+ F_SET(dbp, DB_AM_TXN);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREOPEN, ret, name);
+ /*
+ * If name is NULL, it's always a create, so make sure that we
+ * have a type specified. It would be nice if this checking
+ * were done in __db_open where most of the interface checking
+ * is done, but this interface (__db_dbopen) is used by the
+ * recovery and limbo system, so we need to safeguard this
+ * interface as well.
+ */
+ if (name == NULL) {
+ F_SET(dbp, DB_AM_INMEM);
+
+ if (dbp->type == DB_UNKNOWN) {
+ __db_err(dbenv,
+ "DBTYPE of unknown without existing file");
+ return (EINVAL);
+ }
+
+ if (dbp->pgsize == 0)
+ dbp->pgsize = DB_DEF_IOSIZE;
+
+ /*
+ * If the file is a temporary file and we're doing locking,
+ * then we have to create a unique file ID. We can't use our
+ * normal dev/inode pair (or whatever this OS uses in place of
+ * dev/inode pairs) because no backing file will be created
+ * until the mpool cache is filled forcing the buffers to disk.
+ * Grab a random locker ID to use as a file ID. The created
+ * ID must never match a potential real file ID -- we know it
+ * won't because real file IDs contain a time stamp after the
+ * dev/inode pair, and we're simply storing a 4-byte value.
+ *
+ * !!!
+ * Store the locker in the file id structure -- we can get it
+ * from there as necessary, and it saves having two copies.
+ */
+ if (LOCKING_ON(dbenv) && (ret = dbenv->lock_id(dbenv,
+ (u_int32_t *)dbp->fileid)) != 0)
+ return (ret);
+ } else if (subdb == NULL && meta_pgno == PGNO_BASE_MD) {
+ /* Open/create the underlying file. Acquire locks. */
+ if ((ret =
+ __fop_file_setup(dbp, txn, name, mode, flags, &id)) != 0)
+ return (ret);
+ } else {
+ if ((ret = __fop_subdb_setup(dbp,
+ txn, name, subdb, mode, flags)) != 0)
+ return (ret);
+ meta_pgno = dbp->meta_pgno;
+ }
+
+ /*
+ * If we created the file, set the truncate flag for the mpool. This
+ * isn't for anything we've done, it's protection against stupid user
+ * tricks: if the user deleted a file behind Berkeley DB's back, we
+ * may still have pages in the mpool that match the file's "unique" ID.
+ *
+ * Note that if we're opening a subdatabase, we don't want to set
+ * the TRUNCATE flag even if we just created the file--we already
+ * opened and updated the master using access method interfaces,
+ * so we don't want to get rid of any pages that are in the mpool.
+	 * If we created the file when we opened the master, we already
+	 * hit this check earlier, in a non-subdb context.
+ */
+ if (subdb == NULL && F_ISSET(dbp, DB_AM_CREATED))
+ LF_SET(DB_TRUNCATE);
+
+ /* Set up the underlying environment. */
+ if ((ret = __db_dbenv_setup(dbp, txn, name, id, flags)) != 0)
+ return (ret);
+
+ /*
+ * Set the open flag. We use it to mean that the dbp has gone
+ * through mpf setup, including dbreg_register. Also, below,
+ * the underlying access method open functions may want to do
+ * things like acquire cursors, so the open flag has to be set
+ * before calling them.
+ */
+ F_SET(dbp, DB_AM_OPEN_CALLED);
+
+ /*
+ * For unnamed files, we need to actually create the file now
+ * that the mpool is open.
+ */
+ if (name == NULL && (ret = __db_new_file(dbp, txn, NULL, NULL)) != 0)
+ return (ret);
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ ret = __bam_open(dbp, txn, name, meta_pgno, flags);
+ break;
+ case DB_HASH:
+ ret = __ham_open(dbp, txn, name, meta_pgno, flags);
+ break;
+ case DB_RECNO:
+ ret = __ram_open(dbp, txn, name, meta_pgno, flags);
+ break;
+ case DB_QUEUE:
+ ret = __qam_open(dbp, txn, name, meta_pgno, mode, flags);
+ break;
+ case DB_UNKNOWN:
+ return (__db_unknown_type(dbenv, "__db_dbopen", dbp->type));
+ }
+ if (ret != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTOPEN, ret, name);
+
+ /*
+ * Unnamed files don't need handle locks, so we only have to check
+ * for a handle lock downgrade or lockevent in the case of named
+ * files.
+ */
+ if (!F_ISSET(dbp, DB_AM_RECOVER) &&
+ name != NULL && LOCK_ISSET(dbp->handle_lock)) {
+ if (txn != NULL) {
+ ret = __txn_lockevent(dbenv,
+ txn, dbp, &dbp->handle_lock, dbp->lid);
+ } else if (LOCKING_ON(dbenv))
+ /* Trade write handle lock for read handle lock. */
+ ret = __lock_downgrade(dbenv,
+ &dbp->handle_lock, DB_LOCK_READ, 0);
+ }
+DB_TEST_RECOVERY_LABEL
+err:
+ return (ret);
+}
+
+/*
+ * __db_new_file --
+ * Create a new database file.
+ *
+ * PUBLIC: int __db_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__db_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ int ret;
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ ret = __bam_new_file(dbp, txn, fhp, name);
+ break;
+ case DB_HASH:
+ ret = __ham_new_file(dbp, txn, fhp, name);
+ break;
+ case DB_QUEUE:
+ ret = __qam_new_file(dbp, txn, fhp, name);
+ break;
+ default:
+ __db_err(dbp->dbenv,
+ "%s: Invalid type %d specified", name, dbp->type);
+ ret = EINVAL;
+ break;
+ }
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOGMETA, ret, name);
+ /* Sync the file in preparation for moving it into place. */
+ if (ret == 0 && fhp != NULL)
+ ret = __os_fsync(dbp->dbenv, fhp);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+ return (ret);
+}
+
+/*
+ * __db_init_subdb --
+ * Initialize the dbp for a subdb.
+ *
+ * PUBLIC: int __db_init_subdb __P((DB *, DB *, const char *, DB_TXN *));
+ */
+int
+__db_init_subdb(mdbp, dbp, name, txn)
+ DB *mdbp, *dbp;
+ const char *name;
+ DB_TXN *txn;
+{
+ DBMETA *meta;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ ret = 0;
+ if (!F_ISSET(dbp, DB_AM_CREATED)) {
+ /* Subdb exists; read meta-data page and initialize. */
+ mpf = mdbp->mpf;
+ if ((ret = mpf->get(mpf, &dbp->meta_pgno, 0, &meta)) != 0)
+ goto err;
+ ret = __db_meta_setup(mdbp->dbenv, dbp, name, meta, 0, 0);
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * If __db_meta_setup found that the meta-page hadn't
+ * been written out during recovery, we can just return.
+ */
+ if (ret == ENOENT)
+ ret = 0;
+ goto err;
+ }
+
+ /* Handle the create case here. */
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ ret = __bam_new_subdb(mdbp, dbp, txn);
+ break;
+ case DB_HASH:
+ ret = __ham_new_subdb(mdbp, dbp, txn);
+ break;
+ case DB_QUEUE:
+ ret = EINVAL;
+ break;
+ default:
+ __db_err(dbp->dbenv,
+ "Invalid subdatabase type %d specified", dbp->type);
+ return (EINVAL);
+ }
+
+err: return (ret);
+}
+
+/*
+ * __db_chk_meta --
+ * Take a buffer containing a meta-data page and check it for a checksum
+ * (and verify the checksum if necessary) and possibly decrypt it.
+ *
+ * Return 0 on success, >0 (errno) on error, -1 on checksum mismatch.
+ *
+ * PUBLIC: int __db_chk_meta __P((DB_ENV *, DB *, DBMETA *, int));
+ */
+int
+__db_chk_meta(dbenv, dbp, meta, do_metachk)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBMETA *meta;
+ int do_metachk;
+{
+ int is_hmac, ret;
+ u_int8_t *chksum;
+
+ ret = 0;
+
+ if (FLD_ISSET(meta->metaflags, DBMETA_CHKSUM)) {
+ if (dbp != NULL)
+ F_SET(dbp, DB_AM_CHKSUM);
+
+ is_hmac = meta->encrypt_alg == 0 ? 0 : 1;
+ chksum = ((BTMETA *)meta)->chksum;
+ if (do_metachk && ((ret = __db_check_chksum(dbenv,
+ (DB_CIPHER *)dbenv->crypto_handle, chksum, meta,
+ DBMETASIZE, is_hmac)) != 0))
+ return (ret);
+ }
+
+#ifdef HAVE_CRYPTO
+ ret = __crypto_decrypt_meta(dbenv, dbp, (u_int8_t *)meta, do_metachk);
+#endif
+ return (ret);
+}
+
+/*
+ * __db_meta_setup --
+ *
+ * Take a buffer containing a meta-data page and figure out if it's
+ * valid, and if so, initialize the dbp from the meta-data page.
+ *
+ * PUBLIC: int __db_meta_setup __P((DB_ENV *,
+ * PUBLIC: DB *, const char *, DBMETA *, u_int32_t, int));
+ */
+int
+__db_meta_setup(dbenv, dbp, name, meta, oflags, do_metachk)
+ DB_ENV *dbenv;
+ DB *dbp;
+ const char *name;
+ DBMETA *meta;
+ u_int32_t oflags;
+ int do_metachk;
+{
+ u_int32_t flags, magic;
+ int ret;
+
+ ret = 0;
+
+ /*
+ * Figure out what access method we're dealing with, and then
+ * call access method specific code to check error conditions
+ * based on conflicts between the found file and application
+ * arguments. A found file overrides some user information --
+ * we don't consider it an error, for example, if the user set
+ * an expected byte order and the found file doesn't match it.
+ */
+ F_CLR(dbp, DB_AM_SWAP);
+ magic = meta->magic;
+
+swap_retry:
+ switch (magic) {
+ case DB_BTREEMAGIC:
+ case DB_HASHMAGIC:
+ case DB_QAMMAGIC:
+ case DB_RENAMEMAGIC:
+ break;
+ case 0:
+ /*
+ * The only time this should be 0 is if we're in the
+ * midst of opening a subdb during recovery and that
+ * subdatabase had its meta-data page allocated, but
+ * not yet initialized.
+ */
+ if (F_ISSET(dbp, DB_AM_SUBDB) && ((IS_RECOVERING(dbenv) &&
+ F_ISSET((DB_LOG *) dbenv->lg_handle, DBLOG_FORCE_OPEN)) ||
+ meta->pgno != PGNO_INVALID))
+ return (ENOENT);
+
+ goto bad_format;
+ default:
+ /*
+ * Unknown magic: swap it once and retry, flagging the
+ * handle as byte-swapped.  If DB_AM_SWAP is already set
+ * we have tried both byte orders -- give up.
+ */
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ goto bad_format;
+
+ M_32_SWAP(magic);
+ F_SET(dbp, DB_AM_SWAP);
+ goto swap_retry;
+ }
+
+ /*
+ * We can only check the meta page if we are sure we have a meta page.
+ * If it is random data, then this check can fail. So only now can we
+ * checksum and decrypt. Don't distinguish between configuration and
+ * checksum match errors here, because we haven't opened the database
+ * and even a checksum error isn't a reason to panic the environment.
+ */
+ if ((ret = __db_chk_meta(dbenv, dbp, meta, do_metachk)) != 0) {
+ if (ret == -1)
+ __db_err(dbenv,
+ "%s: metadata page checksum error", name);
+ goto bad_format;
+ }
+
+ /*
+ * Dispatch on access method: set dbp->type and run the per-method
+ * meta-page checks, unless the file is about to be truncated (in
+ * which case the existing meta-data is irrelevant).
+ */
+ switch (magic) {
+ case DB_BTREEMAGIC:
+ flags = meta->flags;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(flags);
+ if (LF_ISSET(BTM_RECNO))
+ dbp->type = DB_RECNO;
+ else
+ dbp->type = DB_BTREE;
+ if ((oflags & DB_TRUNCATE) == 0 && (ret =
+ __bam_metachk(dbp, name, (BTMETA *)meta)) != 0)
+ return (ret);
+ break;
+ case DB_HASHMAGIC:
+ dbp->type = DB_HASH;
+ if ((oflags & DB_TRUNCATE) == 0 && (ret =
+ __ham_metachk(dbp, name, (HMETA *)meta)) != 0)
+ return (ret);
+ break;
+ case DB_QAMMAGIC:
+ dbp->type = DB_QUEUE;
+ if ((oflags & DB_TRUNCATE) == 0 && (ret =
+ __qam_metachk(dbp, name, (QMETA *)meta)) != 0)
+ return (ret);
+ break;
+ case DB_RENAMEMAGIC:
+ /* The file is in mid-rename; mark the handle accordingly. */
+ F_SET(dbp, DB_AM_IN_RENAME);
+ break;
+ }
+ return (0);
+
+bad_format:
+ __db_err(dbenv, "%s: unexpected file type or format", name);
+ return (ret == 0 ? EINVAL : ret);
+}
+
+/*
+ * __db_openchk --
+ * Interface error checking for open calls.
+ *
+ * Validates the flag combination, database type, environment state and
+ * subdatabase constraints before DB->open proceeds.  Returns 0 on
+ * success or EINVAL (or a __db_fchk/__db_ferr error) on failure.
+ */
+static int
+__db_openchk(dbp, txn, name, subdb, type, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+ u_int32_t ok_flags;
+
+ dbenv = dbp->dbenv;
+
+ /* Validate arguments. */
+#define OKFLAGS \
+ (DB_AUTO_COMMIT | DB_CREATE | DB_DIRTY_READ | DB_EXCL | \
+ DB_FCNTL_LOCKING | DB_NOMMAP | DB_RDONLY | DB_RDWRMASTER | \
+ DB_THREAD | DB_TRUNCATE | DB_WRITEOPEN)
+ if ((ret = __db_fchk(dbenv, "DB->open", flags, OKFLAGS)) != 0)
+ return (ret);
+ /* DB_EXCL only makes sense when creating; RDONLY+CREATE conflict. */
+ if (LF_ISSET(DB_EXCL) && !LF_ISSET(DB_CREATE))
+ return (__db_ferr(dbenv, "DB->open", 1));
+ if (LF_ISSET(DB_RDONLY) && LF_ISSET(DB_CREATE))
+ return (__db_ferr(dbenv, "DB->open", 1));
+
+#ifdef HAVE_VXWORKS
+ if (LF_ISSET(DB_TRUNCATE)) {
+ __db_err(dbenv, "DB_TRUNCATE unsupported in VxWorks");
+ return (__db_eopnotsup(dbenv));
+ }
+#endif
+ switch (type) {
+ case DB_UNKNOWN:
+ if (LF_ISSET(DB_CREATE|DB_TRUNCATE)) {
+ __db_err(dbenv,
+ "%s: DB_UNKNOWN type specified with DB_CREATE or DB_TRUNCATE",
+ name);
+ return (EINVAL);
+ }
+ ok_flags = 0;
+ break;
+ case DB_BTREE:
+ ok_flags = DB_OK_BTREE;
+ break;
+ case DB_HASH:
+ ok_flags = DB_OK_HASH;
+ break;
+ case DB_QUEUE:
+ ok_flags = DB_OK_QUEUE;
+ break;
+ case DB_RECNO:
+ ok_flags = DB_OK_RECNO;
+ break;
+ default:
+ __db_err(dbenv, "unknown type: %lu", (u_long)type);
+ return (EINVAL);
+ }
+ /*
+ * Reject method calls already made on the handle that are illegal
+ * for the resolved access method.
+ */
+ if (ok_flags)
+ DB_ILLEGAL_METHOD(dbp, ok_flags);
+
+ /* The environment may have been created, but never opened. */
+ if (!F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_OPEN_CALLED)) {
+ __db_err(dbenv, "environment not yet opened");
+ return (EINVAL);
+ }
+
+ /*
+ * Historically, you could pass in an environment that didn't have a
+ * mpool, and DB would create a private one behind the scenes. This
+ * no longer works.
+ */
+ if (!F_ISSET(dbenv, DB_ENV_DBLOCAL) && !MPOOL_ON(dbenv)) {
+ __db_err(dbenv, "environment did not include a memory pool");
+ return (EINVAL);
+ }
+
+ /*
+ * You can't specify threads during DB->open if subsystems in the
+ * environment weren't configured with them.
+ */
+ if (LF_ISSET(DB_THREAD) &&
+ !F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_THREAD)) {
+ __db_err(dbenv, "environment not created using DB_THREAD");
+ return (EINVAL);
+ }
+
+ /* DB_TRUNCATE is not transaction recoverable. */
+ if (LF_ISSET(DB_TRUNCATE) && txn != NULL) {
+ __db_err(dbenv,
+ "DB_TRUNCATE illegal with transaction specified");
+ return (EINVAL);
+ }
+
+ /* Subdatabase checks. */
+ if (subdb != NULL) {
+ /* Subdatabases must be created in named files. */
+ if (name == NULL) {
+ __db_err(dbenv,
+ "multiple databases cannot be created in temporary files");
+ return (EINVAL);
+ }
+
+ /* Truncate is a physical file operation */
+ if (LF_ISSET(DB_TRUNCATE)) {
+ __db_err(dbenv,
+ "DB_TRUNCATE illegal with multiple databases");
+ return (EINVAL);
+ }
+
+ /* QAM can't be done as a subdatabase. */
+ if (type == DB_QUEUE) {
+ __db_err(dbenv, "Queue databases must be one-per-file");
+ return (EINVAL);
+ }
+ }
+
+ return (0);
+}
diff --git a/storage/bdb/db/db_overflow.c b/storage/bdb/db/db_overflow.c
new file mode 100644
index 00000000000..27dcb41a2ff
--- /dev/null
+++ b/storage/bdb/db/db_overflow.c
@@ -0,0 +1,726 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_overflow.c,v 11.46 2002/08/08 03:57:48 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_verify.h"
+
+/*
+ * Big key/data code.
+ *
+ * Big key and data entries are stored on linked lists of pages. The initial
+ * reference is a structure with the total length of the item and the page
+ * number where it begins. Each entry in the linked list contains a pointer
+ * to the next page of data, and so on.
+ */
+
+/*
+ * __db_goff --
+ * Get an offpage item.
+ *
+ * Walks the linked list of overflow pages starting at pgno and copies
+ * the (possibly partial) item of total length tlen into the DBT,
+ * honoring DB_DBT_PARTIAL/USERMEM/MALLOC/REALLOC memory policies.
+ * bpp/bpsz manage a caller-owned scratch buffer used when no DBT
+ * memory flag is set.
+ *
+ * PUBLIC: int __db_goff __P((DB *, DBT *,
+ * PUBLIC: u_int32_t, db_pgno_t, void **, u_int32_t *));
+ */
+int
+__db_goff(dbp, dbt, tlen, pgno, bpp, bpsz)
+ DB *dbp;
+ DBT *dbt;
+ u_int32_t tlen;
+ db_pgno_t pgno;
+ void **bpp;
+ u_int32_t *bpsz;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_indx_t bytes;
+ u_int32_t curoff, needed, start;
+ u_int8_t *p, *src;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+
+ /*
+ * Check if the buffer is big enough; if it is not and we are
+ * allowed to malloc space, then we'll malloc it. If we are
+ * not (DB_DBT_USERMEM), then we'll set the dbt and return
+ * appropriately.
+ */
+ if (F_ISSET(dbt, DB_DBT_PARTIAL)) {
+ start = dbt->doff;
+ if (start > tlen)
+ needed = 0;
+ else if (dbt->dlen > tlen - start)
+ needed = tlen - start;
+ else
+ needed = dbt->dlen;
+ } else {
+ start = 0;
+ needed = tlen;
+ }
+
+ /* Allocate any necessary memory. */
+ if (F_ISSET(dbt, DB_DBT_USERMEM)) {
+ /* Report the required size so the caller can retry. */
+ if (needed > dbt->ulen) {
+ dbt->size = needed;
+ return (ENOMEM);
+ }
+ } else if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+ if ((ret = __os_umalloc(dbenv, needed, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
+ if ((ret = __os_urealloc(dbenv, needed, &dbt->data)) != 0)
+ return (ret);
+ } else if (*bpsz == 0 || *bpsz < needed) {
+ /* Grow the shared scratch buffer to fit. */
+ if ((ret = __os_realloc(dbenv, needed, bpp)) != 0)
+ return (ret);
+ *bpsz = needed;
+ dbt->data = *bpp;
+ } else
+ dbt->data = *bpp;
+
+ /*
+ * Step through the linked list of pages, copying the data on each
+ * one into the buffer. Never copy more than the total data length.
+ */
+ dbt->size = needed;
+ for (curoff = 0, p = dbt->data; pgno != PGNO_INVALID && needed > 0;) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ /* Check if we need any bytes from this page. */
+ if (curoff + OV_LEN(h) >= start) {
+ src = (u_int8_t *)h + P_OVERHEAD(dbp);
+ bytes = OV_LEN(h);
+ /* Skip the part of this page before the doff point. */
+ if (start > curoff) {
+ src += start - curoff;
+ bytes -= start - curoff;
+ }
+ if (bytes > needed)
+ bytes = needed;
+ memcpy(p, src, bytes);
+ p += bytes;
+ needed -= bytes;
+ }
+ curoff += OV_LEN(h);
+ pgno = h->next_pgno;
+ (void)mpf->put(mpf, h, 0);
+ }
+ return (0);
+}
+
+/*
+ * __db_poff --
+ * Put an offpage item.
+ *
+ * Splits dbt across newly allocated overflow pages, linking them into
+ * a doubly-linked chain; the first page's number is returned through
+ * pgnop.  Each page is logged (when logging is on) before it is
+ * initialized and filled.
+ *
+ * PUBLIC: int __db_poff __P((DBC *, const DBT *, db_pgno_t *));
+ */
+int
+__db_poff(dbc, dbt, pgnop)
+ DBC *dbc;
+ const DBT *dbt;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DBT tmp_dbt;
+ DB_LSN new_lsn, null_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep, *lastp;
+ db_indx_t pagespace;
+ u_int32_t sz;
+ u_int8_t *p;
+ int ret, t_ret;
+
+ /*
+ * Allocate pages and copy the key/data item into them. Calculate the
+ * number of bytes we get for pages we fill completely with a single
+ * item.
+ */
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ pagespace = P_MAXSPACE(dbp, dbp->pgsize);
+
+ ret = 0;
+ lastp = NULL;
+ for (p = dbt->data,
+ sz = dbt->size; sz > 0; p += pagespace, sz -= pagespace) {
+ /*
+ * Reduce pagespace so we terminate the loop correctly and
+ * don't copy too much data.
+ */
+ if (sz < pagespace)
+ pagespace = sz;
+
+ /*
+ * Allocate and initialize a new page and copy all or part of
+ * the item onto the page. If sz is less than pagespace, we
+ * have a partial record.
+ */
+ if ((ret = __db_new(dbc, P_OVERFLOW, &pagep)) != 0)
+ break;
+ if (DBC_LOGGING(dbc)) {
+ tmp_dbt.data = p;
+ tmp_dbt.size = pagespace;
+ ZERO_LSN(null_lsn);
+ if ((ret = __db_big_log(dbp, dbc->txn,
+ &new_lsn, 0, DB_ADD_BIG, PGNO(pagep),
+ lastp ? PGNO(lastp) : PGNO_INVALID,
+ PGNO_INVALID, &tmp_dbt, &LSN(pagep),
+ lastp == NULL ? &null_lsn : &LSN(lastp),
+ &null_lsn)) != 0) {
+ /*
+ * On log failure, release the previous page
+ * and leave pagep in lastp so the final put
+ * below still releases it.
+ */
+ if (lastp != NULL)
+ (void)mpf->put(mpf,
+ lastp, DB_MPOOL_DIRTY);
+ lastp = pagep;
+ break;
+ }
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move LSN onto page. */
+ if (lastp != NULL)
+ LSN(lastp) = new_lsn;
+ LSN(pagep) = new_lsn;
+
+ P_INIT(pagep, dbp->pgsize,
+ PGNO(pagep), PGNO_INVALID, PGNO_INVALID, 0, P_OVERFLOW);
+ OV_LEN(pagep) = pagespace;
+ OV_REF(pagep) = 1;
+ memcpy((u_int8_t *)pagep + P_OVERHEAD(dbp), p, pagespace);
+
+ /*
+ * If this is the first entry, update the user's info.
+ * Otherwise, update the entry on the last page filled
+ * in and release that page.
+ */
+ if (lastp == NULL)
+ *pgnop = PGNO(pagep);
+ else {
+ lastp->next_pgno = PGNO(pagep);
+ pagep->prev_pgno = PGNO(lastp);
+ (void)mpf->put(mpf, lastp, DB_MPOOL_DIRTY);
+ }
+ lastp = pagep;
+ }
+ /* Release the final page of the chain (or the failed page). */
+ if (lastp != NULL &&
+ (t_ret = mpf->put(mpf, lastp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_ovref --
+ * Increment/decrement the reference count on an overflow page.
+ *
+ * adjust is added to OV_REF(h); it may be negative (callers such as
+ * __db_doff pass -1 to drop a reference).  The change is logged when
+ * logging is enabled and the page is written back dirty.
+ *
+ * PUBLIC: int __db_ovref __P((DBC *, db_pgno_t, int32_t));
+ */
+int
+__db_ovref(dbc, pgno, adjust)
+ DBC *dbc;
+ db_pgno_t pgno;
+ int32_t adjust;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
+ __db_pgerr(dbp, pgno, ret);
+ return (ret);
+ }
+
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __db_ovref_log(dbp,
+ dbc->txn, &LSN(h), 0, h->pgno, adjust, &LSN(h))) != 0) {
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+ }
+ } else
+ LSN_NOT_LOGGED(LSN(h));
+ OV_REF(h) += adjust;
+
+ (void)mpf->put(mpf, h, DB_MPOOL_DIRTY);
+ return (0);
+}
+
+/*
+ * __db_doff --
+ * Delete an offpage chain of overflow pages.
+ *
+ * Frees each page in the chain starting at pgno.  If the first page is
+ * shared (refcount > 1) the chain is kept and only the reference count
+ * is decremented.
+ *
+ * PUBLIC: int __db_doff __P((DBC *, db_pgno_t));
+ */
+int
+__db_doff(dbc, pgno)
+ DBC *dbc;
+ db_pgno_t pgno;
+{
+ DB *dbp;
+ PAGE *pagep;
+ DB_LSN null_lsn;
+ DB_MPOOLFILE *mpf;
+ DBT tmp_dbt;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ do {
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) {
+ __db_pgerr(dbp, pgno, ret);
+ return (ret);
+ }
+
+ DB_ASSERT(TYPE(pagep) == P_OVERFLOW);
+ /*
+ * If it's referenced by more than one key/data item,
+ * decrement the reference count and return.
+ */
+ if (OV_REF(pagep) > 1) {
+ (void)mpf->put(mpf, pagep, 0);
+ return (__db_ovref(dbc, pgno, -1));
+ }
+
+ if (DBC_LOGGING(dbc)) {
+ /* Log the page contents so the delete can be undone. */
+ tmp_dbt.data = (u_int8_t *)pagep + P_OVERHEAD(dbp);
+ tmp_dbt.size = OV_LEN(pagep);
+ ZERO_LSN(null_lsn);
+ if ((ret = __db_big_log(dbp, dbc->txn,
+ &LSN(pagep), 0, DB_REM_BIG,
+ PGNO(pagep), PREV_PGNO(pagep),
+ NEXT_PGNO(pagep), &tmp_dbt,
+ &LSN(pagep), &null_lsn, &null_lsn)) != 0) {
+ (void)mpf->put(mpf, pagep, 0);
+ return (ret);
+ }
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
+ /* Save the link before __db_free invalidates the page. */
+ pgno = pagep->next_pgno;
+ if ((ret = __db_free(dbc, pagep)) != 0)
+ return (ret);
+ } while (pgno != PGNO_INVALID);
+
+ return (0);
+}
+
+/*
+ * __db_moff --
+ * Match on overflow pages.
+ *
+ * Given a starting page number and a key, return <0, 0, >0 to indicate if the
+ * key on the page is less than, equal to or greater than the key specified.
+ * We optimize this by doing chunk at a time comparison unless the user has
+ * specified a comparison function. In this case, we need to materialize
+ * the entire object and call their comparison routine.
+ *
+ * PUBLIC: int __db_moff __P((DB *, const DBT *, db_pgno_t, u_int32_t,
+ * PUBLIC: int (*)(DB *, const DBT *, const DBT *), int *));
+ */
+int
+__db_moff(dbp, dbt, pgno, tlen, cmpfunc, cmpp)
+ DB *dbp;
+ const DBT *dbt;
+ db_pgno_t pgno;
+ u_int32_t tlen;
+ int (*cmpfunc) __P((DB *, const DBT *, const DBT *)), *cmpp;
+{
+ DBT local_dbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ void *buf;
+ u_int32_t bufsize, cmp_bytes, key_left;
+ u_int8_t *p1, *p2;
+ int ret;
+
+ mpf = dbp->mpf;
+
+ /*
+ * If there is a user-specified comparison function, build a
+ * contiguous copy of the key, and call it.
+ */
+ if (cmpfunc != NULL) {
+ memset(&local_dbt, 0, sizeof(local_dbt));
+ buf = NULL;
+ bufsize = 0;
+
+ if ((ret = __db_goff(dbp,
+ &local_dbt, tlen, pgno, &buf, &bufsize)) != 0)
+ return (ret);
+ /* Pass the key as the first argument */
+ *cmpp = cmpfunc(dbp, dbt, &local_dbt);
+ __os_free(dbp->dbenv, buf);
+ return (0);
+ }
+
+ /* While there are both keys to compare. */
+ for (*cmpp = 0, p1 = dbt->data,
+ key_left = dbt->size; key_left > 0 && pgno != PGNO_INVALID;) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0)
+ return (ret);
+
+ /* Compare only the bytes both the DBT and this page cover. */
+ cmp_bytes = OV_LEN(pagep) < key_left ? OV_LEN(pagep) : key_left;
+ tlen -= cmp_bytes;
+ key_left -= cmp_bytes;
+ for (p2 = (u_int8_t *)pagep + P_OVERHEAD(dbp);
+ cmp_bytes-- > 0; ++p1, ++p2)
+ if (*p1 != *p2) {
+ *cmpp = (long)*p1 - (long)*p2;
+ break;
+ }
+ pgno = NEXT_PGNO(pagep);
+ if ((ret = mpf->put(mpf, pagep, 0)) != 0)
+ return (ret);
+ if (*cmpp != 0)
+ return (0);
+ }
+ /* All compared bytes equal: decide by remaining lengths. */
+ if (key_left > 0) /* DBT is longer than the page key. */
+ *cmpp = 1;
+ else if (tlen > 0) /* DBT is shorter than the page key. */
+ *cmpp = -1;
+ else
+ *cmpp = 0;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_overflow --
+ * Verify overflow page.
+ *
+ * Runs the common data-page checks, then records the page's reference
+ * count and item length in the per-page verifier info.  Returns
+ * DB_VERIFY_BAD for recoverable problems, a hard error otherwise.
+ *
+ * PUBLIC: int __db_vrfy_overflow __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__db_vrfy_overflow(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ pip->refcount = OV_REF(h);
+ if (pip->refcount < 1) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: overflow page has zero reference count",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ /* Just store for now. */
+ pip->olen = HOFFSET(h);
+
+err: if ((t_ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_ovfl_structure --
+ * Walk a list of overflow pages, avoiding cycles and marking
+ * pages seen.
+ *
+ * Follows next_pgno links from the first page of an overflow chain,
+ * verifying page types, prev/next consistency, and that the chain's
+ * total length covers tlen bytes.  Visit counts in vdp->pgset provide
+ * the cycle/double-reference detection.
+ *
+ * PUBLIC: int __db_vrfy_ovfl_structure
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, u_int32_t));
+ */
+int
+__db_vrfy_ovfl_structure(dbp, vdp, pgno, tlen, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t tlen;
+ u_int32_t flags;
+{
+ DB *pgset;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t next, prev;
+ int isbad, p, ret, t_ret;
+ u_int32_t refcount;
+
+ pgset = vdp->pgset;
+ DB_ASSERT(pgset != NULL);
+ isbad = 0;
+
+ /* This shouldn't happen, but just to be sure. */
+ if (!IS_VALID_PGNO(pgno))
+ return (DB_VERIFY_BAD);
+
+ /*
+ * Check the first prev_pgno; it ought to be PGNO_INVALID,
+ * since there's no prev page.
+ */
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* The refcount is stored on the first overflow page. */
+ refcount = pip->refcount;
+
+ if (pip->type != P_OVERFLOW) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: overflow page of invalid type %lu",
+ (u_long)pgno, (u_long)pip->type));
+ ret = DB_VERIFY_BAD;
+ goto err; /* Unsafe to continue. */
+ }
+
+ prev = pip->prev_pgno;
+ if (prev != PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: first page in overflow chain has a prev_pgno %lu",
+ (u_long)pgno, (u_long)prev));
+ isbad = 1;
+ }
+
+ for (;;) {
+ /*
+ * This is slightly gross. Btree leaf pages reference
+ * individual overflow trees multiple times if the overflow page
+ * is the key to a duplicate set. The reference count does not
+ * reflect this multiple referencing. Thus, if this is called
+ * during the structure verification of a btree leaf page, we
+ * check to see whether we've seen it from a leaf page before
+ * and, if we have, adjust our count of how often we've seen it
+ * accordingly.
+ *
+ * (This will screw up if it's actually referenced--and
+ * correctly refcounted--from two different leaf pages, but
+ * that's a very unlikely brokenness that we're not checking for
+ * anyway.)
+ */
+
+ if (LF_ISSET(ST_OVFL_LEAF)) {
+ if (F_ISSET(pip, VRFY_OVFL_LEAFSEEN)) {
+ if ((ret =
+ __db_vrfy_pgset_dec(pgset, pgno)) != 0)
+ goto err;
+ } else
+ F_SET(pip, VRFY_OVFL_LEAFSEEN);
+ }
+
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ goto err;
+
+ /*
+ * We may have seen this elsewhere, if the overflow entry
+ * has been promoted to an internal page.
+ */
+ if ((u_int32_t)p > refcount) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: encountered twice in overflow traversal",
+ (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ goto err;
+
+ /* Keep a running tab on how much of the item we've seen. */
+ /*
+ * NOTE(review): tlen is unsigned, so if olen overshoots it
+ * this wraps; the "incomplete" check below still fires --
+ * confirm that is the intended diagnostic.
+ */
+ tlen -= pip->olen;
+
+ /* Send feedback to the application about our progress. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ next = pip->next_pgno;
+
+ /* Are we there yet? */
+ if (next == PGNO_INVALID)
+ break;
+
+ /*
+ * We've already checked this when we saved it, but just
+ * to be sure...
+ */
+ if (!IS_VALID_PGNO(next)) {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad next_pgno %lu on overflow page",
+ (u_long)pgno, (u_long)next));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /* Swap the current page's info for the next page's. */
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 ||
+ (ret = __db_vrfy_getpageinfo(vdp, next, &pip)) != 0)
+ return (ret);
+ if (pip->prev_pgno != pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad prev_pgno %lu on overflow page (should be %lu)",
+ (u_long)next, (u_long)pip->prev_pgno,
+ (u_long)pgno));
+ isbad = 1;
+ /*
+ * It's safe to continue because we have separate
+ * cycle detection.
+ */
+ }
+
+ pgno = next;
+ }
+
+ if (tlen > 0) {
+ isbad = 1;
+ EPRINT((dbp->dbenv,
+ "Page %lu: overflow item incomplete", (u_long)pgno));
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_safe_goff --
+ * Get an overflow item, very carefully, from an untrusted database,
+ * in the context of the salvager.
+ *
+ * Unlike __db_goff, this tolerates broken chains: it bounds every copy
+ * by the page size, marks pages visited to break cycles, and (with
+ * DB_AGGRESSIVE) returns whatever partial data it managed to collect.
+ *
+ * PUBLIC: int __db_safe_goff __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: DBT *, void **, u_int32_t));
+ */
+int
+__db_safe_goff(dbp, vdp, pgno, dbt, buf, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ DBT *dbt;
+ void **buf;
+ u_int32_t flags;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int ret, t_ret;
+ u_int32_t bytesgot, bytes;
+ u_int8_t *src, *dest;
+
+ mpf = dbp->mpf;
+ h = NULL;
+ ret = t_ret = 0;
+ bytesgot = bytes = 0;
+
+ while ((pgno != PGNO_INVALID) && (IS_VALID_PGNO(pgno))) {
+ /*
+ * Mark that we're looking at this page; if we've seen it
+ * already, quit.
+ */
+ if ((ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ break;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ break;
+
+ /*
+ * Make sure it's really an overflow page, unless we're
+ * being aggressive, in which case we pretend it is.
+ */
+ if (!LF_ISSET(DB_AGGRESSIVE) && TYPE(h) != P_OVERFLOW) {
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+ src = (u_int8_t *)h + P_OVERHEAD(dbp);
+ bytes = OV_LEN(h);
+
+ /* Clamp a corrupt length field to what the page can hold. */
+ if (bytes + P_OVERHEAD(dbp) > dbp->pgsize)
+ bytes = dbp->pgsize - P_OVERHEAD(dbp);
+
+ if ((ret = __os_realloc(dbp->dbenv,
+ bytesgot + bytes, buf)) != 0)
+ break;
+
+ dest = (u_int8_t *)*buf + bytesgot;
+ bytesgot += bytes;
+
+ memcpy(dest, src, bytes);
+
+ pgno = NEXT_PGNO(h);
+
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ break;
+ h = NULL;
+ }
+
+ /*
+ * If we're being aggressive, salvage a partial datum if there
+ * was an error somewhere along the way.
+ */
+ if (ret == 0 || LF_ISSET(DB_AGGRESSIVE)) {
+ dbt->size = bytesgot;
+ dbt->data = *buf;
+ }
+
+ /* If we broke out on error, don't leave pages pinned. */
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/storage/bdb/db/db_pr.c b/storage/bdb/db/db_pr.c
new file mode 100644
index 00000000000..235e7187f7c
--- /dev/null
+++ b/storage/bdb/db/db_pr.c
@@ -0,0 +1,1294 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_pr.c,v 11.84 2002/09/10 02:45:20 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+#include "dbinc/db_verify.h"
+
+static int __db_bmeta __P((DB *, FILE *, BTMETA *, u_int32_t));
+static int __db_hmeta __P((DB *, FILE *, HMETA *, u_int32_t));
+static void __db_meta __P((DB *, DBMETA *, FILE *, FN const *, u_int32_t));
+static const char *__db_pagetype_to_string __P((u_int32_t));
+static void __db_prdb __P((DB *, FILE *));
+static void __db_proff __P((void *, FILE *));
+static int __db_prtree __P((DB *, FILE *, u_int32_t));
+static int __db_qmeta __P((DB *, FILE *, QMETA *, u_int32_t));
+
+/*
+ * __db_loadme --
+ * A nice place to put a breakpoint.
+ *
+ * Does nothing useful; the __os_id call keeps the function from being
+ * optimized into nothing so a debugger breakpoint sticks.
+ *
+ * PUBLIC: void __db_loadme __P((void));
+ */
+void
+__db_loadme()
+{
+ u_int32_t id;
+
+ __os_id(&id);
+}
+
+/*
+ * __db_dump --
+ * Dump the tree to a file.
+ *
+ * op is a string of single-character options: 'a' dump page contents,
+ * 'h' accepted but ignored here, 'r' recovery-test mode.  name is the
+ * output file, or NULL for stdout.
+ *
+ * PUBLIC: int __db_dump __P((DB *, char *, char *));
+ */
+int
+__db_dump(dbp, op, name)
+ DB *dbp;
+ char *op, *name;
+{
+ FILE *fp;
+ u_int32_t flags;
+ int ret;
+
+ for (flags = 0; *op != '\0'; ++op)
+ switch (*op) {
+ case 'a':
+ LF_SET(DB_PR_PAGE);
+ break;
+ case 'h':
+ /* Accepted for compatibility; sets no flag. */
+ break;
+ case 'r':
+ LF_SET(DB_PR_RECOVERYTEST);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (name == NULL)
+ fp = stdout;
+ else {
+ if ((fp = fopen(name, "w")) == NULL)
+ return (__os_get_errno());
+ }
+
+ __db_prdb(dbp, fp);
+
+ fprintf(fp, "%s\n", DB_LINE);
+
+ ret = __db_prtree(dbp, fp, flags);
+
+ fflush(fp);
+ if (name != NULL)
+ fclose(fp);
+
+ return (ret);
+}
+
+/*
+ * __db_inmemdbflags --
+ * Call a callback for printing or other handling of strings associated
+ * with whatever in-memory DB structure flags are set.
+ *
+ * The FN table maps each DB_AM_* flag bit to a human-readable name;
+ * the callback decides how to render the matching entries.
+ *
+ * PUBLIC: void __db_inmemdbflags __P((u_int32_t, void *,
+ * PUBLIC: void (*)(u_int32_t, const FN *, void *)));
+ */
+void
+__db_inmemdbflags(flags, cookie, callback)
+ u_int32_t flags;
+ void *cookie;
+ void (*callback) __P((u_int32_t, const FN *, void *));
+{
+ static const FN fn[] = {
+ { DB_AM_CHKSUM, "checksumming" },
+ { DB_AM_CL_WRITER, "client replica writer" },
+ { DB_AM_COMPENSATE, "created by compensating transaction" },
+ { DB_AM_CREATED, "database created" },
+ { DB_AM_CREATED_MSTR, "encompassing file created" },
+ { DB_AM_DBM_ERROR, "dbm/ndbm error" },
+ { DB_AM_DELIMITER, "variable length" },
+ { DB_AM_DIRTY, "dirty reads" },
+ { DB_AM_DISCARD, "discard cached pages" },
+ { DB_AM_DUP, "duplicates" },
+ { DB_AM_DUPSORT, "sorted duplicates" },
+ { DB_AM_ENCRYPT, "encrypted" },
+ { DB_AM_FIXEDLEN, "fixed-length records" },
+ { DB_AM_INMEM, "in-memory" },
+ { DB_AM_IN_RENAME, "file is being renamed" },
+ { DB_AM_OPEN_CALLED, "DB->open called" },
+ { DB_AM_PAD, "pad value" },
+ { DB_AM_PGDEF, "default page size" },
+ { DB_AM_RDONLY, "read-only" },
+ { DB_AM_RECNUM, "Btree record numbers" },
+ { DB_AM_RECOVER, "opened for recovery" },
+ { DB_AM_RENUMBER, "renumber" },
+ { DB_AM_REVSPLITOFF, "no reverse splits" },
+ { DB_AM_SECONDARY, "secondary" },
+ { DB_AM_SNAPSHOT, "load on open" },
+ { DB_AM_SUBDB, "subdatabases" },
+ { DB_AM_SWAP, "needswap" },
+ { DB_AM_TXN, "transactional" },
+ { DB_AM_VERIFYING, "verifier" },
+ { 0, NULL }
+ };
+
+ callback(flags, fn, cookie);
+}
+
+/*
+ * __db_prdb --
+ * Print out the DB structure information.
+ *
+ * Dumps the handle's flags, then the access-method-specific internal
+ * structure (BTREE/HASH/QUEUE) to fp.
+ */
+static void
+__db_prdb(dbp, fp)
+ DB *dbp;
+ FILE *fp;
+{
+ BTREE *bt;
+ HASH *h;
+ QUEUE *q;
+
+ fprintf(fp,
+ "In-memory DB structure:\n%s: %#lx",
+ __db_dbtype_to_string(dbp->type), (u_long)dbp->flags);
+ __db_inmemdbflags(dbp->flags, fp, __db_prflags);
+ fprintf(fp, "\n");
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ /* Recno shares the btree internal structure. */
+ bt = dbp->bt_internal;
+ fprintf(fp, "bt_meta: %lu bt_root: %lu\n",
+ (u_long)bt->bt_meta, (u_long)bt->bt_root);
+ fprintf(fp, "bt_maxkey: %lu bt_minkey: %lu\n",
+ (u_long)bt->bt_maxkey, (u_long)bt->bt_minkey);
+ fprintf(fp, "bt_compare: %#lx bt_prefix: %#lx\n",
+ P_TO_ULONG(bt->bt_compare), P_TO_ULONG(bt->bt_prefix));
+ fprintf(fp, "bt_lpgno: %lu\n", (u_long)bt->bt_lpgno);
+ if (dbp->type == DB_RECNO) {
+ fprintf(fp,
+ "re_pad: %#lx re_delim: %#lx re_len: %lu re_source: %s\n",
+ (u_long)bt->re_pad, (u_long)bt->re_delim,
+ (u_long)bt->re_len,
+ bt->re_source == NULL ? "" : bt->re_source);
+ fprintf(fp, "re_modified: %d re_eof: %d re_last: %lu\n",
+ bt->re_modified, bt->re_eof, (u_long)bt->re_last);
+ }
+ break;
+ case DB_HASH:
+ h = dbp->h_internal;
+ fprintf(fp, "meta_pgno: %lu\n", (u_long)h->meta_pgno);
+ fprintf(fp, "h_ffactor: %lu\n", (u_long)h->h_ffactor);
+ fprintf(fp, "h_nelem: %lu\n", (u_long)h->h_nelem);
+ fprintf(fp, "h_hash: %#lx\n", P_TO_ULONG(h->h_hash));
+ break;
+ case DB_QUEUE:
+ q = dbp->q_internal;
+ fprintf(fp, "q_meta: %lu\n", (u_long)q->q_meta);
+ fprintf(fp, "q_root: %lu\n", (u_long)q->q_root);
+ fprintf(fp, "re_pad: %#lx re_len: %lu\n",
+ (u_long)q->re_pad, (u_long)q->re_len);
+ fprintf(fp, "rec_page: %lu\n", (u_long)q->rec_page);
+ fprintf(fp, "page_ext: %lu\n", (u_long)q->page_ext);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * __db_prtree --
+ * Print out the entire tree.
+ *
+ * Iterates every page from 0 through the mpool's last page number and
+ * prints each; Queue databases are handled by __db_prqueue instead.
+ */
+static int
+__db_prtree(dbp, fp, flags)
+ DB *dbp;
+ FILE *fp;
+ u_int32_t flags;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t i, last;
+ int ret;
+
+ mpf = dbp->mpf;
+
+ if (dbp->type == DB_QUEUE)
+ return (__db_prqueue(dbp, fp, flags));
+
+ /*
+ * Find out the page number of the last page in the database, then
+ * dump each page.
+ */
+ mpf->last_pgno(mpf, &last);
+ for (i = 0; i <= last; ++i) {
+ if ((ret = mpf->get(mpf, &i, 0, &h)) != 0)
+ return (ret);
+ (void)__db_prpage(dbp, h, fp, flags);
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __db_meta --
+ * Print out common metadata information.
+ *
+ * Prints the DBMETA fields shared by all access methods, walks and
+ * prints the free list (skipped in recovery-test mode so dumps diff
+ * cleanly), prints flags via the caller-supplied FN table, and dumps
+ * the file's unique id bytes.
+ */
+static void
+__db_meta(dbp, dbmeta, fp, fn, flags)
+ DB *dbp;
+ DBMETA *dbmeta;
+ FILE *fp;
+ FN const *fn;
+ u_int32_t flags;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int8_t *p;
+ int cnt, ret;
+ const char *sep;
+
+ mpf = dbp->mpf;
+
+ fprintf(fp, "\tmagic: %#lx\n", (u_long)dbmeta->magic);
+ fprintf(fp, "\tversion: %lu\n", (u_long)dbmeta->version);
+ fprintf(fp, "\tpagesize: %lu\n", (u_long)dbmeta->pagesize);
+ fprintf(fp, "\ttype: %lu\n", (u_long)dbmeta->type);
+ fprintf(fp, "\tkeys: %lu\trecords: %lu\n",
+ (u_long)dbmeta->key_count, (u_long)dbmeta->record_count);
+
+ if (!LF_ISSET(DB_PR_RECOVERYTEST)) {
+ /*
+ * If we're doing recovery testing, don't display the free
+ * list, it may have changed and that makes the dump diff
+ * not work.
+ */
+ fprintf(fp, "\tfree list: %lu", (u_long)dbmeta->free);
+ for (pgno = dbmeta->free,
+ cnt = 0, sep = ", "; pgno != PGNO_INVALID;) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
+ fprintf(fp,
+ "Unable to retrieve free-list page: %lu: %s\n",
+ (u_long)pgno, db_strerror(ret));
+ break;
+ }
+ /* Advance first; the printed value is the NEXT page. */
+ pgno = h->next_pgno;
+ (void)mpf->put(mpf, h, 0);
+ fprintf(fp, "%s%lu", sep, (u_long)pgno);
+ if (++cnt % 10 == 0) {
+ fprintf(fp, "\n");
+ cnt = 0;
+ sep = "\t";
+ } else
+ sep = ", ";
+ }
+ fprintf(fp, "\n");
+ fprintf(fp, "\tlast_pgno: %lu\n", (u_long)dbmeta->last_pgno);
+ }
+
+ if (fn != NULL) {
+ fprintf(fp, "\tflags: %#lx", (u_long)dbmeta->flags);
+ __db_prflags(dbmeta->flags, fn, fp);
+ fprintf(fp, "\n");
+ }
+
+ fprintf(fp, "\tuid: ");
+ for (p = (u_int8_t *)dbmeta->uid,
+ cnt = 0; cnt < DB_FILE_ID_LEN; ++cnt) {
+ fprintf(fp, "%x", *p++);
+ if (cnt < DB_FILE_ID_LEN - 1)
+ fprintf(fp, " ");
+ }
+ fprintf(fp, "\n");
+}
+
+/*
+ * __db_bmeta --
+ * Print out the btree meta-data page.
+ *
+ * Prints the common DBMETA fields (with btree flag names) followed by
+ * the btree-specific fields; recno fields only when relevant.
+ */
+static int
+__db_bmeta(dbp, fp, h, flags)
+ DB *dbp;
+ FILE *fp;
+ BTMETA *h;
+ u_int32_t flags;
+{
+ static const FN mfn[] = {
+ { BTM_DUP, "duplicates" },
+ { BTM_RECNO, "recno" },
+ { BTM_RECNUM, "btree:recnum" },
+ { BTM_FIXEDLEN, "recno:fixed-length" },
+ { BTM_RENUMBER, "recno:renumber" },
+ { BTM_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+
+ __db_meta(dbp, (DBMETA *)h, fp, mfn, flags);
+
+ fprintf(fp, "\tmaxkey: %lu minkey: %lu\n",
+ (u_long)h->maxkey, (u_long)h->minkey);
+ if (dbp->type == DB_RECNO)
+ fprintf(fp, "\tre_len: %#lx re_pad: %lu\n",
+ (u_long)h->re_len, (u_long)h->re_pad);
+ fprintf(fp, "\troot: %lu\n", (u_long)h->root);
+
+ return (0);
+}
+
+/*
+ * __db_hmeta --
+ * Print out the hash meta-data page.
+ *
+ * Prints the common DBMETA fields (with hash flag names) followed by
+ * the hash-specific fields and the spare-page table.
+ */
+static int
+__db_hmeta(dbp, fp, h, flags)
+ DB *dbp;
+ FILE *fp;
+ HMETA *h;
+ u_int32_t flags;
+{
+ static const FN mfn[] = {
+ { DB_HASH_DUP, "duplicates" },
+ { DB_HASH_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+ int i;
+
+ __db_meta(dbp, (DBMETA *)h, fp, mfn, flags);
+
+ fprintf(fp, "\tmax_bucket: %lu\n", (u_long)h->max_bucket);
+ fprintf(fp, "\thigh_mask: %#lx\n", (u_long)h->high_mask);
+ fprintf(fp, "\tlow_mask: %#lx\n", (u_long)h->low_mask);
+ fprintf(fp, "\tffactor: %lu\n", (u_long)h->ffactor);
+ fprintf(fp, "\tnelem: %lu\n", (u_long)h->nelem);
+ fprintf(fp, "\th_charkey: %#lx\n", (u_long)h->h_charkey);
+ fprintf(fp, "\tspare points: ");
+ for (i = 0; i < NCACHED; i++)
+ fprintf(fp, "%lu ", (u_long)h->spares[i]);
+ fprintf(fp, "\n");
+
+ return (0);
+}
+
+/*
+ * __db_qmeta --
+ * Print out the queue meta-data page.
+ *
+ * Prints the common DBMETA fields (queue has no flag names) followed
+ * by the queue-specific record and extent fields.
+ */
+static int
+__db_qmeta(dbp, fp, h, flags)
+ DB *dbp;
+ FILE *fp;
+ QMETA *h;
+ u_int32_t flags;
+{
+ __db_meta(dbp, (DBMETA *)h, fp, NULL, flags);
+
+ fprintf(fp, "\tfirst_recno: %lu\n", (u_long)h->first_recno);
+ fprintf(fp, "\tcur_recno: %lu\n", (u_long)h->cur_recno);
+ fprintf(fp, "\tre_len: %#lx re_pad: %lu\n",
+ (u_long)h->re_len, (u_long)h->re_pad);
+ fprintf(fp, "\trec_page: %lu\n", (u_long)h->rec_page);
+ fprintf(fp, "\tpage_ext: %lu\n", (u_long)h->page_ext);
+
+ return (0);
+}
+
+/*
+ * __db_prnpage
+ * -- Print out a specific page.
+ *
+ * PUBLIC: int __db_prnpage __P((DB *, db_pgno_t, FILE *));
+ */
+int
+__db_prnpage(dbp, pgno, fp)
+ DB *dbp;
+ db_pgno_t pgno;
+ FILE *fp;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int ret, t_ret;
+
+ mpf = dbp->mpf;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ ret = __db_prpage(dbp, h, fp, DB_PR_PAGE);
+
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_prpage
+ * -- Print out a page.
+ *
+ * PUBLIC: int __db_prpage __P((DB *, PAGE *, FILE *, u_int32_t));
+ */
+int
+__db_prpage(dbp, h, fp, flags)
+ DB *dbp;
+ PAGE *h;
+ FILE *fp;
+ u_int32_t flags;
+{
+ BINTERNAL *bi;
+ BKEYDATA *bk;
+ HOFFPAGE a_hkd;
+ QAMDATA *qp, *qep;
+ RINTERNAL *ri;
+ db_indx_t dlen, len, i, *inp;
+ db_pgno_t pgno;
+ db_recno_t recno;
+ u_int32_t pagesize, qlen;
+ u_int8_t *ep, *hk, *p;
+ int deleted, ret;
+ const char *s;
+ void *sp;
+
+ /*
+ * If we're doing recovery testing and this page is P_INVALID,
+ * assume it's a page that's on the free list, and don't display it.
+ */
+ if (LF_ISSET(DB_PR_RECOVERYTEST) && TYPE(h) == P_INVALID)
+ return (0);
+
+ s = __db_pagetype_to_string(TYPE(h));
+ if (s == NULL) {
+ fprintf(fp, "ILLEGAL PAGE TYPE: page: %lu type: %lu\n",
+ (u_long)h->pgno, (u_long)TYPE(h));
+ return (1);
+ }
+
+ /*
+ * !!!
+ * Find out the page size. We don't want to do it the "right" way,
+ * by reading the value from the meta-data page, that's going to be
+ * slow. Reach down into the mpool region.
+ */
+ pagesize = (u_int32_t)dbp->mpf->mfp->stat.st_pagesize;
+
+ /* Page number, page type. */
+ fprintf(fp, "page %lu: %s level: %lu",
+ (u_long)h->pgno, s, (u_long)h->level);
+
+ /* Record count. */
+ if (TYPE(h) == P_IBTREE ||
+ TYPE(h) == P_IRECNO || (TYPE(h) == P_LRECNO &&
+ h->pgno == ((BTREE *)dbp->bt_internal)->bt_root))
+ fprintf(fp, " records: %lu", (u_long)RE_NREC(h));
+
+ /* LSN. */
+ if (!LF_ISSET(DB_PR_RECOVERYTEST))
+ fprintf(fp, " (lsn.file: %lu lsn.offset: %lu)\n",
+ (u_long)LSN(h).file, (u_long)LSN(h).offset);
+
+ switch (TYPE(h)) {
+ case P_BTREEMETA:
+ return (__db_bmeta(dbp, fp, (BTMETA *)h, flags));
+ case P_HASHMETA:
+ return (__db_hmeta(dbp, fp, (HMETA *)h, flags));
+ case P_QAMMETA:
+ return (__db_qmeta(dbp, fp, (QMETA *)h, flags));
+ case P_QAMDATA: /* Should be meta->start. */
+ if (!LF_ISSET(DB_PR_PAGE))
+ return (0);
+
+ qlen = ((QUEUE *)dbp->q_internal)->re_len;
+ recno = (h->pgno - 1) * QAM_RECNO_PER_PAGE(dbp) + 1;
+ i = 0;
+ qep = (QAMDATA *)((u_int8_t *)h + pagesize - qlen);
+ for (qp = QAM_GET_RECORD(dbp, h, i); qp < qep;
+ recno++, i++, qp = QAM_GET_RECORD(dbp, h, i)) {
+ if (!F_ISSET(qp, QAM_SET))
+ continue;
+
+ fprintf(fp, "%s",
+ F_ISSET(qp, QAM_VALID) ? "\t" : " D");
+ fprintf(fp, "[%03lu] %4lu ", (u_long)recno,
+ (u_long)((u_int8_t *)qp - (u_int8_t *)h));
+ __db_pr(qp->data, qlen, fp);
+ }
+ return (0);
+ }
+
+ /* LSN. */
+ if (LF_ISSET(DB_PR_RECOVERYTEST))
+ fprintf(fp, " (lsn.file: %lu lsn.offset: %lu)\n",
+ (u_long)LSN(h).file, (u_long)LSN(h).offset);
+
+ s = "\t";
+ if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) {
+ fprintf(fp, "%sprev: %4lu next: %4lu",
+ s, (u_long)PREV_PGNO(h), (u_long)NEXT_PGNO(h));
+ s = " ";
+ }
+ if (TYPE(h) == P_OVERFLOW) {
+ fprintf(fp, "%sref cnt: %4lu ", s, (u_long)OV_REF(h));
+ __db_pr((u_int8_t *)h + P_OVERHEAD(dbp), OV_LEN(h), fp);
+ return (0);
+ }
+ fprintf(fp, "%sentries: %4lu", s, (u_long)NUM_ENT(h));
+ fprintf(fp, " offset: %4lu\n", (u_long)HOFFSET(h));
+
+ if (TYPE(h) == P_INVALID || !LF_ISSET(DB_PR_PAGE))
+ return (0);
+
+ ret = 0;
+ inp = P_INP(dbp, h);
+ for (i = 0; i < NUM_ENT(h); i++) {
+ if ((db_alignp_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) <
+ (db_alignp_t)(P_OVERHEAD(dbp)) ||
+ (size_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) >= pagesize) {
+ fprintf(fp,
+ "ILLEGAL PAGE OFFSET: indx: %lu of %lu\n",
+ (u_long)i, (u_long)inp[i]);
+ ret = EINVAL;
+ continue;
+ }
+ deleted = 0;
+ switch (TYPE(h)) {
+ case P_HASH:
+ case P_IBTREE:
+ case P_IRECNO:
+ sp = P_ENTRY(dbp, h, i);
+ break;
+ case P_LBTREE:
+ sp = P_ENTRY(dbp, h, i);
+ deleted = i % 2 == 0 &&
+ B_DISSET(GET_BKEYDATA(dbp, h, i + O_INDX)->type);
+ break;
+ case P_LDUP:
+ case P_LRECNO:
+ sp = P_ENTRY(dbp, h, i);
+ deleted = B_DISSET(GET_BKEYDATA(dbp, h, i)->type);
+ break;
+ default:
+ fprintf(fp,
+ "ILLEGAL PAGE ITEM: %lu\n", (u_long)TYPE(h));
+ ret = EINVAL;
+ continue;
+ }
+ fprintf(fp, "%s", deleted ? " D" : "\t");
+ fprintf(fp, "[%03lu] %4lu ", (u_long)i, (u_long)inp[i]);
+ switch (TYPE(h)) {
+ case P_HASH:
+ hk = sp;
+ switch (HPAGE_PTYPE(hk)) {
+ case H_OFFDUP:
+ memcpy(&pgno,
+ HOFFDUP_PGNO(hk), sizeof(db_pgno_t));
+ fprintf(fp,
+ "%4lu [offpage dups]\n", (u_long)pgno);
+ break;
+ case H_DUPLICATE:
+ /*
+ * If this is the first item on a page, then
+ * we cannot figure out how long it is, so
+ * we only print the first one in the duplicate
+ * set.
+ */
+ if (i != 0)
+ len = LEN_HKEYDATA(dbp, h, 0, i);
+ else
+ len = 1;
+
+ fprintf(fp, "Duplicates:\n");
+ for (p = HKEYDATA_DATA(hk),
+ ep = p + len; p < ep;) {
+ memcpy(&dlen, p, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+ fprintf(fp, "\t\t");
+ __db_pr(p, dlen, fp);
+ p += sizeof(db_indx_t) + dlen;
+ }
+ break;
+ case H_KEYDATA:
+ __db_pr(HKEYDATA_DATA(hk),
+ LEN_HKEYDATA(dbp, h, i == 0 ?
+ pagesize : 0, i), fp);
+ break;
+ case H_OFFPAGE:
+ memcpy(&a_hkd, hk, HOFFPAGE_SIZE);
+ fprintf(fp,
+ "overflow: total len: %4lu page: %4lu\n",
+ (u_long)a_hkd.tlen, (u_long)a_hkd.pgno);
+ break;
+ }
+ break;
+ case P_IBTREE:
+ bi = sp;
+ fprintf(fp, "count: %4lu pgno: %4lu type: %4lu",
+ (u_long)bi->nrecs, (u_long)bi->pgno,
+ (u_long)bi->type);
+ switch (B_TYPE(bi->type)) {
+ case B_KEYDATA:
+ __db_pr(bi->data, bi->len, fp);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ __db_proff(bi->data, fp);
+ break;
+ default:
+ fprintf(fp, "ILLEGAL BINTERNAL TYPE: %lu\n",
+ (u_long)B_TYPE(bi->type));
+ ret = EINVAL;
+ break;
+ }
+ break;
+ case P_IRECNO:
+ ri = sp;
+ fprintf(fp, "entries %4lu pgno %4lu\n",
+ (u_long)ri->nrecs, (u_long)ri->pgno);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ bk = sp;
+ switch (B_TYPE(bk->type)) {
+ case B_KEYDATA:
+ __db_pr(bk->data, bk->len, fp);
+ break;
+ case B_DUPLICATE:
+ case B_OVERFLOW:
+ __db_proff(bk, fp);
+ break;
+ default:
+ fprintf(fp,
+ "ILLEGAL DUPLICATE/LBTREE/LRECNO TYPE: %lu\n",
+ (u_long)B_TYPE(bk->type));
+ ret = EINVAL;
+ break;
+ }
+ break;
+ }
+ }
+ (void)fflush(fp);
+ return (ret);
+}
+
+/*
+ * __db_pr --
+ * Print out a data element.
+ *
+ * PUBLIC: void __db_pr __P((u_int8_t *, u_int32_t, FILE *));
+ */
+void
+__db_pr(p, len, fp)
+ u_int8_t *p;
+ u_int32_t len;
+ FILE *fp;
+{
+ u_int lastch;
+ int i;
+
+ fprintf(fp, "len: %3lu", (u_long)len);
+ lastch = '.';
+ if (len != 0) {
+ fprintf(fp, " data: ");
+ for (i = len <= 20 ? len : 20; i > 0; --i, ++p) {
+ lastch = *p;
+ if (isprint((int)*p) || *p == '\n')
+ fprintf(fp, "%c", *p);
+ else
+ fprintf(fp, "0x%.2x", (u_int)*p);
+ }
+ if (len > 20) {
+ fprintf(fp, "...");
+ lastch = '.';
+ }
+ }
+ if (lastch != '\n')
+ fprintf(fp, "\n");
+}
+
+/*
+ * __db_prdbt --
+ * Print out a DBT data element.
+ *
+ * PUBLIC: int __db_prdbt __P((DBT *, int, const char *, void *,
+ * PUBLIC: int (*)(void *, const void *), int, VRFY_DBINFO *));
+ */
+int
+__db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp)
+ DBT *dbtp;
+ int checkprint;
+ const char *prefix;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ int is_recno;
+ VRFY_DBINFO *vdp;
+{
+ static const char hex[] = "0123456789abcdef";
+ db_recno_t recno;
+ u_int32_t len;
+ int ret;
+#define DBTBUFLEN 100
+ char *p, *hp, buf[DBTBUFLEN], hbuf[DBTBUFLEN];
+
+ if (vdp != NULL) {
+ /*
+ * If vdp is non-NULL, we might be the first key in the
+ * "fake" subdatabase used for key/data pairs we can't
+ * associate with a known subdb.
+ *
+ * Check and clear the SALVAGE_PRINTHEADER flag; if
+ * it was set, print a subdatabase header.
+ */
+ if (F_ISSET(vdp, SALVAGE_PRINTHEADER))
+ (void)__db_prheader(NULL, "__OTHER__", 0, 0,
+ handle, callback, vdp, 0);
+ F_CLR(vdp, SALVAGE_PRINTHEADER);
+ F_SET(vdp, SALVAGE_PRINTFOOTER);
+
+ /*
+ * Even if the printable flag wasn't set by our immediate
+ * caller, it may be set on a salvage-wide basis.
+ */
+ if (F_ISSET(vdp, SALVAGE_PRINTABLE))
+ checkprint = 1;
+ }
+
+ /*
+ * !!!
+ * This routine is the routine that dumps out items in the format
+ * used by db_dump(1) and db_load(1). This means that the format
+ * cannot change.
+ */
+ if (prefix != NULL && (ret = callback(handle, prefix)) != 0)
+ return (ret);
+ if (is_recno) {
+ /*
+ * We're printing a record number, and this has to be done
+ * in a platform-independent way. So we use the numeral in
+ * straight ASCII.
+ */
+ (void)__ua_memcpy(&recno, dbtp->data, sizeof(recno));
+ snprintf(buf, DBTBUFLEN, "%lu", (u_long)recno);
+
+ /* If we're printing data as hex, print keys as hex too. */
+ if (!checkprint) {
+ for (len = (u_int32_t)strlen(buf), p = buf, hp = hbuf;
+ len-- > 0; ++p) {
+ *hp++ = hex[(u_int8_t)(*p & 0xf0) >> 4];
+ *hp++ = hex[*p & 0x0f];
+ }
+ *hp = '\0';
+ ret = callback(handle, hbuf);
+ } else
+ ret = callback(handle, buf);
+
+ if (ret != 0)
+ return (ret);
+ } else if (checkprint) {
+ for (len = dbtp->size, p = dbtp->data; len--; ++p)
+ if (isprint((int)*p)) {
+ if (*p == '\\' &&
+ (ret = callback(handle, "\\")) != 0)
+ return (ret);
+ snprintf(buf, DBTBUFLEN, "%c", *p);
+ if ((ret = callback(handle, buf)) != 0)
+ return (ret);
+ } else {
+ snprintf(buf, DBTBUFLEN, "\\%c%c",
+ hex[(u_int8_t)(*p & 0xf0) >> 4],
+ hex[*p & 0x0f]);
+ if ((ret = callback(handle, buf)) != 0)
+ return (ret);
+ }
+ } else
+ for (len = dbtp->size, p = dbtp->data; len--; ++p) {
+ snprintf(buf, DBTBUFLEN, "%c%c",
+ hex[(u_int8_t)(*p & 0xf0) >> 4],
+ hex[*p & 0x0f]);
+ if ((ret = callback(handle, buf)) != 0)
+ return (ret);
+ }
+
+ return (callback(handle, "\n"));
+}
+
+/*
+ * __db_proff --
+ * Print out an off-page element.
+ */
+static void
+__db_proff(vp, fp)
+ void *vp;
+ FILE *fp;
+{
+ BOVERFLOW *bo;
+
+ bo = vp;
+ switch (B_TYPE(bo->type)) {
+ case B_OVERFLOW:
+ fprintf(fp, "overflow: total len: %4lu page: %4lu\n",
+ (u_long)bo->tlen, (u_long)bo->pgno);
+ break;
+ case B_DUPLICATE:
+ fprintf(fp, "duplicate: page: %4lu\n", (u_long)bo->pgno);
+ break;
+ }
+}
+
+/*
+ * __db_prflags --
+ * Print out flags values.
+ *
+ * PUBLIC: void __db_prflags __P((u_int32_t, const FN *, void *));
+ */
+void
+__db_prflags(flags, fn, vfp)
+ u_int32_t flags;
+ FN const *fn;
+ void *vfp;
+{
+ FILE *fp;
+ const FN *fnp;
+ int found;
+ const char *sep;
+
+ /*
+ * We pass the FILE * through a void * so that we can use
+ * this function as as a callback.
+ */
+ fp = (FILE *)vfp;
+
+ sep = " (";
+ for (found = 0, fnp = fn; fnp->mask != 0; ++fnp)
+ if (LF_ISSET(fnp->mask)) {
+ fprintf(fp, "%s%s", sep, fnp->name);
+ sep = ", ";
+ found = 1;
+ }
+ if (found)
+ fprintf(fp, ")");
+}
+
+/*
+ * __db_dbtype_to_string --
+ * Return the name of the database type.
+ * PUBLIC: const char * __db_dbtype_to_string __P((DBTYPE));
+ */
+const char *
+__db_dbtype_to_string(type)
+ DBTYPE type;
+{
+ switch (type) {
+ case DB_BTREE:
+ return ("btree");
+ case DB_HASH:
+ return ("hash");
+ case DB_RECNO:
+ return ("recno");
+ case DB_QUEUE:
+ return ("queue");
+ default:
+ return ("UNKNOWN TYPE");
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * __db_pagetype_to_string --
+ * Return the name of the specified page type.
+ */
+static const char *
+__db_pagetype_to_string(type)
+ u_int32_t type;
+{
+ char *s;
+
+ s = NULL;
+ switch (type) {
+ case P_BTREEMETA:
+ s = "btree metadata";
+ break;
+ case P_LDUP:
+ s = "duplicate";
+ break;
+ case P_HASH:
+ s = "hash";
+ break;
+ case P_HASHMETA:
+ s = "hash metadata";
+ break;
+ case P_IBTREE:
+ s = "btree internal";
+ break;
+ case P_INVALID:
+ s = "invalid";
+ break;
+ case P_IRECNO:
+ s = "recno internal";
+ break;
+ case P_LBTREE:
+ s = "btree leaf";
+ break;
+ case P_LRECNO:
+ s = "recno leaf";
+ break;
+ case P_OVERFLOW:
+ s = "overflow";
+ break;
+ case P_QAMMETA:
+ s = "queue metadata";
+ break;
+ case P_QAMDATA:
+ s = "queue";
+ break;
+ default:
+ /* Just return a NULL. */
+ break;
+ }
+ return (s);
+}
+
+/*
+ * __db_prheader --
+ * Write out header information in the format expected by db_load.
+ *
+ * PUBLIC: int __db_prheader __P((DB *, char *, int, int, void *,
+ * PUBLIC: int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t));
+ */
+int
+__db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
+ DB *dbp;
+ char *subname;
+ int pflag, keyflag;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta_pgno;
+{
+ DB_BTREE_STAT *btsp;
+ DB_ENV *dbenv;
+ DB_HASH_STAT *hsp;
+ DB_QUEUE_STAT *qsp;
+ DBT dbt;
+ VRFY_PAGEINFO *pip;
+ char *buf;
+ int buflen, ret, t_ret;
+ u_int32_t dbtype;
+
+ btsp = NULL;
+ hsp = NULL;
+ qsp = NULL;
+ ret = 0;
+ buf = NULL;
+ COMPQUIET(buflen, 0);
+
+ if (dbp == NULL)
+ dbenv = NULL;
+ else
+ dbenv = dbp->dbenv;
+
+ /*
+ * If we've been passed a verifier statistics object, use
+ * that; we're being called in a context where dbp->stat
+ * is unsafe.
+ *
+ * Also, the verifier may set the pflag on a per-salvage basis.
+ * If so, respect that.
+ */
+ if (vdp != NULL) {
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0)
+ return (ret);
+
+ if (F_ISSET(vdp, SALVAGE_PRINTABLE))
+ pflag = 1;
+ } else
+ pip = NULL;
+
+ /*
+ * If dbp is NULL, we're being called from inside __db_prdbt,
+ * and this is a special subdatabase for "lost" items. Make it a btree.
+ * Otherwise, set dbtype to the appropriate type for the specified
+ * meta page, or the type of the dbp.
+ */
+ if (dbp == NULL)
+ dbtype = DB_BTREE;
+ else if (pip != NULL)
+ switch (pip->type) {
+ case P_BTREEMETA:
+ if (F_ISSET(pip, VRFY_IS_RECNO))
+ dbtype = DB_RECNO;
+ else
+ dbtype = DB_BTREE;
+ break;
+ case P_HASHMETA:
+ dbtype = DB_HASH;
+ break;
+ default:
+ /*
+ * If the meta page is of a bogus type, it's
+ * because we have a badly corrupt database.
+ * (We must be in the verifier for pip to be non-NULL.)
+ * Pretend we're a Btree and salvage what we can.
+ */
+ DB_ASSERT(F_ISSET(dbp, DB_AM_VERIFYING));
+ dbtype = DB_BTREE;
+ break;
+ }
+ else
+ dbtype = dbp->type;
+
+ if ((ret = callback(handle, "VERSION=3\n")) != 0)
+ goto err;
+ if (pflag) {
+ if ((ret = callback(handle, "format=print\n")) != 0)
+ goto err;
+ } else if ((ret = callback(handle, "format=bytevalue\n")) != 0)
+ goto err;
+
+ /*
+ * 64 bytes is long enough, as a minimum bound, for any of the
+ * fields besides subname. Subname uses __db_prdbt and therefore
+ * does not need buffer space here.
+ */
+ buflen = 64;
+ if ((ret = __os_malloc(dbenv, buflen, &buf)) != 0)
+ goto err;
+ if (subname != NULL) {
+ snprintf(buf, buflen, "database=");
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.data = subname;
+ dbt.size = (u_int32_t)strlen(subname);
+ if ((ret = __db_prdbt(&dbt,
+ 1, NULL, handle, callback, 0, NULL)) != 0)
+ goto err;
+ }
+ switch (dbtype) {
+ case DB_BTREE:
+ if ((ret = callback(handle, "type=btree\n")) != 0)
+ goto err;
+ if (pip != NULL) {
+ if (F_ISSET(pip, VRFY_HAS_RECNUMS))
+ if ((ret =
+ callback(handle, "recnum=1\n")) != 0)
+ goto err;
+ if (pip->bt_maxkey != 0) {
+ snprintf(buf, buflen,
+ "bt_maxkey=%lu\n", (u_long)pip->bt_maxkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (pip->bt_minkey != 0 &&
+ pip->bt_minkey != DEFMINKEYPAGE) {
+ snprintf(buf, buflen,
+ "bt_minkey=%lu\n", (u_long)pip->bt_minkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &btsp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ if (F_ISSET(dbp, DB_AM_RECNUM))
+ if ((ret = callback(handle, "recnum=1\n")) != 0)
+ goto err;
+ if (btsp->bt_maxkey != 0) {
+ snprintf(buf, buflen,
+ "bt_maxkey=%lu\n", (u_long)btsp->bt_maxkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (btsp->bt_minkey != 0 && btsp->bt_minkey != DEFMINKEYPAGE) {
+ snprintf(buf, buflen,
+ "bt_minkey=%lu\n", (u_long)btsp->bt_minkey);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_HASH:
+ if ((ret = callback(handle, "type=hash\n")) != 0)
+ goto err;
+ if (pip != NULL) {
+ if (pip->h_ffactor != 0) {
+ snprintf(buf, buflen,
+ "h_ffactor=%lu\n", (u_long)pip->h_ffactor);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (pip->h_nelem != 0) {
+ snprintf(buf, buflen,
+ "h_nelem=%lu\n", (u_long)pip->h_nelem);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &hsp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ if (hsp->hash_ffactor != 0) {
+ snprintf(buf, buflen,
+ "h_ffactor=%lu\n", (u_long)hsp->hash_ffactor);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (hsp->hash_nkeys != 0) {
+ snprintf(buf, buflen,
+ "h_nelem=%lu\n", (u_long)hsp->hash_nkeys);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_QUEUE:
+ if ((ret = callback(handle, "type=queue\n")) != 0)
+ goto err;
+ if (vdp != NULL) {
+ snprintf(buf,
+ buflen, "re_len=%lu\n", (u_long)vdp->re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &qsp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ snprintf(buf, buflen, "re_len=%lu\n", (u_long)qsp->qs_re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ if (qsp->qs_re_pad != 0 && qsp->qs_re_pad != ' ') {
+ snprintf(buf, buflen, "re_pad=%#x\n", qsp->qs_re_pad);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (qsp->qs_extentsize != 0) {
+ snprintf(buf, buflen,
+ "extentsize=%lu\n", (u_long)qsp->qs_extentsize);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_RECNO:
+ if ((ret = callback(handle, "type=recno\n")) != 0)
+ goto err;
+ if (pip != NULL) {
+ if (F_ISSET(pip, VRFY_IS_RRECNO))
+ if ((ret =
+ callback(handle, "renumber=1\n")) != 0)
+ goto err;
+ if (pip->re_len > 0) {
+ snprintf(buf, buflen,
+ "re_len=%lu\n", (u_long)pip->re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ }
+ if ((ret = dbp->stat(dbp, &btsp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto err;
+ }
+ if (F_ISSET(dbp, DB_AM_RENUMBER))
+ if ((ret = callback(handle, "renumber=1\n")) != 0)
+ goto err;
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
+ snprintf(buf, buflen,
+ "re_len=%lu\n", (u_long)btsp->bt_re_len);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (btsp->bt_re_pad != 0 && btsp->bt_re_pad != ' ') {
+ snprintf(buf, buflen, "re_pad=%#x\n", btsp->bt_re_pad);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ break;
+ case DB_UNKNOWN:
+ DB_ASSERT(0); /* Impossible. */
+ __db_err(dbp->dbenv, "Impossible DB type in __db_prheader");
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (pip != NULL) {
+ if (F_ISSET(pip, VRFY_HAS_DUPS))
+ if ((ret = callback(handle, "duplicates=1\n")) != 0)
+ goto err;
+ if (F_ISSET(pip, VRFY_HAS_DUPSORT))
+ if ((ret = callback(handle, "dupsort=1\n")) != 0)
+ goto err;
+ /* We should handle page size. XXX */
+ } else {
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ if ((ret = callback(handle, "chksum=1\n")) != 0)
+ goto err;
+ if (F_ISSET(dbp, DB_AM_DUP))
+ if ((ret = callback(handle, "duplicates=1\n")) != 0)
+ goto err;
+ if (F_ISSET(dbp, DB_AM_DUPSORT))
+ if ((ret = callback(handle, "dupsort=1\n")) != 0)
+ goto err;
+ if (!F_ISSET(dbp, DB_AM_PGDEF)) {
+ snprintf(buf, buflen,
+ "db_pagesize=%lu\n", (u_long)dbp->pgsize);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ }
+
+ if (keyflag && (ret = callback(handle, "keys=1\n")) != 0)
+ goto err;
+
+ ret = callback(handle, "HEADER=END\n");
+
+err: if (pip != NULL &&
+ (t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ if (btsp != NULL)
+ __os_ufree(dbenv, btsp);
+ if (hsp != NULL)
+ __os_ufree(dbenv, hsp);
+ if (qsp != NULL)
+ __os_ufree(dbenv, qsp);
+ if (buf != NULL)
+ __os_free(dbenv, buf);
+
+ return (ret);
+}
+
+/*
+ * __db_prfooter --
+ * Print the footer that marks the end of a DB dump. This is trivial,
+ * but for consistency's sake we don't want to put its literal contents
+ * in multiple places.
+ *
+ * PUBLIC: int __db_prfooter __P((void *, int (*)(void *, const void *)));
+ */
+int
+__db_prfooter(handle, callback)
+ void *handle;
+ int (*callback) __P((void *, const void *));
+{
+ return (callback(handle, "DATA=END\n"));
+}
diff --git a/storage/bdb/db/db_rec.c b/storage/bdb/db/db_rec.c
new file mode 100644
index 00000000000..303ab2fe1d4
--- /dev/null
+++ b/storage/bdb/db/db_rec.c
@@ -0,0 +1,897 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_rec.c,v 11.35 2002/08/08 03:57:49 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/log.h"
+#include "dbinc/hash.h"
+
+/*
+ * PUBLIC: int __db_addrem_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ *
+ * This log message is generated whenever we add or remove a duplicate
+ * to/from a duplicate page. On recover, we just do the opposite.
+ */
+int
+__db_addrem_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_addrem_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t change;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_addrem_print);
+ REC_INTRO(__db_addrem_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+ change = 0;
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_DUP) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_DUP)) {
+
+ /* Need to redo an add, or undo a delete. */
+ if ((ret = __db_pitem(dbc, pagep, argp->indx, argp->nbytes,
+ argp->hdr.size == 0 ? NULL : &argp->hdr,
+ argp->dbt.size == 0 ? NULL : &argp->dbt)) != 0)
+ goto out;
+
+ change = DB_MPOOL_DIRTY;
+
+ } else if ((cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_ADD_DUP) ||
+ (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_DUP)) {
+ /* Need to undo an add, or redo a delete. */
+ if ((ret = __db_ditem(dbc,
+ pagep, argp->indx, argp->nbytes)) != 0)
+ goto out;
+ change = DB_MPOOL_DIRTY;
+ }
+
+ if (change) {
+ if (DB_REDO(op))
+ LSN(pagep) = *lsnp;
+ else
+ LSN(pagep) = argp->pagelsn;
+ }
+
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * PUBLIC: int __db_big_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_big_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_big_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t change;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_big_print);
+ REC_INTRO(__db_big_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto ppage;
+ } else
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ /*
+ * There are three pages we need to check. The one on which we are
+ * adding data, the previous one whose next_pointer may have
+ * been updated, and the next one whose prev_pointer may have
+ * been updated.
+ */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+ change = 0;
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_BIG)) {
+ /* We are either redo-ing an add, or undoing a delete. */
+ P_INIT(pagep, file_dbp->pgsize, argp->pgno, argp->prev_pgno,
+ argp->next_pgno, 0, P_OVERFLOW);
+ OV_LEN(pagep) = argp->dbt.size;
+ OV_REF(pagep) = 1;
+ memcpy((u_int8_t *)pagep + P_OVERHEAD(file_dbp), argp->dbt.data,
+ argp->dbt.size);
+ PREV_PGNO(pagep) = argp->prev_pgno;
+ change = DB_MPOOL_DIRTY;
+ } else if ((cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_ADD_BIG) ||
+ (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_BIG)) {
+ /*
+ * We are either undo-ing an add or redo-ing a delete.
+ * The page is about to be reclaimed in either case, so
+ * there really isn't anything to do here.
+ */
+ change = DB_MPOOL_DIRTY;
+ }
+ if (change)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /*
+ * We only delete a whole chain of overflow.
+ * Each page is handled individually
+ */
+ if (argp->opcode == DB_REM_BIG)
+ goto done;
+
+ /* Now check the previous page. */
+ppage: if (argp->prev_pgno != PGNO_INVALID) {
+ change = 0;
+ if ((ret = mpf->get(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto npage;
+ } else
+ if ((ret = mpf->get(mpf, &argp->prev_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+
+ if (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) {
+ /* Redo add, undo delete. */
+ NEXT_PGNO(pagep) = argp->pgno;
+ change = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 &&
+ DB_UNDO(op) && argp->opcode == DB_ADD_BIG) {
+ /* Redo delete, undo add. */
+ NEXT_PGNO(pagep) = argp->next_pgno;
+ change = DB_MPOOL_DIRTY;
+ }
+ if (change)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->prevlsn;
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
+ goto out;
+ }
+ pagep = NULL;
+
+ /* Now check the next page. Can only be set on a delete. */
+npage: if (argp->next_pgno != PGNO_INVALID) {
+ change = 0;
+ if ((ret = mpf->get(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ goto done;
+ } else
+ if ((ret = mpf->get(mpf, &argp->next_pgno,
+ DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ PREV_PGNO(pagep) = PGNO_INVALID;
+ change = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ PREV_PGNO(pagep) = argp->pgno;
+ change = DB_MPOOL_DIRTY;
+ }
+ if (change)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->nextlsn;
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
+ goto out;
+ }
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_ovref_recover --
+ * Recovery function for __db_ovref().
+ *
+ * PUBLIC: int __db_ovref_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_ovref_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_ovref_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_ovref_print);
+ REC_INTRO(__db_ovref_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op))
+ goto done;
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ cmp = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp, &LSN(pagep), &argp->lsn);
+ if (cmp == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ OV_REF(pagep) += argp->adjust;
+
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ OV_REF(pagep) -= argp->adjust;
+
+ pagep->lsn = argp->lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_relink_recover --
+ * Recovery function for relink.
+ *
+ * PUBLIC: int __db_relink_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_relink_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_relink_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_relink_print);
+ REC_INTRO(__db_relink_read, 1);
+
+ /*
+ * There are up to three pages we need to check -- the page, and the
+ * previous and next pages, if they existed. For a page add operation,
+ * the current page is the result of a split and is being recovered
+ * elsewhere, so all we need do is recover the next page.
+ */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+ goto next2;
+ }
+ modified = 0;
+ if (argp->opcode == DB_ADD_PAGE)
+ goto next1;
+
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Redo the relink. */
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) {
+ /* Undo the relink. */
+ pagep->next_pgno = argp->next;
+ pagep->prev_pgno = argp->prev;
+
+ pagep->lsn = argp->lsn;
+ modified = 1;
+ }
+next1: if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+next2: if ((ret = mpf->get(mpf, &argp->next, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, argp->next, ret);
+ goto out;
+ }
+ goto prev;
+ }
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn_next);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_next);
+ if ((argp->opcode == DB_REM_PAGE && cmp_p == 0 && DB_REDO(op)) ||
+ (argp->opcode == DB_ADD_PAGE && cmp_n == 0 && DB_UNDO(op))) {
+ /* Redo the remove or undo the add. */
+ pagep->prev_pgno = argp->prev;
+
+ modified = 1;
+ } else if ((argp->opcode == DB_REM_PAGE && cmp_n == 0 && DB_UNDO(op)) ||
+ (argp->opcode == DB_ADD_PAGE && cmp_p == 0 && DB_REDO(op))) {
+ /* Undo the remove or redo the add. */
+ pagep->prev_pgno = argp->pgno;
+
+ modified = 1;
+ }
+ if (modified == 1) {
+ if (DB_UNDO(op))
+ pagep->lsn = argp->lsn_next;
+ else
+ pagep->lsn = *lsnp;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+ if (argp->opcode == DB_ADD_PAGE)
+ goto done;
+
+prev: if ((ret = mpf->get(mpf, &argp->prev, 0, &pagep)) != 0) {
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, argp->prev, ret);
+ goto out;
+ }
+ goto done;
+ }
+ modified = 0;
+ cmp_p = log_compare(&LSN(pagep), &argp->lsn_prev);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn_prev);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Redo the relink. */
+ pagep->next_pgno = argp->next;
+
+ modified = 1;
+ } else if (log_compare(lsnp, &LSN(pagep)) == 0 && DB_UNDO(op)) {
+ /* Undo the relink. */
+ pagep->next_pgno = argp->pgno;
+
+ modified = 1;
+ }
+ if (modified == 1) {
+ if (DB_UNDO(op))
+ pagep->lsn = argp->lsn_prev;
+ else
+ pagep->lsn = *lsnp;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_debug_recover --
+ * Recovery function for debug.
+ *
+ * PUBLIC: int __db_debug_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_debug_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_debug_args *argp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(op, DB_TXN_ABORT);
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__db_debug_print);
+ REC_NOOP_INTRO(__db_debug_read);
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __db_noop_recover --
+ * Recovery function for noop.
+ *
+ * PUBLIC: int __db_noop_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_noop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_noop_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t change;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__db_noop_print);
+ REC_INTRO(__db_noop_read, 0);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ goto out;
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+ change = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ LSN(pagep) = *lsnp;
+ change = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ LSN(pagep) = argp->prevlsn;
+ change = DB_MPOOL_DIRTY;
+ }
+ ret = mpf->put(mpf, pagep, change);
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_pg_alloc_recover --
+ * Recovery function for pg_alloc.
+ *
+ * PUBLIC: int __db_pg_alloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_pg_alloc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DBMETA *meta;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, created, level, modified, ret;
+
+ meta = NULL;
+ pagep = NULL;
+ REC_PRINT(__db_pg_alloc_print);
+ REC_INTRO(__db_pg_alloc_read, 0);
+
+ /*
+ * Fix up the allocated page. If we're redoing the operation, we have
+ * to get the page (creating it if it doesn't exist), and update its
+ * LSN. If we're undoing the operation, we have to reset the page's
+ * LSN and put it on the free list.
+ *
+ * Fix up the metadata page. If we're redoing the operation, we have
+ * to get the metadata page and update its LSN and its free pointer.
+ * If we're undoing the operation and the page was ever created, we put
+ * it on the freelist.
+ */
+ pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist on redo. */
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ } else
+ goto done;
+ }
+ created = modified = 0;
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ /*
+ * We have to be able to identify if a page was newly
+ * created so we can recover it properly. We cannot simply
+ * look for an empty header, because hash uses a pgin
+ * function that will set the header. Instead, we explicitly
+ * try for the page without CREATE and if that fails, then
+ * create it.
+ */
+ if ((ret =
+ mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+ created = modified = 1;
+ }
+
+ /* Fix up the allocated page. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->page_lsn);
+
+ /*
+ * If an initial allocation is aborted and then reallocated
+ * during an archival restore the log record will have
+ * an LSN for the page but the page will be empty.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)))
+ cmp_p = 0;
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->page_lsn);
+ /*
+ * If we rolled back this allocation previously during an
+ * archive restore, the page may have the LSN of the meta page
+ * at the point of the roll back. This will be no more
+ * than the LSN of the metadata page at the time of this allocation.
+ * Another special case we have to handle is if we ended up with a
+ * page of all 0's which can happen if we abort between allocating a
+ * page in mpool and initializing it. In that case, even if we're
+ * undoing, we need to re-initialize the page.
+ */
+ if (DB_REDO(op) &&
+ (cmp_p == 0 ||
+ (IS_ZERO_LSN(argp->page_lsn) &&
+ log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
+ /* Need to redo update described. */
+ switch (argp->ptype) {
+ case P_LBTREE:
+ case P_LRECNO:
+ case P_LDUP:
+ level = LEAFLEVEL;
+ break;
+ default:
+ level = 0;
+ break;
+ }
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, PGNO_INVALID, level, argp->ptype);
+
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (DB_UNDO(op) && (cmp_n == 0 || created)) {
+ /*
+ * This is where we handle the case of a 0'd page (pagep->pgno
+ * is equal to PGNO_INVALID).
+ * Undo the allocation, reinitialize the page and
+ * link its next pointer to the free list.
+ */
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
+
+ pagep->lsn = argp->page_lsn;
+ modified = 1;
+ }
+
+ /*
+ * If the page was newly created, put it on the limbo list.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)) &&
+ IS_ZERO_LSN(argp->page_lsn) && DB_UNDO(op)) {
+ /* Put the page in limbo.*/
+ if ((ret = __db_add_limbo(dbenv,
+ info, argp->fileid, argp->pgno, 1)) != 0)
+ goto out;
+ }
+
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /* Fix up the metadata page. */
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ LSN(meta) = *lsnp;
+ meta->free = argp->next;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ LSN(meta) = argp->meta_lsn;
+
+ /*
+ * If the page has a zero LSN then its newly created
+ * and will go into limbo rather than directly on the
+ * free list.
+ */
+ if (!IS_ZERO_LSN(argp->page_lsn))
+ meta->free = argp->pgno;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ meta = NULL;
+ /*
+ * This could be the metapage from a subdb which is read from disk
+ * to recover its creation.
+ */
+ if (F_ISSET(file_dbp, DB_AM_SUBDB))
+ switch (argp->type) {
+ case P_BTREEMETA:
+ case P_HASHMETA:
+ case P_QAMMETA:
+ file_dbp->sync(file_dbp, 0);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC)
+ ret = 0;
+ REC_CLOSE;
+}
+
+/*
+ * __db_pg_free_recover --
+ * Recovery function for pg_free.
+ *
+ * PUBLIC: int __db_pg_free_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_pg_free_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_pg_free_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DBMETA *meta;
+ DB_LSN copy_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ meta = NULL;
+ pagep = NULL;
+ REC_PRINT(__db_pg_free_print);
+ REC_INTRO(__db_pg_free_read, 1);
+
+ /*
+ * Fix up the freed page. If we're redoing the operation we get the
+ * page and explicitly discard its contents, then update its LSN. If
+ * we're undoing the operation, we get the page and restore its header.
+ * Create the page if necessary, we may be freeing an aborted
+ * create.
+ */
+ if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ modified = 0;
+ (void)__ua_memcpy(&copy_lsn, &LSN(argp->header.data), sizeof(DB_LSN));
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &copy_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &copy_lsn);
+ if (DB_REDO(op) &&
+ (cmp_p == 0 ||
+ (IS_ZERO_LSN(copy_lsn) &&
+ log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
+ /* Need to redo update described. */
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
+ pagep->lsn = *lsnp;
+
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->header.data, argp->header.size);
+
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /*
+ * Fix up the metadata page. If we're redoing or undoing the operation
+ * we get the page and update its LSN and free pointer.
+ */
+ pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist. */
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo the deallocation. */
+ meta->free = argp->pgno;
+ LSN(meta) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo the deallocation. */
+ meta->free = argp->next;
+ LSN(meta) = argp->meta_lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ meta = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_cksum_recover --
+ * Recovery function for checksum failure log record.
+ *
+ * PUBLIC: int __db_cksum_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_cksum_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_cksum_args *argp;
+
+ int ret;
+
+ COMPQUIET(info, NULL);
+ COMPQUIET(lsnp, NULL);
+ COMPQUIET(op, DB_TXN_ABORT);
+
+ REC_PRINT(__db_cksum_print);
+
+ if ((ret = __db_cksum_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ /*
+ * We had a checksum failure -- the only option is to run catastrophic
+ * recovery.
+ */
+ if (F_ISSET(dbenv, DB_ENV_FATAL))
+ ret = 0;
+ else {
+ __db_err(dbenv,
+ "Checksum failure requires catastrophic recovery");
+ ret = __db_panic(dbenv, DB_RUNRECOVERY);
+ }
+
+ __os_free(dbenv, argp);
+ return (ret);
+}
diff --git a/storage/bdb/db/db_reclaim.c b/storage/bdb/db/db_reclaim.c
new file mode 100644
index 00000000000..9aa39bcfa9b
--- /dev/null
+++ b/storage/bdb/db/db_reclaim.c
@@ -0,0 +1,248 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_reclaim.c,v 11.28 2002/08/06 06:11:17 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+
+/*
+ * __db_traverse_big
+ * Traverse a chain of overflow pages and call the callback routine
+ * on each one. The calling convention for the callback is:
+ * callback(dbp, page, cookie, did_put),
+ * where did_put is a return value indicating if the page in question has
+ * already been returned to the mpool.
+ *
+ * PUBLIC: int __db_traverse_big __P((DB *,
+ * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+ */
+int
+__db_traverse_big(dbp, pgno, callback, cookie)
+ DB *dbp;
+ db_pgno_t pgno;
+ int (*callback) __P((DB *, PAGE *, void *, int *));
+ void *cookie;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *p;
+ int did_put, ret;
+
+ mpf = dbp->mpf;
+
+ do {
+ did_put = 0;
+ if ((ret = mpf->get(mpf, &pgno, 0, &p)) != 0)
+ return (ret);
+ pgno = NEXT_PGNO(p);
+ if ((ret = callback(dbp, p, cookie, &did_put)) == 0 &&
+ !did_put)
+ ret = mpf->put(mpf, p, 0);
+ } while (ret == 0 && pgno != PGNO_INVALID);
+
+ return (ret);
+}
+
+/*
+ * __db_reclaim_callback
+ * This is the callback routine used during a delete of a subdatabase.
+ * we are traversing a btree or hash table and trying to free all the
+ * pages. Since they share common code for duplicates and overflow
+ * items, we traverse them identically and use this routine to do the
+ * actual free. The reason that this is a callback is because hash uses
+ * the same traversal code for statistics gathering.
+ *
+ * PUBLIC: int __db_reclaim_callback __P((DB *, PAGE *, void *, int *));
+ */
+int
+__db_reclaim_callback(dbp, p, cookie, putp)
+ DB *dbp;
+ PAGE *p;
+ void *cookie;
+ int *putp;
+{
+ int ret;
+
+ COMPQUIET(dbp, NULL);
+
+ if ((ret = __db_free(cookie, p)) != 0)
+ return (ret);
+ *putp = 1;
+
+ return (0);
+}
+
+/*
+ * __db_truncate_callback
+ * This is the callback routine used during a truncate.
+ * we are traversing a btree or hash table and trying to free all the
+ * pages.
+ *
+ * PUBLIC: int __db_truncate_callback __P((DB *, PAGE *, void *, int *));
+ */
+int
+__db_truncate_callback(dbp, p, cookie, putp)
+ DB *dbp;
+ PAGE *p;
+ void *cookie;
+ int *putp;
+{
+ DBMETA *meta;
+ DBT ldbt;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ db_indx_t indx, len, off, tlen, top;
+ db_pgno_t pgno;
+ db_trunc_param *param;
+ u_int8_t *hk, type;
+ int ret;
+
+ top = NUM_ENT(p);
+ mpf = dbp->mpf;
+ param = cookie;
+ *putp = 1;
+
+ switch (TYPE(p)) {
+ case P_LBTREE:
+ /* Skip for off-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ type = GET_BKEYDATA(dbp, p, indx + O_INDX)->type;
+ if (!B_DISSET(type) && B_TYPE(type) != B_DUPLICATE)
+ ++param->count;
+ }
+ /* FALLTHROUGH */
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_INVALID:
+ if (dbp->type != DB_HASH &&
+ ((BTREE *)dbp->bt_internal)->bt_root == PGNO(p)) {
+ type = dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE;
+ goto reinit;
+ }
+ break;
+ case P_OVERFLOW:
+ if (DBC_LOGGING(param->dbc)) {
+ if ((ret = __db_ovref_log(dbp, param->dbc->txn,
+ &LSN(p), 0, p->pgno, -1, &LSN(p))) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(p));
+ if (--OV_REF(p) != 0)
+ *putp = 0;
+ break;
+ case P_LRECNO:
+ param->count += top;
+ if (((BTREE *)dbp->bt_internal)->bt_root == PGNO(p)) {
+ type = P_LRECNO;
+ goto reinit;
+ }
+ break;
+ case P_LDUP:
+ /* Correct for deleted items. */
+ for (indx = 0; indx < top; indx += O_INDX)
+ if (!B_DISSET(GET_BKEYDATA(dbp, p, indx)->type))
+ ++param->count;
+
+ break;
+ case P_HASH:
+ /* Correct for on-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ switch (*H_PAIRDATA(dbp, p, indx)) {
+ case H_OFFDUP:
+ case H_OFFPAGE:
+ break;
+ case H_KEYDATA:
+ ++param->count;
+ break;
+ case H_DUPLICATE:
+ tlen = LEN_HDATA(dbp, p, 0, indx);
+ hk = H_PAIRDATA(dbp, p, indx);
+ for (off = 0; off < tlen;
+ off += len + 2 * sizeof (db_indx_t)) {
+ ++param->count;
+ memcpy(&len,
+ HKEYDATA_DATA(hk)
+ + off, sizeof(db_indx_t));
+ }
+ }
+ }
+ /* Don't free the head of the bucket. */
+ if (PREV_PGNO(p) == PGNO_INVALID) {
+ type = P_HASH;
+
+reinit: *putp = 0;
+ if (DBC_LOGGING(param->dbc)) {
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(param->dbc, LCK_ALWAYS,
+ pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf,
+ &pgno, 0, (PAGE **)&meta)) != 0) {
+ goto err;
+ }
+ memset(&ldbt, 0, sizeof(ldbt));
+ ldbt.data = p;
+ ldbt.size = P_OVERHEAD(dbp);
+ if ((ret = __db_pg_free_log(dbp,
+ param->dbc->txn, &LSN(meta), 0,
+ p->pgno, &LSN(meta),
+ PGNO_BASE_MD, &ldbt, meta->free)) != 0)
+ goto err;
+ LSN(p) = LSN(meta);
+
+ if ((ret =
+ __db_pg_alloc_log(dbp,
+ param->dbc->txn, &LSN(meta), 0,
+ &LSN(meta), PGNO_BASE_MD,
+ &p->lsn, p->pgno, type, meta->free)) != 0) {
+err: (void)mpf->put(mpf, (PAGE *)meta, 0);
+ (void)__TLPUT(param->dbc, metalock);
+ return (ret);
+ }
+ LSN(p) = LSN(meta);
+
+ if ((ret = mpf->put(mpf,
+ (PAGE *)meta, DB_MPOOL_DIRTY)) != 0) {
+ (void)__TLPUT(param->dbc, metalock);
+ return (ret);
+ }
+ if ((ret = __TLPUT(param->dbc, metalock)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(p));
+
+ P_INIT(p, dbp->pgsize, PGNO(p), PGNO_INVALID,
+ PGNO_INVALID, type == P_HASH ? 0 : 1, type);
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, p->pgno));
+ }
+
+ if (*putp == 1) {
+ if ((ret = __db_free(param->dbc, p)) != 0)
+ return (ret);
+ } else {
+ if ((ret = mpf->put(mpf, p, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+ *putp = 1;
+ }
+
+ return (0);
+}
diff --git a/storage/bdb/db/db_remove.c b/storage/bdb/db/db_remove.c
new file mode 100644
index 00000000000..ef11c342555
--- /dev/null
+++ b/storage/bdb/db/db_remove.c
@@ -0,0 +1,318 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_remove.c,v 11.203 2002/08/19 18:34:18 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+static int __db_subdb_remove __P((DB *, DB_TXN *, const char *, const char *));
+static int __db_dbtxn_remove __P((DB *, DB_TXN *, const char *));
+
+/*
+ * __dbenv_dbremove
+ * Remove method for DB_ENV.
+ *
+ * PUBLIC: int __dbenv_dbremove __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, const char *, u_int32_t));
+ */
+int
+__dbenv_dbremove(dbenv, txn, name, subdb, flags)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret, t_ret, txn_local;
+
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbremove");
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->remove", flags, DB_AUTO_COMMIT)) != 0)
+ return (ret);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ ret = __db_remove_i(dbp, txn, name, subdb);
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ /*
+ * We created the DBP here and when we committed/aborted,
+ * we release all the transactional locks, which includes
+ * the handle lock; mark the handle cleared explicitly.
+ */
+ LOCK_INIT(dbp->handle_lock);
+ dbp->lid = DB_LOCK_INVALIDID;
+ }
+
+ /*
+ * We never opened this dbp for real, so don't call the transactional
+ * version of DB->close, and use NOSYNC to avoid calling into mpool.
+ */
+ if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_remove
+ * Remove method for DB.
+ *
+ * PUBLIC: int __db_remove __P((DB *, const char *, const char *, u_int32_t));
+ */
+int
+__db_remove(dbp, name, subdb, flags)
+ DB *dbp;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Validate arguments, continuing to destroy the handle on failure.
+ *
+ * Cannot use DB_ILLEGAL_AFTER_OPEN directly because it returns.
+ *
+ * !!!
+ * We have a serious problem if we're here with a handle used to open
+ * a database -- we'll destroy the handle, and the application won't
+ * ever be able to close the database.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) {
+ ret = __db_mi_open(dbenv, "DB->remove", 1);
+ goto err;
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->remove", flags, 0)) != 0)
+ goto err;
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ /* Remove the file. */
+ ret = __db_remove_i(dbp, NULL, name, subdb);
+
+ /*
+ * We never opened this dbp for real, use NOSYNC to avoid calling into
+ * mpool.
+ */
+err: if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_remove_i
+ * Internal remove method for DB.
+ *
+ * PUBLIC: int __db_remove_i __P((DB *, DB_TXN *, const char *, const char *));
+ */
+int
+__db_remove_i(dbp, txn, name, subdb)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+{
+ DB_ENV *dbenv;
+ DB_LSN newlsn;
+ int ret;
+ char *real_name;
+
+ dbenv = dbp->dbenv;
+ real_name = NULL;
+
+ /* Handle subdatabase removes separately. */
+ if (subdb != NULL)
+ return (__db_subdb_remove(dbp, txn, name, subdb));
+
+ /* Handle transactional file removes separately. */
+ if (txn != NULL)
+ return (__db_dbtxn_remove(dbp, txn, name));
+
+ /*
+ * The remaining case is a non-transactional file remove.
+ *
+ * Find the real name of the file.
+ */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ if ((ret = __fop_remove_setup(dbp, NULL, real_name, 0)) != 0)
+ goto err;
+
+ if (dbp->db_am_remove != NULL &&
+ (ret = dbp->db_am_remove(dbp, NULL, name, subdb, &newlsn)) != 0)
+ goto err;
+
+ ret = __fop_remove(dbenv, NULL, dbp->fileid, name, DB_APP_DATA);
+
+err:
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+
+ return (ret);
+}
+
+/*
+ * __db_subdb_remove --
+ * Remove a subdatabase.
+ */
+static int
+__db_subdb_remove(dbp, txn, name, subdb)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+{
+ DB *mdbp, *sdbp;
+ int ret, t_ret;
+
+ mdbp = sdbp = NULL;
+
+ /* Open the subdatabase. */
+ if ((ret = db_create(&sdbp, dbp->dbenv, 0)) != 0)
+ goto err;
+ if ((ret = __db_open(sdbp,
+ txn, name, subdb, DB_UNKNOWN, DB_WRITEOPEN, 0)) != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(sdbp, DB_TEST_PREDESTROY, ret, name);
+
+ /* Free up the pages in the subdatabase. */
+ switch (sdbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_reclaim(sdbp, txn)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_reclaim(sdbp, txn)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(
+ sdbp->dbenv, "__db_subdb_remove", sdbp->type);
+ goto err;
+ }
+
+ /*
+ * Remove the entry from the main database and free the subdatabase
+ * metadata page.
+ */
+ if ((ret = __db_master_open(sdbp, txn, name, 0, 0, &mdbp)) != 0)
+ goto err;
+
+ if ((ret = __db_master_update(
+ mdbp, sdbp, txn, subdb, sdbp->type, MU_REMOVE, NULL, 0)) != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(sdbp, DB_TEST_POSTDESTROY, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ /* Close the main and subdatabases. */
+ if ((t_ret = __db_close_i(sdbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (mdbp != NULL &&
+ (t_ret = __db_close_i(mdbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+static int
+__db_dbtxn_remove(dbp, txn, name)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+{
+ DB_ENV *dbenv;
+ DB_LSN newlsn;
+ int ret;
+ char *tmpname;
+
+ dbenv = dbp->dbenv;
+ tmpname = NULL;
+
+ /*
+ * This is a transactional rename, so we have to keep the name
+ * of the file locked until the transaction commits. As a result,
+ * we implement remove by renaming the file to some other name
+ * (which creates a dummy named file as a placeholder for the
+ * file being renamed/removed) and then deleting that file as
+ * a delayed remove at commit.
+ */
+ if ((ret = __db_backup_name(dbenv, name, txn, &tmpname)) != 0)
+ return (ret);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name);
+
+ if ((ret = __db_rename_i(dbp, txn, name, NULL, tmpname)) != 0)
+ goto err;
+
+ /* The internal removes will also translate into delayed removes. */
+ if (dbp->db_am_remove != NULL &&
+ (ret = dbp->db_am_remove(dbp, txn, tmpname, NULL, &newlsn)) != 0)
+ goto err;
+
+ ret = __fop_remove(dbenv, txn, dbp->fileid, tmpname, DB_APP_DATA);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, name);
+
+err:
+DB_TEST_RECOVERY_LABEL
+ if (tmpname != NULL)
+ __os_free(dbenv, tmpname);
+
+ return (ret);
+}
diff --git a/storage/bdb/db/db_rename.c b/storage/bdb/db/db_rename.c
new file mode 100644
index 00000000000..87f88232cda
--- /dev/null
+++ b/storage/bdb/db/db_rename.c
@@ -0,0 +1,297 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_rename.c,v 11.203 2002/08/07 16:16:47 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/fop.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+
+static int __db_subdb_rename __P(( DB *, DB_TXN *,
+ const char *, const char *, const char *));
+
+/*
+ * __dbenv_dbrename
+ * Rename method for DB_ENV.
+ *
+ * PUBLIC: int __dbenv_dbrename __P((DB_ENV *, DB_TXN *,
+ * PUBLIC: const char *, const char *, const char *, u_int32_t));
+ */
+int
+__dbenv_dbrename(dbenv, txn, name, subdb, newname, flags)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name, *subdb, *newname;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret, t_ret, txn_local;
+
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbrename");
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->rename", flags, DB_AUTO_COMMIT)) != 0)
+ return (ret);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ ret = __db_rename_i(dbp, txn, name, subdb, newname);
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+
+ /*
+ * We created the DBP here and when we committed/aborted,
+ * we release all the transactional locks, which includes
+ * the handle lock; mark the handle cleared explicitly.
+ */
+ LOCK_INIT(dbp->handle_lock);
+ dbp->lid = DB_LOCK_INVALIDID;
+ }
+
+ /*
+ * We never opened this dbp for real, so don't call the transactional
+ * version of DB->close, and use NOSYNC to avoid calling into mpool.
+ */
+ if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_rename
+ * Rename method for DB.
+ *
+ * PUBLIC: int __db_rename __P((DB *,
+ * PUBLIC: const char *, const char *, const char *, u_int32_t));
+ */
+int
+__db_rename(dbp, name, subdb, newname, flags)
+ DB *dbp;
+ const char *name, *subdb, *newname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Validate arguments, continuing to destroy the handle on failure.
+ *
+ * Cannot use DB_ILLEGAL_AFTER_OPEN directly because it returns.
+ *
+ * !!!
+ * We have a serious problem if we're here with a handle used to open
+ * a database -- we'll destroy the handle, and the application won't
+ * ever be able to close the database.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) {
+ ret = __db_mi_open(dbenv, "DB->rename", 1);
+ goto err;
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->rename", flags, 0)) != 0)
+ goto err;
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ /* Rename the file. */
+ ret = __db_rename_i(dbp, NULL, name, subdb, newname);
+
+ /*
+ * We never opened this dbp for real, use NOSYNC to avoid calling into
+ * mpool.
+ */
+err: if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_rename_i
+ * Internal rename method for DB.
+ *
+ * PUBLIC: int __db_rename_i __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, const char *, const char *));
+ */
+int
+__db_rename_i(dbp, txn, name, subdb, newname)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb, *newname;
+{
+ DB_ENV *dbenv;
+ int ret;
+ char *real_name;
+
+ dbenv = dbp->dbenv;
+ real_name = NULL;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name);
+
+ if (subdb != NULL) {
+ ret = __db_subdb_rename(dbp, txn, name, subdb, newname);
+ goto err;
+ }
+
+ /* From here on down, this pertains to files. */
+
+ /* Find the real name of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if ((ret = __fop_remove_setup(dbp, txn, real_name, 0)) != 0)
+ goto err;
+
+ if (dbp->db_am_rename != NULL &&
+ (ret = dbp->db_am_rename(dbp, txn, name, subdb, newname)) != 0)
+ goto err;
+
+ /*
+ * The transactional case and non-transactional case are
+ * quite different. In the non-transactional case, we simply
+ * do the rename. In the transactional case, since we need
+ * the ability to back out and maintain locking, we have to
+ * create a temporary object as a placeholder. This is all
+ * taken care of in the fop layer.
+ */
+ if (txn != NULL) {
+ if ((ret = __fop_dummy(dbp, txn, name, newname, 0)) != 0)
+ goto err;
+ } else {
+ if ((ret = __fop_dbrename(dbp, name, newname)) != 0)
+ goto err;
+ }
+
+ /*
+ * I am pretty sure that we haven't gotten a dbreg id, so calling
+ * dbreg_filelist_update is not necessary.
+ */
+ DB_ASSERT(dbp->log_filename == NULL ||
+ dbp->log_filename->id == DB_LOGFILEID_INVALID);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, newname);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+
+ return (ret);
+}
+
+/*
+ * __db_subdb_rename --
+ * Rename a subdatabase.
+ */
+static int
+__db_subdb_rename(dbp, txn, name, subdb, newname)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb, *newname;
+{
+ DB *mdbp;
+ DB_ENV *dbenv;
+ PAGE *meta;
+ int ret, t_ret;
+
+ mdbp = NULL;
+ meta = NULL;
+ dbenv = dbp->dbenv;
+
+ /*
+ * We have not opened this dbp so it isn't marked as a subdb,
+ * but it ought to be.
+ */
+ F_SET(dbp, DB_AM_SUBDB);
+
+ /*
+ * Rename the entry in the main database. We need to first
+ * get the meta-data page number (via MU_OPEN) so that we can
+ * read the meta-data page and obtain a handle lock. Once we've
+ * done that, we can proceed to do the rename in the master.
+ */
+ if ((ret = __db_master_open(dbp, txn, name, 0, 0, &mdbp)) != 0)
+ goto err;
+
+ if ((ret = __db_master_update(mdbp, dbp, txn, subdb, dbp->type,
+ MU_OPEN, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret = mdbp->mpf->get(mdbp->mpf, &dbp->meta_pgno, 0, &meta)) != 0)
+ goto err;
+ memcpy(&dbp->fileid, ((DBMETA *)meta)->uid, DB_FILE_ID_LEN);
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, mdbp->lid, DB_LOCK_WRITE, NULL, 0)) != 0)
+ goto err;
+
+ ret = mdbp->mpf->put(mdbp->mpf, meta, 0);
+ meta = NULL;
+ if (ret != 0)
+ goto err;
+
+ if ((ret = __db_master_update(mdbp, dbp, txn,
+ subdb, dbp->type, MU_RENAME, newname, 0)) != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ if (meta != NULL &&
+ (t_ret = mdbp->mpf->put(mdbp->mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (mdbp != NULL &&
+ (t_ret = __db_close_i(mdbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/storage/bdb/db/db_ret.c b/storage/bdb/db/db_ret.c
new file mode 100644
index 00000000000..b1af7b4ffeb
--- /dev/null
+++ b/storage/bdb/db/db_ret.c
@@ -0,0 +1,154 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_ret.c,v 11.21 2002/03/28 19:21:47 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __db_ret --
+ * Build return DBT.
+ *
+ * Copies the item at index "indx" on page "h" into "dbt", chasing
+ * off-page (overflow) items as needed.  memp/memsize describe a
+ * DB-owned scratch buffer reused across calls by __db_retcopy.
+ *
+ * PUBLIC: int __db_ret __P((DB *,
+ * PUBLIC: PAGE *, u_int32_t, DBT *, void **, u_int32_t *));
+ */
+int
+__db_ret(dbp, h, indx, dbt, memp, memsize)
+ DB *dbp;
+ PAGE *h;
+ u_int32_t indx;
+ DBT *dbt;
+ void **memp;
+ u_int32_t *memsize;
+{
+ BKEYDATA *bk;
+ HOFFPAGE ho;
+ BOVERFLOW *bo;
+ u_int32_t len;
+ u_int8_t *hk;
+ void *data;
+
+ switch (TYPE(h)) {
+ case P_HASH:
+ hk = P_ENTRY(dbp, h, indx);
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
+ /* Off-page item: fetch the whole overflow chain. */
+ memcpy(&ho, hk, sizeof(HOFFPAGE));
+ return (__db_goff(dbp, dbt,
+ ho.tlen, ho.pgno, memp, memsize));
+ }
+ len = LEN_HKEYDATA(dbp, h, dbp->pgsize, indx);
+ data = HKEYDATA_DATA(hk);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ bk = GET_BKEYDATA(dbp, h, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ /* Off-page item: fetch the whole overflow chain. */
+ bo = (BOVERFLOW *)bk;
+ return (__db_goff(dbp, dbt,
+ bo->tlen, bo->pgno, memp, memsize));
+ }
+ len = bk->len;
+ data = bk->data;
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, h->pgno));
+ }
+
+ /* On-page item: copy it out per the DBT's memory-management flags. */
+ return (__db_retcopy(dbp->dbenv, dbt, data, len, memp, memsize));
+}
+
+/*
+ * __db_retcopy --
+ *	Copy the returned data into the user's DBT, handling special flags.
+ *
+ * Honors DB_DBT_PARTIAL, DB_DBT_MALLOC, DB_DBT_REALLOC and
+ * DB_DBT_USERMEM; with none of those set, memp/memsize describe a
+ * DB-owned buffer that is grown as needed and lent to dbt->data.
+ *
+ * PUBLIC: int __db_retcopy __P((DB_ENV *, DBT *,
+ * PUBLIC: void *, u_int32_t, void **, u_int32_t *));
+ */
+int
+__db_retcopy(dbenv, dbt, data, len, memp, memsize)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ void *data;
+ u_int32_t len;
+ void **memp;
+ u_int32_t *memsize;
+{
+ int ret;
+
+ /* If returning a partial record, reset the length. */
+ if (F_ISSET(dbt, DB_DBT_PARTIAL)) {
+ /*
+ * NOTE(review): data is advanced by doff before the bounds
+ * check; when doff >= len it may point past the record, but
+ * len becomes 0 so the pointer is never dereferenced.
+ */
+ data = (u_int8_t *)data + dbt->doff;
+ if (len > dbt->doff) {
+ len -= dbt->doff;
+ if (len > dbt->dlen)
+ len = dbt->dlen;
+ } else
+ len = 0;
+ }
+
+ /*
+ * Return the length of the returned record in the DBT size field.
+ * This satisfies the requirement that if we're using user memory
+ * and insufficient memory was provided, return the amount necessary
+ * in the size field.
+ */
+ dbt->size = len;
+
+ /*
+ * Allocate memory to be owned by the application: DB_DBT_MALLOC,
+ * DB_DBT_REALLOC.
+ *
+ * !!!
+ * We always allocate memory, even if we're copying out 0 bytes. This
+ * guarantees consistency, i.e., the application can always free memory
+ * without concern as to how many bytes of the record were requested.
+ *
+ * Use the memory specified by the application: DB_DBT_USERMEM.
+ *
+ * !!!
+ * If the length we're going to copy is 0, the application-supplied
+ * memory pointer is allowed to be NULL.
+ */
+ if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+ if ((ret = __os_umalloc(dbenv, len, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
+ if ((ret = __os_urealloc(dbenv, len, &dbt->data)) != 0)
+ return (ret);
+ } else if (F_ISSET(dbt, DB_DBT_USERMEM)) {
+ if (len != 0 && (dbt->data == NULL || dbt->ulen < len))
+ return (ENOMEM);
+ } else if (memp == NULL || memsize == NULL) {
+ return (EINVAL);
+ } else {
+ /* Default: grow the DB-owned scratch buffer and lend it out. */
+ if (len != 0 && (*memsize == 0 || *memsize < len)) {
+ if ((ret = __os_realloc(dbenv, len, memp)) != 0) {
+ *memsize = 0;
+ return (ret);
+ }
+ *memsize = len;
+ }
+ dbt->data = *memp;
+ }
+
+ if (len != 0)
+ memcpy(dbt->data, data, len);
+ return (0);
+}
diff --git a/storage/bdb/db/db_truncate.c b/storage/bdb/db/db_truncate.c
new file mode 100644
index 00000000000..49546ae51b9
--- /dev/null
+++ b/storage/bdb/db/db_truncate.c
@@ -0,0 +1,95 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_truncate.c,v 11.185 2002/08/07 16:16:48 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
+
+/*
+ * __db_truncate
+ * truncate method for DB.
+ *
+ * Discards all records from the database, returning the number
+ * discarded via countp.  When DB_AUTO_COMMIT applies and no txn was
+ * supplied, a local transaction wraps the operation and is committed
+ * or aborted here; an abort failure panics the environment.
+ *
+ * PUBLIC: int __db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+ */
+int
+__db_truncate(dbp, txn, countp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp, flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret, txn_local;
+
+ dbenv = dbp->dbenv;
+ ret = txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret =
+ __db_fchk(dbenv, "DB->truncate", flags, DB_AUTO_COMMIT)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, NULL);
+ /* Dispatch on access method. */
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_truncate(dbp, txn, countp)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_truncate(dbp, txn, countp)) != 0)
+ goto err;
+ break;
+ case DB_QUEUE:
+ if ((ret = __qam_truncate(dbp, txn, countp)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(
+ dbenv, "__db_truncate", dbp->type);
+ goto err;
+ }
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, NULL);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
diff --git a/storage/bdb/db/db_upg.c b/storage/bdb/db/db_upg.c
new file mode 100644
index 00000000000..c0eb72f3713
--- /dev/null
+++ b/storage/bdb/db/db_upg.c
@@ -0,0 +1,341 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_upg.c,v 11.29 2002/03/27 18:59:04 krinsky Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
+
+/*
+ * Table mapping page type to the function that upgrades a page of
+ * that type from the 3.0 to the 3.1 on-disk format; NULL entries
+ * need no conversion.  Indexed by TYPE(page).
+ */
+static int (* const func_31_list[P_PAGETYPE_MAX])
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *)) = {
+ NULL, /* P_INVALID */
+ NULL, /* __P_DUPLICATE */
+ __ham_31_hash, /* P_HASH */
+ NULL, /* P_IBTREE */
+ NULL, /* P_IRECNO */
+ __bam_31_lbtree, /* P_LBTREE */
+ NULL, /* P_LRECNO */
+ NULL, /* P_OVERFLOW */
+ __ham_31_hashmeta, /* P_HASHMETA */
+ __bam_31_btreemeta, /* P_BTREEMETA */
+};
+
+/* Forward declaration: per-page upgrade driver, defined below. */
+static int __db_page_pass __P((DB *, char *, u_int32_t, int (* const [])
+ (DB *, char *, u_int32_t, DB_FH *, PAGE *, int *), DB_FH *));
+
+/*
+ * __db_upgrade --
+ *	Upgrade an existing database.
+ *
+ * Opens the backing file directly (no mpool), identifies the access
+ * method and on-disk version from the meta page, and applies the
+ * version-to-version conversions in sequence via case fallthrough.
+ * Native byte order only; byte-swapped files are rejected.
+ *
+ * PUBLIC: int __db_upgrade __P((DB *, const char *, u_int32_t));
+ */
+int
+__db_upgrade(dbp, fname, flags)
+ DB *dbp;
+ const char *fname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_FH fh;
+ size_t n;
+ int ret, t_ret;
+ u_int8_t mbuf[256];
+ char *real_name;
+
+ dbenv = dbp->dbenv;
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->upgrade", flags, DB_DUPSORT)) != 0)
+ return (ret);
+
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, fname, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ /* Open the file. */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0) {
+ __db_err(dbenv, "%s: %s", real_name, db_strerror(ret));
+ /*
+ * NOTE(review): returns without freeing real_name --
+ * possible leak on this path; the later error paths
+ * free it at the err label.
+ */
+ return (ret);
+ }
+
+ /* Initialize the feedback. */
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_UPGRADE, 0);
+
+ /*
+ * Read the metadata page. We read 256 bytes, which is larger than
+ * any access method's metadata page and smaller than any disk sector.
+ */
+ if ((ret = __os_read(dbenv, &fh, mbuf, sizeof(mbuf), &n)) != 0)
+ goto err;
+
+ switch (((DBMETA *)mbuf)->magic) {
+ case DB_BTREEMAGIC:
+ switch (((DBMETA *)mbuf)->version) {
+ case 6:
+ /*
+ * Before V7 not all pages had page types, so we do the
+ * single meta-data page by hand.
+ */
+ if ((ret =
+ __bam_30_btreemeta(dbp, real_name, mbuf)) != 0)
+ goto err;
+ if ((ret = __os_seek(dbenv,
+ &fh, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ if ((ret = __os_write(dbenv, &fh, mbuf, 256, &n)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 7:
+ /*
+ * We need the page size to do more. Rip it out of
+ * the meta-data page.
+ */
+ /* Byte offset 20 is the pagesize field -- TODO confirm
+ * against the DBMETA layout for all versions handled. */
+ memcpy(&dbp->pgsize, mbuf + 20, sizeof(u_int32_t));
+
+ if ((ret = __db_page_pass(
+ dbp, real_name, flags, func_31_list, &fh)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 8:
+ case 9:
+ break;
+ default:
+ __db_err(dbenv, "%s: unsupported btree version: %lu",
+ real_name, (u_long)((DBMETA *)mbuf)->version);
+ ret = DB_OLD_VERSION;
+ goto err;
+ }
+ break;
+ case DB_HASHMAGIC:
+ switch (((DBMETA *)mbuf)->version) {
+ case 4:
+ case 5:
+ /*
+ * Before V6 not all pages had page types, so we do the
+ * single meta-data page by hand.
+ */
+ if ((ret =
+ __ham_30_hashmeta(dbp, real_name, mbuf)) != 0)
+ goto err;
+ if ((ret = __os_seek(dbenv,
+ &fh, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ if ((ret = __os_write(dbenv, &fh, mbuf, 256, &n)) != 0)
+ goto err;
+
+ /*
+ * Before V6, we created hash pages one by one as they
+ * were needed, using hashhdr.ovfl_point to reserve
+ * a block of page numbers for them. A consequence
+ * of this was that, if no overflow pages had been
+ * created, the current doubling might extend past
+ * the end of the database file.
+ *
+ * In DB 3.X, we now create all the hash pages
+ * belonging to a doubling atomicly; it's not
+ * safe to just save them for later, because when
+ * we create an overflow page we'll just create
+ * a new last page (whatever that may be). Grow
+ * the database to the end of the current doubling.
+ */
+ if ((ret =
+ __ham_30_sizefix(dbp, &fh, real_name, mbuf)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 6:
+ /*
+ * We need the page size to do more. Rip it out of
+ * the meta-data page.
+ */
+ memcpy(&dbp->pgsize, mbuf + 20, sizeof(u_int32_t));
+
+ if ((ret = __db_page_pass(
+ dbp, real_name, flags, func_31_list, &fh)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 7:
+ case 8:
+ break;
+ default:
+ __db_err(dbenv, "%s: unsupported hash version: %lu",
+ real_name, (u_long)((DBMETA *)mbuf)->version);
+ ret = DB_OLD_VERSION;
+ goto err;
+ }
+ break;
+ case DB_QAMMAGIC:
+ switch (((DBMETA *)mbuf)->version) {
+ case 1:
+ /*
+ * If we're in a Queue database, the only page that
+ * needs upgrading is the meta-database page, don't
+ * bother with a full pass.
+ */
+ /* NOTE(review): returns directly instead of goto err,
+ * leaking the open fh and real_name on failure. */
+ if ((ret = __qam_31_qammeta(dbp, real_name, mbuf)) != 0)
+ return (ret);
+ /* FALLTHROUGH */
+ case 2:
+ /* NOTE(review): same direct return / leak as above. */
+ if ((ret = __qam_32_qammeta(dbp, real_name, mbuf)) != 0)
+ return (ret);
+ if ((ret = __os_seek(dbenv,
+ &fh, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ if ((ret = __os_write(dbenv, &fh, mbuf, 256, &n)) != 0)
+ goto err;
+ /* FALLTHROUGH */
+ case 3:
+ case 4:
+ break;
+ default:
+ __db_err(dbenv, "%s: unsupported queue version: %lu",
+ real_name, (u_long)((DBMETA *)mbuf)->version);
+ ret = DB_OLD_VERSION;
+ goto err;
+ }
+ break;
+ default:
+ /* Check whether the magic matches after a byte swap. */
+ M_32_SWAP(((DBMETA *)mbuf)->magic);
+ switch (((DBMETA *)mbuf)->magic) {
+ case DB_BTREEMAGIC:
+ case DB_HASHMAGIC:
+ case DB_QAMMAGIC:
+ __db_err(dbenv,
+ "%s: DB->upgrade only supported on native byte-order systems",
+ real_name);
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unrecognized file type", real_name);
+ break;
+ }
+ ret = EINVAL;
+ goto err;
+ }
+
+ ret = __os_fsync(dbenv, &fh);
+
+err: if ((t_ret = __os_closehandle(dbenv, &fh)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, real_name);
+
+ /* We're done. */
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_UPGRADE, 100);
+
+ return (ret);
+}
+
+/*
+ * __db_page_pass --
+ *	Walk the pages of the database, upgrading whatever needs it.
+ *
+ * Reads each page into a scratch buffer, calls the per-page-type
+ * conversion function from "fl" (if any), and writes the page back
+ * only when the conversion marked it dirty.
+ */
+static int
+__db_page_pass(dbp, real_name, flags, fl, fhp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ int (* const fl[P_PAGETYPE_MAX])
+ __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ DB_FH *fhp;
+{
+ DB_ENV *dbenv;
+ PAGE *page;
+ db_pgno_t i, pgno_last;
+ size_t n;
+ int dirty, ret;
+
+ dbenv = dbp->dbenv;
+
+ /* Determine the last page of the file. */
+ if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0)
+ return (ret);
+
+ /* Allocate memory for a single page. */
+ if ((ret = __os_malloc(dbenv, dbp->pgsize, &page)) != 0)
+ return (ret);
+
+ /* Walk the file, calling the underlying conversion functions. */
+ for (i = 0; i < pgno_last; ++i) {
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_UPGRADE, (i * 100)/pgno_last);
+ if ((ret = __os_seek(dbenv,
+ fhp, dbp->pgsize, i, 0, 0, DB_OS_SEEK_SET)) != 0)
+ break;
+ /* NOTE(review): a short read (n < pgsize) is not detected
+ * here -- presumably __os_read reports it; verify. */
+ if ((ret = __os_read(dbenv, fhp, page, dbp->pgsize, &n)) != 0)
+ break;
+ dirty = 0;
+ /* Dispatch on the page type; NULL means no upgrade needed. */
+ if (fl[TYPE(page)] != NULL && (ret = fl[TYPE(page)]
+ (dbp, real_name, flags, fhp, page, &dirty)) != 0)
+ break;
+ if (dirty) {
+ if ((ret = __os_seek(dbenv,
+ fhp, dbp->pgsize, i, 0, 0, DB_OS_SEEK_SET)) != 0)
+ break;
+ if ((ret = __os_write(dbenv,
+ fhp, page, dbp->pgsize, &n)) != 0)
+ break;
+ }
+ }
+
+ __os_free(dbp->dbenv, page);
+ return (ret);
+}
+
+/*
+ * __db_lastpgno --
+ *	Return the current last page number of the file.
+ *
+ * Computes the page count from the file size reported by
+ * __os_ioinfo (mbytes + residual bytes).
+ *
+ * PUBLIC: int __db_lastpgno __P((DB *, char *, DB_FH *, db_pgno_t *));
+ */
+int
+__db_lastpgno(dbp, real_name, fhp, pgno_lastp)
+ DB *dbp;
+ char *real_name;
+ DB_FH *fhp;
+ db_pgno_t *pgno_lastp;
+{
+ DB_ENV *dbenv;
+ db_pgno_t pgno_last;
+ u_int32_t mbytes, bytes;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ if ((ret = __os_ioinfo(dbenv,
+ real_name, fhp, &mbytes, &bytes, NULL)) != 0) {
+ __db_err(dbenv, "%s: %s", real_name, db_strerror(ret));
+ return (ret);
+ }
+
+ /* The file size must be an exact multiple of the page size. */
+ if (bytes % dbp->pgsize != 0) {
+ __db_err(dbenv,
+ "%s: file size not a multiple of the pagesize", real_name);
+ return (EINVAL);
+ }
+ /* Pages per megabyte times whole megabytes, plus the remainder. */
+ pgno_last = mbytes * (MEGABYTE / dbp->pgsize);
+ pgno_last += bytes / dbp->pgsize;
+
+ *pgno_lastp = pgno_last;
+ return (0);
+}
diff --git a/storage/bdb/db/db_upg_opd.c b/storage/bdb/db/db_upg_opd.c
new file mode 100644
index 00000000000..f410b797bff
--- /dev/null
+++ b/storage/bdb/db/db_upg_opd.c
@@ -0,0 +1,352 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_upg_opd.c,v 11.18 2002/08/06 06:11:18 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+
+static int __db_build_bi __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *));
+static int __db_build_ri __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *));
+static int __db_up_ovref __P((DB *, DB_FH *, db_pgno_t));
+
+/*
+ * GET_PAGE/PUT_PAGE --
+ *	Read/write one page at "pgno" through the raw file handle.
+ * Both macros require locals "ret" and "n" and a label "err" in the
+ * calling function; they jump to err on any seek or I/O failure.
+ */
+#define GET_PAGE(dbp, fhp, pgno, page) { \
+ if ((ret = __os_seek(dbp->dbenv, \
+ fhp, (dbp)->pgsize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0) \
+ goto err; \
+ if ((ret = __os_read(dbp->dbenv, \
+ fhp, page, (dbp)->pgsize, &n)) != 0) \
+ goto err; \
+}
+#define PUT_PAGE(dbp, fhp, pgno, page) { \
+ if ((ret = __os_seek(dbp->dbenv, \
+ fhp, (dbp)->pgsize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0) \
+ goto err; \
+ if ((ret = __os_write(dbp->dbenv, \
+ fhp, page, (dbp)->pgsize, &n)) != 0) \
+ goto err; \
+}
+
+/*
+ * __db_31_offdup --
+ *	Convert 3.0 off-page duplicates to 3.1 off-page duplicates.
+ *
+ * Converts each page of the chain in place to a Btree (sorted) or
+ * Recno (unsorted) leaf, then builds internal pages bottom-up until
+ * a single root remains; *pgnop is updated to the new root.
+ *
+ * PUBLIC: int __db_31_offdup __P((DB *, char *, DB_FH *, int, db_pgno_t *));
+ */
+int
+__db_31_offdup(dbp, real_name, fhp, sorted, pgnop)
+ DB *dbp;
+ char *real_name;
+ DB_FH *fhp;
+ int sorted;
+ db_pgno_t *pgnop;
+{
+ PAGE *ipage, *page;
+ db_indx_t indx;
+ db_pgno_t cur_cnt, i, next_cnt, pgno, *pgno_cur, pgno_last;
+ db_pgno_t *pgno_next, pgno_max, *tmp;
+ db_recno_t nrecs;
+ size_t n;
+ int level, nomem, ret;
+
+ ipage = page = NULL;
+ pgno_cur = pgno_next = NULL;
+
+ /* Allocate room to hold a page. */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &page)) != 0)
+ goto err;
+
+ /*
+ * Walk the chain of 3.0 off-page duplicates. Each one is converted
+ * in place to a 3.1 off-page duplicate page. If the duplicates are
+ * sorted, they are converted to a Btree leaf page, otherwise to a
+ * Recno leaf page.
+ */
+ for (nrecs = 0, cur_cnt = pgno_max = 0,
+ pgno = *pgnop; pgno != PGNO_INVALID;) {
+ /* Grow the page-number list in chunks of 20 entries. */
+ if (pgno_max == cur_cnt) {
+ pgno_max += 20;
+ if ((ret = __os_realloc(dbp->dbenv, pgno_max *
+ sizeof(db_pgno_t), &pgno_cur)) != 0)
+ goto err;
+ }
+ pgno_cur[cur_cnt++] = pgno;
+
+ GET_PAGE(dbp, fhp, pgno, page);
+ nrecs += NUM_ENT(page);
+ LEVEL(page) = LEAFLEVEL;
+ TYPE(page) = sorted ? P_LDUP : P_LRECNO;
+ /*
+ * !!!
+ * DB didn't zero the LSNs on off-page duplicates pages.
+ */
+ ZERO_LSN(LSN(page));
+ PUT_PAGE(dbp, fhp, pgno, page);
+
+ pgno = NEXT_PGNO(page);
+ }
+
+ /* If we only have a single page, it's easy. */
+ if (cur_cnt > 1) {
+ /*
+ * pgno_cur is the list of pages we just converted. We're
+ * going to walk that list, but we'll need to create a new
+ * list while we do so.
+ */
+ if ((ret = __os_malloc(dbp->dbenv,
+ cur_cnt * sizeof(db_pgno_t), &pgno_next)) != 0)
+ goto err;
+
+ /* Figure out where we can start allocating new pages. */
+ if ((ret = __db_lastpgno(dbp, real_name, fhp, &pgno_last)) != 0)
+ goto err;
+
+ /* Allocate room for an internal page. */
+ if ((ret = __os_malloc(dbp->dbenv,
+ dbp->pgsize, &ipage)) != 0)
+ goto err;
+ PGNO(ipage) = PGNO_INVALID;
+ }
+
+ /*
+ * Repeatedly walk the list of pages, building internal pages, until
+ * there's only one page at a level.
+ */
+ for (level = LEAFLEVEL + 1; cur_cnt > 1; ++level) {
+ for (indx = 0, i = next_cnt = 0; i < cur_cnt;) {
+ /* indx == 0 means we need to start a fresh parent. */
+ if (indx == 0) {
+ P_INIT(ipage, dbp->pgsize, pgno_last,
+ PGNO_INVALID, PGNO_INVALID,
+ level, sorted ? P_IBTREE : P_IRECNO);
+ ZERO_LSN(LSN(ipage));
+
+ pgno_next[next_cnt++] = pgno_last++;
+ }
+
+ GET_PAGE(dbp, fhp, pgno_cur[i], page);
+
+ /*
+ * If the duplicates are sorted, put the first item on
+ * the lower-level page onto a Btree internal page. If
+ * the duplicates are not sorted, create an internal
+ * Recno structure on the page. If either case doesn't
+ * fit, push out the current page and start a new one.
+ */
+ nomem = 0;
+ if (sorted) {
+ if ((ret = __db_build_bi(
+ dbp, fhp, ipage, page, indx, &nomem)) != 0)
+ goto err;
+ } else
+ if ((ret = __db_build_ri(
+ dbp, fhp, ipage, page, indx, &nomem)) != 0)
+ goto err;
+ if (nomem) {
+ indx = 0;
+ PUT_PAGE(dbp, fhp, PGNO(ipage), ipage);
+ } else {
+ ++indx;
+ ++NUM_ENT(ipage);
+ ++i;
+ }
+ }
+
+ /*
+ * Push out the last internal page. Set the top-level record
+ * count if we've reached the top.
+ */
+ if (next_cnt == 1)
+ RE_NREC_SET(ipage, nrecs);
+ PUT_PAGE(dbp, fhp, PGNO(ipage), ipage);
+
+ /* Swap the current and next page number arrays. */
+ cur_cnt = next_cnt;
+ tmp = pgno_cur;
+ pgno_cur = pgno_next;
+ pgno_next = tmp;
+ }
+
+ /*
+ * NOTE(review): assumes the incoming chain was non-empty; if
+ * *pgnop were PGNO_INVALID, pgno_cur would still be NULL here.
+ */
+ *pgnop = pgno_cur[0];
+
+err: if (pgno_cur != NULL)
+ __os_free(dbp->dbenv, pgno_cur);
+ if (pgno_next != NULL)
+ __os_free(dbp->dbenv, pgno_next);
+ if (ipage != NULL)
+ __os_free(dbp->dbenv, ipage);
+ if (page != NULL)
+ __os_free(dbp->dbenv, page);
+
+ return (ret);
+}
+
+/*
+ * __db_build_bi --
+ *	Build a BINTERNAL entry for a parent page.
+ *
+ * Copies the first key of "page" onto internal page "ipage" at slot
+ * "indx".  Sets *nomemp (and returns 0) when the entry does not fit,
+ * so the caller can flush ipage and retry on a fresh page.
+ */
+static int
+__db_build_bi(dbp, fhp, ipage, page, indx, nomemp)
+ DB *dbp;
+ DB_FH *fhp;
+ PAGE *ipage, *page;
+ u_int32_t indx;
+ int *nomemp;
+{
+ BINTERNAL bi, *child_bi;
+ BKEYDATA *child_bk;
+ u_int8_t *p;
+ int ret;
+ db_indx_t *inp;
+
+ inp = P_INP(dbp, ipage);
+ switch (TYPE(page)) {
+ case P_IBTREE:
+ /* Child is itself internal: copy its first BINTERNAL key. */
+ child_bi = GET_BINTERNAL(dbp, page, 0);
+ if (P_FREESPACE(dbp, ipage) < BINTERNAL_PSIZE(child_bi->len)) {
+ *nomemp = 1;
+ return (0);
+ }
+ inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(child_bi->len);
+ p = P_ENTRY(dbp, ipage, indx);
+
+ bi.len = child_bi->len;
+ B_TSET(bi.type, child_bi->type, 0);
+ bi.pgno = PGNO(page);
+ bi.nrecs = __bam_total(dbp, page);
+ memcpy(p, &bi, SSZA(BINTERNAL, data));
+ p += SSZA(BINTERNAL, data);
+ memcpy(p, child_bi->data, child_bi->len);
+
+ /* Increment the overflow ref count. */
+ if (B_TYPE(child_bi->type) == B_OVERFLOW)
+ if ((ret = __db_up_ovref(dbp, fhp,
+ ((BOVERFLOW *)(child_bi->data))->pgno)) != 0)
+ return (ret);
+ break;
+ case P_LDUP:
+ /* Child is a leaf: key format depends on the item type. */
+ child_bk = GET_BKEYDATA(dbp, page, 0);
+ switch (B_TYPE(child_bk->type)) {
+ case B_KEYDATA:
+ if (P_FREESPACE(dbp, ipage) <
+ BINTERNAL_PSIZE(child_bk->len)) {
+ *nomemp = 1;
+ return (0);
+ }
+ inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(child_bk->len);
+ p = P_ENTRY(dbp, ipage, indx);
+
+ bi.len = child_bk->len;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = PGNO(page);
+ bi.nrecs = __bam_total(dbp, page);
+ memcpy(p, &bi, SSZA(BINTERNAL, data));
+ p += SSZA(BINTERNAL, data);
+ memcpy(p, child_bk->data, child_bk->len);
+ break;
+ case B_OVERFLOW:
+ /* Key lives off-page: embed the BOVERFLOW record. */
+ if (P_FREESPACE(dbp, ipage) <
+ BINTERNAL_PSIZE(BOVERFLOW_SIZE)) {
+ *nomemp = 1;
+ return (0);
+ }
+ inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(BOVERFLOW_SIZE);
+ p = P_ENTRY(dbp, ipage, indx);
+
+ bi.len = BOVERFLOW_SIZE;
+ B_TSET(bi.type, child_bk->type, 0);
+ bi.pgno = PGNO(page);
+ bi.nrecs = __bam_total(dbp, page);
+ memcpy(p, &bi, SSZA(BINTERNAL, data));
+ p += SSZA(BINTERNAL, data);
+ memcpy(p, child_bk, BOVERFLOW_SIZE);
+
+ /* Increment the overflow ref count. */
+ if ((ret = __db_up_ovref(dbp, fhp,
+ ((BOVERFLOW *)child_bk)->pgno)) != 0)
+ return (ret);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(page)));
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, PGNO(page)));
+ }
+
+ return (0);
+}
+
+/*
+ * __db_build_ri --
+ *	Build a RINTERNAL entry for an internal parent page.
+ *
+ * Adds a (pgno, record count) entry for "page" to internal page
+ * "ipage" at slot "indx".  Sets *nomemp (and returns 0) when the
+ * entry does not fit, so the caller can flush ipage and retry.
+ */
+static int
+__db_build_ri(dbp, fhp, ipage, page, indx, nomemp)
+ DB *dbp;
+ DB_FH *fhp;
+ PAGE *ipage, *page;
+ u_int32_t indx;
+ int *nomemp;
+{
+ RINTERNAL ri;
+ db_indx_t *inp;
+
+ /* fhp is unused; kept for signature parity with __db_build_bi. */
+ COMPQUIET(fhp, NULL);
+ inp = P_INP(dbp, ipage);
+ if (P_FREESPACE(dbp, ipage) < RINTERNAL_PSIZE) {
+ *nomemp = 1;
+ return (0);
+ }
+
+ ri.pgno = PGNO(page);
+ ri.nrecs = __bam_total(dbp, page);
+ inp[indx] = HOFFSET(ipage) -= RINTERNAL_SIZE;
+ memcpy(P_ENTRY(dbp, ipage, indx), &ri, RINTERNAL_SIZE);
+
+ return (0);
+}
+
+/*
+ * __db_up_ovref --
+ *	Increment the reference count on an overflow page.
+ *
+ * (The code only ever increments, despite the historical mention of
+ * decrementing.)  Reads the page, bumps OV_REF, writes it back.
+ */
+static int
+__db_up_ovref(dbp, fhp, pgno)
+ DB *dbp;
+ DB_FH *fhp;
+ db_pgno_t pgno;
+{
+ PAGE *page;
+ size_t n;
+ int ret;
+
+ /* Allocate room to hold a page. */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &page)) != 0)
+ return (ret);
+
+ GET_PAGE(dbp, fhp, pgno, page);
+ ++OV_REF(page);
+ PUT_PAGE(dbp, fhp, pgno, page);
+
+ /* GET_PAGE/PUT_PAGE jump here with ret set on I/O failure. */
+err: __os_free(dbp->dbenv, page);
+
+ return (ret);
+}
diff --git a/storage/bdb/db/db_vrfy.c b/storage/bdb/db/db_vrfy.c
new file mode 100644
index 00000000000..1bbecdbd87a
--- /dev/null
+++ b/storage/bdb/db/db_vrfy.c
@@ -0,0 +1,2462 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_vrfy.c,v 1.107 2002/09/03 17:27:15 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_vrfy.c,v 1.107 2002/09/03 17:27:15 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __db_guesspgsize __P((DB_ENV *, DB_FH *));
+static int __db_is_valid_magicno __P((u_int32_t, DBTYPE *));
+static int __db_is_valid_pagetype __P((u_int32_t));
+static int __db_meta2pgset
+ __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, DB *));
+static int __db_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ PAGE *, void *, int (*)(void *, const void *), u_int32_t));
+static int __db_salvage_subdbpg __P((DB *, VRFY_DBINFO *,
+ PAGE *, void *, int (*)(void *, const void *), u_int32_t));
+static int __db_salvage_subdbs
+ __P((DB *, VRFY_DBINFO *, void *,
+ int(*)(void *, const void *), u_int32_t, int *));
+static int __db_salvage_unknowns
+ __P((DB *, VRFY_DBINFO *, void *,
+ int (*)(void *, const void *), u_int32_t));
+static int __db_vrfy_common
+ __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+static int __db_vrfy_freelist __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+static int __db_vrfy_invalid
+ __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+static int __db_vrfy_orderchkonly __P((DB *,
+ VRFY_DBINFO *, const char *, const char *, u_int32_t));
+static int __db_vrfy_pagezero __P((DB *, VRFY_DBINFO *, DB_FH *, u_int32_t));
+static int __db_vrfy_subdbs
+ __P((DB *, VRFY_DBINFO *, const char *, u_int32_t));
+static int __db_vrfy_structure
+ __P((DB *, VRFY_DBINFO *, const char *, db_pgno_t, u_int32_t));
+static int __db_vrfy_walkpages
+ __P((DB *, VRFY_DBINFO *, void *, int (*)(void *, const void *),
+ u_int32_t));
+
+/*
+ * This is the code for DB->verify, the DB database consistency checker.
+ * For now, it checks all subdatabases in a database, and verifies
+ * everything it knows how to (i.e. it's all-or-nothing, and one can't
+ * check only for a subset of possible problems).
+ */
+
+/*
+ * __db_verify --
+ *	Walk the entire file page-by-page, either verifying with or without
+ *	dumping in db_dump -d format, or DB_SALVAGE-ing whatever key/data
+ *	pairs can be found and dumping them in standard (db_load-ready)
+ *	dump format.
+ *
+ * (Salvaging isn't really a verification operation, but we put it
+ * here anyway because it requires essentially identical top-level
+ * code.)
+ *
+ * flags may be 0, DB_NOORDERCHK, DB_ORDERCHKONLY, or DB_SALVAGE
+ * (and optionally DB_AGGRESSIVE).
+ *
+ * __db_verify itself is simply a wrapper to __db_verify_internal,
+ * which lets us pass appropriate equivalents to FILE * in from the
+ * non-C APIs.
+ *
+ * PUBLIC: int __db_verify
+ * PUBLIC: __P((DB *, const char *, const char *, FILE *, u_int32_t));
+ */
+int
+__db_verify(dbp, file, database, outfile, flags)
+ DB *dbp;
+ const char *file, *database;
+ FILE *outfile;
+ u_int32_t flags;
+{
+
+ /* Thin wrapper: supply the stdio-based output callback. */
+ return (__db_verify_internal(dbp,
+ file, database, outfile, __db_verify_callback, flags));
+}
+
+/*
+ * __db_verify_callback --
+ *	Callback function for using pr_* functions from C.
+ *
+ * "handle" is a FILE *; "str_arg" the NUL-terminated string to emit.
+ * Returns EIO on a short write.
+ *
+ * PUBLIC: int __db_verify_callback __P((void *, const void *));
+ */
+int
+__db_verify_callback(handle, str_arg)
+ void *handle;
+ const void *str_arg;
+{
+ char *str;
+ FILE *f;
+
+ /* Cast away const only for the local alias; str is never written. */
+ str = (char *)str_arg;
+ f = (FILE *)handle;
+
+ if (fprintf(f, "%s", str) != (int)strlen(str))
+ return (EIO);
+
+ return (0);
+}
+
+/*
+ * __db_verify_internal --
+ * Inner meat of __db_verify.
+ *
+ * PUBLIC: int __db_verify_internal __P((DB *, const char *,
+ * PUBLIC: const char *, void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__db_verify_internal(dbp_orig, name, subdb, handle, callback, flags)
+ DB *dbp_orig;
+ const char *name, *subdb;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_FH fh, *fhp;
+ VRFY_DBINFO *vdp;
+ int has, ret, isbad;
+ char *real_name;
+
+ dbenv = dbp_orig->dbenv;
+ vdp = NULL;
+ real_name = NULL;
+ ret = isbad = 0;
+
+ memset(&fh, 0, sizeof(fh));
+ fhp = &fh;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_AFTER_OPEN(dbp_orig, "verify");
+
+#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_ORDERCHKONLY | \
+ DB_PRINTABLE | DB_SALVAGE)
+ if ((ret = __db_fchk(dbenv, "DB->verify", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ /*
+ * DB_SALVAGE is mutually exclusive with the other flags except
+ * DB_AGGRESSIVE and DB_PRINTABLE.
+ */
+ if (LF_ISSET(DB_SALVAGE) &&
+ (flags & ~DB_AGGRESSIVE & ~DB_PRINTABLE) != DB_SALVAGE)
+ return (__db_ferr(dbenv, "__db_verify", 1));
+
+ /* DB_AGGRESSIVE and DB_PRINTABLE are only meaningful when salvaging. */
+ if ((LF_ISSET(DB_AGGRESSIVE) || LF_ISSET(DB_PRINTABLE)) &&
+ !LF_ISSET(DB_SALVAGE))
+ return (__db_ferr(dbenv, "__db_verify", 1));
+
+ if (LF_ISSET(DB_ORDERCHKONLY) && flags != DB_ORDERCHKONLY)
+ return (__db_ferr(dbenv, "__db_verify", 1));
+
+ if (LF_ISSET(DB_ORDERCHKONLY) && subdb == NULL) {
+ __db_err(dbenv, "DB_ORDERCHKONLY requires a database name");
+ return (EINVAL);
+ }
+
+ /*
+ * Forbid working in an environment that uses transactions or
+ * locking; we're going to be looking at the file freely,
+ * and while we're not going to modify it, we aren't obeying
+ * locking conventions either.
+ */
+ if (TXN_ON(dbenv) || LOCKING_ON(dbenv) || LOGGING_ON(dbenv)) {
+ dbp_orig->errx(dbp_orig,
+ "verify may not be used with transactions, logging, or locking");
+ return (EINVAL);
+ /* NOTREACHED */
+ }
+
+ /* Create a dbp to use internally, which we can close at our leisure. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+ F_SET(dbp, DB_AM_VERIFYING);
+
+ /* Copy the supplied pagesize, which we use if the file one is bogus. */
+ if (dbp_orig->pgsize >= DB_MIN_PGSIZE &&
+ dbp_orig->pgsize <= DB_MAX_PGSIZE)
+ dbp->set_pagesize(dbp, dbp_orig->pgsize);
+
+ /* Copy the feedback function, if present, and initialize it. */
+ if (!LF_ISSET(DB_SALVAGE) && dbp_orig->db_feedback != NULL) {
+ dbp->set_feedback(dbp, dbp_orig->db_feedback);
+ dbp->db_feedback(dbp, DB_VERIFY, 0);
+ }
+
+ /*
+ * Copy the comparison and hashing functions. Note that
+ * even if the database is not a hash or btree, the respective
+ * internal structures will have been initialized.
+ */
+ if (dbp_orig->dup_compare != NULL &&
+ (ret = dbp->set_dup_compare(dbp, dbp_orig->dup_compare)) != 0)
+ goto err;
+ if (((BTREE *)dbp_orig->bt_internal)->bt_compare != NULL &&
+ (ret = dbp->set_bt_compare(dbp,
+ ((BTREE *)dbp_orig->bt_internal)->bt_compare)) != 0)
+ goto err;
+ if (((HASH *)dbp_orig->h_internal)->h_hash != NULL &&
+ (ret = dbp->set_h_hash(dbp,
+ ((HASH *)dbp_orig->h_internal)->h_hash)) != 0)
+ goto err;
+
+ /*
+ * We don't know how large the cache is, and if the database
+ * in question uses a small page size--which we don't know
+ * yet!--it may be uncomfortably small for the default page
+ * size [#2143]. However, the things we need temporary
+ * databases for in dbinfo are largely tiny, so using a
+ * 1024-byte pagesize is probably not going to be a big hit,
+ * and will make us fit better into small spaces.
+ */
+ if ((ret = __db_vrfy_dbinfo_create(dbenv, 1024, &vdp)) != 0)
+ goto err;
+
+ /*
+ * Note whether the user has requested that we use printable
+ * chars where possible. We won't get here with this flag if
+ * we're not salvaging.
+ */
+ if (LF_ISSET(DB_PRINTABLE))
+ F_SET(vdp, SALVAGE_PRINTABLE);
+
+ /* Find the real name of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /*
+ * Our first order of business is to verify page 0, which is
+ * the metadata page for the master database of subdatabases
+ * or of the only database in the file. We want to do this by hand
+ * rather than just calling __db_open in case it's corrupt--various
+ * things in __db_open might act funny.
+ *
+ * Once we know the metadata page is healthy, I believe that it's
+ * safe to open the database normally and then use the page swapping
+ * code, which makes life easier.
+ */
+ if ((ret = __os_open(dbenv, real_name, DB_OSO_RDONLY, 0444, fhp)) != 0)
+ goto err;
+
+ /* Verify the metadata page 0; set pagesize and type. */
+ if ((ret = __db_vrfy_pagezero(dbp, vdp, fhp, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * We can assume at this point that dbp->pagesize and dbp->type are
+ * set correctly, or at least as well as they can be, and that
+ * locking, logging, and txns are not in use. Thus we can trust
+ * the memp code not to look at the page, and thus to be safe
+ * enough to use.
+ *
+ * The dbp is not open, but the file is open in the fhp, and we
+ * cannot assume that __db_open is safe. Call __db_dbenv_setup,
+ * the [safe] part of __db_open that initializes the environment--
+ * and the mpool--manually.
+ */
+ if ((ret = __db_dbenv_setup(dbp, NULL,
+ name, TXN_INVALID, DB_ODDFILESIZE | DB_RDONLY)) != 0)
+ return (ret);
+
+ /* Mark the dbp as opened, so that we correctly handle its close. */
+ F_SET(dbp, DB_AM_OPEN_CALLED);
+
+ /* Find out the page number of the last page in the database. */
+ dbp->mpf->last_pgno(dbp->mpf, &vdp->last_pgno);
+
+ /*
+ * DB_ORDERCHKONLY is a special case; our file consists of
+ * several subdatabases, which use different hash, bt_compare,
+ * and/or dup_compare functions. Consequently, we couldn't verify
+ * sorting and hashing simply by calling DB->verify() on the file.
+ * DB_ORDERCHKONLY allows us to come back and check those things; it
+ * requires a subdatabase, and assumes that everything but that
+ * database's sorting/hashing is correct.
+ */
+ if (LF_ISSET(DB_ORDERCHKONLY)) {
+ ret = __db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags);
+ goto done;
+ }
+
+ /*
+ * When salvaging, we use a db to keep track of whether we've seen a
+ * given overflow or dup page in the course of traversing normal data.
+ * If in the end we have not, we assume its key got lost and print it
+ * with key "UNKNOWN".
+ */
+ if (LF_ISSET(DB_SALVAGE)) {
+ if ((ret = __db_salvage_init(vdp)) != 0)
+ return (ret);
+
+ /*
+ * If we're not being aggressive, attempt to crack subdbs.
+ * "has" will indicate whether the attempt has succeeded
+ * (even in part), meaning that we have some semblance of
+ * subdbs; on the walkpages pass, we print out
+ * whichever data pages we have not seen.
+ */
+ has = 0;
+ if (!LF_ISSET(DB_AGGRESSIVE) && (__db_salvage_subdbs(dbp,
+ vdp, handle, callback, flags, &has)) != 0)
+ isbad = 1;
+
+ /*
+ * If we have subdatabases, we need to signal that if
+ * any keys are found that don't belong to a subdatabase,
+ * they'll need to have an "__OTHER__" subdatabase header
+ * printed first. Flag this. Else, print a header for
+ * the normal, non-subdb database.
+ */
+ if (has == 1)
+ F_SET(vdp, SALVAGE_PRINTHEADER);
+ else if ((ret = __db_prheader(dbp,
+ NULL, 0, 0, handle, callback, vdp, PGNO_BASE_MD)) != 0)
+ goto err;
+ }
+
+ if ((ret =
+ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+ }
+
+ /* If we're verifying, verify inter-page structure. */
+ if (!LF_ISSET(DB_SALVAGE) && isbad == 0)
+ if ((ret =
+ __db_vrfy_structure(dbp, vdp, name, 0, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+ }
+
+ /*
+ * If we're salvaging, output with key UNKNOWN any overflow or dup pages
+ * we haven't been able to put in context. Then destroy the salvager's
+ * state-saving database.
+ */
+ if (LF_ISSET(DB_SALVAGE)) {
+ if ((ret = __db_salvage_unknowns(dbp,
+ vdp, handle, callback, flags)) != 0)
+ isbad = 1;
+ /* No return value, since there's little we can do. */
+ __db_salvage_destroy(vdp);
+ }
+
+ if (0) {
+ /* Don't try to strerror() DB_VERIFY_FATAL; it's private. */
+err: if (ret == DB_VERIFY_FATAL)
+ ret = DB_VERIFY_BAD;
+ (void)__db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ }
+
+ if (LF_ISSET(DB_SALVAGE) &&
+ (has == 0 || F_ISSET(vdp, SALVAGE_PRINTFOOTER)))
+ (void)__db_prfooter(handle, callback);
+
+ /* Send feedback that we're done. */
+done: if (!LF_ISSET(DB_SALVAGE) && dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_VERIFY, 100);
+
+ if (F_ISSET(fhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, fhp);
+ if (dbp)
+ (void)dbp->close(dbp, 0);
+ if (vdp)
+ (void)__db_vrfy_dbinfo_destroy(dbenv, vdp);
+ if (real_name)
+ __os_free(dbenv, real_name);
+
+ if ((ret == 0 && isbad == 1) || ret == DB_VERIFY_FATAL)
+ ret = DB_VERIFY_BAD;
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_pagezero --
+ * Verify the master metadata page. Use seek, read, and a local buffer
+ * rather than the DB paging code, for safety.
+ *
+ * Must correctly (or best-guess) set dbp->type and dbp->pagesize.
+ *
+ * Returns 0, DB_VERIFY_BAD if the page is damaged but verification can
+ * continue, DB_VERIFY_FATAL if it cannot, or another error code.
+ */
+static int
+__db_vrfy_pagezero(dbp, vdp, fhp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ DB_FH *fhp;
+ u_int32_t flags;
+{
+ DBMETA *meta;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t freelist;
+ size_t nr;
+ int isbad, ret, swapped;
+ u_int8_t mbuf[DBMETASIZE];
+
+ isbad = ret = swapped = 0;
+ freelist = 0;
+ dbenv = dbp->dbenv;
+ meta = (DBMETA *)mbuf;
+ dbp->type = DB_UNKNOWN;
+
+ /*
+ * Seek to the metadata page.
+ * Note that if we're just starting a verification, dbp->pgsize
+ * may be zero; this is okay, as we want page zero anyway and
+ * 0*0 == 0.
+ */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0 ||
+ (ret = __os_read(dbenv, fhp, mbuf, DBMETASIZE, &nr)) != 0) {
+ __db_err(dbenv,
+ "Metadata page %lu cannot be read: %s",
+ (u_long)PGNO_BASE_MD, db_strerror(ret));
+ return (ret);
+ }
+
+ if (nr != DBMETASIZE) {
+ EPRINT((dbenv,
+ "Page %lu: Incomplete metadata page",
+ (u_long)PGNO_BASE_MD));
+ return (DB_VERIFY_FATAL);
+ }
+
+ /*
+ * NOTE(review): judging by the handling below, a return of -1
+ * from __db_chk_meta appears to mean "corrupt but continuable";
+ * any other nonzero return is treated as fatal.
+ */
+ if ((ret = __db_chk_meta(dbenv, dbp, meta, 1)) != 0) {
+ EPRINT((dbenv,
+ "Page %lu: metadata page corrupted", (u_long)PGNO_BASE_MD));
+ isbad = 1;
+ if (ret != -1) {
+ EPRINT((dbenv,
+ "Page %lu: could not check metadata page",
+ (u_long)PGNO_BASE_MD));
+ return (DB_VERIFY_FATAL);
+ }
+ }
+
+ /*
+ * Check all of the fields that we can.
+ *
+ * 08-11: Current page number. Must == pgno.
+ * Note that endianness doesn't matter--it's zero.
+ */
+ if (meta->pgno != PGNO_BASE_MD) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: pgno incorrectly set to %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->pgno));
+ }
+
+ /* 12-15: Magic number. Must be one of valid set. */
+ if (__db_is_valid_magicno(meta->magic, &dbp->type))
+ swapped = 0;
+ else {
+ M_32_SWAP(meta->magic);
+ if (__db_is_valid_magicno(meta->magic,
+ &dbp->type))
+ swapped = 1;
+ else {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: bad magic number %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->magic));
+ }
+ }
+
+ /*
+ * 16-19: Version. Must be current; for now, we
+ * don't support verification of old versions.
+ */
+ if (swapped)
+ M_32_SWAP(meta->version);
+ if ((dbp->type == DB_BTREE &&
+ (meta->version > DB_BTREEVERSION ||
+ meta->version < DB_BTREEOLDVER)) ||
+ (dbp->type == DB_HASH &&
+ (meta->version > DB_HASHVERSION ||
+ meta->version < DB_HASHOLDVER)) ||
+ (dbp->type == DB_QUEUE &&
+ (meta->version > DB_QAMVERSION ||
+ meta->version < DB_QAMOLDVER))) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: unsupported DB version %lu; extraneous errors may result",
+ (u_long)PGNO_BASE_MD, (u_long)meta->version));
+ }
+
+ /*
+ * 20-23: Pagesize. Must be power of two,
+ * greater than 512, and less than 64K.
+ */
+ if (swapped)
+ M_32_SWAP(meta->pagesize);
+ if (IS_VALID_PAGESIZE(meta->pagesize))
+ dbp->pgsize = meta->pagesize;
+ else {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: bad page size %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->pagesize));
+
+ /*
+ * Now try to settle on a pagesize to use.
+ * If the user-supplied one is reasonable,
+ * use it; else, guess.
+ */
+ if (!IS_VALID_PAGESIZE(dbp->pgsize))
+ dbp->pgsize = __db_guesspgsize(dbenv, fhp);
+ }
+
+ /*
+ * 25: Page type. Must be correct for dbp->type,
+ * which is by now set as well as it can be.
+ */
+ /* Needs no swapping--only one byte! */
+ if ((dbp->type == DB_BTREE && meta->type != P_BTREEMETA) ||
+ (dbp->type == DB_HASH && meta->type != P_HASHMETA) ||
+ (dbp->type == DB_QUEUE && meta->type != P_QAMMETA)) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: bad page type %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->type));
+ }
+
+ /*
+ * 28-31: Free list page number.
+ * We'll verify its sensibility when we do inter-page
+ * verification later; for now, just store it.
+ */
+ if (swapped)
+ M_32_SWAP(meta->free);
+ freelist = meta->free;
+
+ /*
+ * Initialize vdp->pages to fit a single pageinfo structure for
+ * this one page. We'll realloc later when we know how many
+ * pages there are.
+ */
+ if ((ret = __db_vrfy_getpageinfo(vdp, PGNO_BASE_MD, &pip)) != 0)
+ return (ret);
+ pip->pgno = PGNO_BASE_MD;
+ pip->type = meta->type;
+
+ /*
+ * Signal that we still have to check the info specific to
+ * a given type of meta page.
+ */
+ F_SET(pip, VRFY_INCOMPLETE);
+
+ pip->free = freelist;
+
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ return (ret);
+
+ /* Set up the dbp's fileid. We don't use the regular open path. */
+ memcpy(dbp->fileid, meta->uid, DB_FILE_ID_LEN);
+
+ if (swapped == 1)
+ F_SET(dbp, DB_AM_SWAP);
+
+ return (isbad ? DB_VERIFY_BAD : 0);
+}
+
+/*
+ * __db_vrfy_walkpages --
+ * Main loop of the verifier/salvager. Walks through,
+ * page by page, and verifies all pages and/or prints all data pages.
+ *
+ * Returns 0, DB_VERIFY_BAD if any page failed verification, or a
+ * fatal error from the memory pool or a per-page routine. When
+ * DB_SALVAGE is set, errors are accumulated rather than returned
+ * immediately so as many pages as possible get dumped.
+ */
+static int
+__db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t i;
+ int ret, t_ret, isbad;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ ret = isbad = t_ret = 0;
+
+ if ((ret = __db_fchk(dbenv,
+ "__db_vrfy_walkpages", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ for (i = 0; i <= vdp->last_pgno; i++) {
+ /*
+ * If DB_SALVAGE is set, we inspect our database of
+ * completed pages, and skip any we've already printed in
+ * the subdb pass.
+ */
+ if (LF_ISSET(DB_SALVAGE) && (__db_salvage_isdone(vdp, i) != 0))
+ continue;
+
+ /*
+ * If an individual page get fails, keep going if and only
+ * if we're salvaging.
+ */
+ if ((t_ret = mpf->get(mpf, &i, 0, &h)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ if (LF_ISSET(DB_SALVAGE))
+ continue;
+ else
+ return (ret);
+ }
+
+ if (LF_ISSET(DB_SALVAGE)) {
+ /*
+ * We pretty much don't want to quit unless a
+ * bomb hits. May as well return that something
+ * was screwy, however.
+ */
+ if ((t_ret = __db_salvage(dbp,
+ vdp, i, h, handle, callback, flags)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ isbad = 1;
+ }
+ } else {
+ /*
+ * If we are not salvaging, and we get any error
+ * other than DB_VERIFY_BAD, return immediately;
+ * it may not be safe to proceed. If we get
+ * DB_VERIFY_BAD, keep going; listing more errors
+ * may make it easier to diagnose problems and
+ * determine the magnitude of the corruption.
+ */
+
+ /*
+ * Verify info common to all page
+ * types.
+ */
+ if (i != PGNO_BASE_MD) {
+ ret = __db_vrfy_common(dbp, vdp, h, i, flags);
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+ }
+
+ /* Dispatch on the on-page type byte. */
+ switch (TYPE(h)) {
+ case P_INVALID:
+ ret = __db_vrfy_invalid(dbp, vdp, h, i, flags);
+ break;
+ case __P_DUPLICATE:
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: old-style duplicate page",
+ (u_long)i));
+ break;
+ case P_HASH:
+ ret = __ham_vrfy(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ ret = __bam_vrfy(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_LRECNO:
+ ret = __ram_vrfy_leaf(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_OVERFLOW:
+ ret = __db_vrfy_overflow(dbp,
+ vdp, h, i, flags);
+ break;
+ case P_HASHMETA:
+ ret = __ham_vrfy_meta(dbp,
+ vdp, (HMETA *)h, i, flags);
+ break;
+ case P_BTREEMETA:
+ ret = __bam_vrfy_meta(dbp,
+ vdp, (BTMETA *)h, i, flags);
+ break;
+ case P_QAMMETA:
+ ret = __qam_vrfy_meta(dbp,
+ vdp, (QMETA *)h, i, flags);
+ break;
+ case P_QAMDATA:
+ ret = __qam_vrfy_data(dbp,
+ vdp, (QPAGE *)h, i, flags);
+ break;
+ default:
+ EPRINT((dbenv,
+ "Page %lu: unknown page type %lu",
+ (u_long)i, (u_long)TYPE(h)));
+ isbad = 1;
+ break;
+ }
+
+ /*
+ * Set up error return.
+ */
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else if (ret != 0)
+ goto err;
+
+ /*
+ * Provide feedback to the application about our
+ * progress. The range 0-50% comes from the fact
+ * that this is the first of two passes through the
+ * database (front-to-back, then top-to-bottom).
+ */
+ if (dbp->db_feedback != NULL)
+ dbp->db_feedback(dbp, DB_VERIFY,
+ (i + 1) * 50 / (vdp->last_pgno + 1));
+ }
+
+ /*
+ * Just as with the page get, bail if and only if we're
+ * not salvaging.
+ */
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ if (!LF_ISSET(DB_SALVAGE))
+ return (ret);
+ }
+ }
+
+ /* The err label is reached only while page h is still pinned. */
+ if (0) {
+err: if ((t_ret = mpf->put(mpf, h, 0)) != 0)
+ return (ret == 0 ? t_ret : ret);
+ }
+
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_structure--
+ * After a beginning-to-end walk through the database has been
+ * completed, put together the information that has been collected
+ * to verify the overall database structure.
+ *
+ * Should only be called if we want to do a database verification,
+ * i.e. if DB_SALVAGE is not set.
+ *
+ * Returns 0, DB_VERIFY_BAD, or a fatal error from one of the
+ * access-method structure checkers.
+ */
+static int
+__db_vrfy_structure(dbp, vdp, dbname, meta_pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ const char *dbname;
+ db_pgno_t meta_pgno;
+ u_int32_t flags;
+{
+ DB *pgset;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t i;
+ int ret, isbad, hassubs, p;
+
+ isbad = 0;
+ pip = NULL;
+ dbenv = dbp->dbenv;
+ pgset = vdp->pgset;
+
+ if ((ret = __db_fchk(dbenv, "DB->verify", flags, OKFLAGS)) != 0)
+ return (ret);
+ if (LF_ISSET(DB_SALVAGE)) {
+ __db_err(dbenv, "__db_vrfy_structure called with DB_SALVAGE");
+ return (EINVAL);
+ }
+
+ /*
+ * Providing feedback here is tricky; in most situations,
+ * we fetch each page one more time, but we do so in a top-down
+ * order that depends on the access method. Worse, we do this
+ * recursively in btree, such that on any call where we're traversing
+ * a subtree we don't know where that subtree is in the whole database;
+ * worse still, any given database may be one of several subdbs.
+ *
+ * The solution is to decrement a counter vdp->pgs_remaining each time
+ * we verify (and call feedback on) a page. We may over- or
+ * under-count, but the structure feedback function will ensure that we
+ * never give a percentage under 50 or over 100. (The first pass
+ * covered the range 0-50%.)
+ */
+ if (dbp->db_feedback != NULL)
+ vdp->pgs_remaining = vdp->last_pgno + 1;
+
+ /*
+ * Call the appropriate function to downwards-traverse the db type.
+ */
+ switch(dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_vrfy_structure(dbp, vdp, 0, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * If we have subdatabases and we know that the database is,
+ * thus far, sound, it's safe to walk the tree of subdatabases.
+ * Do so, and verify the structure of the databases within.
+ */
+ if ((ret = __db_vrfy_getpageinfo(vdp, 0, &pip)) != 0)
+ goto err;
+ hassubs = F_ISSET(pip, VRFY_HAS_SUBDBS) ? 1 : 0;
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ goto err;
+
+ if (isbad == 0 && hassubs)
+ if ((ret =
+ __db_vrfy_subdbs(dbp, vdp, dbname, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case DB_HASH:
+ if ((ret = __ham_vrfy_structure(dbp, vdp, 0, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case DB_QUEUE:
+ if ((ret = __qam_vrfy_structure(dbp, vdp, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ }
+
+ /*
+ * Queue pages may be unreferenced and totally zeroed, if
+ * they're empty; queue doesn't have much structure, so
+ * this is unlikely to be wrong in any troublesome sense.
+ * Skip to "err".
+ */
+ goto err;
+ /* NOTREACHED */
+ default:
+ /* This should only happen if the verifier is somehow broken. */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ /* NOTREACHED */
+ }
+
+ /* Walk free list. */
+ if ((ret =
+ __db_vrfy_freelist(dbp, vdp, meta_pgno, flags)) == DB_VERIFY_BAD)
+ isbad = 1;
+
+ /*
+ * If structure checks up until now have failed, it's likely that
+ * checking what pages have been missed will result in oodles of
+ * extraneous error messages being EPRINTed. Skip to the end
+ * if this is the case; we're going to be printing at least one
+ * error anyway, and probably all the more salient ones.
+ */
+ if (ret != 0 || isbad == 1)
+ goto err;
+
+ /*
+ * Make sure no page has been missed and that no page is still marked
+ * "all zeroes" (only certain hash pages can be, and they're unmarked
+ * in __ham_vrfy_structure).
+ */
+ for (i = 0; i < vdp->last_pgno + 1; i++) {
+ if ((ret = __db_vrfy_getpageinfo(vdp, i, &pip)) != 0)
+ goto err;
+ /* p is the reference count recorded for page i in pgset. */
+ if ((ret = __db_vrfy_pgset_get(pgset, i, &p)) != 0)
+ goto err;
+ if (p == 0) {
+ EPRINT((dbenv,
+ "Page %lu: unreferenced page", (u_long)i));
+ isbad = 1;
+ }
+
+ if (F_ISSET(pip, VRFY_IS_ALLZEROES)) {
+ EPRINT((dbenv,
+ "Page %lu: totally zeroed page", (u_long)i));
+ isbad = 1;
+ }
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ goto err;
+ pip = NULL;
+ }
+
+err: if (pip != NULL)
+ (void)__db_vrfy_putpageinfo(dbenv, vdp, pip);
+
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_is_valid_pagetype --
+ * Return 1 iff "type" is a page type known to the verifier,
+ * 0 otherwise.
+ */
+static int
+__db_is_valid_pagetype(type)
+ u_int32_t type;
+{
+ switch (type) {
+ case P_BTREEMETA:
+ case P_HASH:
+ case P_HASHMETA:
+ case P_IBTREE:
+ case P_INVALID:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ case P_OVERFLOW:
+ case P_QAMDATA:
+ case P_QAMMETA:
+ return (1);
+ default:
+ return (0);
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * __db_is_valid_magicno --
+ * If "magic" is a known magic number, store the corresponding
+ * access-method type through typep and return 1; otherwise store
+ * DB_UNKNOWN and return 0.
+ */
+static int
+__db_is_valid_magicno(magic, typep)
+ u_int32_t magic;
+ DBTYPE *typep;
+{
+ DBTYPE t;
+
+ switch (magic) {
+ case DB_BTREEMAGIC:
+ t = DB_BTREE;
+ break;
+ case DB_HASHMAGIC:
+ t = DB_HASH;
+ break;
+ case DB_QAMMAGIC:
+ t = DB_QUEUE;
+ break;
+ default:
+ *typep = DB_UNKNOWN;
+ return (0);
+ }
+ *typep = t;
+ return (1);
+}
+
+/*
+ * __db_vrfy_common --
+ * Verify info common to all page types.
+ *
+ * Fills in and stores the VRFY_PAGEINFO for pgno. Returns 0,
+ * DB_VERIFY_BAD if the page number or page type is bad or the page
+ * is only partially zeroed, or an error from the pageinfo cache.
+ */
+static int
+__db_vrfy_common(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ int ret, t_ret;
+ u_int8_t *p;
+
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ pip->pgno = pgno;
+ /* Assume not-all-zeroes until the byte scan below proves it. */
+ F_CLR(pip, VRFY_IS_ALLZEROES);
+
+ /*
+ * Hash expands the table by leaving some pages between the
+ * old last and the new last totally zeroed. Its pgin function
+ * should fix things, but we might not be using that (e.g. if
+ * we're a subdatabase).
+ *
+ * Queue will create sparse files if sparse record numbers are used.
+ */
+ if (pgno != 0 && PGNO(h) == 0) {
+ /* Scan every byte of the page looking for nonzero data. */
+ for (p = (u_int8_t *)h; p < (u_int8_t *)h + dbp->pgsize; p++)
+ if (*p != 0) {
+ EPRINT((dbenv,
+ "Page %lu: partially zeroed page",
+ (u_long)pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ /*
+ * It's totally zeroed; mark it as a hash, and we'll
+ * check that that makes sense structurally later.
+ * (The queue verification doesn't care, since queues
+ * don't really have much in the way of structure.)
+ */
+ pip->type = P_HASH;
+ F_SET(pip, VRFY_IS_ALLZEROES);
+ ret = 0;
+ goto err; /* well, not really an err. */
+ }
+
+ if (PGNO(h) != pgno) {
+ EPRINT((dbenv, "Page %lu: bad page number %lu",
+ (u_long)pgno, (u_long)h->pgno));
+ ret = DB_VERIFY_BAD;
+ }
+
+ if (!__db_is_valid_pagetype(h->type)) {
+ EPRINT((dbenv, "Page %lu: bad page type %lu",
+ (u_long)pgno, (u_long)h->type));
+ ret = DB_VERIFY_BAD;
+ }
+ pip->type = h->type;
+
+err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_invalid --
+ * Verify a P_INVALID page: record its next_pgno link (free-list
+ * chain) if plausible, complain otherwise. There is little else
+ * to check on an invalid page.
+ */
+static int
+__db_vrfy_invalid(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t next;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* Start with no recorded links. */
+ pip->prev_pgno = 0;
+ pip->next_pgno = 0;
+
+ next = NEXT_PGNO(h);
+ if (IS_VALID_PGNO(next))
+ pip->next_pgno = next;
+ else {
+ EPRINT((dbenv, "Page %lu: invalid next_pgno %lu",
+ (u_long)pgno, (u_long)next));
+ ret = DB_VERIFY_BAD;
+ }
+
+ if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_vrfy_datapage --
+ * Verify elements common to data pages (P_HASH, P_LBTREE,
+ * P_IBTREE, P_IRECNO, P_LRECNO, P_OVERFLOW, P_DUPLICATE)--i.e.,
+ * those defined in the PAGE structure.
+ *
+ * Called from each of the per-page routines, after the
+ * all-page-type-common elements of pip have been verified and filled
+ * in.
+ *
+ * Returns 0 or DB_VERIFY_BAD; other errors come only from the
+ * pageinfo cache.
+ *
+ * PUBLIC: int __db_vrfy_datapage
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+ */
+int
+__db_vrfy_datapage(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ isbad = 0;
+
+ /*
+ * prev_pgno and next_pgno: store for inter-page checks,
+ * verify that they point to actual pages and not to self.
+ *
+ * !!!
+ * Internal btree pages do not maintain these fields (indeed,
+ * they overload them). Skip.
+ */
+ if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) {
+ if (!IS_VALID_PGNO(PREV_PGNO(h)) || PREV_PGNO(h) == pip->pgno) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: invalid prev_pgno %lu",
+ (u_long)pip->pgno, (u_long)PREV_PGNO(h)));
+ }
+ if (!IS_VALID_PGNO(NEXT_PGNO(h)) || NEXT_PGNO(h) == pip->pgno) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: invalid next_pgno %lu",
+ (u_long)pip->pgno, (u_long)NEXT_PGNO(h)));
+ }
+ pip->prev_pgno = PREV_PGNO(h);
+ pip->next_pgno = NEXT_PGNO(h);
+ }
+
+ /*
+ * Verify the number of entries on the page.
+ * There is no good way to determine if this is accurate; the
+ * best we can do is verify that it's not more than can, in theory,
+ * fit on the page. Then, we make sure there are at least
+ * this many valid elements in inp[], and hope that this catches
+ * most cases.
+ */
+ if (TYPE(h) != P_OVERFLOW) {
+ if (BKEYDATA_PSIZE(0) * NUM_ENT(h) > dbp->pgsize) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: too many entries: %lu",
+ (u_long)pgno, (u_long)NUM_ENT(h)));
+ }
+ pip->entries = NUM_ENT(h);
+ }
+
+ /*
+ * btree level. Should be zero unless we're a btree;
+ * if we are a btree, should be between LEAFLEVEL and MAXBTREELEVEL,
+ * and we need to save it off.
+ */
+ switch (TYPE(h)) {
+ case P_IBTREE:
+ case P_IRECNO:
+ /* Internal pages must sit strictly above the leaf level. */
+ if (LEVEL(h) < LEAFLEVEL + 1 || LEVEL(h) > MAXBTREELEVEL) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: bad btree level %lu",
+ (u_long)pgno, (u_long)LEVEL(h)));
+ }
+ pip->bt_level = LEVEL(h);
+ break;
+ case P_LBTREE:
+ case P_LDUP:
+ case P_LRECNO:
+ if (LEVEL(h) != LEAFLEVEL) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: btree leaf page has incorrect level %lu",
+ (u_long)pgno, (u_long)LEVEL(h)));
+ }
+ break;
+ default:
+ if (LEVEL(h) != 0) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: nonzero level %lu in non-btree database",
+ (u_long)pgno, (u_long)LEVEL(h)));
+ }
+ break;
+ }
+
+ /*
+ * Even though inp[] occurs in all PAGEs, we look at it in the
+ * access-method-specific code, since btree and hash treat
+ * item lengths very differently, and one of the most important
+ * things we want to verify is that the data--as specified
+ * by offset and length--cover the right part of the page
+ * without overlaps, gaps, or violations of the page boundary.
+ */
+ if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_meta--
+ * Verify the access-method common parts of a meta page, using
+ * normal mpool routines.
+ *
+ * Returns 0, DB_VERIFY_BAD, or EINVAL if the page type is not a
+ * meta page type (which the caller should have prevented).
+ *
+ * PUBLIC: int __db_vrfy_meta
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, DBMETA *, db_pgno_t, u_int32_t));
+ */
+int
+__db_vrfy_meta(dbp, vdp, meta, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ DBMETA *meta;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBTYPE dbtype, magtype;
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ isbad = 0;
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* type plausible for a meta page */
+ switch (meta->type) {
+ case P_BTREEMETA:
+ dbtype = DB_BTREE;
+ break;
+ case P_HASHMETA:
+ dbtype = DB_HASH;
+ break;
+ case P_QAMMETA:
+ dbtype = DB_QUEUE;
+ break;
+ default:
+ /* The verifier should never let us get here. */
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* magic number valid */
+ /* On failure, __db_is_valid_magicno sets magtype to DB_UNKNOWN. */
+ if (!__db_is_valid_magicno(meta->magic, &magtype)) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: invalid magic number", (u_long)pgno));
+ }
+ if (magtype != dbtype) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: magic number does not match database type",
+ (u_long)pgno));
+ }
+
+ /* version */
+ if ((dbtype == DB_BTREE &&
+ (meta->version > DB_BTREEVERSION ||
+ meta->version < DB_BTREEOLDVER)) ||
+ (dbtype == DB_HASH &&
+ (meta->version > DB_HASHVERSION ||
+ meta->version < DB_HASHOLDVER)) ||
+ (dbtype == DB_QUEUE &&
+ (meta->version > DB_QAMVERSION ||
+ meta->version < DB_QAMOLDVER))) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: unsupported database version %lu; extraneous errors may result",
+ (u_long)pgno, (u_long)meta->version));
+ }
+
+ /* pagesize */
+ if (meta->pagesize != dbp->pgsize) {
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: invalid pagesize %lu",
+ (u_long)pgno, (u_long)meta->pagesize));
+ }
+
+ /* free list */
+ /*
+ * If this is not the main, master-database meta page, it
+ * should not have a free list.
+ */
+ if (pgno != PGNO_BASE_MD && meta->free != PGNO_INVALID) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: nonempty free list on subdatabase metadata page",
+ (u_long)pgno));
+ }
+
+ /* Can correctly be PGNO_INVALID--that's just the end of the list. */
+ if (meta->free != PGNO_INVALID && IS_VALID_PGNO(meta->free))
+ pip->free = meta->free;
+ else if (!IS_VALID_PGNO(meta->free)) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: nonsensical free list pgno %lu",
+ (u_long)pgno, (u_long)meta->free));
+ }
+
+ /*
+ * We have now verified the common fields of the metadata page.
+ * Clear the flag that told us they had been incompletely checked.
+ */
+ F_CLR(pip, VRFY_INCOMPLETE);
+
+ /* pip is still held on every path; always return it to the cache. */
+err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_freelist --
+ * Walk free list, checking off pages and verifying absence of
+ * loops.
+ *
+ * "meta" is the page number of the metadata page whose free list
+ * we walk. Returns 0, DB_VERIFY_BAD, or a pageinfo-cache error.
+ */
+static int
+__db_vrfy_freelist(dbp, vdp, meta, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta;
+ u_int32_t flags;
+{
+ DB *pgset;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t cur_pgno, next_pgno;
+ int p, ret, t_ret;
+
+ pgset = vdp->pgset;
+ DB_ASSERT(pgset != NULL);
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta, &pip)) != 0)
+ return (ret);
+ for (next_pgno = pip->free;
+ next_pgno != PGNO_INVALID; next_pgno = pip->next_pgno) {
+ cur_pgno = pip->pgno;
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ return (ret);
+
+ /* This shouldn't happen, but just in case. */
+ if (!IS_VALID_PGNO(next_pgno)) {
+ EPRINT((dbenv,
+ "Page %lu: invalid next_pgno %lu on free list page",
+ (u_long)cur_pgno, (u_long)next_pgno));
+ return (DB_VERIFY_BAD);
+ }
+
+ /* Detect cycles. */
+ if ((ret = __db_vrfy_pgset_get(pgset, next_pgno, &p)) != 0)
+ return (ret);
+ if (p != 0) {
+ EPRINT((dbenv,
+ "Page %lu: page %lu encountered a second time on free list",
+ (u_long)cur_pgno, (u_long)next_pgno));
+ return (DB_VERIFY_BAD);
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, next_pgno)) != 0)
+ return (ret);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0)
+ return (ret);
+
+ if (pip->type != P_INVALID) {
+ EPRINT((dbenv,
+ "Page %lu: non-invalid page %lu on free list",
+ (u_long)cur_pgno, (u_long)next_pgno));
+ ret = DB_VERIFY_BAD; /* unsafe to continue */
+ break;
+ }
+ }
+
+ /*
+ * Don't let a putpageinfo failure clobber a meaningful error
+ * (e.g. DB_VERIFY_BAD) already in ret; this matches the
+ * "&& ret == 0" convention used everywhere else in this file.
+ */
+ if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_vrfy_subdbs --
+ * Walk the known-safe master database of subdbs with a cursor,
+ * verifying the structure of each subdatabase we encounter.
+ *
+ * Returns 0, DB_VERIFY_BAD if any subdatabase entry or structure is
+ * bad, or a fatal error. The master database and cursor are always
+ * closed before returning.
+ */
+static int
+__db_vrfy_subdbs(dbp, vdp, dbname, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ const char *dbname;
+ u_int32_t flags;
+{
+ DB *mdbp;
+ DBC *dbc;
+ DBT key, data;
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ db_pgno_t meta_pgno;
+ int ret, t_ret, isbad;
+ u_int8_t type;
+
+ isbad = 0;
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+
+ if ((ret =
+ __db_master_open(dbp, NULL, dbname, DB_RDONLY, 0, &mdbp)) != 0)
+ return (ret);
+
+ if ((ret = __db_icursor(mdbp,
+ NULL, DB_BTREE, PGNO_INVALID, 0, DB_LOCK_INVALIDID, &dbc)) != 0)
+ goto err;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ /* Each master-db record maps a subdb name to its meta page number. */
+ while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+ if (data.size != sizeof(db_pgno_t)) {
+ EPRINT((dbenv,
+ "Subdatabase entry not page-number size"));
+ isbad = 1;
+ goto err;
+ }
+ memcpy(&meta_pgno, data.data, data.size);
+ /*
+ * Subdatabase meta pgnos are stored in network byte
+ * order for cross-endian compatibility. Swap if appropriate.
+ */
+ DB_NTOHL(&meta_pgno);
+ if (meta_pgno == PGNO_INVALID || meta_pgno > vdp->last_pgno) {
+ EPRINT((dbenv,
+ "Subdatabase entry references invalid page %lu",
+ (u_long)meta_pgno));
+ isbad = 1;
+ goto err;
+ }
+ if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0)
+ goto err;
+ type = pip->type;
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ goto err;
+ /* Dispatch on the subdb's meta page type (queue can't be a subdb). */
+ switch (type) {
+ case P_BTREEMETA:
+ if ((ret = __bam_vrfy_structure(
+ dbp, vdp, meta_pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case P_HASHMETA:
+ if ((ret = __ham_vrfy_structure(
+ dbp, vdp, meta_pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ break;
+ case P_QAMMETA:
+ default:
+ EPRINT((dbenv,
+ "Subdatabase entry references page %lu of invalid type %lu",
+ (u_long)meta_pgno, (u_long)type));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ /* NOTREACHED */
+ }
+ }
+
+ /* DB_NOTFOUND is the normal end-of-cursor condition. */
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+err: if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = mdbp->close(mdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __db_vrfy_struct_feedback --
+ * Provide feedback during top-down database structure traversal.
+ * (See comment at the beginning of __db_vrfy_structure.)
+ *
+ * PUBLIC: void __db_vrfy_struct_feedback __P((DB *, VRFY_DBINFO *));
+ */
+void
+__db_vrfy_struct_feedback(dbp, vdp)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+{
+ int pct;
+
+ /* Nothing to do if the application didn't register a callback. */
+ if (dbp->db_feedback == NULL)
+ return;
+
+ if (vdp->pgs_remaining > 0)
+ --vdp->pgs_remaining;
+
+ /*
+ * This pass covers 50-100%; never report 100 from here, since
+ * only the caller knows when verification is truly complete.
+ */
+ pct = 100 - (vdp->pgs_remaining * 50 / (vdp->last_pgno + 1));
+ if (pct == 100)
+ pct = 99;
+ dbp->db_feedback(dbp, DB_VERIFY, pct);
+}
+
+/*
+ * __db_vrfy_orderchkonly --
+ * Do an sort-order/hashing check on a known-otherwise-good subdb.
+ */
+static int
+__db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ BTMETA *btmeta;
+ DB *mdbp, *pgset;
+ DBC *pgsc;
+ DBT key, data;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ HASH *h_internal;
+ HMETA *hmeta;
+ PAGE *h, *currpg;
+ db_pgno_t meta_pgno, p, pgno;
+ u_int32_t bucket;
+ int t_ret, ret;
+
+ pgset = NULL;
+ pgsc = NULL;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ currpg = h = NULL;
+
+ LF_CLR(DB_NOORDERCHK);
+
+ /* Open the master database and get the meta_pgno for the subdb. */
+ if ((ret = db_create(&mdbp, NULL, 0)) != 0)
+ return (ret);
+ if ((ret = __db_master_open(dbp, NULL, name, DB_RDONLY, 0, &mdbp)) != 0)
+ goto err;
+
+ memset(&key, 0, sizeof(key));
+ key.data = (void *)subdb;
+ key.size = (u_int32_t)strlen(subdb);
+ memset(&data, 0, sizeof(data));
+ if ((ret = mdbp->get(mdbp, NULL, &key, &data, 0)) != 0)
+ goto err;
+
+ if (data.size != sizeof(db_pgno_t)) {
+ EPRINT((dbenv, "Subdatabase entry of invalid size"));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ memcpy(&meta_pgno, data.data, data.size);
+
+ /*
+ * Subdatabase meta pgnos are stored in network byte
+ * order for cross-endian compatibility. Swap if appropriate.
+ */
+ DB_NTOHL(&meta_pgno);
+
+ if ((ret = mpf->get(mpf, &meta_pgno, 0, &h)) != 0)
+ goto err;
+
+ if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0)
+ goto err;
+
+ switch (TYPE(h)) {
+ case P_BTREEMETA:
+ btmeta = (BTMETA *)h;
+ if (F_ISSET(&btmeta->dbmeta, BTM_RECNO)) {
+ /* Recnos have no order to check. */
+ ret = 0;
+ goto err;
+ }
+ if ((ret =
+ __db_meta2pgset(dbp, vdp, meta_pgno, flags, pgset)) != 0)
+ goto err;
+ if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ goto err;
+ while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+ if ((ret = mpf->get(mpf, &p, 0, &currpg)) != 0)
+ goto err;
+ if ((ret = __bam_vrfy_itemorder(dbp,
+ NULL, currpg, p, NUM_ENT(currpg), 1,
+ F_ISSET(&btmeta->dbmeta, BTM_DUP), flags)) != 0)
+ goto err;
+ if ((ret = mpf->put(mpf, currpg, 0)) != 0)
+ goto err;
+ currpg = NULL;
+ }
+
+ /*
+ * The normal exit condition for the loop above is DB_NOTFOUND.
+ * If we see that, zero it and continue on to cleanup.
+ * Otherwise, it's a real error and will be returned.
+ */
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ break;
+ case P_HASHMETA:
+ hmeta = (HMETA *)h;
+ h_internal = (HASH *)dbp->h_internal;
+ /*
+ * Make sure h_charkey is right.
+ */
+ if (h_internal == NULL) {
+ EPRINT((dbenv,
+ "Page %lu: DB->h_internal field is NULL",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ if (h_internal->h_hash == NULL)
+ h_internal->h_hash = hmeta->dbmeta.version < 5
+ ? __ham_func4 : __ham_func5;
+ if (hmeta->h_charkey !=
+ h_internal->h_hash(dbp, CHARKEY, sizeof(CHARKEY))) {
+ EPRINT((dbenv,
+ "Page %lu: incorrect hash function for database",
+ (u_long)meta_pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Foreach bucket, verify hashing on each page in the
+ * corresponding chain of pages.
+ */
+ for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
+ pgno = BS_TO_PAGE(bucket, hmeta->spares);
+ while (pgno != PGNO_INVALID) {
+ if ((ret = mpf->get(mpf,
+ &pgno, 0, &currpg)) != 0)
+ goto err;
+ if ((ret = __ham_vrfy_hashing(dbp,
+ NUM_ENT(currpg), hmeta, bucket, pgno,
+ flags, h_internal->h_hash)) != 0)
+ goto err;
+ pgno = NEXT_PGNO(currpg);
+ if ((ret = mpf->put(mpf, currpg, 0)) != 0)
+ goto err;
+ currpg = NULL;
+ }
+ }
+ break;
+ default:
+ EPRINT((dbenv, "Page %lu: database metapage of bad type %lu",
+ (u_long)meta_pgno, (u_long)TYPE(h)));
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+err: if (pgsc != NULL && (t_ret = pgsc->c_close(pgsc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pgset != NULL &&
+ (t_ret = pgset->close(pgset, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0)
+ ret = t_ret;
+ if (currpg != NULL && (t_ret = mpf->put(mpf, currpg, 0)) != 0)
+ ret = t_ret;
+ if ((t_ret = mdbp->close(mdbp, 0)) != 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_salvage --
+ * Walk through a page, salvaging all likely or plausible (w/
+ * DB_AGGRESSIVE) key/data pairs.
+ */
+static int
+__db_salvage(dbp, vdp, pgno, h, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ PAGE *h;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DB_ASSERT(LF_ISSET(DB_SALVAGE));
+
+ /* If we got this page in the subdb pass, we can safely skip it. */
+ if (__db_salvage_isdone(vdp, pgno))
+ return (0);
+
+ switch (TYPE(h)) {
+ case P_HASH:
+ return (__ham_salvage(dbp,
+ vdp, pgno, h, handle, callback, flags));
+ /* NOTREACHED */
+ case P_LBTREE:
+ return (__bam_salvage(dbp,
+ vdp, pgno, P_LBTREE, h, handle, callback, NULL, flags));
+ /* NOTREACHED */
+ case P_LDUP:
+ return (__db_salvage_markneeded(vdp, pgno, SALVAGE_LDUP));
+ /* NOTREACHED */
+ case P_OVERFLOW:
+ return (__db_salvage_markneeded(vdp, pgno, SALVAGE_OVERFLOW));
+ /* NOTREACHED */
+ case P_LRECNO:
+ /*
+ * Recnos are tricky -- they may represent dup pages, or
+ * they may be subdatabase/regular database pages in their
+ * own right. If the former, they need to be printed with a
+ * key, preferably when we hit the corresponding datum in
+ * a btree/hash page. If the latter, there is no key.
+ *
+ * If a database is sufficiently frotzed, we're not going
+ * to be able to get this right, so we best-guess: just
+ * mark it needed now, and if we're really a normal recno
+ * database page, the "unknowns" pass will pick us up.
+ */
+ return (__db_salvage_markneeded(vdp, pgno, SALVAGE_LRECNO));
+ /* NOTREACHED */
+ case P_IBTREE:
+ case P_INVALID:
+ case P_IRECNO:
+ case __P_DUPLICATE:
+ default:
+ /* XXX: Should we be more aggressive here? */
+ break;
+ }
+ return (0);
+}
+
+/*
+ * __db_salvage_unknowns --
+ * Walk through the salvager database, printing with key "UNKNOWN"
+ * any pages we haven't dealt with.
+ */
+static int
+__db_salvage_unknowns(dbp, vdp, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DBT unkdbt, key, *dbt;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+ int ret, err_ret;
+ void *ovflbuf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+
+ memset(&unkdbt, 0, sizeof(DBT));
+ unkdbt.size = (u_int32_t)strlen("UNKNOWN") + 1;
+ unkdbt.data = "UNKNOWN";
+
+ if ((ret = __os_malloc(dbenv, dbp->pgsize, &ovflbuf)) != 0)
+ return (ret);
+
+ err_ret = 0;
+ while ((ret = __db_salvage_getnext(vdp, &pgno, &pgtype)) == 0) {
+ dbt = NULL;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ switch (pgtype) {
+ case SALVAGE_LDUP:
+ case SALVAGE_LRECNODUP:
+ dbt = &unkdbt;
+ /* FALLTHROUGH */
+ case SALVAGE_LBTREE:
+ case SALVAGE_LRECNO:
+ if ((ret = __bam_salvage(dbp, vdp, pgno, pgtype,
+ h, handle, callback, dbt, flags)) != 0)
+ err_ret = ret;
+ break;
+ case SALVAGE_OVERFLOW:
+ /*
+ * XXX:
+ * This may generate multiple "UNKNOWN" keys in
+ * a database with no dups. What to do?
+ */
+ if ((ret = __db_safe_goff(dbp,
+ vdp, pgno, &key, &ovflbuf, flags)) != 0 ||
+ (ret = __db_prdbt(&key,
+ 0, " ", handle, callback, 0, vdp)) != 0 ||
+ (ret = __db_prdbt(&unkdbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ case SALVAGE_HASH:
+ if ((ret = __ham_salvage(
+ dbp, vdp, pgno, h, handle, callback, flags)) != 0)
+ err_ret = ret;
+ break;
+ case SALVAGE_INVALID:
+ case SALVAGE_IGNORE:
+ default:
+ /*
+ * Shouldn't happen, but if it does, just do what the
+ * nice man says.
+ */
+ DB_ASSERT(0);
+ break;
+ }
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ err_ret = ret;
+ }
+
+ __os_free(dbenv, ovflbuf);
+
+ if (err_ret != 0 && ret == 0)
+ ret = err_ret;
+
+ return (ret == DB_NOTFOUND ? 0 : ret);
+}
+
/*
 * INP_OFFSET --
 *    Byte offset (from the start of the page) of the ith entry of the
 *    page's inp (item index) array, which we can compare to the offset
 *    the entry stores.
 */
#define INP_OFFSET(dbp, h, i) \
    ((db_indx_t)((u_int8_t *)((P_INP(dbp,(h))) + (i)) - (u_int8_t *)(h)))
+
/*
 * __db_vrfy_inpitem --
 *    Verify that a single entry in the inp array is sane, and update
 *    the high water mark and current item offset.  (The former of these is
 *    used for state information between calls, and is required; it must
 *    be initialized to the pagesize before the first call.)
 *
 *    Returns DB_VERIFY_FATAL if inp has collided with the data,
 *    since verification can't continue from there; returns DB_VERIFY_BAD
 *    if anything else is wrong.
 *
 * PUBLIC: int __db_vrfy_inpitem __P((DB *, PAGE *,
 * PUBLIC:     db_pgno_t, u_int32_t, int, u_int32_t, u_int32_t *, u_int32_t *));
 */
int
__db_vrfy_inpitem(dbp, h, pgno, i, is_btree, flags, himarkp, offsetp)
    DB *dbp;
    PAGE *h;
    db_pgno_t pgno;
    u_int32_t i;
    int is_btree;
    u_int32_t flags, *himarkp, *offsetp;
{
    BKEYDATA *bk;
    DB_ENV *dbenv;
    db_indx_t *inp, offset, len;

    dbenv = dbp->dbenv;

    /* himarkp carries required inter-call state; it may never be NULL. */
    DB_ASSERT(himarkp != NULL);
    inp = P_INP(dbp, h);

    /*
     * Check that the inp array, which grows from the beginning of the
     * page forward, has not collided with the data, which grow from the
     * end of the page backward.
     */
    if (inp + i >= (db_indx_t *)((u_int8_t *)h + *himarkp)) {
        /* We've collided with the data.  We need to bail. */
        EPRINT((dbenv, "Page %lu: entries listing %lu overlaps data",
            (u_long)pgno, (u_long)i));
        return (DB_VERIFY_FATAL);
    }

    offset = inp[i];

    /*
     * Check that the item offset is reasonable: it points somewhere
     * after the inp array and before the end of the page.
     */
    if (offset <= INP_OFFSET(dbp, h, i) || offset > dbp->pgsize) {
        EPRINT((dbenv, "Page %lu: bad offset %lu at page index %lu",
            (u_long)pgno, (u_long)offset, (u_long)i));
        return (DB_VERIFY_BAD);
    }

    /* Update the high-water mark (what HOFFSET should be). */
    if (offset < *himarkp)
        *himarkp = offset;

    if (is_btree) {
        /*
         * Check that the item length remains on-page.
         */
        bk = GET_BKEYDATA(dbp, h, i);

        /*
         * We need to verify the type of the item here;
         * we can't simply assume that it will be one of the
         * expected three.  If it's not a recognizable type,
         * it can't be considered to have a verifiable
         * length, so it's not possible to certify it as safe.
         */
        switch (B_TYPE(bk->type)) {
        case B_KEYDATA:
            /* Variable-length item: length is stored in the header. */
            len = bk->len;
            break;
        case B_DUPLICATE:
        case B_OVERFLOW:
            /* Both types are fixed-size on-page structures. */
            len = BOVERFLOW_SIZE;
            break;
        default:
            EPRINT((dbenv,
                "Page %lu: item %lu of unrecognizable type",
                (u_long)pgno, (u_long)i));
            return (DB_VERIFY_BAD);
        }

        /*
         * offset + len is computed in int width, so a db_indx_t
         * sum cannot wrap before the size_t cast.
         */
        if ((size_t)(offset + len) > dbp->pgsize) {
            EPRINT((dbenv,
                "Page %lu: item %lu extends past page boundary",
                (u_long)pgno, (u_long)i));
            return (DB_VERIFY_BAD);
        }
    }

    /* Hand the validated offset back to the caller if requested. */
    if (offsetp != NULL)
        *offsetp = offset;
    return (0);
}
+
+/*
+ * __db_vrfy_duptype--
+ * Given a page number and a set of flags to __bam_vrfy_subtree,
+ * verify that the dup tree type is correct--i.e., it's a recno
+ * if DUPSORT is not set and a btree if it is.
+ *
+ * PUBLIC: int __db_vrfy_duptype
+ * PUBLIC: __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+ */
+int
+__db_vrfy_duptype(dbp, vdp, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO *pip;
+ int ret, isbad;
+
+ dbenv = dbp->dbenv;
+ isbad = 0;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (pip->type) {
+ case P_IBTREE:
+ case P_LDUP:
+ if (!LF_ISSET(ST_DUPSORT)) {
+ EPRINT((dbenv,
+ "Page %lu: sorted duplicate set in unsorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+ break;
+ case P_IRECNO:
+ case P_LRECNO:
+ if (LF_ISSET(ST_DUPSORT)) {
+ EPRINT((dbenv,
+ "Page %lu: unsorted duplicate set in sorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+ break;
+ default:
+ /*
+ * If the page is entirely zeroed, its pip->type will be a lie
+ * (we assumed it was a hash page, as they're allowed to be
+ * zeroed); handle this case specially.
+ */
+ if (F_ISSET(pip, VRFY_IS_ALLZEROES))
+ ZEROPG_ERR_PRINT(dbenv, pgno, "duplicate page");
+ else
+ EPRINT((dbenv,
+ "Page %lu: duplicate page of inappropriate type %lu",
+ (u_long)pgno, (u_long)pip->type));
+ isbad = 1;
+ break;
+ }
+
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
+ return (ret);
+ return (isbad == 1 ? DB_VERIFY_BAD : 0);
+}
+
/*
 * __db_salvage_duptree --
 *    Attempt to salvage a given duplicate tree, given its alleged root.
 *
 *    The key that corresponds to this dup set has been passed to us
 *    in DBT *key.  Because data items follow keys, though, it has been
 *    printed once already.
 *
 *    The basic idea here is that pgno ought to be a P_LDUP, a P_LRECNO, a
 *    P_IBTREE, or a P_IRECNO.  If it's an internal page, use the verifier
 *    functions to make sure it's safe; if it's not, we simply bail and the
 *    data will have to be printed with no key later on.  if it is safe,
 *    recurse on each of its children.
 *
 *    Whether or not it's safe, if it's a leaf page, __bam_salvage it.
 *
 *    At all times, use the DB hanging off vdp to mark and check what we've
 *    done, so each page gets printed exactly once and we don't get caught
 *    in any cycles.
 *
 * PUBLIC: int __db_salvage_duptree __P((DB *, VRFY_DBINFO *, db_pgno_t,
 * PUBLIC:     DBT *, void *, int (*)(void *, const void *), u_int32_t));
 */
int
__db_salvage_duptree(dbp, vdp, pgno, key, handle, callback, flags)
    DB *dbp;
    VRFY_DBINFO *vdp;
    db_pgno_t pgno;
    DBT *key;
    void *handle;
    int (*callback) __P((void *, const void *));
    u_int32_t flags;
{
    DB_MPOOLFILE *mpf;
    PAGE *h;
    int ret, t_ret;

    mpf = dbp->mpf;

    /* Reject page numbers that cannot possibly be a dup-tree root. */
    if (pgno == PGNO_INVALID || !IS_VALID_PGNO(pgno))
        return (DB_VERIFY_BAD);

    /* We have a plausible page.  Try it. */
    if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
        return (ret);

    switch (TYPE(h)) {
    case P_IBTREE:
    case P_IRECNO:
        /* Internal page: verify it before trusting its children. */
        if ((ret = __db_vrfy_common(dbp, vdp, h, pgno, flags)) != 0)
            goto err;
        if ((ret = __bam_vrfy(dbp,
            vdp, h, pgno, flags | DB_NOORDERCHK)) != 0 ||
            (ret = __db_salvage_markdone(vdp, pgno)) != 0)
            goto err;
        /*
         * We have a known-healthy internal page.  Walk it.
         */
        if ((ret = __bam_salvage_walkdupint(dbp, vdp, h, key,
            handle, callback, flags)) != 0)
            goto err;
        break;
    case P_LRECNO:
    case P_LDUP:
        /* Leaf page: salvage it directly, keyed by the caller's key. */
        if ((ret = __bam_salvage(dbp,
            vdp, pgno, TYPE(h), h, handle, callback, key, flags)) != 0)
            goto err;
        break;
    default:
        /* Not a plausible dup-tree page at all. */
        ret = DB_VERIFY_BAD;
        goto err;
        /* NOTREACHED */
    }

    /* h is always pinned here; release it, preserving the first error. */
err:    if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}
+
+/*
+ * __db_salvage_subdbs --
+ * Check and see if this database has subdbs; if so, try to salvage
+ * them independently.
+ */
+static int
+__db_salvage_subdbs(dbp, vdp, handle, callback, flags, hassubsp)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+ int *hassubsp;
+{
+ BTMETA *btmeta;
+ DB *pgset;
+ DBC *pgsc;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t p, meta_pgno;
+ int ret, err_ret;
+
+ pgset = NULL;
+ pgsc = NULL;
+ mpf = dbp->mpf;
+ err_ret = 0;
+
+ meta_pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &meta_pgno, 0, &h)) != 0)
+ return (ret);
+
+ if (TYPE(h) == P_BTREEMETA)
+ btmeta = (BTMETA *)h;
+ else {
+ /* Not a btree metadata, ergo no subdbs, so just return. */
+ ret = 0;
+ goto err;
+ }
+
+ /* If it's not a safe page, bail on the attempt. */
+ if ((ret = __db_vrfy_common(dbp, vdp, h, PGNO_BASE_MD, flags)) != 0 ||
+ (ret = __bam_vrfy_meta(dbp, vdp, btmeta, PGNO_BASE_MD, flags)) != 0)
+ goto err;
+
+ if (!F_ISSET(&btmeta->dbmeta, BTM_SUBDB)) {
+ /* No subdbs, just return. */
+ ret = 0;
+ goto err;
+ }
+
+ /* We think we've got subdbs. Mark it so. */
+ *hassubsp = 1;
+
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ return (ret);
+
+ /*
+ * We have subdbs. Try to crack them.
+ *
+ * To do so, get a set of leaf pages in the master
+ * database, and then walk each of the valid ones, salvaging
+ * subdbs as we go. If any prove invalid, just drop them; we'll
+ * pick them up on a later pass.
+ */
+ if ((ret = __db_vrfy_pgset(dbp->dbenv, dbp->pgsize, &pgset)) != 0)
+ return (ret);
+ if ((ret =
+ __db_meta2pgset(dbp, vdp, PGNO_BASE_MD, flags, pgset)) != 0)
+ goto err;
+
+ if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ goto err;
+ while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+ if ((ret = mpf->get(mpf, &p, 0, &h)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+ if ((ret = __db_vrfy_common(dbp, vdp, h, p, flags)) != 0 ||
+ (ret = __bam_vrfy(dbp,
+ vdp, h, p, flags | DB_NOORDERCHK)) != 0)
+ goto nextpg;
+ if (TYPE(h) != P_LBTREE)
+ goto nextpg;
+ else if ((ret = __db_salvage_subdbpg(
+ dbp, vdp, h, handle, callback, flags)) != 0)
+ err_ret = ret;
+nextpg: if ((ret = mpf->put(mpf, h, 0)) != 0)
+ err_ret = ret;
+ }
+
+ if (ret != DB_NOTFOUND)
+ goto err;
+ if ((ret = pgsc->c_close(pgsc)) != 0)
+ goto err;
+
+ ret = pgset->close(pgset, 0);
+ return ((ret == 0 && err_ret != 0) ? err_ret : ret);
+
+ /* NOTREACHED */
+
+err: if (pgsc != NULL)
+ (void)pgsc->c_close(pgsc);
+ if (pgset != NULL)
+ (void)pgset->close(pgset, 0);
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+}
+
+/*
+ * __db_salvage_subdbpg --
+ * Given a known-good leaf page in the master database, salvage all
+ * leaf pages corresponding to each subdb.
+ */
+static int
+__db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *master;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ BKEYDATA *bkkey, *bkdata;
+ BOVERFLOW *bo;
+ DB *pgset;
+ DBC *pgsc;
+ DBT key;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ PAGE *subpg;
+ db_indx_t i;
+ db_pgno_t meta_pgno, p;
+ int ret, err_ret, t_ret;
+ char *subdbname;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ ret = err_ret = 0;
+ subdbname = NULL;
+
+ if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0)
+ return (ret);
+
+ /*
+ * For each entry, get and salvage the set of pages
+ * corresponding to that entry.
+ */
+ for (i = 0; i < NUM_ENT(master); i += P_INDX) {
+ bkkey = GET_BKEYDATA(dbp, master, i);
+ bkdata = GET_BKEYDATA(dbp, master, i + O_INDX);
+
+ /* Get the subdatabase name. */
+ if (B_TYPE(bkkey->type) == B_OVERFLOW) {
+ /*
+ * We can, in principle anyway, have a subdb
+ * name so long it overflows. Ick.
+ */
+ bo = (BOVERFLOW *)bkkey;
+ if ((ret = __db_safe_goff(dbp, vdp, bo->pgno, &key,
+ (void **)&subdbname, flags)) != 0) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+
+ /* Nul-terminate it. */
+ if ((ret = __os_realloc(dbenv,
+ key.size + 1, &subdbname)) != 0)
+ goto err;
+ subdbname[key.size] = '\0';
+ } else if (B_TYPE(bkkey->type == B_KEYDATA)) {
+ if ((ret = __os_realloc(dbenv,
+ bkkey->len + 1, &subdbname)) != 0)
+ goto err;
+ memcpy(subdbname, bkkey->data, bkkey->len);
+ subdbname[bkkey->len] = '\0';
+ }
+
+ /* Get the corresponding pgno. */
+ if (bkdata->len != sizeof(db_pgno_t)) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+ memcpy(&meta_pgno, bkdata->data, sizeof(db_pgno_t));
+
+ /*
+ * Subdatabase meta pgnos are stored in network byte
+ * order for cross-endian compatibility. Swap if appropriate.
+ */
+ DB_NTOHL(&meta_pgno);
+
+ /* If we can't get the subdb meta page, just skip the subdb. */
+ if (!IS_VALID_PGNO(meta_pgno) ||
+ (ret = mpf->get(mpf, &meta_pgno, 0, &subpg)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ /*
+ * Verify the subdatabase meta page. This has two functions.
+ * First, if it's bad, we have no choice but to skip the subdb
+ * and let the pages just get printed on a later pass. Second,
+ * the access-method-specific meta verification routines record
+ * the various state info (such as the presence of dups)
+ * that we need for __db_prheader().
+ */
+ if ((ret =
+ __db_vrfy_common(dbp, vdp, subpg, meta_pgno, flags)) != 0) {
+ err_ret = ret;
+ (void)mpf->put(mpf, subpg, 0);
+ continue;
+ }
+ switch (TYPE(subpg)) {
+ case P_BTREEMETA:
+ if ((ret = __bam_vrfy_meta(dbp,
+ vdp, (BTMETA *)subpg, meta_pgno, flags)) != 0) {
+ err_ret = ret;
+ (void)mpf->put(mpf, subpg, 0);
+ continue;
+ }
+ break;
+ case P_HASHMETA:
+ if ((ret = __ham_vrfy_meta(dbp,
+ vdp, (HMETA *)subpg, meta_pgno, flags)) != 0) {
+ err_ret = ret;
+ (void)mpf->put(mpf, subpg, 0);
+ continue;
+ }
+ break;
+ default:
+ /* This isn't an appropriate page; skip this subdb. */
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ /* NOTREACHED */
+ }
+
+ if ((ret = mpf->put(mpf, subpg, 0)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ /* Print a subdatabase header. */
+ if ((ret = __db_prheader(dbp,
+ subdbname, 0, 0, handle, callback, vdp, meta_pgno)) != 0)
+ goto err;
+
+ if ((ret = __db_meta2pgset(dbp, vdp, meta_pgno,
+ flags, pgset)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+
+ if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
+ goto err;
+ while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
+ if ((ret = mpf->get(mpf, &p, 0, &subpg)) != 0) {
+ err_ret = ret;
+ continue;
+ }
+ if ((ret = __db_salvage(dbp, vdp, p, subpg,
+ handle, callback, flags)) != 0)
+ err_ret = ret;
+ if ((ret = mpf->put(mpf, subpg, 0)) != 0)
+ err_ret = ret;
+ }
+
+ if (ret != DB_NOTFOUND)
+ goto err;
+
+ if ((ret = pgsc->c_close(pgsc)) != 0)
+ goto err;
+ if ((ret = __db_prfooter(handle, callback)) != 0)
+ goto err;
+ }
+err: if (subdbname)
+ __os_free(dbenv, subdbname);
+
+ if ((t_ret = pgset->close(pgset, 0)) != 0)
+ ret = t_ret;
+
+ if ((t_ret = __db_salvage_markdone(vdp, PGNO(master))) != 0)
+ return (t_ret);
+
+ return ((err_ret != 0) ? err_ret : ret);
+}
+
+/*
+ * __db_meta2pgset --
+ * Given a known-safe meta page number, return the set of pages
+ * corresponding to the database it represents. Return DB_VERIFY_BAD if
+ * it's not a suitable meta page or is invalid.
+ */
+static int
+__db_meta2pgset(dbp, vdp, pgno, flags, pgset)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t flags;
+ DB *pgset;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ int ret, t_ret;
+
+ mpf = dbp->mpf;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ switch (TYPE(h)) {
+ case P_BTREEMETA:
+ ret = __bam_meta2pgset(dbp, vdp, (BTMETA *)h, flags, pgset);
+ break;
+ case P_HASHMETA:
+ ret = __ham_meta2pgset(dbp, vdp, (HMETA *)h, flags, pgset);
+ break;
+ default:
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0)
+ return (t_ret);
+ return (ret);
+}
+
+/*
+ * __db_guesspgsize --
+ * Try to guess what the pagesize is if the one on the meta page
+ * and the one in the db are invalid.
+ */
+static int
+__db_guesspgsize(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ db_pgno_t i;
+ size_t nr;
+ u_int32_t guess;
+ u_int8_t type;
+
+ for (guess = DB_MAX_PGSIZE; guess >= DB_MIN_PGSIZE; guess >>= 1) {
+ /*
+ * We try to read three pages ahead after the first one
+ * and make sure we have plausible types for all of them.
+ * If the seeks fail, continue with a smaller size;
+ * we're probably just looking past the end of the database.
+ * If they succeed and the types are reasonable, also continue
+ * with a size smaller; we may be looking at pages N,
+ * 2N, and 3N for some N > 1.
+ *
+ * As soon as we hit an invalid type, we stop and return
+ * our previous guess; that last one was probably the page size.
+ */
+ for (i = 1; i <= 3; i++) {
+ if (__os_seek(dbenv, fhp, guess,
+ i, SSZ(DBMETA, type), 0, DB_OS_SEEK_SET) != 0)
+ break;
+ if (__os_read(dbenv,
+ fhp, &type, 1, &nr) != 0 || nr == 0)
+ break;
+ if (type == P_INVALID || type >= P_PAGETYPE_MAX)
+ return (guess << 1);
+ }
+ }
+
+ /*
+ * If we're just totally confused--the corruption takes up most of the
+ * beginning pages of the database--go with the default size.
+ */
+ return (DB_DEF_IOSIZE);
+}
diff --git a/storage/bdb/db/db_vrfyutil.c b/storage/bdb/db/db_vrfyutil.c
new file mode 100644
index 00000000000..44344ceed11
--- /dev/null
+++ b/storage/bdb/db/db_vrfyutil.c
@@ -0,0 +1,872 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_vrfyutil.c,v 11.29 2002/08/08 03:57:50 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_vrfyutil.c,v 11.29 2002/08/08 03:57:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/db_am.h"
+
+static int __db_vrfy_pageinfo_create __P((DB_ENV *, VRFY_PAGEINFO **));
+static int __db_vrfy_pgset_iinc __P((DB *, db_pgno_t, int));
+
/*
 * __db_vrfy_dbinfo_create --
 *    Allocate and initialize a VRFY_DBINFO structure.
 *
 *    Creates three in-memory databases hanging off the structure: cdbp
 *    (child info, with dups allowed), pgdbp (per-page info), and pgset
 *    (the set of pages seen).  On error, everything opened so far is
 *    torn down and the error is returned.
 *
 * PUBLIC: int __db_vrfy_dbinfo_create
 * PUBLIC:     __P((DB_ENV *, u_int32_t, VRFY_DBINFO **));
 */
int
__db_vrfy_dbinfo_create(dbenv, pgsize, vdpp)
    DB_ENV *dbenv;
    u_int32_t pgsize;
    VRFY_DBINFO **vdpp;
{
    DB *cdbp, *pgdbp, *pgset;
    VRFY_DBINFO *vdp;
    int ret;

    vdp = NULL;
    cdbp = pgdbp = pgset = NULL;

    /*
     * NOTE(review): allocated with a NULL env but freed below (and in
     * __db_vrfy_dbinfo_destroy) with dbenv -- presumably harmless for
     * BDB's allocator, but verify the env/allocator pairing.
     */
    if ((ret = __os_calloc(NULL,
        1, sizeof(VRFY_DBINFO), (void **)&vdp)) != 0)
        goto err;

    /* Child-info database: multiple children per page, so allow dups. */
    if ((ret = db_create(&cdbp, dbenv, 0)) != 0)
        goto err;

    if ((ret = cdbp->set_flags(cdbp, DB_DUP)) != 0)
        goto err;

    if ((ret = cdbp->set_pagesize(cdbp, pgsize)) != 0)
        goto err;

    /* In-memory (unnamed) database. */
    if ((ret =
        cdbp->open(cdbp, NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
        goto err;

    /* Per-page info database. */
    if ((ret = db_create(&pgdbp, dbenv, 0)) != 0)
        goto err;

    if ((ret = pgdbp->set_pagesize(pgdbp, pgsize)) != 0)
        goto err;

    if ((ret = pgdbp->open(pgdbp,
        NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
        goto err;

    /* Page-set database; closes itself internally on failure. */
    if ((ret = __db_vrfy_pgset(dbenv, pgsize, &pgset)) != 0)
        goto err;

    LIST_INIT(&vdp->subdbs);
    LIST_INIT(&vdp->activepips);

    vdp->cdbp = cdbp;
    vdp->pgdbp = pgdbp;
    vdp->pgset = pgset;
    *vdpp = vdp;
    return (0);

    /* Tear down whatever was successfully created before the failure. */
err:    if (cdbp != NULL)
        (void)cdbp->close(cdbp, 0);
    if (pgdbp != NULL)
        (void)pgdbp->close(pgdbp, 0);
    if (vdp != NULL)
        __os_free(dbenv, vdp);
    return (ret);
}
+
+/*
+ * __db_vrfy_dbinfo_destroy --
+ * Destructor for VRFY_DBINFO. Destroys VRFY_PAGEINFOs and deallocates
+ * structure.
+ *
+ * PUBLIC: int __db_vrfy_dbinfo_destroy __P((DB_ENV *, VRFY_DBINFO *));
+ */
+int
+__db_vrfy_dbinfo_destroy(dbenv, vdp)
+ DB_ENV *dbenv;
+ VRFY_DBINFO *vdp;
+{
+ VRFY_CHILDINFO *c, *d;
+ int t_ret, ret;
+
+ ret = 0;
+
+ for (c = LIST_FIRST(&vdp->subdbs); c != NULL; c = d) {
+ d = LIST_NEXT(c, links);
+ __os_free(NULL, c);
+ }
+
+ if ((t_ret = vdp->pgdbp->close(vdp->pgdbp, 0)) != 0)
+ ret = t_ret;
+
+ if ((t_ret = vdp->cdbp->close(vdp->cdbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = vdp->pgset->close(vdp->pgset, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ DB_ASSERT(LIST_FIRST(&vdp->activepips) == NULL);
+
+ __os_free(dbenv, vdp);
+ return (ret);
+}
+
+/*
+ * __db_vrfy_getpageinfo --
+ * Get a PAGEINFO structure for a given page, creating it if necessary.
+ *
+ * PUBLIC: int __db_vrfy_getpageinfo
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, VRFY_PAGEINFO **));
+ */
+int
+__db_vrfy_getpageinfo(vdp, pgno, pipp)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ VRFY_PAGEINFO **pipp;
+{
+ DBT key, data;
+ DB *pgdbp;
+ VRFY_PAGEINFO *pip;
+ int ret;
+
+ /*
+ * We want a page info struct. There are three places to get it from,
+ * in decreasing order of preference:
+ *
+ * 1. vdp->activepips. If it's already "checked out", we're
+ * already using it, we return the same exact structure with a
+ * bumped refcount. This is necessary because this code is
+ * replacing array accesses, and it's common for f() to make some
+ * changes to a pip, and then call g() and h() which each make
+ * changes to the same pip. vdps are never shared between threads
+ * (they're never returned to the application), so this is safe.
+ * 2. The pgdbp. It's not in memory, but it's in the database, so
+ * get it, give it a refcount of 1, and stick it on activepips.
+ * 3. malloc. It doesn't exist yet; create it, then stick it on
+ * activepips. We'll put it in the database when we putpageinfo
+ * later.
+ */
+
+ /* Case 1. */
+ for (pip = LIST_FIRST(&vdp->activepips); pip != NULL;
+ pip = LIST_NEXT(pip, links))
+ if (pip->pgno == pgno)
+ /* Found it. */
+ goto found;
+
+ /* Case 2. */
+ pgdbp = vdp->pgdbp;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ F_SET(&data, DB_DBT_MALLOC);
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ if ((ret = pgdbp->get(pgdbp, NULL, &key, &data, 0)) == 0) {
+ /* Found it. */
+ DB_ASSERT(data.size = sizeof(VRFY_PAGEINFO));
+ pip = data.data;
+ DB_ASSERT(pip->pi_refcount == 0);
+ LIST_INSERT_HEAD(&vdp->activepips, pip, links);
+ goto found;
+ } else if (ret != DB_NOTFOUND) /* Something nasty happened. */
+ return (ret);
+
+ /* Case 3 */
+ if ((ret = __db_vrfy_pageinfo_create(pgdbp->dbenv, &pip)) != 0)
+ return (ret);
+
+ LIST_INSERT_HEAD(&vdp->activepips, pip, links);
+found: pip->pi_refcount++;
+
+ *pipp = pip;
+
+ DB_ASSERT(pip->pi_refcount > 0);
+ return (0);
+}
+
/*
 * __db_vrfy_putpageinfo --
 *    Put back a VRFY_PAGEINFO that we're done with.
 *
 *    Drops one reference; on the last reference the structure is written
 *    back to the page-info database, removed from the activepips list,
 *    and freed.
 *
 * PUBLIC: int __db_vrfy_putpageinfo __P((DB_ENV *,
 * PUBLIC:     VRFY_DBINFO *, VRFY_PAGEINFO *));
 */
int
__db_vrfy_putpageinfo(dbenv, vdp, pip)
    DB_ENV *dbenv;
    VRFY_DBINFO *vdp;
    VRFY_PAGEINFO *pip;
{
    DBT key, data;
    DB *pgdbp;
    VRFY_PAGEINFO *p;
    int ret;
#ifdef DIAGNOSTIC
    int found;

    found = 0;
#endif

    /* Still checked out elsewhere; nothing more to do. */
    if (--pip->pi_refcount > 0)
        return (0);

    /* Last reference: persist the struct, keyed by page number. */
    pgdbp = vdp->pgdbp;
    memset(&key, 0, sizeof(DBT));
    memset(&data, 0, sizeof(DBT));

    key.data = &pip->pgno;
    key.size = sizeof(db_pgno_t);
    data.data = pip;
    data.size = sizeof(VRFY_PAGEINFO);

    if ((ret = pgdbp->put(pgdbp, NULL, &key, &data, 0)) != 0)
        return (ret);

    /* Unlink the struct from the checked-out list. */
    for (p = LIST_FIRST(&vdp->activepips); p != NULL;
        p = LIST_NEXT(p, links))
        if (p == pip) {
#ifdef DIAGNOSTIC
            found++;
#endif
            DB_ASSERT(p->pi_refcount == 0);
            LIST_REMOVE(p, links);
            break;
        }
#ifdef DIAGNOSTIC
    /* A pip being put back must have been on the activepips list. */
    DB_ASSERT(found == 1);
#endif

    DB_ASSERT(pip->pi_refcount == 0);
    /* Allocated via the user allocator (DB_DBT_MALLOC), so __os_ufree. */
    __os_ufree(dbenv, pip);
    return (0);
}
+
+/*
+ * __db_vrfy_pgset --
+ * Create a temporary database for the storing of sets of page numbers.
+ * (A mapping from page number to int, used by the *_meta2pgset functions,
+ * as well as for keeping track of which pages the verifier has seen.)
+ *
+ * PUBLIC: int __db_vrfy_pgset __P((DB_ENV *, u_int32_t, DB **));
+ */
+int
+__db_vrfy_pgset(dbenv, pgsize, dbpp)
+ DB_ENV *dbenv;
+ u_int32_t pgsize;
+ DB **dbpp;
+{
+ DB *dbp;
+ int ret;
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ if ((ret = dbp->set_pagesize(dbp, pgsize)) != 0)
+ goto err;
+ if ((ret = dbp->open(dbp,
+ NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) == 0)
+ *dbpp = dbp;
+ else
+err: (void)dbp->close(dbp, 0);
+
+ return (ret);
+}
+
+/*
+ * __db_vrfy_pgset_get --
+ * Get the value associated in a page set with a given pgno. Return
+ * a 0 value (and succeed) if we've never heard of this page.
+ *
+ * PUBLIC: int __db_vrfy_pgset_get __P((DB *, db_pgno_t, int *));
+ */
+int
+__db_vrfy_pgset_get(dbp, pgno, valp)
+ DB *dbp;
+ db_pgno_t pgno;
+ int *valp;
+{
+ DBT key, data;
+ int ret, val;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+ data.data = &val;
+ data.ulen = sizeof(int);
+ F_SET(&data, DB_DBT_USERMEM);
+
+ if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0) {
+ DB_ASSERT(data.size = sizeof(int));
+ memcpy(&val, data.data, sizeof(int));
+ } else if (ret == DB_NOTFOUND)
+ val = 0;
+ else
+ return (ret);
+
+ *valp = val;
+ return (0);
+}
+
+/*
+ * __db_vrfy_pgset_inc --
+ * Increment the value associated with a pgno by 1.
+ *
+ * PUBLIC: int __db_vrfy_pgset_inc __P((DB *, db_pgno_t));
+ */
+int
+__db_vrfy_pgset_inc(dbp, pgno)
+ DB *dbp;
+ db_pgno_t pgno;
+{
+
+ return (__db_vrfy_pgset_iinc(dbp, pgno, 1));
+}
+
/*
 * __db_vrfy_pgset_dec --
 *    Decrement the value associated with a pgno by 1.
 *    (The original header said "Increment" -- a copy-paste slip from
 *    __db_vrfy_pgset_inc; the code passes -1.)
 *
 * PUBLIC: int __db_vrfy_pgset_dec __P((DB *, db_pgno_t));
 */
int
__db_vrfy_pgset_dec(dbp, pgno)
    DB *dbp;
    db_pgno_t pgno;
{

    return (__db_vrfy_pgset_iinc(dbp, pgno, -1));
}
+
+/*
+ * __db_vrfy_pgset_iinc --
+ * Increment the value associated with a pgno by i.
+ *
+ */
+static int
+__db_vrfy_pgset_iinc(dbp, pgno, i)
+	DB *dbp;
+	db_pgno_t pgno;
+	int i;
+{
+	DBT key, data;
+	int ret;
+	int val;
+
+	memset(&key, 0, sizeof(DBT));
+	memset(&data, 0, sizeof(DBT));
+
+	val = 0;
+
+	key.data = &pgno;
+	key.size = sizeof(db_pgno_t);
+	data.data = &val;
+	data.ulen = sizeof(int);
+	F_SET(&data, DB_DBT_USERMEM);
+
+	/* Fetch the current count; a page we've never seen counts as zero. */
+	if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0) {
+		DB_ASSERT(data.size == sizeof(int));
+		memcpy(&val, data.data, sizeof(int));
+	} else if (ret != DB_NOTFOUND)
+		return (ret);
+
+	/* Write val + i back under the same key, overwriting any old value. */
+	data.size = sizeof(int);
+	val += i;
+
+	return (dbp->put(dbp, NULL, &key, &data, 0));
+}
+
+/*
+ * __db_vrfy_pgset_next --
+ * Given a cursor open in a pgset database, get the next page in the
+ * set.
+ *
+ * PUBLIC: int __db_vrfy_pgset_next __P((DBC *, db_pgno_t *));
+ */
+int
+__db_vrfy_pgset_next(dbc, pgnop)
+ DBC *dbc;
+ db_pgno_t *pgnop;
+{
+ DBT key, data;
+ db_pgno_t pgno;
+ int ret;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ /* We don't care about the data, just the keys. */
+ F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ F_SET(&key, DB_DBT_USERMEM);
+ key.data = &pgno;
+ key.ulen = sizeof(db_pgno_t);
+
+ if ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) != 0)
+ return (ret);
+
+ DB_ASSERT(key.size == sizeof(db_pgno_t));
+ *pgnop = pgno;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_childcursor --
+ * Create a cursor to walk the child list with. Returns with a nonzero
+ * final argument if the specified page has no children.
+ *
+ * PUBLIC: int __db_vrfy_childcursor __P((VRFY_DBINFO *, DBC **));
+ */
+int
+__db_vrfy_childcursor(vdp, dbcp)
+	VRFY_DBINFO *vdp;
+	DBC **dbcp;
+{
+	DB *child_dbp;
+	DBC *cursor;
+	int ret;
+
+	/* Open a cursor in the verifier's child database. */
+	child_dbp = vdp->cdbp;
+	ret = child_dbp->cursor(child_dbp, NULL, &cursor, 0);
+	if (ret == 0)
+		*dbcp = cursor;
+
+	return (ret);
+}
+
+/*
+ * __db_vrfy_childput --
+ * Add a child structure to the set for a given page.
+ *
+ * PUBLIC: int __db_vrfy_childput
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, VRFY_CHILDINFO *));
+ */
+int
+__db_vrfy_childput(vdp, pgno, cip)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ VRFY_CHILDINFO *cip;
+{
+ DB *cdbp;
+ DBC *cc;
+ DBT key, data;
+ VRFY_CHILDINFO *oldcip;
+ int ret;
+
+ cdbp = vdp->cdbp;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ /*
+ * We want to avoid adding multiple entries for a single child page;
+ * we only need to verify each child once, even if a child (such
+ * as an overflow key) is multiply referenced.
+ *
+ * However, we also need to make sure that when walking the list
+ * of children, we encounter them in the order they're referenced
+ * on a page. (This permits us, for example, to verify the
+ * prev_pgno/next_pgno chain of Btree leaf pages.)
+ *
+ * Check the child database to make sure that this page isn't
+ * already a child of the specified page number. If it's not,
+ * put it at the end of the duplicate set.
+ */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ return (ret);
+ for (ret = __db_vrfy_ccset(cc, pgno, &oldcip); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &oldcip))
+ if (oldcip->pgno == cip->pgno) {
+ /*
+ * Found a matching child. Return without
+ * putting it again.
+ */
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ return (ret);
+ return (0);
+ }
+ if (ret != DB_NOTFOUND) {
+ (void)__db_vrfy_ccclose(cc);
+ return (ret);
+ }
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ return (ret);
+
+ data.data = cip;
+ data.size = sizeof(VRFY_CHILDINFO);
+
+ return (cdbp->put(cdbp, NULL, &key, &data, 0));
+}
+
+/*
+ * __db_vrfy_ccset --
+ * Sets a cursor created with __db_vrfy_childcursor to the first
+ * child of the given pgno, and returns it in the third arg.
+ *
+ * PUBLIC: int __db_vrfy_ccset __P((DBC *, db_pgno_t, VRFY_CHILDINFO **));
+ */
+int
+__db_vrfy_ccset(dbc, pgno, cipp)
+ DBC *dbc;
+ db_pgno_t pgno;
+ VRFY_CHILDINFO **cipp;
+{
+ DBT key, data;
+ int ret;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ if ((ret = dbc->c_get(dbc, &key, &data, DB_SET)) != 0)
+ return (ret);
+
+ DB_ASSERT(data.size == sizeof(VRFY_CHILDINFO));
+ *cipp = (VRFY_CHILDINFO *)data.data;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_ccnext --
+ * Gets the next child of the given cursor created with
+ * __db_vrfy_childcursor, and returns it in the memory provided in the
+ * second arg.
+ *
+ * PUBLIC: int __db_vrfy_ccnext __P((DBC *, VRFY_CHILDINFO **));
+ */
+int
+__db_vrfy_ccnext(dbc, cipp)
+ DBC *dbc;
+ VRFY_CHILDINFO **cipp;
+{
+ DBT key, data;
+ int ret;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ if ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT_DUP)) != 0)
+ return (ret);
+
+ DB_ASSERT(data.size == sizeof(VRFY_CHILDINFO));
+ *cipp = (VRFY_CHILDINFO *)data.data;
+
+ return (0);
+}
+
+/*
+ * __db_vrfy_ccclose --
+ * Closes the cursor created with __db_vrfy_childcursor.
+ *
+ * This doesn't actually do anything interesting now, but it's
+ * not inconceivable that we might change the internal database usage
+ * and keep the interfaces the same, and a function call here or there
+ * seldom hurts anyone.
+ *
+ * PUBLIC: int __db_vrfy_ccclose __P((DBC *));
+ */
+int
+__db_vrfy_ccclose(dbc)
+ DBC *dbc;
+{
+
+ return (dbc->c_close(dbc));
+}
+
+/*
+ * __db_vrfy_pageinfo_create --
+ * Constructor for VRFY_PAGEINFO; allocates and initializes.
+ */
+static int
+__db_vrfy_pageinfo_create(dbenv, pgipp)
+ DB_ENV *dbenv;
+ VRFY_PAGEINFO **pgipp;
+{
+ VRFY_PAGEINFO *pgip;
+ int ret;
+
+ /*
+ * pageinfo structs are sometimes allocated here and sometimes
+ * allocated by fetching them from a database with DB_DBT_MALLOC.
+ * There's no easy way for the destructor to tell which was
+ * used, and so we always allocate with __os_umalloc so we can free
+ * with __os_ufree.
+ */
+ if ((ret = __os_umalloc(dbenv,
+ sizeof(VRFY_PAGEINFO), (void **)&pgip)) != 0)
+ return (ret);
+ memset(pgip, 0, sizeof(VRFY_PAGEINFO));
+
+ DB_ASSERT(pgip->pi_refcount == 0);
+
+ *pgipp = pgip;
+ return (0);
+}
+
+/*
+ * __db_salvage_init --
+ * Set up salvager database.
+ *
+ * PUBLIC: int __db_salvage_init __P((VRFY_DBINFO *));
+ */
+int
+__db_salvage_init(vdp)
+ VRFY_DBINFO *vdp;
+{
+ DB *dbp;
+ int ret;
+
+ if ((ret = db_create(&dbp, NULL, 0)) != 0)
+ return (ret);
+
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0)
+ goto err;
+
+ if ((ret = dbp->open(dbp,
+ NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0)) != 0)
+ goto err;
+
+ vdp->salvage_pages = dbp;
+ return (0);
+
+err: (void)dbp->close(dbp, 0);
+ return (ret);
+}
+
+/*
+ * __db_salvage_destroy --
+ * Close salvager database.
+ * PUBLIC: void __db_salvage_destroy __P((VRFY_DBINFO *));
+ */
+void
+__db_salvage_destroy(vdp)
+ VRFY_DBINFO *vdp;
+{
+ (void)vdp->salvage_pages->close(vdp->salvage_pages, 0);
+}
+
+/*
+ * __db_salvage_getnext --
+ * Get the next (first) unprinted page in the database of pages we need to
+ * print still. Delete entries for any already-printed pages we encounter
+ * in this search, as well as the page we're returning.
+ *
+ * PUBLIC: int __db_salvage_getnext
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t *, u_int32_t *));
+ */
+int
+__db_salvage_getnext(vdp, pgnop, pgtypep)
+	VRFY_DBINFO *vdp;
+	db_pgno_t *pgnop;
+	u_int32_t *pgtypep;
+{
+	DB *dbp;
+	DBC *dbc;
+	DBT key, data;
+	int ret;
+	u_int32_t pgtype;
+
+	dbp = vdp->salvage_pages;
+
+	memset(&key, 0, sizeof(DBT));
+	memset(&data, 0, sizeof(DBT));
+
+	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+		return (ret);
+
+	/*
+	 * Walk the salvage database in order, deleting each entry as we
+	 * go; stop at the first page not marked SALVAGE_IGNORE.
+	 */
+	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+		DB_ASSERT(data.size == sizeof(u_int32_t));
+		memcpy(&pgtype, data.data, sizeof(pgtype));
+
+		if ((ret = dbc->c_del(dbc, 0)) != 0)
+			goto err;
+		if (pgtype != SALVAGE_IGNORE)
+			goto found;
+	}
+
+	/* No more entries--ret probably equals DB_NOTFOUND. */
+
+	/* The "if (0)" makes "found:" reachable only via the goto above. */
+	if (0) {
+found:		DB_ASSERT(key.size == sizeof(db_pgno_t));
+		DB_ASSERT(data.size == sizeof(u_int32_t));
+
+		*pgnop = *(db_pgno_t *)key.data;
+		*pgtypep = *(u_int32_t *)data.data;
+	}
+
+err:	(void)dbc->c_close(dbc);
+	return (ret);
+}
+
+/*
+ * __db_salvage_isdone --
+ * Return whether or not the given pgno is already marked
+ * SALVAGE_IGNORE (meaning that we don't need to print it again).
+ *
+ * Returns DB_KEYEXIST if it is marked, 0 if not, or another error on
+ * error.
+ *
+ * PUBLIC: int __db_salvage_isdone __P((VRFY_DBINFO *, db_pgno_t));
+ */
+int
+__db_salvage_isdone(vdp, pgno)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+{
+ DBT key, data;
+ DB *dbp;
+ int ret;
+ u_int32_t currtype;
+
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ currtype = SALVAGE_INVALID;
+ data.data = &currtype;
+ data.ulen = sizeof(u_int32_t);
+ data.flags = DB_DBT_USERMEM;
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ /*
+ * Put an entry for this page, with pgno as key and type as data,
+ * unless it's already there and is marked done.
+ * If it's there and is marked anything else, that's fine--we
+ * want to mark it done.
+ */
+ ret = dbp->get(dbp, NULL, &key, &data, 0);
+ if (ret == 0) {
+ /*
+ * The key's already here. Check and see if it's already
+ * marked done. If it is, return DB_KEYEXIST. If it's not,
+ * return 0.
+ */
+ if (currtype == SALVAGE_IGNORE)
+ return (DB_KEYEXIST);
+ else
+ return (0);
+ } else if (ret != DB_NOTFOUND)
+ return (ret);
+
+ /* The pgno is not yet marked anything; return 0. */
+ return (0);
+}
+
+/*
+ * __db_salvage_markdone --
+ * Mark as done a given page.
+ *
+ * PUBLIC: int __db_salvage_markdone __P((VRFY_DBINFO *, db_pgno_t));
+ */
+int
+__db_salvage_markdone(vdp, pgno)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+{
+ DBT key, data;
+ DB *dbp;
+ int pgtype, ret;
+ u_int32_t currtype;
+
+ pgtype = SALVAGE_IGNORE;
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ currtype = SALVAGE_INVALID;
+ data.data = &currtype;
+ data.ulen = sizeof(u_int32_t);
+ data.flags = DB_DBT_USERMEM;
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ /*
+ * Put an entry for this page, with pgno as key and type as data,
+ * unless it's already there and is marked done.
+ * If it's there and is marked anything else, that's fine--we
+	 * want to mark it done, but __db_salvage_isdone only lets
+ * us know if it's marked IGNORE.
+ *
+ * We don't want to return DB_KEYEXIST, though; this will
+ * likely get passed up all the way and make no sense to the
+ * application. Instead, use DB_VERIFY_BAD to indicate that
+ * we've seen this page already--it probably indicates a
+ * multiply-linked page.
+ */
+ if ((ret = __db_salvage_isdone(vdp, pgno)) != 0)
+ return (ret == DB_KEYEXIST ? DB_VERIFY_BAD : ret);
+
+ data.size = sizeof(u_int32_t);
+ data.data = &pgtype;
+
+ return (dbp->put(dbp, NULL, &key, &data, 0));
+}
+
+/*
+ * __db_salvage_markneeded --
+ * If it has not yet been printed, make note of the fact that a page
+ * must be dealt with later.
+ *
+ * PUBLIC: int __db_salvage_markneeded
+ * PUBLIC: __P((VRFY_DBINFO *, db_pgno_t, u_int32_t));
+ */
+int
+__db_salvage_markneeded(vdp, pgno, pgtype)
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+
+ dbp = vdp->salvage_pages;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ key.data = &pgno;
+ key.size = sizeof(db_pgno_t);
+
+ data.data = &pgtype;
+ data.size = sizeof(u_int32_t);
+
+ /*
+ * Put an entry for this page, with pgno as key and type as data,
+ * unless it's already there, in which case it's presumably
+ * already been marked done.
+ */
+ ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE);
+ return (ret == DB_KEYEXIST ? 0 : ret);
+}
diff --git a/storage/bdb/db185/db185.c b/storage/bdb/db185/db185.c
new file mode 100644
index 00000000000..99d37bcf341
--- /dev/null
+++ b/storage/bdb/db185/db185.c
@@ -0,0 +1,594 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db185.c,v 11.28 2002/05/09 01:55:14 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db185_int.h"
+
+static int db185_close __P((DB185 *));
+static int db185_compare __P((DB *, const DBT *, const DBT *));
+static int db185_del __P((const DB185 *, const DBT185 *, u_int));
+static int db185_fd __P((const DB185 *));
+static int db185_get __P((const DB185 *, const DBT185 *, DBT185 *, u_int));
+static u_int32_t
+ db185_hash __P((DB *, const void *, u_int32_t));
+static void db185_openstderr __P((DB_FH *));
+static size_t db185_prefix __P((DB *, const DBT *, const DBT *));
+static int db185_put __P((const DB185 *, DBT185 *, const DBT185 *, u_int));
+static int db185_seq __P((const DB185 *, DBT185 *, DBT185 *, u_int));
+static int db185_sync __P((const DB185 *, u_int));
+
+/*
+ * EXTERN: #ifdef _DB185_INT_H_
+ * EXTERN: DB185 *__db185_open
+ * EXTERN: __P((const char *, int, int, DBTYPE, const void *));
+ * EXTERN: #else
+ * EXTERN: DB *__db185_open
+ * EXTERN: __P((const char *, int, int, DBTYPE, const void *));
+ * EXTERN: #endif
+ */
+DB185 *
+__db185_open(file, oflags, mode, type, openinfo)
+ const char *file;
+ int oflags, mode;
+ DBTYPE type;
+ const void *openinfo;
+{
+ const BTREEINFO *bi;
+ const HASHINFO *hi;
+ const RECNOINFO *ri;
+ DB *dbp;
+ DB185 *db185p;
+ DB_FH fh;
+ size_t nw;
+ int ret;
+
+ dbp = NULL;
+ db185p = NULL;
+
+ if ((ret = db_create(&dbp, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret = __os_calloc(NULL, 1, sizeof(DB185), &db185p)) != 0)
+ goto err;
+
+ /*
+ * !!!
+ * The DBTYPE enum wasn't initialized in DB 185, so it's off-by-one
+ * from DB 2.0.
+ */
+ switch (type) {
+ case 0: /* DB_BTREE */
+ type = DB_BTREE;
+ if ((bi = openinfo) != NULL) {
+ if (bi->flags & ~R_DUP)
+ goto einval;
+ if (bi->flags & R_DUP)
+ (void)dbp->set_flags(dbp, DB_DUP);
+ if (bi->cachesize != 0)
+ (void)dbp->set_cachesize
+ (dbp, 0, bi->cachesize, 0);
+ if (bi->minkeypage != 0)
+ (void)dbp->set_bt_minkey(dbp, bi->minkeypage);
+ if (bi->psize != 0)
+ (void)dbp->set_pagesize(dbp, bi->psize);
+ /*
+ * !!!
+ * Comparisons and prefix calls work because the DBT
+ * structures in 1.85 and 2.0 have the same initial
+ * fields.
+ */
+ if (bi->prefix != NULL) {
+ db185p->prefix = bi->prefix;
+ dbp->set_bt_prefix(dbp, db185_prefix);
+ }
+ if (bi->compare != NULL) {
+ db185p->compare = bi->compare;
+ dbp->set_bt_compare(dbp, db185_compare);
+ }
+ if (bi->lorder != 0)
+ dbp->set_lorder(dbp, bi->lorder);
+ }
+ break;
+ case 1: /* DB_HASH */
+ type = DB_HASH;
+ if ((hi = openinfo) != NULL) {
+ if (hi->bsize != 0)
+ (void)dbp->set_pagesize(dbp, hi->bsize);
+ if (hi->ffactor != 0)
+ (void)dbp->set_h_ffactor(dbp, hi->ffactor);
+ if (hi->nelem != 0)
+ (void)dbp->set_h_nelem(dbp, hi->nelem);
+ if (hi->cachesize != 0)
+ (void)dbp->set_cachesize
+ (dbp, 0, hi->cachesize, 0);
+ if (hi->hash != NULL) {
+ db185p->hash = hi->hash;
+ (void)dbp->set_h_hash(dbp, db185_hash);
+ }
+ if (hi->lorder != 0)
+ dbp->set_lorder(dbp, hi->lorder);
+ }
+
+ break;
+ case 2: /* DB_RECNO */
+ type = DB_RECNO;
+
+ /* DB 1.85 did renumbering by default. */
+ (void)dbp->set_flags(dbp, DB_RENUMBER);
+
+ /*
+ * !!!
+ * The file name given to DB 1.85 recno is the name of the DB
+ * 2.0 backing file. If the file doesn't exist, create it if
+ * the user has the O_CREAT flag set, DB 1.85 did it for you,
+ * and DB 2.0 doesn't.
+ *
+ * !!!
+ * Setting the file name to NULL specifies that we're creating
+ * a temporary backing file, in DB 2.X. If we're opening the
+ * DB file read-only, change the flags to read-write, because
+ * temporary backing files cannot be opened read-only, and DB
+ * 2.X will return an error. We are cheating here -- if the
+ * application does a put on the database, it will succeed --
+ * although that would be a stupid thing for the application
+ * to do.
+ *
+ * !!!
+ * Note, the file name in DB 1.85 was a const -- we don't do
+ * that in DB 2.0, so do that cast.
+ */
+ if (file != NULL) {
+ if (oflags & O_CREAT && __os_exists(file, NULL) != 0)
+ if (__os_openhandle(NULL, file,
+ oflags, mode, &fh) == 0)
+ (void)__os_closehandle(NULL, &fh);
+ (void)dbp->set_re_source(dbp, file);
+
+ if (O_RDONLY)
+ oflags &= ~O_RDONLY;
+ oflags |= O_RDWR;
+ file = NULL;
+ }
+
+ if ((ri = openinfo) != NULL) {
+ /*
+ * !!!
+ * We can't support the bfname field.
+ */
+#define BFMSG "DB: DB 1.85's recno bfname field is not supported.\n"
+ if (ri->bfname != NULL) {
+ db185_openstderr(&fh);
+ (void)__os_write(NULL, &fh,
+ BFMSG, sizeof(BFMSG) - 1, &nw);
+ goto einval;
+ }
+
+ if (ri->flags & ~(R_FIXEDLEN | R_NOKEY | R_SNAPSHOT))
+ goto einval;
+ if (ri->flags & R_FIXEDLEN) {
+ if (ri->bval != 0)
+ (void)dbp->set_re_pad(dbp, ri->bval);
+ if (ri->reclen != 0)
+ (void)dbp->set_re_len(dbp, ri->reclen);
+ } else
+ if (ri->bval != 0)
+ (void)dbp->set_re_delim(dbp, ri->bval);
+
+ /*
+ * !!!
+ * We ignore the R_NOKEY flag, but that's okay, it was
+ * only an optimization that was never implemented.
+ */
+ if (ri->flags & R_SNAPSHOT)
+ (void)dbp->set_flags(dbp, DB_SNAPSHOT);
+
+ if (ri->cachesize != 0)
+ (void)dbp->set_cachesize
+ (dbp, 0, ri->cachesize, 0);
+ if (ri->psize != 0)
+ (void)dbp->set_pagesize(dbp, ri->psize);
+ if (ri->lorder != 0)
+ dbp->set_lorder(dbp, ri->lorder);
+ }
+ break;
+ default:
+ goto einval;
+ }
+
+ db185p->close = db185_close;
+ db185p->del = db185_del;
+ db185p->fd = db185_fd;
+ db185p->get = db185_get;
+ db185p->put = db185_put;
+ db185p->seq = db185_seq;
+ db185p->sync = db185_sync;
+
+ /*
+ * Store a reference so we can indirect from the DB 1.85 structure
+ * to the underlying DB structure, and vice-versa. This has to be
+ * done BEFORE the DB::open method call because the hash callback
+ * is exercised as part of hash database initialiation.
+ */
+ db185p->dbp = dbp;
+ dbp->api_internal = db185p;
+
+ /* Open the database. */
+ if ((ret = dbp->open(dbp, NULL,
+ file, NULL, type, __db_oflags(oflags), mode)) != 0)
+ goto err;
+
+ /* Create the cursor used for sequential ops. */
+ if ((ret = dbp->cursor(dbp, NULL, &((DB185 *)db185p)->dbc, 0)) != 0)
+ goto err;
+
+ return (db185p);
+
+err: if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ if (db185p != NULL)
+ __os_free(NULL, db185p);
+ if (dbp != NULL)
+ (void)dbp->close(dbp, 0);
+
+ __os_set_errno(ret);
+ return (NULL);
+}
+
+static int
+db185_close(db185p)
+ DB185 *db185p;
+{
+ DB *dbp;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ ret = dbp->close(dbp, 0);
+
+ __os_free(NULL, db185p);
+
+ if (ret == 0)
+ return (0);
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+ ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_del(db185p, key185, flags)
+ const DB185 *db185p;
+ const DBT185 *key185;
+ u_int flags;
+{
+ DB *dbp;
+ DBT key;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+
+ if (flags & ~R_CURSOR)
+ goto einval;
+ if (flags & R_CURSOR)
+ ret = db185p->dbc->c_del(db185p->dbc, 0);
+ else
+ ret = dbp->del(dbp, NULL, &key, 0);
+
+ switch (ret) {
+ case 0:
+ return (0);
+ case DB_NOTFOUND:
+ return (1);
+ }
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_fd(db185p)
+ const DB185 *db185p;
+{
+ DB *dbp;
+ int fd, ret;
+
+ dbp = db185p->dbp;
+
+ if ((ret = dbp->fd(dbp, &fd)) == 0)
+ return (fd);
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+ ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_get(db185p, key185, data185, flags)
+ const DB185 *db185p;
+ const DBT185 *key185;
+ DBT185 *data185;
+ u_int flags;
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+
+ if (flags)
+ goto einval;
+
+ switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ data185->data = data.data;
+ data185->size = data.size;
+ return (0);
+ case DB_NOTFOUND:
+ return (1);
+ }
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_put(db185p, key185, data185, flags)
+ const DB185 *db185p;
+ DBT185 *key185;
+ const DBT185 *data185;
+ u_int flags;
+{
+ DB *dbp;
+ DBC *dbcp_put;
+ DBT key, data;
+ int ret, t_ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+
+ switch (flags) {
+ case 0:
+ ret = dbp->put(dbp, NULL, &key, &data, 0);
+ break;
+ case R_CURSOR:
+ ret = db185p->dbc->c_put(db185p->dbc, &key, &data, DB_CURRENT);
+ break;
+ case R_IAFTER:
+ case R_IBEFORE:
+ if (dbp->type != DB_RECNO)
+ goto einval;
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp_put, 0)) != 0)
+ break;
+ if ((ret =
+ dbcp_put->c_get(dbcp_put, &key, &data, DB_SET)) == 0) {
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+ ret = dbcp_put->c_put(dbcp_put, &key, &data,
+ flags == R_IAFTER ? DB_AFTER : DB_BEFORE);
+ }
+ if ((t_ret = dbcp_put->c_close(dbcp_put)) != 0 && ret == 0)
+ ret = t_ret;
+ break;
+ case R_NOOVERWRITE:
+ ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE);
+ break;
+ case R_SETCURSOR:
+ if (dbp->type != DB_BTREE && dbp->type != DB_RECNO)
+ goto einval;
+
+ if ((ret = dbp->put(dbp, NULL, &key, &data, 0)) != 0)
+ break;
+ ret =
+ db185p->dbc->c_get(db185p->dbc, &key, &data, DB_SET_RANGE);
+ break;
+ default:
+ goto einval;
+ }
+
+ switch (ret) {
+ case 0:
+ key185->data = key.data;
+ key185->size = key.size;
+ return (0);
+ case DB_KEYEXIST:
+ return (1);
+ }
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_seq(db185p, key185, data185, flags)
+ const DB185 *db185p;
+ DBT185 *key185, *data185;
+ u_int flags;
+{
+ DB *dbp;
+ DBT key, data;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = key185->data;
+ key.size = key185->size;
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+
+ switch (flags) {
+ case R_CURSOR:
+ flags = DB_SET_RANGE;
+ break;
+ case R_FIRST:
+ flags = DB_FIRST;
+ break;
+ case R_LAST:
+ if (dbp->type != DB_BTREE && dbp->type != DB_RECNO)
+ goto einval;
+ flags = DB_LAST;
+ break;
+ case R_NEXT:
+ flags = DB_NEXT;
+ break;
+ case R_PREV:
+ if (dbp->type != DB_BTREE && dbp->type != DB_RECNO)
+ goto einval;
+ flags = DB_PREV;
+ break;
+ default:
+ goto einval;
+ }
+ switch (ret = db185p->dbc->c_get(db185p->dbc, &key, &data, flags)) {
+ case 0:
+ key185->data = key.data;
+ key185->size = key.size;
+ data185->data = data.data;
+ data185->size = data.size;
+ return (0);
+ case DB_NOTFOUND:
+ return (1);
+ }
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static int
+db185_sync(db185p, flags)
+ const DB185 *db185p;
+ u_int flags;
+{
+ DB *dbp;
+ DB_FH fh;
+ size_t nw;
+ int ret;
+
+ dbp = db185p->dbp;
+
+ switch (flags) {
+ case 0:
+ break;
+ case R_RECNOSYNC:
+ /*
+ * !!!
+ * We can't support the R_RECNOSYNC flag.
+ */
+#define RSMSG "DB: DB 1.85's R_RECNOSYNC sync flag is not supported.\n"
+ db185_openstderr(&fh);
+ (void)__os_write(NULL, &fh, RSMSG, sizeof(RSMSG) - 1, &nw);
+ goto einval;
+ default:
+ goto einval;
+ }
+
+ if ((ret = dbp->sync(dbp, 0)) == 0)
+ return (0);
+
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
+ return (-1);
+}
+
+static void
+db185_openstderr(fhp)
+ DB_FH *fhp;
+{
+ /* Dummy up the results of an __os_openhandle() on stderr. */
+ memset(fhp, 0, sizeof(*fhp));
+ F_SET(fhp, DB_FH_VALID);
+
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+ fhp->fd = STDERR_FILENO;
+}
+
+/*
+ * db185_compare --
+ * Cutout routine to call the user's Btree comparison function.
+ */
+static int
+db185_compare(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ return (((DB185 *)dbp->api_internal)->compare(a, b));
+}
+
+/*
+ * db185_prefix --
+ * Cutout routine to call the user's Btree prefix function.
+ */
+static size_t
+db185_prefix(dbp, a, b)
+ DB *dbp;
+ const DBT *a, *b;
+{
+ return (((DB185 *)dbp->api_internal)->prefix(a, b));
+}
+
+/*
+ * db185_hash --
+ * Cutout routine to call the user's hash function.
+ */
+static u_int32_t
+db185_hash(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ return (((DB185 *)dbp->api_internal)->hash(key, (size_t)len));
+}
diff --git a/storage/bdb/db185/db185_int.in b/storage/bdb/db185/db185_int.in
new file mode 100644
index 00000000000..a4a3ce19c17
--- /dev/null
+++ b/storage/bdb/db185/db185_int.in
@@ -0,0 +1,129 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: db185_int.in,v 11.12 2002/01/11 15:51:51 bostic Exp $
+ */
+
+#ifndef _DB185_INT_H_
+#define _DB185_INT_H_
+
+/* Routine flags. */
+#define R_CURSOR 1 /* del, put, seq */
+#define __R_UNUSED 2 /* UNUSED */
+#define R_FIRST 3 /* seq */
+#define R_IAFTER 4 /* put (RECNO) */
+#define R_IBEFORE 5 /* put (RECNO) */
+#define R_LAST 6 /* seq (BTREE, RECNO) */
+#define R_NEXT 7 /* seq */
+#define R_NOOVERWRITE 8 /* put */
+#define R_PREV 9 /* seq (BTREE, RECNO) */
+#define R_SETCURSOR 10 /* put (RECNO) */
+#define R_RECNOSYNC 11 /* sync (RECNO) */
+
+typedef struct {
+ void *data; /* data */
+ size_t size; /* data length */
+} DBT185;
+
+/* Access method description structure. */
+typedef struct __db185 {
+ DBTYPE type; /* Underlying db type. */
+ int (*close) __P((struct __db185 *));
+ int (*del) __P((const struct __db185 *, const DBT185 *, u_int));
+ int (*get)
+ __P((const struct __db185 *, const DBT185 *, DBT185 *, u_int));
+ int (*put)
+ __P((const struct __db185 *, DBT185 *, const DBT185 *, u_int));
+ int (*seq)
+ __P((const struct __db185 *, DBT185 *, DBT185 *, u_int));
+ int (*sync) __P((const struct __db185 *, u_int));
+ DB *dbp; /* DB structure. Was void *internal. */
+ int (*fd) __P((const struct __db185 *));
+
+ /*
+ * !!!
+ * The following elements added to the end of the DB 1.85 DB
+ * structure.
+ */
+ DBC *dbc; /* DB cursor. */
+ /* Various callback functions. */
+ int (*compare) __P((const DBT *, const DBT *));
+ size_t (*prefix) __P((const DBT *, const DBT *));
+ u_int32_t (*hash) __P((const void *, size_t));
+} DB185;
+
+/* Structure used to pass parameters to the btree routines. */
+typedef struct {
+#define R_DUP 0x01 /* duplicate keys */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t maxkeypage; /* maximum keys per page */
+ u_int32_t minkeypage; /* minimum keys per page */
+ u_int32_t psize; /* page size */
+ int (*compare) /* comparison function */
+ __P((const DBT *, const DBT *));
+ size_t (*prefix) /* prefix function */
+ __P((const DBT *, const DBT *));
+ int lorder; /* byte order */
+} BTREEINFO;
+
+/* Structure used to pass parameters to the hashing routines. */
+typedef struct {
+ u_int32_t bsize; /* bucket size */
+ u_int32_t ffactor; /* fill factor */
+ u_int32_t nelem; /* number of elements */
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t /* hash function */
+ (*hash) __P((const void *, size_t));
+ int lorder; /* byte order */
+} HASHINFO;
+
+/* Structure used to pass parameters to the record routines. */
+typedef struct {
+#define R_FIXEDLEN 0x01 /* fixed-length records */
+#define R_NOKEY 0x02 /* key not required */
+#define R_SNAPSHOT 0x04 /* snapshot the input */
+ u_int32_t flags;
+ u_int32_t cachesize; /* bytes to cache */
+ u_int32_t psize; /* page size */
+ int lorder; /* byte order */
+ size_t reclen; /* record length (fixed-length records) */
+ u_char bval; /* delimiting byte (variable-length records */
+ char *bfname; /* btree file name */
+} RECNOINFO;
+#endif /* !_DB185_INT_H_ */
diff --git a/storage/bdb/db_archive/db_archive.c b/storage/bdb/db_archive/db_archive.c
new file mode 100644
index 00000000000..dc8718e4c03
--- /dev/null
+++ b/storage/bdb/db_archive/db_archive.c
@@ -0,0 +1,180 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_archive.c,v 11.36 2002/03/28 20:13:34 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_archive";
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, ret, verbose;
+ char **file, *home, **list, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ flags = 0;
+ e_close = exitval = verbose = 0;
+ home = passwd = NULL;
+ while ((ch = getopt(argc, argv, "ah:lP:sVv")) != EOF)
+ switch (ch) {
+ case 'a':
+ LF_SET(DB_ARCH_ABS);
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ LF_SET(DB_ARCH_LOG);
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 's':
+ LF_SET(DB_ARCH_DATA);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose)
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home, DB_CREATE |
+ DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Get the list of names. */
+ if ((ret = dbenv->log_archive(dbenv, &list, flags)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_archive");
+ goto shutdown;
+ }
+
+ /* Print the list of names. */
+ if (list != NULL) {
+ for (file = list; *file != NULL; ++file)
+ printf("%s\n", *file);
+ free(list);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+	/* Print a brief usage summary on stderr and report failure. */
+	(void)fputs(
+	    "usage: db_archive [-alsVv] [-h home] [-P password]\n", stderr);
+	return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+	const char *progname;
+{
+	int lib_major, lib_minor, lib_patch;
+
+	/*
+	 * Make sure the DB library we're linked against matches the
+	 * headers this program was compiled with.
+	 */
+	(void)db_version(&lib_major, &lib_minor, &lib_patch);
+	if (lib_major == DB_VERSION_MAJOR &&
+	    lib_minor == DB_VERSION_MINOR && lib_patch == DB_VERSION_PATCH)
+		return (0);
+
+	fprintf(stderr,
+	    "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+	    progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+	    DB_VERSION_PATCH, lib_major, lib_minor, lib_patch);
+	return (EXIT_FAILURE);
+}
diff --git a/storage/bdb/db_checkpoint/db_checkpoint.c b/storage/bdb/db_checkpoint/db_checkpoint.c
new file mode 100644
index 00000000000..a59572c5f76
--- /dev/null
+++ b/storage/bdb/db_checkpoint/db_checkpoint.c
@@ -0,0 +1,243 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_checkpoint.c,v 11.46 2002/08/08 03:50:31 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+/*
+ * main --
+ *	db_checkpoint: checkpoint a database environment, either once on
+ *	demand (-1/DB_FORCE), every -p minutes, or whenever -k kilobytes
+ *	of log have been written (in which case we poll every 30 seconds).
+ */
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_ENV *dbenv;
+ const char *progname = "db_checkpoint";
+ time_t now;
+ long argval;
+ u_int32_t flags, kbytes, minutes, seconds;
+ int ch, e_close, exitval, once, ret, verbose;
+ char *home, *logfile, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ /*
+ * !!!
+ * Don't allow a fully unsigned 32-bit number, some compilers get
+ * upset and require it to be specified in hexadecimal and so on.
+ */
+#define MAX_UINT32_T 2147483647
+
+ kbytes = minutes = 0;
+ e_close = exitval = once = verbose = 0;
+ flags = 0;
+ home = logfile = passwd = NULL;
+ while ((ch = getopt(argc, argv, "1h:k:L:P:p:Vv")) != EOF)
+ switch (ch) {
+ case '1':
+ once = 1;
+ flags = DB_FORCE;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ kbytes = argval;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'P':
+ /* Copy the password, then zero out the command-line copy. */
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'p':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ minutes = argval;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (usage());
+
+ /* There must be some trigger for checkpointing. */
+ if (once == 0 && kbytes == 0 && minutes == 0) {
+ (void)fprintf(stderr,
+ "%s: at least one of -1, -k and -p must be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /* Initialize the environment. */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Register the standard pgin/pgout functions, in case we do I/O. */
+ if ((ret = dbenv->memp_register(
+ dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) {
+ dbenv->err(dbenv, ret,
+ "DB_ENV->memp_register: failed to register access method functions");
+ goto shutdown;
+ }
+
+ /*
+ * If we have only a time delay, then we'll sleep the right amount
+ * to wake up when a checkpoint is necessary. If we have a "kbytes"
+ * field set, then we'll check every 30 seconds.
+ */
+ seconds = kbytes != 0 ? 30 : minutes * 60;
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "checkpoint: %s", ctime(&now));
+ }
+
+ if ((ret = dbenv->txn_checkpoint(dbenv,
+ kbytes, minutes, flags)) != 0) {
+ dbenv->err(dbenv, ret, "txn_checkpoint");
+ goto shutdown;
+ }
+
+ if (once)
+ break;
+
+ (void)__os_sleep(dbenv, seconds, 0);
+ }
+
+ /* The "if (0)" body never runs; "shutdown" is the error exit path. */
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * usage --
+ *	Print the command-line synopsis on stderr and report failure.
+ */
+int
+usage()
+{
+	static const char *line1 = "usage: db_checkpoint [-1Vv]";
+	static const char *line2 =
+	    "[-h home] [-k kbytes] [-L file] [-P password] [-p min]";
+
+	(void)fprintf(stderr, "%s\n\t%s\n", line1, line2);
+	return (EXIT_FAILURE);
+}
+
+/*
+ * version_check --
+ *	Verify that the version of the DB library we are running against
+ *	matches the headers this utility was compiled with.  Returns 0 on
+ *	a match, EXIT_FAILURE (with a message on stderr) otherwise.
+ */
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/storage/bdb/db_deadlock/db_deadlock.c b/storage/bdb/db_deadlock/db_deadlock.c
new file mode 100644
index 00000000000..523918b9ea4
--- /dev/null
+++ b/storage/bdb/db_deadlock/db_deadlock.c
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_deadlock.c,v 11.38 2002/08/08 03:50:32 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+/*
+ * main --
+ *	db_deadlock: run the deadlock detector against an existing
+ *	environment, either once or repeatedly every -t sec.usec
+ *	interval, using the victim-selection policy chosen with -a.
+ */
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_deadlock";
+ DB_ENV *dbenv;
+ u_int32_t atype;
+ time_t now;
+ long secs, usecs;
+ int ch, e_close, exitval, ret, verbose;
+ char *home, *logfile, *str;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ atype = DB_LOCK_DEFAULT;
+ home = logfile = NULL;
+ secs = usecs = 0;
+ e_close = exitval = verbose = 0;
+ while ((ch = getopt(argc, argv, "a:h:L:t:Vvw")) != EOF)
+ switch (ch) {
+ case 'a':
+ /* Single-letter victim-selection policy. */
+ switch (optarg[0]) {
+ case 'e':
+ atype = DB_LOCK_EXPIRE;
+ break;
+ case 'm':
+ atype = DB_LOCK_MAXLOCKS;
+ break;
+ case 'n':
+ atype = DB_LOCK_MINLOCKS;
+ break;
+ case 'o':
+ atype = DB_LOCK_OLDEST;
+ break;
+ case 'w':
+ atype = DB_LOCK_MINWRITE;
+ break;
+ case 'y':
+ atype = DB_LOCK_YOUNGEST;
+ break;
+ default:
+ return (usage());
+ /* NOTREACHED */
+ }
+ if (optarg[1] != '\0')
+ return (usage());
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 't':
+ /* Parse "sec.usec"; either part may be omitted. */
+ if ((str = strchr(optarg, '.')) != NULL) {
+ *str++ = '\0';
+ if (*str != '\0' && __db_getlong(
+ NULL, progname, str, 0, LONG_MAX, &usecs))
+ return (EXIT_FAILURE);
+ }
+ if (*optarg != '\0' && __db_getlong(
+ NULL, progname, optarg, 0, LONG_MAX, &secs))
+ return (EXIT_FAILURE);
+ if (secs == 0 && usecs == 0)
+ return (usage());
+
+ break;
+
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case 'w': /* Undocumented. */
+ /* Detect every 100ms (100000 us) when polling. */
+ secs = 0;
+ usecs = 100000;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_WAITSFOR, 1);
+ }
+
+ /* An environment is required. */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "running at %.24s", ctime(&now));
+ }
+
+ if ((ret = dbenv->lock_detect(dbenv, 0, atype, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->lock_detect");
+ goto shutdown;
+ }
+
+ /* Make a pass every "secs" secs and "usecs" usecs. */
+ if (secs == 0 && usecs == 0)
+ break;
+ (void)__os_sleep(dbenv, secs, usecs);
+ }
+
+ /* The "if (0)" body never runs; "shutdown" is the error exit path. */
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * usage --
+ *	Print the command-line synopsis on stderr and report failure.
+ */
+int
+usage()
+{
+	static const char *line1 = "usage: db_deadlock [-Vv]";
+	static const char *line2 =
+	    "[-a e | m | n | o | w | y] [-h home] [-L file] [-t sec.usec]";
+
+	(void)fprintf(stderr, "%s\n\t%s\n", line1, line2);
+	return (EXIT_FAILURE);
+}
+
+/*
+ * version_check --
+ *	Verify that the version of the DB library we are running against
+ *	matches the headers this utility was compiled with.  Returns 0 on
+ *	a match, EXIT_FAILURE (with a message on stderr) otherwise.
+ */
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/storage/bdb/db_dump/db_dump.c b/storage/bdb/db_dump/db_dump.c
new file mode 100644
index 00000000000..143884a3fa8
--- /dev/null
+++ b/storage/bdb/db_dump/db_dump.c
@@ -0,0 +1,611 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_dump.c,v 11.80 2002/08/08 03:50:34 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+int db_init __P((DB_ENV *, char *, int, u_int32_t, int *));
+int dump __P((DB *, int, int));
+int dump_sub __P((DB_ENV *, DB *, char *, int, int));
+int is_sub __P((DB *, int *));
+int main __P((int, char *[]));
+int show_subs __P((DB *));
+int usage __P((void));
+int version_check __P((const char *));
+
+/*
+ * main --
+ *	db_dump: dump a database (or its subdatabases) in the standard
+ *	dump format, or run the salvager (-r/-R) over a possibly
+ *	corrupted file.
+ */
+int
+main(argc, argv)
+	int argc;
+	char *argv[];
+{
+	extern char *optarg;
+	extern int optind;
+	const char *progname = "db_dump";
+	DB_ENV *dbenv;
+	DB *dbp;
+	u_int32_t cache;
+	int ch, d_close;
+	int e_close, exitval, keyflag, lflag, nflag, pflag, private;
+	int ret, Rflag, rflag, resize, subs;
+	char *dopt, *home, *passwd, *subname;
+
+	if ((ret = version_check(progname)) != 0)
+		return (ret);
+
+	dbp = NULL;
+	d_close = e_close = exitval = lflag = nflag = pflag = rflag = Rflag = 0;
+	keyflag = 0;
+	cache = MEGABYTE;
+	private = 0;
+	dopt = home = passwd = subname = NULL;
+	while ((ch = getopt(argc, argv, "d:f:h:klNpP:rRs:V")) != EOF)
+		switch (ch) {
+		case 'd':
+			dopt = optarg;
+			break;
+		case 'f':
+			/* Redirect the dump output to a file. */
+			if (freopen(optarg, "w", stdout) == NULL) {
+				fprintf(stderr, "%s: %s: reopen: %s\n",
+				    progname, optarg, strerror(errno));
+				return (EXIT_FAILURE);
+			}
+			break;
+		case 'h':
+			home = optarg;
+			break;
+		case 'k':
+			keyflag = 1;
+			break;
+		case 'l':
+			lflag = 1;
+			break;
+		case 'N':
+			nflag = 1;
+			break;
+		case 'P':
+			/* Copy the password, then zero the command-line copy. */
+			passwd = strdup(optarg);
+			memset(optarg, 0, strlen(optarg));
+			if (passwd == NULL) {
+				fprintf(stderr, "%s: strdup: %s\n",
+				    progname, strerror(errno));
+				return (EXIT_FAILURE);
+			}
+			break;
+		case 'p':
+			pflag = 1;
+			break;
+		case 's':
+			subname = optarg;
+			break;
+		case 'R':
+			Rflag = 1;
+			/* DB_AGGRESSIVE requires DB_SALVAGE */
+			/* FALLTHROUGH */
+		case 'r':
+			rflag = 1;
+			break;
+		case 'V':
+			printf("%s\n", db_version(NULL, NULL, NULL));
+			return (EXIT_SUCCESS);
+		case '?':
+		default:
+			return (usage());
+		}
+	argc -= optind;
+	argv += optind;
+
+	if (argc != 1)
+		return (usage());
+
+	if (dopt != NULL && pflag) {
+		fprintf(stderr,
+		    "%s: the -d and -p options may not both be specified\n",
+		    progname);
+		return (EXIT_FAILURE);
+	}
+	if (lflag && subname != NULL) {
+		fprintf(stderr,
+		    "%s: the -l and -s options may not both be specified\n",
+		    progname);
+		return (EXIT_FAILURE);
+	}
+
+	/*
+	 * Bug fix: these two messages previously passed the message text
+	 * and the program name to fprintf in the wrong order for the
+	 * "%s: %s" format, printing "<message>: db_dump".  Match the form
+	 * of the option checks above so the program name leads.
+	 */
+	if (keyflag && rflag) {
+		fprintf(stderr,
+		    "%s: the -k and -r or -R options may not both be specified\n",
+		    progname);
+		return (EXIT_FAILURE);
+	}
+
+	if (subname != NULL && rflag) {
+		fprintf(stderr,
+		    "%s: the -s and -r or -R options may not both be specified\n",
+		    progname);
+		return (EXIT_FAILURE);
+	}
+
+	/* Handle possible interruptions. */
+	__db_util_siginit();
+
+	/*
+	 * Create an environment object and initialize it for error
+	 * reporting.  We come back to "retry" when a private environment's
+	 * cache turned out to be too small and must be rebuilt.
+	 */
+retry:	if ((ret = db_env_create(&dbenv, 0)) != 0) {
+		fprintf(stderr,
+		    "%s: db_env_create: %s\n", progname, db_strerror(ret));
+		goto err;
+	}
+	e_close = 1;
+
+	dbenv->set_errfile(dbenv, stderr);
+	dbenv->set_errpfx(dbenv, progname);
+	if (nflag) {
+		if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+			dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+			goto err;
+		}
+		if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+			dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+			goto err;
+		}
+	}
+	if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+	    passwd, DB_ENCRYPT_AES)) != 0) {
+		dbenv->err(dbenv, ret, "set_passwd");
+		goto err;
+	}
+
+	/* Initialize the environment. */
+	if (db_init(dbenv, home, rflag, cache, &private) != 0)
+		goto err;
+
+	/* Create the DB object and open the file. */
+	if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+		dbenv->err(dbenv, ret, "db_create");
+		goto err;
+	}
+	d_close = 1;
+
+	/*
+	 * If we're salvaging, don't do an open; it might not be safe.
+	 * Dispatch now into the salvager.
+	 */
+	if (rflag) {
+		if ((ret = dbp->verify(dbp, argv[0], NULL, stdout,
+		    DB_SALVAGE |
+		    (Rflag ? DB_AGGRESSIVE : 0) |
+		    (pflag ? DB_PRINTABLE : 0))) != 0)
+			goto err;
+		exitval = 0;
+		goto done;
+	}
+
+	if ((ret = dbp->open(dbp, NULL,
+	    argv[0], subname, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+		dbp->err(dbp, ret, "open: %s", argv[0]);
+		goto err;
+	}
+	if (private != 0) {
+		if ((ret = __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+			goto err;
+		if (resize) {
+			/*
+			 * The private cache was too small for this database:
+			 * tear everything down and retry with the size
+			 * __db_util_cache stored in "cache".
+			 */
+			(void)dbp->close(dbp, 0);
+			d_close = 0;
+
+			(void)dbenv->close(dbenv, 0);
+			e_close = 0;
+			goto retry;
+		}
+	}
+
+	if (dopt != NULL) {
+		/*
+		 * Bug fix: capture __db_dump's return value so the error
+		 * report doesn't print a stale "ret" from the earlier open.
+		 */
+		if ((ret = __db_dump(dbp, dopt, NULL)) != 0) {
+			dbp->err(dbp, ret, "__db_dump: %s", argv[0]);
+			goto err;
+		}
+	} else if (lflag) {
+		if (is_sub(dbp, &subs))
+			goto err;
+		if (subs == 0) {
+			dbp->errx(dbp,
+			    "%s: does not contain multiple databases", argv[0]);
+			goto err;
+		}
+		if (show_subs(dbp))
+			goto err;
+	} else {
+		subs = 0;
+		if (subname == NULL && is_sub(dbp, &subs))
+			goto err;
+		if (subs) {
+			if (dump_sub(dbenv, dbp, argv[0], pflag, keyflag))
+				goto err;
+		} else
+			if (__db_prheader(dbp, NULL, pflag, keyflag, stdout,
+			    __db_verify_callback, NULL, 0) ||
+			    dump(dbp, pflag, keyflag))
+				goto err;
+	}
+
+	/* The "if (0)" body never runs; "err" is the error exit path. */
+	if (0) {
+err:		exitval = 1;
+	}
+done:	if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+		exitval = 1;
+		dbenv->err(dbenv, ret, "close");
+	}
+	if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+		exitval = 1;
+		fprintf(stderr,
+		    "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+	}
+
+	/* Resend any caught signal. */
+	__db_util_sigresend();
+
+	return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ *
+ * dbenv	environment handle created by the caller.
+ * home		environment home directory (may be NULL).
+ * is_salvage	non-zero when running the salvager (-r/-R).
+ * cache	mpool cache size in bytes for a fallback private environment.
+ * is_privatep	set to non-zero if we fell back to a private environment.
+ *
+ * Returns 0 on success, 1 on error (after reporting it).
+ */
+int
+db_init(dbenv, home, is_salvage, cache, is_privatep)
+ DB_ENV *dbenv;
+ char *home;
+ int is_salvage;
+ u_int32_t cache;
+ int *is_privatep;
+{
+ int ret;
+
+ /*
+ * Try and use the underlying environment when opening a database.
+ * We wish to use the buffer pool so our information is as up-to-date
+ * as possible, even if the mpool cache hasn't been flushed.
+ *
+ * If we are not doing a salvage, we wish to use the DB_JOINENV flag;
+ * if a locking system is present, this will let us use it and be
+ * safe to run concurrently with other threads of control. (We never
+ * need to use transactions explicitly, as we're read-only.) Note
+ * that in CDB, too, this will configure our environment
+ * appropriately, and our cursors will (correctly) do locking as CDB
+ * read cursors.
+ *
+ * If we are doing a salvage, the verification code will protest
+ * if we initialize transactions, logging, or locking; do an
+ * explicit DB_INIT_MPOOL to try to join any existing environment
+ * before we create our own.
+ */
+ *is_privatep = 0;
+ if (dbenv->open(dbenv, home,
+ DB_USE_ENVIRON | (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0) == 0)
+ return (0);
+
+ /*
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ *is_privatep = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) == 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
+
+/*
+ * is_sub --
+ *	Return if the database contains subdatabases.
+ *
+ * Sets *yesno to 1 if the database contains subdatabases, 0 otherwise.
+ * Returns 0 on success, non-zero on error (after reporting it).
+ */
+int
+is_sub(dbp, yesno)
+	DB *dbp;
+	int *yesno;
+{
+	DB_BTREE_STAT *btsp;
+	DB_HASH_STAT *hsp;
+	int ret;
+
+	/*
+	 * Bug fix: default the out-parameter to "no subdatabases" so it is
+	 * always well-defined.  Previously *yesno was left untouched for
+	 * DB_QUEUE, and the -l code path read it uninitialized.
+	 */
+	*yesno = 0;
+
+	switch (dbp->type) {
+	case DB_BTREE:
+	case DB_RECNO:
+		if ((ret = dbp->stat(dbp, &btsp, DB_FAST_STAT)) != 0) {
+			dbp->err(dbp, ret, "DB->stat");
+			return (ret);
+		}
+		*yesno = btsp->bt_metaflags & BTM_SUBDB ? 1 : 0;
+		free(btsp);
+		break;
+	case DB_HASH:
+		if ((ret = dbp->stat(dbp, &hsp, DB_FAST_STAT)) != 0) {
+			dbp->err(dbp, ret, "DB->stat");
+			return (ret);
+		}
+		*yesno = hsp->hash_metaflags & DB_HASH_SUBDB ? 1 : 0;
+		free(hsp);
+		break;
+	case DB_QUEUE:
+		/* Queue databases never contain subdatabases. */
+		break;
+	default:
+		dbp->errx(dbp, "unknown database type");
+		return (1);
+	}
+	return (0);
+}
+
+/*
+ * dump_sub --
+ *	Dump out the records for a DB containing subdatabases.
+ *
+ * Walks the parent (master) database, whose keys are the subdatabase
+ * names, opening and dumping each subdatabase in turn.  Returns 0 on
+ * success, 1 on error (after reporting it).
+ */
+int
+dump_sub(dbenv, parent_dbp, parent_name, pflag, keyflag)
+	DB_ENV *dbenv;
+	DB *parent_dbp;
+	char *parent_name;
+	int pflag, keyflag;
+{
+	DB *dbp;
+	DBC *dbcp;
+	DBT key, data;
+	int ret;
+	char *subdb;
+
+	/*
+	 * Get a cursor and step through the database, dumping out each
+	 * subdatabase.
+	 */
+	if ((ret = parent_dbp->cursor(parent_dbp, NULL, &dbcp, 0)) != 0) {
+		dbenv->err(dbenv, ret, "DB->cursor");
+		return (1);
+	}
+
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+		/* Nul terminate the subdatabase name. */
+		if ((subdb = malloc(key.size + 1)) == NULL) {
+			dbenv->err(dbenv, ENOMEM, NULL);
+			goto err;
+		}
+		memcpy(subdb, key.data, key.size);
+		subdb[key.size] = '\0';
+
+		/* Create the DB object and open the file. */
+		if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+			dbenv->err(dbenv, ret, "db_create");
+			free(subdb);
+			goto err;
+		}
+		if ((ret = dbp->open(dbp, NULL,
+		    parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0)
+			dbp->err(dbp, ret,
+			    "DB->open: %s:%s", parent_name, subdb);
+		if (ret == 0 &&
+		    (__db_prheader(dbp, subdb, pflag, keyflag, stdout,
+		    __db_verify_callback, NULL, 0) ||
+		    dump(dbp, pflag, keyflag)))
+			ret = 1;
+		(void)dbp->close(dbp, 0);
+		free(subdb);
+		if (ret != 0)
+			goto err;
+	}
+	if (ret != DB_NOTFOUND) {
+		/*
+		 * Bug fix: report through the environment handle.  The old
+		 * code called dbp->err here, but the per-subdatabase handle
+		 * has already been closed at this point -- and if the very
+		 * first c_get failed, dbp was never initialized at all.
+		 */
+		dbenv->err(dbenv, ret, "DBcursor->get");
+		goto err;
+	}
+
+	if ((ret = dbcp->c_close(dbcp)) != 0) {
+		dbenv->err(dbenv, ret, "DBcursor->close");
+		return (1);
+	}
+
+	return (0);
+
+	/* Shared error exit: don't leak the open cursor. */
+err:	(void)dbcp->c_close(dbcp);
+	return (1);
+}
+
+/*
+ * show_subs --
+ *	Display the subdatabases for a database.
+ *
+ * Prints the key (the subdatabase name) of every key/data pair in the
+ * master database to stdout.  Returns 0 on success, 1 on error.
+ */
+int
+show_subs(dbp)
+	DB *dbp;
+{
+	DBC *cursor;
+	DBT name, val;
+	int ret;
+
+	/* Open a cursor over the master database. */
+	if ((ret = dbp->cursor(dbp, NULL, &cursor, 0)) != 0) {
+		dbp->err(dbp, ret, "DB->cursor");
+		return (1);
+	}
+
+	memset(&name, 0, sizeof(name));
+	memset(&val, 0, sizeof(val));
+
+	/* Print each key; the paired data isn't interesting here. */
+	for (;;) {
+		ret = cursor->c_get(cursor, &name, &val, DB_NEXT);
+		if (ret != 0)
+			break;
+		ret = __db_prdbt(&name, 1, NULL, stdout,
+		    __db_verify_callback, 0, NULL);
+		if (ret != 0) {
+			dbp->errx(dbp, NULL);
+			return (1);
+		}
+	}
+	if (ret != DB_NOTFOUND) {
+		dbp->err(dbp, ret, "DBcursor->get");
+		return (1);
+	}
+
+	if ((ret = cursor->c_close(cursor)) != 0) {
+		dbp->err(dbp, ret, "DBcursor->close");
+		return (1);
+	}
+	return (0);
+}
+
+/*
+ * dump --
+ *	Dump out the records for a DB.
+ *
+ * Uses bulk (DB_MULTIPLE_KEY) gets into a 1MB user-memory buffer,
+ * growing the buffer whenever a single item is larger than it, and
+ * writes each key/data pair plus the dump footer to stdout.
+ * Returns 0 on success, non-zero on failure (after reporting it).
+ */
+int
+dump(dbp, pflag, keyflag)
+	DB *dbp;
+	int pflag, keyflag;
+{
+	DBC *dbcp;
+	DBT key, data;
+	DBT keyret, dataret;
+	db_recno_t recno;
+	int is_recno, failed, ret;
+	void *newbuf, *pointer;
+
+	/*
+	 * Get a cursor and step through the database, printing out each
+	 * key/data pair.
+	 */
+	if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+		dbp->err(dbp, ret, "DB->cursor");
+		return (1);
+	}
+
+	failed = 0;
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	data.data = malloc(1024 * 1024);
+	if (data.data == NULL) {
+		dbp->err(dbp, ENOMEM, "bulk get buffer");
+		failed = 1;
+		goto err;
+	}
+	data.ulen = 1024 * 1024;
+	data.flags = DB_DBT_USERMEM;
+	/* Recno/queue databases return record numbers instead of keys. */
+	is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE);
+	keyflag = is_recno ? keyflag : 1;
+	if (is_recno) {
+		keyret.data = &recno;
+		keyret.size = sizeof(recno);
+	}
+
+retry:
+	while ((ret =
+	    dbcp->c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) {
+		DB_MULTIPLE_INIT(pointer, &data);
+		for (;;) {
+			if (is_recno)
+				DB_MULTIPLE_RECNO_NEXT(pointer, &data,
+				    recno, dataret.data, dataret.size);
+			else
+				DB_MULTIPLE_KEY_NEXT(pointer,
+				    &data, keyret.data,
+				    keyret.size, dataret.data, dataret.size);
+
+			if (dataret.data == NULL)
+				break;
+
+			if ((keyflag && (ret = __db_prdbt(&keyret,
+			    pflag, " ", stdout, __db_verify_callback,
+			    is_recno, NULL)) != 0) || (ret =
+			    __db_prdbt(&dataret, pflag, " ", stdout,
+			    __db_verify_callback, 0, NULL)) != 0) {
+				dbp->errx(dbp, NULL);
+				failed = 1;
+				goto err;
+			}
+		}
+	}
+	if (ret == ENOMEM) {
+		/*
+		 * A single item was too large for the buffer; data.size
+		 * holds the size needed.  Bug fix: grow through a temporary
+		 * pointer so the original allocation isn't leaked when
+		 * realloc fails (the old code overwrote data.data directly).
+		 */
+		if ((newbuf = realloc(data.data, data.size)) == NULL) {
+			dbp->err(dbp, ENOMEM, "bulk get buffer");
+			failed = 1;
+			goto err;
+		}
+		data.data = newbuf;
+		data.ulen = data.size;
+		goto retry;
+	}
+
+	if (ret != DB_NOTFOUND) {
+		dbp->err(dbp, ret, "DBcursor->get");
+		failed = 1;
+	}
+
+err:	if (data.data != NULL)
+		free(data.data);
+
+	if ((ret = dbcp->c_close(dbcp)) != 0) {
+		dbp->err(dbp, ret, "DBcursor->close");
+		failed = 1;
+	}
+
+	(void)__db_prfooter(stdout, __db_verify_callback);
+	return (failed);
+}
+
+/*
+ * usage --
+ *	Display the usage message on stderr and report failure.
+ */
+int
+usage()
+{
+	static const char *line1 = "usage: db_dump [-klNprRV]";
+	static const char *line2 =
+	    "[-d ahr] [-f output] [-h home] [-P password] [-s database] db_file";
+
+	(void)fprintf(stderr, "%s\n\t%s\n", line1, line2);
+	return (EXIT_FAILURE);
+}
+
+/*
+ * version_check --
+ *	Verify that the version of the DB library we are running against
+ *	matches the headers this utility was compiled with.  Returns 0 on
+ *	a match, EXIT_FAILURE (with a message on stderr) otherwise.
+ */
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/storage/bdb/db_dump185/db_dump185.c b/storage/bdb/db_dump185/db_dump185.c
new file mode 100644
index 00000000000..97164f34a9a
--- /dev/null
+++ b/storage/bdb/db_dump185/db_dump185.c
@@ -0,0 +1,355 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#ifndef lint
+static char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static char revid[] =
+ "$Id: db_dump185.c,v 11.17 2002/08/08 03:50:35 bostic Exp $";
+#endif
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db.h>
+
+/* Hash Table Information */
+typedef struct hashhdr185 { /* Disk resident portion */
+ int magic; /* Magic NO for hash tables */
+ int version; /* Version ID */
+ u_int32_t lorder; /* Byte Order */
+ int bsize; /* Bucket/Page Size */
+ int bshift; /* Bucket shift */
+ int dsize; /* Directory Size */
+ int ssize; /* Segment Size */
+ int sshift; /* Segment shift */
+ int ovfl_point; /* Where overflow pages are being
+ * allocated */
+ int last_freed; /* Last overflow page freed */
+ int max_bucket; /* ID of Maximum bucket in use */
+ int high_mask; /* Mask to modulo into entire table */
+ int low_mask; /* Mask to modulo into lower half of
+ * table */
+ int ffactor; /* Fill factor */
+ int nkeys; /* Number of keys in hash table */
+} HASHHDR185;
+typedef struct htab185 { /* Memory resident data structure */
+ HASHHDR185 hdr; /* Header */
+} HTAB185;
+
+/* Hash Table Information */
+typedef struct hashhdr186 { /* Disk resident portion */
+ int32_t magic; /* Magic NO for hash tables */
+ int32_t version; /* Version ID */
+ int32_t lorder; /* Byte Order */
+ int32_t bsize; /* Bucket/Page Size */
+ int32_t bshift; /* Bucket shift */
+ int32_t ovfl_point; /* Where overflow pages are being allocated */
+ int32_t last_freed; /* Last overflow page freed */
+ int32_t max_bucket; /* ID of Maximum bucket in use */
+ int32_t high_mask; /* Mask to modulo into entire table */
+ int32_t low_mask; /* Mask to modulo into lower half of table */
+ int32_t ffactor; /* Fill factor */
+ int32_t nkeys; /* Number of keys in hash table */
+ int32_t hdrpages; /* Size of table header */
+ int32_t h_charkey; /* value of hash(CHARKEY) */
+#define NCACHED 32 /* number of bit maps and spare points */
+ int32_t spares[NCACHED];/* spare pages for overflow */
+ /* address of overflow page bitmaps */
+ u_int16_t bitmaps[NCACHED];
+} HASHHDR186;
+typedef struct htab186 { /* Memory resident data structure */
+ void *unused[2];
+ HASHHDR186 hdr; /* Header */
+} HTAB186;
+
+typedef struct _epgno {
+ u_int32_t pgno; /* the page number */
+ u_int16_t index; /* the index on the page */
+} EPGNO;
+
+typedef struct _epg {
+ void *page; /* the (pinned) page */
+ u_int16_t index; /* the index on the page */
+} EPG;
+
+typedef struct _cursor {
+ EPGNO pg; /* B: Saved tree reference. */
+ DBT key; /* B: Saved key, or key.data == NULL. */
+ u_int32_t rcursor; /* R: recno cursor (1-based) */
+
+#define CURS_ACQUIRE 0x01 /* B: Cursor needs to be reacquired. */
+#define CURS_AFTER 0x02 /* B: Unreturned cursor after key. */
+#define CURS_BEFORE 0x04 /* B: Unreturned cursor before key. */
+#define CURS_INIT 0x08 /* RB: Cursor initialized. */
+ u_int8_t flags;
+} CURSOR;
+
+/* The in-memory btree/recno data structure. */
+typedef struct _btree {
+ void *bt_mp; /* memory pool cookie */
+
+ void *bt_dbp; /* pointer to enclosing DB */
+
+ EPG bt_cur; /* current (pinned) page */
+ void *bt_pinned; /* page pinned across calls */
+
+ CURSOR bt_cursor; /* cursor */
+
+ EPGNO bt_stack[50]; /* stack of parent pages */
+ EPGNO *bt_sp; /* current stack pointer */
+
+ DBT bt_rkey; /* returned key */
+ DBT bt_rdata; /* returned data */
+
+ int bt_fd; /* tree file descriptor */
+
+ u_int32_t bt_free; /* next free page */
+ u_int32_t bt_psize; /* page size */
+ u_int16_t bt_ovflsize; /* cut-off for key/data overflow */
+ int bt_lorder; /* byte order */
+ /* sorted order */
+ enum { NOT, BACK, FORWARD } bt_order;
+ EPGNO bt_last; /* last insert */
+
+ /* B: key comparison function */
+ int (*bt_cmp) __P((DBT *, DBT *));
+ /* B: prefix comparison function */
+ size_t (*bt_pfx) __P((DBT *, DBT *));
+ /* R: recno input function */
+ int (*bt_irec) __P((struct _btree *, u_int32_t));
+
+ FILE *bt_rfp; /* R: record FILE pointer */
+ int bt_rfd; /* R: record file descriptor */
+
+ void *bt_cmap; /* R: current point in mapped space */
+ void *bt_smap; /* R: start of mapped space */
+ void *bt_emap; /* R: end of mapped space */
+ size_t bt_msize; /* R: size of mapped region. */
+
+ u_int32_t bt_nrecs; /* R: number of records */
+ size_t bt_reclen; /* R: fixed record length */
+ u_char bt_bval; /* R: delimiting byte/pad character */
+
+/*
+ * NB:
+ * B_NODUPS and R_RECNO are stored on disk, and may not be changed.
+ */
+#define B_INMEM 0x00001 /* in-memory tree */
+#define B_METADIRTY 0x00002 /* need to write metadata */
+#define B_MODIFIED 0x00004 /* tree modified */
+#define B_NEEDSWAP 0x00008 /* if byte order requires swapping */
+#define B_RDONLY 0x00010 /* read-only tree */
+
+#define B_NODUPS 0x00020 /* no duplicate keys permitted */
+#define R_RECNO 0x00080 /* record oriented tree */
+
+#define R_CLOSEFP 0x00040 /* opened a file pointer */
+#define R_EOF 0x00100 /* end of input file reached. */
+#define R_FIXLEN 0x00200 /* fixed length records */
+#define R_MEMMAPPED 0x00400 /* memory mapped file. */
+#define R_INMEM 0x00800 /* in-memory file */
+#define R_MODIFIED 0x01000 /* modified file */
+#define R_RDONLY 0x02000 /* read-only file */
+
+#define B_DB_LOCK 0x04000 /* DB_LOCK specified. */
+#define B_DB_SHMEM 0x08000 /* DB_SHMEM specified. */
+#define B_DB_TXN 0x10000 /* DB_TXN specified. */
+ u_int32_t flags;
+} BTREE;
+
+void db_btree __P((DB *, int));
+void db_hash __P((DB *, int));
+void dbt_dump __P((DBT *));
+void dbt_print __P((DBT *));
+int main __P((int, char *[]));
+int usage __P((void));
+
+/*
+ * main --
+ *	db_dump185: dump a DB 1.85/1.86 database in the standard dump
+ *	format.  Tries to open the file as a btree first, then as hash.
+ */
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB *dbp;
+ DBT key, data;
+ int ch, pflag, rval;
+
+ pflag = 0;
+ while ((ch = getopt(argc, argv, "f:p")) != EOF)
+ switch (ch) {
+ case 'f':
+ /* Redirect the dump output to a file. */
+ if (freopen(optarg, "w", stdout) == NULL) {
+ fprintf(stderr, "db_dump185: %s: %s\n",
+ optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'p':
+ pflag = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (usage());
+
+ /* Try btree first, then hash; emit the matching header. */
+ if ((dbp = dbopen(argv[0], O_RDONLY, 0, DB_BTREE, NULL)) == NULL) {
+ if ((dbp =
+ dbopen(argv[0], O_RDONLY, 0, DB_HASH, NULL)) == NULL) {
+ fprintf(stderr,
+ "db_dump185: %s: %s\n", argv[0], strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ db_hash(dbp, pflag);
+ } else
+ db_btree(dbp, pflag);
+
+ /*
+ * !!!
+ * DB 1.85 DBTs are a subset of DB 2.0 DBTs, so we just use the
+ * new dump/print routines.
+ */
+ if (pflag)
+ while (!(rval = dbp->seq(dbp, &key, &data, R_NEXT))) {
+ dbt_print(&key);
+ dbt_print(&data);
+ }
+ else
+ while (!(rval = dbp->seq(dbp, &key, &data, R_NEXT))) {
+ dbt_dump(&key);
+ dbt_dump(&data);
+ }
+
+ if (rval == -1) {
+ fprintf(stderr, "db_dump185: seq: %s\n", strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ return (EXIT_SUCCESS);
+}
+
+/*
+ * db_hash --
+ * Dump out hash header information.
+ *
+ * Writes the dump-format header lines (format, type, h_ffactor,
+ * db_lorder if non-zero, db_pagesize) for a 1.85 or 1.86 hash database
+ * to stdout, terminated by HEADER=END.  The on-disk header layout
+ * differs between the two versions, so the version field selects which
+ * struct overlay to read through.
+ */
+void
+db_hash(dbp, pflag)
+ DB *dbp;
+ int pflag;
+{
+ HTAB185 *hash185p;
+ HTAB186 *hash186p;
+
+ printf("format=%s\n", pflag ? "print" : "bytevalue");
+ printf("type=hash\n");
+
+ /* DB 1.85 was version 2, DB 1.86 was version 3. */
+ /* Both header layouts begin with magic/version, so the 1.85 overlay
+  * is safe for reading the version field alone. */
+ hash185p = dbp->internal;
+ if (hash185p->hdr.version > 2) {
+ hash186p = dbp->internal;
+ printf("h_ffactor=%lu\n", (u_long)hash186p->hdr.ffactor);
+ if (hash186p->hdr.lorder != 0)
+ printf("db_lorder=%lu\n", (u_long)hash186p->hdr.lorder);
+ printf("db_pagesize=%lu\n", (u_long)hash186p->hdr.bsize);
+ } else {
+ printf("h_ffactor=%lu\n", (u_long)hash185p->hdr.ffactor);
+ if (hash185p->hdr.lorder != 0)
+ printf("db_lorder=%lu\n", (u_long)hash185p->hdr.lorder);
+ printf("db_pagesize=%lu\n", (u_long)hash185p->hdr.bsize);
+ }
+ printf("HEADER=END\n");
+}
+
+/*
+ * db_btree --
+ * Dump out btree header information.
+ *
+ * Writes the dump-format header lines (format, type, db_lorder if
+ * non-zero, db_pagesize, duplicates flag) for a 1.85 btree database to
+ * stdout, terminated by HEADER=END.
+ */
+void
+db_btree(dbp, pflag)
+ DB *dbp;
+ int pflag;
+{
+ BTREE *btp;
+
+ btp = dbp->internal;
+
+ printf("format=%s\n", pflag ? "print" : "bytevalue");
+ printf("type=btree\n");
+#ifdef NOT_AVAILABLE_IN_185
+ printf("bt_minkey=%lu\n", (u_long)XXX);
+ printf("bt_maxkey=%lu\n", (u_long)XXX);
+#endif
+ if (btp->bt_lorder != 0)
+ printf("db_lorder=%lu\n", (u_long)btp->bt_lorder);
+ printf("db_pagesize=%lu\n", (u_long)btp->bt_psize);
+ /* B_NODUPS clear means the tree permits duplicate keys. */
+ if (!(btp->flags & B_NODUPS))
+ printf("duplicates=1\n");
+ printf("HEADER=END\n");
+}
+
+static char hex[] = "0123456789abcdef";
+
+/*
+ * dbt_dump --
+ *	Write out a key or data item using byte values.
+ *
+ * Each byte is emitted as two lowercase hex digits; the item is
+ * terminated by a newline.
+ */
+void
+dbt_dump(dbtp)
+	DBT *dbtp;
+{
+	u_int8_t *p, *end;
+
+	p = dbtp->data;
+	for (end = p + dbtp->size; p < end; ++p)
+		(void)printf("%c%c",
+		    hex[(*p & 0xf0) >> 4], hex[*p & 0x0f]);
+	printf("\n");
+}
+
+/*
+ * dbt_print --
+ *	Write out a key or data item using printable characters.
+ *
+ * Printable bytes are emitted literally (a backslash is doubled);
+ * every other byte becomes a backslash followed by two hex digits.
+ * The item is terminated by a newline.
+ */
+void
+dbt_print(dbtp)
+	DBT *dbtp;
+{
+	u_int8_t *p, *end;
+	int c;
+
+	p = dbtp->data;
+	for (end = p + dbtp->size; p < end; ++p) {
+		c = *p;
+		if (!isprint(c)) {
+			(void)printf("\\%c%c",
+			    hex[(c & 0xf0) >> 4], hex[c & 0x0f]);
+			continue;
+		}
+		if (c == '\\')
+			(void)printf("\\");
+		(void)printf("%c", c);
+	}
+	printf("\n");
+}
+
+/*
+ * usage --
+ *	Display the usage message on stderr and report failure.
+ */
+int
+usage()
+{
+	static const char *synopsis =
+	    "usage: db_dump185 [-p] [-f file] db_file\n";
+
+	(void)fputs(synopsis, stderr);
+	return (EXIT_FAILURE);
+}
diff --git a/storage/bdb/db_load/db_load.c b/storage/bdb/db_load/db_load.c
new file mode 100644
index 00000000000..d27fca04ec0
--- /dev/null
+++ b/storage/bdb/db_load/db_load.c
@@ -0,0 +1,1232 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_load.c,v 11.71 2002/08/08 03:50:36 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+typedef struct { /* XXX: Globals. */
+ const char *progname; /* Program name. */
+ char *hdrbuf; /* Input file header. */
+ u_long lineno; /* Input file line number. */
+ u_long origline; /* Original file line number. */
+ int endodata; /* Reached the end of a database. */
+ int endofile; /* Reached the end of the input. */
+ int version; /* Input version. */
+ char *home; /* Env home. */
+ char *passwd; /* Env passwd. */
+ int private; /* Private env. */
+ u_int32_t cache; /* Env cache size. */
+} LDG;
+
+void badend __P((DB_ENV *));
+void badnum __P((DB_ENV *));
+int configure __P((DB_ENV *, DB *, char **, char **, int *));
+int convprintable __P((DB_ENV *, char *, char **));
+int db_init __P((DB_ENV *, char *, u_int32_t, int *));
+int dbt_rdump __P((DB_ENV *, DBT *));
+int dbt_rprint __P((DB_ENV *, DBT *));
+int dbt_rrecno __P((DB_ENV *, DBT *, int));
+int digitize __P((DB_ENV *, int, int *));
+int env_create __P((DB_ENV **, LDG *));
+int load __P((DB_ENV *, char *, DBTYPE, char **, u_int, LDG *, int *));
+int main __P((int, char *[]));
+int rheader __P((DB_ENV *, DB *, DBTYPE *, char **, int *, int *));
+int usage __P((void));
+int version_check __P((const char *));
+
+/* Access a field of the LDG globals stashed in dbenv->app_private. */
+#define G(f) ((LDG *)dbenv->app_private)->f
+
+ /* Flags to the load function. */
+#define LDF_NOHEADER 0x01 /* No dump header. */
+#define LDF_NOOVERWRITE 0x02 /* Don't overwrite existing rows. */
+#define LDF_PASSWORD 0x04 /* Encrypt created databases. */
+
+/*
+ * main --
+ * db_load entry point: parse the command line, create/join a DB
+ * environment, and load one database per dump header from stdin.
+ */
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DBTYPE dbtype;
+ DB_ENV *dbenv;
+ LDG ldg;
+ u_int32_t ldf;
+ int ch, existed, exitval, ret;
+ char **clist, **clp;
+
+ ldg.progname = "db_load";
+ ldg.lineno = 0;
+ ldg.endodata = ldg.endofile = 0;
+ ldg.version = 1;
+ ldg.cache = MEGABYTE;
+ ldg.hdrbuf = NULL;
+ ldg.home = NULL;
+ ldg.passwd = NULL;
+
+ if ((ret = version_check(ldg.progname)) != 0)
+ return (ret);
+
+ ldf = 0;
+ exitval = 0;
+ dbtype = DB_UNKNOWN;
+
+ /* Allocate enough room for configuration arguments. */
+ if ((clp = clist = (char **)calloc(argc + 1, sizeof(char *))) == NULL) {
+ fprintf(stderr, "%s: %s\n", ldg.progname, strerror(ENOMEM));
+ return (EXIT_FAILURE);
+ }
+
+ while ((ch = getopt(argc, argv, "c:f:h:nP:Tt:V")) != EOF)
+ switch (ch) {
+ case 'c':
+ *clp++ = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ ldg.progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'h':
+ ldg.home = optarg;
+ break;
+ case 'n':
+ ldf |= LDF_NOOVERWRITE;
+ break;
+ case 'P':
+ /* Copy the password, then scrub it from the argv. */
+ ldg.passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (ldg.passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ ldg.progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ ldf |= LDF_PASSWORD;
+ break;
+ case 'T':
+ ldf |= LDF_NOHEADER;
+ break;
+ case 't':
+ if (strcmp(optarg, "btree") == 0) {
+ dbtype = DB_BTREE;
+ break;
+ }
+ if (strcmp(optarg, "hash") == 0) {
+ dbtype = DB_HASH;
+ break;
+ }
+ if (strcmp(optarg, "recno") == 0) {
+ dbtype = DB_RECNO;
+ break;
+ }
+ if (strcmp(optarg, "queue") == 0) {
+ dbtype = DB_QUEUE;
+ break;
+ }
+ return (usage());
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object initialized for error reporting, and
+ * then open it.
+ *
+ * NOTE(review): if db_env_create() fails inside env_create(), dbenv
+ * is never assigned, yet the shutdown path below still calls
+ * dbenv->close() -- confirm env_create() always sets *dbenvp.
+ */
+ if (env_create(&dbenv, &ldg) != 0)
+ goto shutdown;
+
+ /* One load() call per database in the dump. */
+ while (!ldg.endofile)
+ if (load(dbenv, argv[0], dbtype, clist, ldf,
+ &ldg, &existed) != 0)
+ goto shutdown;
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", ldg.progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+ free(clist);
+
+ /*
+ * Return 0 on success, 1 if keys existed already, and 2 on failure.
+ * (existed is set by the first load() call; the failure path above
+ * never reads it.)
+ *
+ * Technically, this is wrong, because exit of anything other than
+ * 0 is implementation-defined by the ANSI C standard. I don't see
+ * any good solutions that don't involve API changes.
+ */
+ return (exitval == 0 ? (existed == 0 ? 0 : 1) : 2);
+}
+
+/*
+ * load --
+ * Load a single database (or subdatabase) from the dump on stdin.
+ *
+ * name: file to create/open; argtype: type forced by -t (or
+ * DB_UNKNOWN); clist: command-line name=value overrides; flags:
+ * LDF_* bits; existedp: set to 1 if a key already existed (-n).
+ * Returns 0 on success, 1 on failure.
+ */
+int
+load(dbenv, name, argtype, clist, flags, ldg, existedp)
+ DB_ENV *dbenv;
+ char *name, **clist;
+ DBTYPE argtype;
+ u_int flags;
+ LDG *ldg;
+ int *existedp;
+{
+ DB *dbp;
+ DBT key, rkey, data, *readp, *writep;
+ DBTYPE dbtype;
+ DB_TXN *ctxn, *txn;
+ db_recno_t recno, datarecno;
+ u_int32_t put_flags;
+ int ascii_recno, checkprint, hexkeys, keyflag, keys, resize, ret, rval;
+ char *subdb;
+
+ *existedp = 0;
+
+ put_flags = LF_ISSET(LDF_NOOVERWRITE) ? DB_NOOVERWRITE : 0;
+ G(endodata) = 0;
+
+ subdb = NULL;
+ ctxn = txn = NULL;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ memset(&rkey, 0, sizeof(DBT));
+
+retry_db:
+ /* Create the DB object. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+
+ dbtype = DB_UNKNOWN;
+ keys = -1;
+ hexkeys = -1;
+ keyflag = -1;
+ /* Read the header -- if there's no header, we expect flat text. */
+ if (LF_ISSET(LDF_NOHEADER)) {
+ checkprint = 1;
+ dbtype = argtype;
+ } else {
+ if (rheader(dbenv,
+ dbp, &dbtype, &subdb, &checkprint, &keys) != 0)
+ goto err;
+ if (G(endofile))
+ goto done;
+ }
+
+ /*
+ * Apply command-line configuration changes. (We apply command-line
+ * configuration changes to all databases that are loaded, e.g., all
+ * subdatabases.)
+ */
+ if (configure(dbenv, dbp, clist, &subdb, &keyflag))
+ goto err;
+
+ /* Reconcile the header's "keys" setting with the -c keys override. */
+ if (keys != 1) {
+ if (keyflag == 1) {
+ dbp->err(dbp, EINVAL, "No keys specified in file");
+ goto err;
+ }
+ }
+ else if (keyflag == 0) {
+ dbp->err(dbp, EINVAL, "Keys specified in file");
+ goto err;
+ }
+ else
+ keyflag = 1;
+
+ if (dbtype == DB_BTREE || dbtype == DB_HASH) {
+ if (keyflag == 0)
+ dbp->err(dbp,
+ EINVAL, "Btree and Hash must specify keys");
+ else
+ keyflag = 1;
+ }
+
+ if (argtype != DB_UNKNOWN) {
+
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE)
+ if (keyflag != 1 && argtype != DB_RECNO &&
+ argtype != DB_QUEUE) {
+ dbenv->errx(dbenv,
+ "improper database type conversion specified");
+ goto err;
+ }
+ dbtype = argtype;
+ }
+
+ if (dbtype == DB_UNKNOWN) {
+ dbenv->errx(dbenv, "no database type specified");
+ goto err;
+ }
+
+ if (keyflag == -1)
+ keyflag = 0;
+
+ /*
+ * Recno keys have only been printed in hexadecimal starting
+ * with db_dump format version 3 (DB 3.2).
+ *
+ * !!!
+ * Note that version is set in rheader(), which must be called before
+ * this assignment.
+ */
+ hexkeys = (G(version) >= 3 && keyflag == 1 && checkprint == 0);
+
+ if (keyflag == 1 && (dbtype == DB_RECNO || dbtype == DB_QUEUE))
+ ascii_recno = 1;
+ else
+ ascii_recno = 0;
+
+ /* If configured with a password, encrypt databases we create. */
+ if (LF_ISSET(LDF_PASSWORD) &&
+ (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) {
+ dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT");
+ goto err;
+ }
+
+ /* Open the DB file. */
+ if ((ret = dbp->open(dbp, NULL, name, subdb, dbtype,
+ DB_CREATE | (TXN_ON(dbenv) ? DB_AUTO_COMMIT : 0),
+ __db_omode("rwrwrw"))) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", name);
+ goto err;
+ }
+ /* In a private env, grow the cache and retry if it's too small. */
+ if (ldg->private != 0) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &ldg->cache, &resize)) != 0)
+ goto err;
+ if (resize) {
+ dbp->close(dbp, 0);
+ dbp = NULL;
+ dbenv->close(dbenv, 0);
+ if ((ret = env_create(&dbenv, ldg)) != 0)
+ goto err;
+ goto retry_db;
+ }
+ }
+
+ /* Initialize the key/data pair. */
+ readp = &key;
+ writep = &key;
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE) {
+ key.size = sizeof(recno);
+ if (keyflag) {
+ key.data = &datarecno;
+ if (checkprint) {
+ readp = &rkey;
+ goto key_data;
+ }
+ }
+ else
+ key.data = &recno;
+ } else
+key_data: if ((readp->data =
+ (void *)malloc(readp->ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+ if ((data.data = (void *)malloc(data.ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+
+ if (TXN_ON(dbenv) &&
+ (ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+
+ /* Get each key/data pair and add them to the database. */
+ for (recno = 1; !__db_util_interrupted(); ++recno) {
+ if (!keyflag)
+ if (checkprint) {
+ if (dbt_rprint(dbenv, &data))
+ goto err;
+ } else {
+ if (dbt_rdump(dbenv, &data))
+ goto err;
+ }
+ else
+ if (checkprint) {
+ if (dbt_rprint(dbenv, readp))
+ goto err;
+ if (!G(endodata) && dbt_rprint(dbenv, &data))
+ goto fmt;
+ } else {
+ if (ascii_recno) {
+ if (dbt_rrecno(dbenv, readp, hexkeys))
+ goto err;
+ } else
+ if (dbt_rdump(dbenv, readp))
+ goto err;
+ if (!G(endodata) && dbt_rdump(dbenv, &data)) {
+fmt: dbenv->errx(dbenv,
+ "odd number of key/data pairs");
+ goto err;
+ }
+ }
+ if (G(endodata))
+ break;
+ /* Printable recno keys arrive as text; convert to binary. */
+ if (readp != writep) {
+ /*
+ * NOTE(review): "%ud" is "%u" followed by a literal
+ * 'd'; the conversion still returns 1 on a plain
+ * digit string, but "%u" looks like the intended
+ * format -- confirm.
+ */
+ if (sscanf(readp->data, "%ud", &datarecno) != 1)
+ dbenv->errx(dbenv,
+ "%s: non-integer key at line: %d",
+ name, !keyflag ? recno : recno * 2 - 1);
+ if (datarecno == 0)
+ dbenv->errx(dbenv, "%s: zero key at line: %d",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+ }
+retry: if (txn != NULL)
+ if ((ret = dbenv->txn_begin(dbenv, txn, &ctxn, 0)) != 0)
+ goto err;
+ switch (ret = dbp->put(dbp, ctxn, writep, &data, put_flags)) {
+ case 0:
+ if (ctxn != NULL) {
+ if ((ret =
+ ctxn->commit(ctxn, DB_TXN_NOSYNC)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ break;
+ case DB_KEYEXIST:
+ *existedp = 1;
+ dbenv->errx(dbenv,
+ "%s: line %d: key already exists, not loaded:",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+
+ (void)__db_prdbt(&key, checkprint, 0, stderr,
+ __db_verify_callback, 0, NULL);
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* If we have a child txn, retry--else it's fatal. */
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ goto retry;
+ }
+ /* FALLTHROUGH */
+ default:
+ dbenv->err(dbenv, ret, NULL);
+ if (ctxn != NULL) {
+ (void)ctxn->abort(ctxn);
+ ctxn = NULL;
+ }
+ goto err;
+ }
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ }
+done: rval = 0;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL && (ret = txn->commit(txn, 0)) != 0) {
+ txn = NULL;
+ goto err;
+ }
+
+ if (0) {
+err: rval = 1;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL)
+ (void)txn->abort(txn);
+ }
+
+ /* Close the database. */
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
+ rval = 1;
+ }
+
+ if (G(hdrbuf) != NULL)
+ free(G(hdrbuf));
+ G(hdrbuf) = NULL;
+ /* Free allocated memory. */
+ if (subdb != NULL)
+ free(subdb);
+ /* Recno/queue keys point at stack variables; don't free those. */
+ if (dbtype != DB_RECNO && dbtype != DB_QUEUE)
+ free(key.data);
+ if (rkey.data != NULL)
+ free(rkey.data);
+ free(data.data);
+
+ return (rval);
+}
+
+/*
+ * db_init --
+ * Initialize the environment: join a live one if possible, else
+ * create a private, in-memory one. Sets *is_private accordingly.
+ * Returns 0 on success, 1 on failure.
+ */
+int
+db_init(dbenv, home, cache, is_private)
+ DB_ENV *dbenv;
+ char *home;
+ u_int32_t cache;
+ int *is_private;
+{
+ u_int32_t flags;
+ int ret;
+
+ *is_private = 0;
+ /* We may be loading into a live environment. Try and join. */
+ flags = DB_USE_ENVIRON |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+ if (dbenv->open(dbenv, home, flags, 0) == 0)
+ return (0);
+
+ /*
+ * We're trying to load a database.
+ *
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ LF_CLR(DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN);
+ LF_SET(DB_CREATE | DB_PRIVATE);
+ *is_private = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ return (1);
+ }
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ return (1);
+}
+
+/*
+ * FLAG/NUMBER/STRING --
+ * Keyword-matching helpers shared by configure() and rheader():
+ * each compares a name=value pair against a keyword and, on match,
+ * applies the value to the DB handle and "continue"s the enclosing
+ * loop. FLAG expects a 0/1 value, NUMBER a long, STRING a single
+ * character (value[0]).
+ */
+#define FLAG(name, value, keyword, flag) \
+ if (strcmp(name, keyword) == 0) { \
+ switch (*value) { \
+ case '1': \
+ if ((ret = dbp->set_flags(dbp, flag)) != 0) { \
+ dbp->err(dbp, ret, "%s: set_flags: %s", \
+ G(progname), name); \
+ return (1); \
+ } \
+ break; \
+ case '0': \
+ break; \
+ default: \
+ badnum(dbenv); \
+ return (1); \
+ } \
+ continue; \
+ }
+#define NUMBER(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if (__db_getlong(dbp, \
+ NULL, value, 1, LONG_MAX, &val) != 0) \
+ return (1); \
+ if ((ret = dbp->func(dbp, val)) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+#define STRING(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if ((ret = dbp->func(dbp, value[0])) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+
+/*
+ * configure --
+ * Handle command-line configuration options (-c name=value pairs).
+ *
+ * clp is the NULL-terminated list from main(); subdbp receives a
+ * replacement subdatabase name; keysp a keys=0/1 override.
+ * Returns 0 on success, 1 on error.
+ */
+int
+configure(dbenv, dbp, clp, subdbp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ char **clp, **subdbp;
+ int *keysp;
+{
+ long val;
+ int ret, savech;
+ char *name, *value;
+
+ /*
+ * Each pair is split in place by overwriting '=' with '\0'; the
+ * loop increment (*--value = savech) restores the '=' before
+ * advancing so the caller's argv is left intact.
+ */
+ for (; (name = *clp) != NULL; *--value = savech, ++clp) {
+ if ((value = strchr(name, '=')) == NULL) {
+ dbp->errx(dbp,
+ "command-line configuration uses name=value format");
+ return (1);
+ }
+ savech = *value;
+ *value++ = '\0';
+
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if (*subdbp != NULL)
+ free(*subdbp);
+ if ((*subdbp = strdup(value)) == NULL) {
+ dbp->err(dbp, ENOMEM, NULL);
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ badnum(dbenv);
+ return (1);
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown command-line configuration keyword \"%s\"", name);
+ return (1);
+ }
+ return (0);
+
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ return (1);
+}
+
+/*
+ * rheader --
+ * Read the header message.
+ *
+ * Parses name=value lines up to "HEADER=END", filling in the type,
+ * subdatabase name, format and keys settings, and applying tuning
+ * keywords directly to dbp. The raw header text is saved in
+ * G(hdrbuf) so it can be re-parsed (hdr == 1 below) when the same
+ * header applies to a retried load after a cache resize.
+ * Returns 0 on success, 1 on error.
+ */
+int
+rheader(dbenv, dbp, dbtypep, subdbp, checkprintp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBTYPE *dbtypep;
+ char **subdbp;
+ int *checkprintp, *keysp;
+{
+ long val;
+ int ch, first, hdr, linelen, buflen, ret, start;
+ char *buf, *name, *p, *value;
+
+ *dbtypep = DB_UNKNOWN;
+ *checkprintp = 0;
+ name = p = NULL;
+
+ /*
+ * We start with a smallish buffer; most headers are small.
+ * We may need to realloc it for a large subdatabase name.
+ */
+ buflen = 4096;
+ if (G(hdrbuf) == NULL) {
+ hdr = 0;
+ if ((buf = (char *)malloc(buflen)) == NULL) {
+memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen);
+ return (1);
+ }
+ G(hdrbuf) = buf;
+ G(origline) = G(lineno);
+ } else {
+ /* Re-parse the previously saved header text. */
+ hdr = 1;
+ buf = G(hdrbuf);
+ G(lineno) = G(origline);
+ }
+
+ start = 0;
+ for (first = 1;; first = 0) {
+ ++G(lineno);
+
+ /* Read a line, which may be of arbitrary length, into buf. */
+ linelen = 0;
+ buf = &G(hdrbuf)[start];
+ if (hdr == 0) {
+ for (;;) {
+ if ((ch = getchar()) == EOF) {
+ if (!first || ferror(stdin))
+ goto badfmt;
+ G(endofile) = 1;
+ break;
+ }
+
+ if (ch == '\n')
+ break;
+
+ buf[linelen++] = ch;
+
+ /* If the buffer is too small, double it. */
+ if (linelen + start == buflen) {
+ G(hdrbuf) = (char *)realloc(G(hdrbuf),
+ buflen *= 2);
+ if (G(hdrbuf) == NULL)
+ goto memerr;
+ buf = &G(hdrbuf)[start];
+ }
+ }
+ if (G(endofile) == 1)
+ break;
+ buf[linelen++] = '\0';
+ } else
+ linelen = strlen(buf) + 1;
+ start += linelen;
+
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ name = NULL;
+ }
+ /* If we don't see the expected information, it's an error. */
+ if ((name = strdup(buf)) == NULL)
+ goto memerr;
+ if ((p = strchr(name, '=')) == NULL)
+ goto badfmt;
+ *p++ = '\0';
+
+ value = p--;
+
+ if (name[0] == '\0' || value[0] == '\0')
+ goto badfmt;
+
+ if (strcmp(name, "HEADER") == 0)
+ break;
+ if (strcmp(name, "VERSION") == 0) {
+ /*
+ * Version 1 didn't have a "VERSION" header line. We
+ * only support versions 1, 2, and 3 of the dump format.
+ */
+ G(version) = atoi(value);
+
+ if (G(version) > 3) {
+ dbp->errx(dbp,
+ "line %lu: VERSION %d is unsupported",
+ G(lineno), G(version));
+ goto err;
+ }
+ continue;
+ }
+ if (strcmp(name, "format") == 0) {
+ if (strcmp(value, "bytevalue") == 0) {
+ *checkprintp = 0;
+ continue;
+ }
+ if (strcmp(value, "print") == 0) {
+ *checkprintp = 1;
+ continue;
+ }
+ goto badfmt;
+ }
+ if (strcmp(name, "type") == 0) {
+ if (strcmp(value, "btree") == 0) {
+ *dbtypep = DB_BTREE;
+ continue;
+ }
+ if (strcmp(value, "hash") == 0) {
+ *dbtypep = DB_HASH;
+ continue;
+ }
+ if (strcmp(value, "recno") == 0) {
+ *dbtypep = DB_RECNO;
+ continue;
+ }
+ if (strcmp(value, "queue") == 0) {
+ *dbtypep = DB_QUEUE;
+ continue;
+ }
+ dbp->errx(dbp, "line %lu: unknown type", G(lineno));
+ goto err;
+ }
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if ((ret = convprintable(dbenv, value, subdbp)) != 0) {
+ dbp->err(dbp, ret, "error reading db name");
+ goto err;
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ badnum(dbenv);
+ goto err;
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ NUMBER(name, value, "extentsize", set_q_extentsize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown input-file header configuration keyword \"%s\"",
+ name);
+ goto err;
+ }
+ ret = 0;
+ if (0) {
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ ret = 1;
+ }
+ if (0)
+err: ret = 1;
+ if (0) {
+badfmt:
+ dbp->errx(dbp, "line %lu: unexpected format", G(lineno));
+ ret = 1;
+ }
+ /* Restore the '=' in the duplicated line before freeing it. */
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ }
+ return (ret);
+}
+
+/*
+ * convprintable --
+ * Convert a printable-encoded string into a newly allocated string.
+ *
+ * In an ideal world, this would probably share code with dbt_rprint, but
+ * that's set up to read character-by-character (to avoid large memory
+ * allocations that aren't likely to be a problem here), and this has fewer
+ * special cases to deal with.
+ *
+ * Note that despite the printable encoding, the char * interface to this
+ * function (which is, not coincidentally, also used for database naming)
+ * means that outstr cannot contain any nuls.
+ *
+ * Returns 0 on success, ENOMEM or EINVAL on failure; on success
+ * *outstrp owns the allocation (caller frees).
+ */
+int
+convprintable(dbenv, instr, outstrp)
+ DB_ENV *dbenv;
+ char *instr, **outstrp;
+{
+ char c, *outstr;
+ int e1, e2;
+
+ /*
+ * Just malloc a string big enough for the whole input string;
+ * the output string will be smaller (or of equal length). Allocate
+ * one extra byte so the nul terminator fits even when the input
+ * contains no escapes and the output is exactly as long as the
+ * input -- the previous strlen(instr) allocation overflowed by one
+ * byte in that case.
+ */
+ if ((outstr = (char *)malloc(strlen(instr) + 1)) == NULL)
+ return (ENOMEM);
+
+ *outstrp = outstr;
+
+ e1 = e2 = 0;
+ for ( ; *instr != '\0'; instr++)
+ if (*instr == '\\') {
+ /* "\\" encodes a literal backslash. */
+ if (*++instr == '\\') {
+ *outstr++ = '\\';
+ continue;
+ }
+ /* Otherwise a backslash introduces two hex digits. */
+ c = digitize(dbenv, *instr, &e1) << 4;
+ c |= digitize(dbenv, *++instr, &e2);
+ if (e1 || e2) {
+ badend(dbenv);
+ return (EINVAL);
+ }
+
+ *outstr++ = c;
+ } else
+ *outstr++ = *instr;
+
+ *outstr = '\0';
+
+ return (0);
+}
+
+/*
+ * dbt_rprint --
+ * Read a printable line into a DBT structure.
+ *
+ * Decodes the "print" dump format: a leading ' ' marks a data line
+ * (version > 1), backslash escapes encode hex byte pairs, and a
+ * bare EOF or "DATA=END" sets the end-of-data/end-of-file globals.
+ * Returns 0 on success, 1 on format error.
+ */
+int
+dbt_rprint(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, escape, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = escape = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ /* A non-blank first column must be DATA=END. */
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if (escape) {
+ /* "\\" is a literal backslash; "\XY" is hex. */
+ if (c1 != '\\') {
+ if ((c2 = getchar()) == EOF) {
+ badend(dbenv);
+ return (1);
+ }
+ c1 = digitize(dbenv,
+ c1, &e) << 4 | digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ escape = 0;
+ } else
+ if (c1 == '\\') {
+ escape = 1;
+ continue;
+ }
+ /*
+ * Grow with slack before the buffer fills.
+ * NOTE(review): realloc overwrites the only pointer, leaking
+ * the old block if it fails -- tolerated here since failure
+ * aborts the load, but worth confirming.
+ */
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = c1;
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rdump --
+ * Read a byte dump line into a DBT structure.
+ *
+ * Decodes the "bytevalue" dump format: pairs of hex digits per
+ * byte, a leading ' ' marking data lines (version > 1), and
+ * EOF/"DATA=END" setting the end-of-data/end-of-file globals.
+ * Returns 0 on success, 1 on format error.
+ */
+int
+dbt_rdump(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ /* A non-blank first column must be DATA=END. */
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ /* Hex digits always come in pairs. */
+ if ((c2 = getchar()) == EOF) {
+ badend(dbenv);
+ return (1);
+ }
+ /* Grow with slack before the buffer fills. */
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = digitize(dbenv, c1, &e) << 4 | digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rrecno --
+ * Read a record number dump line into a DBT structure.
+ *
+ * The line is " <number>" (hex-encoded digits when ishex is set,
+ * i.e. dump format version >= 3); the decoded record number is
+ * stored through dbtp->data, which must point at a db_recno_t.
+ * Returns 0 on success, 1 on format error.
+ */
+int
+dbt_rrecno(dbenv, dbtp, ishex)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ int ishex;
+{
+ char buf[32], *p, *q;
+
+ ++G(lineno);
+
+ if (fgets(buf, sizeof(buf), stdin) == NULL) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+
+ if (strcmp(buf, "DATA=END\n") == 0) {
+ G(endodata) = 1;
+ return (0);
+ }
+
+ if (buf[0] != ' ')
+ goto bad;
+
+ /*
+ * If we're expecting a hex key, do an in-place conversion
+ * of hex to straight ASCII before calling __db_getulong().
+ */
+ if (ishex) {
+ for (p = q = buf + 1; *q != '\0' && *q != '\n';) {
+ /*
+ * 0-9 in hex are 0x30-0x39, so this is easy.
+ * We should alternate between 3's and [0-9], and
+ * if the [0-9] are something unexpected,
+ * __db_getulong will fail, so we only need to catch
+ * end-of-string conditions.
+ */
+ if (*q++ != '3')
+ goto bad;
+ if (*q == '\n' || *q == '\0')
+ goto bad;
+ *p++ = *q++;
+ }
+ *p = '\0';
+ }
+
+ if (__db_getulong(NULL,
+ G(progname), buf + 1, 0, 0, (u_long *)dbtp->data)) {
+bad: badend(dbenv);
+ return (1);
+ }
+
+ dbtp->size = sizeof(db_recno_t);
+ return (0);
+}
+
+/*
+ * digitize --
+ * Convert a character to an integer.
+ *
+ * Returns the value of a lowercase hex digit; on any other input
+ * sets *errorp, reports the error, and returns 0.
+ */
+int
+digitize(dbenv, c, errorp)
+ DB_ENV *dbenv;
+ int c, *errorp;
+{
+ switch (c) { /* Don't depend on ASCII ordering. */
+ case '0': return (0);
+ case '1': return (1);
+ case '2': return (2);
+ case '3': return (3);
+ case '4': return (4);
+ case '5': return (5);
+ case '6': return (6);
+ case '7': return (7);
+ case '8': return (8);
+ case '9': return (9);
+ case 'a': return (10);
+ case 'b': return (11);
+ case 'c': return (12);
+ case 'd': return (13);
+ case 'e': return (14);
+ case 'f': return (15);
+ }
+
+ dbenv->errx(dbenv, "unexpected hexadecimal value");
+ *errorp = 1;
+
+ return (0);
+}
+
+/*
+ * badnum --
+ * Display the bad number message (non-0/1 value for a boolean
+ * name=value keyword).
+ */
+void
+badnum(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv,
+ "boolean name=value pairs require a value of 0 or 1");
+}
+
+/*
+ * badend --
+ * Display the bad end to input message (truncated or malformed
+ * key/data input).
+ */
+void
+badend(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv, "unexpected end of input data or key/data pair");
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ *
+ * Returns EXIT_FAILURE so callers can simply "return (usage());".
+ */
+int
+usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_load [-nTV] [-c name=value] [-f file]",
+ "[-h home] [-P password] [-t btree | hash | recno | queue] db_file");
+ return (EXIT_FAILURE);
+}
+
+/*
+ * version_check --
+ * Verify the DB library we linked against matches the headers we
+ * compiled with. Returns 0 on match, EXIT_FAILURE on mismatch.
+ */
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+/*
+ * env_create --
+ * Create and open a DB environment configured from ldg (error
+ * reporting, optional encryption, cache size), and stash ldg in
+ * dbenv->app_private for the G() macro. Returns 0 or a DB error.
+ */
+int
+env_create(dbenvp, ldg)
+ DB_ENV **dbenvp;
+ LDG *ldg;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ if ((ret = db_env_create(dbenvp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", ldg->progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv = *dbenvp;
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, ldg->progname);
+ if (ldg->passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ ldg->passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ return (ret);
+ }
+ if ((ret = db_init(dbenv, ldg->home, ldg->cache, &ldg->private)) != 0)
+ return (ret);
+ dbenv->app_private = ldg;
+
+ return (0);
+}
diff --git a/storage/bdb/db_printlog/README b/storage/bdb/db_printlog/README
new file mode 100644
index 00000000000..d59f4c77f55
--- /dev/null
+++ b/storage/bdb/db_printlog/README
@@ -0,0 +1,34 @@
+# $Id: README,v 10.6 2002/06/20 14:52:54 bostic Exp $
+
+Berkeley DB log dump utility. This utility dumps out a DB log in human
+readable form, a record at a time, to assist in recovery and transaction
+abort debugging.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+commit.awk Output transaction ID of committed transactions.
+
+count.awk Print out the number of log records for transactions
+ that we encountered.
+
+dbname.awk Take a comma-separated list of database names and spit
+ out all the log records that affect those databases.
+
+fileid.awk Take a comma-separated list of file numbers and spit out
+ all the log records that affect those file numbers.
+
+logstat.awk Display log record count/size statistics.
+
+pgno.awk Take a comma-separated list of page numbers and spit
+ out all the log records that affect those page numbers.
+
+range.awk Print out a range of the log.
+
+rectype.awk Print out a range of the log -- command line should
+ set RECTYPE to a comma-separated list of the
+ rectypes (or partial strings of rectypes) sought.
+
+status.awk Read through db_printlog output and list the transactions
+ encountered, and whether they committed or aborted.
+
+txn.awk Print out all the records for a comma-separated list of
+ transaction IDs.
diff --git a/storage/bdb/db_printlog/commit.awk b/storage/bdb/db_printlog/commit.awk
new file mode 100644
index 00000000000..66391d3fb63
--- /dev/null
+++ b/storage/bdb/db_printlog/commit.awk
@@ -0,0 +1,7 @@
+# $Id: commit.awk,v 10.2 1999/11/21 18:01:42 bostic Exp $
+#
+# Output tid of committed transactions.
+
+/txn_regop/ {
+ print $5
+}
diff --git a/storage/bdb/db_printlog/count.awk b/storage/bdb/db_printlog/count.awk
new file mode 100644
index 00000000000..1d5a291950f
--- /dev/null
+++ b/storage/bdb/db_printlog/count.awk
@@ -0,0 +1,9 @@
+# $Id: count.awk,v 10.2 1999/11/21 18:01:42 bostic Exp $
+#
+# Print out the number of log records for transactions that we
+# encountered.
+
+/^\[/{
+ if ($5 != 0)
+ print $5
+}
diff --git a/storage/bdb/db_printlog/db_printlog.c b/storage/bdb/db_printlog/db_printlog.c
new file mode 100644
index 00000000000..af6d00d593a
--- /dev/null
+++ b/storage/bdb/db_printlog/db_printlog.c
@@ -0,0 +1,360 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_printlog.c,v 11.52 2002/08/08 03:50:38 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+int print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+int open_rep_db __P((DB_ENV *, DB **, DBC **));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_printlog";
+ DB *dbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ DBT data, keydbt;
+ DB_LSN key;
+ int ch, e_close, exitval, nflag, rflag, ret, repflag;
+ char *home, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ dbc = NULL;
+ logc = NULL;
+ e_close = exitval = nflag = rflag = repflag = 0;
+ home = passwd = NULL;
+ dtabsize = 0;
+ dtab = NULL;
+ while ((ch = getopt(argc, argv, "h:NP:rRV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'r':
+ rflag = 1;
+ break;
+ case 'R':
+ repflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc > 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * Set up an app-specific dispatch function so that we can gracefully
+ * handle app-specific log records.
+ */
+ if ((ret = dbenv->set_app_dispatch(dbenv, print_app_record)) != 0) {
+ dbenv->err(dbenv, ret, "app_dispatch");
+ goto shutdown;
+ }
+
+ /*
+ * An environment is required, but as all we're doing is reading log
+ * files, we create one if it doesn't already exist. If we create
+ * it, create it private so it automatically goes away when we're done.
+ * If we are reading the replication database, do not open the env
+ * with logging, because we don't want to log the opens.
+ */
+ if (repflag) {
+ if ((ret = dbenv->open(dbenv, home,
+ DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0))
+ != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ } else if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Initialize print callbacks. */
+ if ((ret = __bam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __crdel_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __db_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __fop_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __ham_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __txn_init_print(dbenv, &dtab, &dtabsize)) != 0) {
+ dbenv->err(dbenv, ret, "callback: initialization");
+ goto shutdown;
+ }
+
+ /* Allocate a log cursor. */
+ if (repflag) {
+ if ((ret = open_rep_db(dbenv, &dbp, &dbc)) != 0)
+ goto shutdown;
+ } else if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_cursor");
+ goto shutdown;
+ }
+
+ memset(&data, 0, sizeof(data));
+ memset(&keydbt, 0, sizeof(keydbt));
+ while (!__db_util_interrupted()) {
+ if (repflag) {
+ ret = dbc->c_get(dbc,
+ &keydbt, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret == 0)
+ key = ((REP_CONTROL *)keydbt.data)->lsn;
+ } else
+ ret = logc->get(logc,
+ &key, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret != 0) {
+ if (ret == DB_NOTFOUND)
+ break;
+ dbenv->err(dbenv,
+ ret, repflag ? "DB_LOGC->get" : "DBC->get");
+ goto shutdown;
+ }
+
+ ret = __db_dispatch(dbenv,
+ dtab, dtabsize, &data, &key, DB_TXN_PRINT, NULL);
+
+ /*
+ * XXX
+ * Just in case the underlying routines don't flush.
+ */
+ (void)fflush(stdout);
+
+ if (ret != 0) {
+ dbenv->err(dbenv, ret, "tx: dispatch");
+ goto shutdown;
+ }
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (logc != NULL && (ret = logc->close(logc, 0)) != 0)
+ exitval = 1;
+
+ if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0)
+ exitval = 1;
+
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0)
+ exitval = 1;
+
+ /*
+ * The dtab is allocated by __db_add_recovery (called by *_init_print)
+ * using the library malloc function (__os_malloc). It thus needs to be
+ * freed using the corresponding free (__os_free).
+ */
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_printlog [-NrV] [-h home] [-P password]");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+/* Print an unknown, application-specific log record as best we can. */
+int
+print_app_record(dbenv, dbt, lsnp, op)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ DB_LSN *lsnp;
+ db_recops op;
+{
+ int ch;
+ u_int32_t i, rectype;
+
+ DB_ASSERT(op == DB_TXN_PRINT);
+ COMPQUIET(dbenv, NULL);
+
+ /*
+ * Fetch the rectype, which always must be at the beginning of the
+ * record (if dispatching is to work at all).
+ */
+ memcpy(&rectype, dbt->data, sizeof(rectype));
+
+ /*
+ * Applications may wish to customize the output here based on the
+ * rectype. We just print the entire log record in the generic
+ * mixed-hex-and-printable format we use for binary data.
+ */
+ printf("[%lu][%lu]application specific record: rec: %lu\n",
+ (u_long)lsnp->file, (u_long)lsnp->offset, (u_long)rectype);
+ printf("\tdata: ");
+ for (i = 0; i < dbt->size; i++) {
+ ch = ((u_int8_t *)dbt->data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ printf("\n\n");
+
+ return (0);
+}
+
+int
+open_rep_db(dbenv, dbpp, dbcp)
+ DB_ENV *dbenv;
+ DB **dbpp;
+ DBC **dbcp;
+{
+ int ret;
+
+ DB *dbp;
+ *dbpp = NULL;
+ *dbcp = NULL;
+
+ if ((ret = db_create(dbpp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (ret);
+ }
+
+ dbp = *dbpp;
+ if ((ret =
+ dbp->open(dbp, NULL, "__db.rep.db", NULL, DB_BTREE, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open");
+ goto err;
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ goto err;
+ }
+
+ return (0);
+
+err: if (*dbpp != NULL)
+ (void)(*dbpp)->close(*dbpp, 0);
+ return (ret);
+}
diff --git a/storage/bdb/db_printlog/dbname.awk b/storage/bdb/db_printlog/dbname.awk
new file mode 100644
index 00000000000..47955994579
--- /dev/null
+++ b/storage/bdb/db_printlog/dbname.awk
@@ -0,0 +1,79 @@
+# $Id: dbname.awk,v 1.5 2002/05/07 05:45:51 ubell Exp $
+#
+# Take a comma-separated list of database names and spit out all the
+# log records that affect those databases.
+
+NR == 1 {
+ nfiles = 0
+ while ((ndx = index(DBNAME, ",")) != 0) {
+ filenames[nfiles] = substr(DBNAME, 1, ndx - 1) 0;
+ DBNAME = substr(DBNAME, ndx + 1, length(DBNAME) - ndx);
+ files[nfiles] = -1
+ nfiles++
+ }
+ filenames[nfiles] = DBNAME 0;
+ files[nfiles] = -1
+ myfile = -1;
+}
+
+/^\[.*dbreg_register/ {
+ register = 1;
+}
+/opcode:/ {
+ if (register == 1) {
+ if ($2 == 1)
+ register = 3;
+ else
+ register = $2;
+ }
+}
+/name:/ {
+ if (register >= 2) {
+ for (i = 0; i <= nfiles; i++) {
+ if ($2 == filenames[i]) {
+ if (register == 2) {
+ printme = 0;
+ myfile = -1;
+ files[i] = -1;
+ } else {
+ myfile = i;
+ }
+ break;
+ }
+ }
+ }
+ register = 0;
+}
+/fileid:/{
+ if (myfile != -1) {
+ files[myfile] = $2;
+ printme = 1;
+ register = 0;
+ myfile = -1;
+ } else
+ for (i = 0; i <= nfiles; i++)
+ if ($2 == files[i]) {
+ printme = 1
+ break;
+ }
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ rec = $0
+}
+
+TXN == 1 && /txn_regop/ {printme = 1}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/storage/bdb/db_printlog/fileid.awk b/storage/bdb/db_printlog/fileid.awk
new file mode 100644
index 00000000000..020644039ab
--- /dev/null
+++ b/storage/bdb/db_printlog/fileid.awk
@@ -0,0 +1,37 @@
+# $Id: fileid.awk,v 10.4 2000/07/17 22:07:17 ubell Exp $
+#
+# Take a comma-separated list of file numbers and spit out all the
+# log records that affect those file numbers.
+
+NR == 1 {
+ nfiles = 0
+ while ((ndx = index(FILEID, ",")) != 0) {
+ files[nfiles] = substr(FILEID, 1, ndx - 1);
+ FILEID = substr(FILEID, ndx + 1, length(FILEID) - ndx);
+ nfiles++
+ }
+ files[nfiles] = FILEID;
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ rec = $0
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+/fileid/{
+ for (i = 0; i <= nfiles; i++)
+ if ($2 == files[i])
+ printme = 1
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/storage/bdb/db_printlog/logstat.awk b/storage/bdb/db_printlog/logstat.awk
new file mode 100644
index 00000000000..1009343eba4
--- /dev/null
+++ b/storage/bdb/db_printlog/logstat.awk
@@ -0,0 +1,36 @@
+# $Id: logstat.awk,v 1.1 2002/05/10 15:19:13 bostic Exp $
+#
+# Output accumulated log record count/size statistics.
+BEGIN {
+ l_file = 0;
+ l_offset = 0;
+}
+
+/^\[/{
+ gsub("[][: ]", " ", $1)
+ split($1, a)
+
+ if (a[1] == l_file) {
+ l[a[3]] += a[2] - l_offset
+ ++n[a[3]]
+ } else
+ ++s[a[3]]
+
+ l_file = a[1]
+ l_offset = a[2]
+}
+
+END {
+ # We can't figure out the size of the first record in each log file,
+ # use the average for other records we found as an estimate.
+ for (i in s)
+ if (s[i] != 0 && n[i] != 0) {
+ l[i] += s[i] * (l[i]/n[i])
+ n[i] += s[i]
+ delete s[i]
+ }
+ for (i in l)
+ printf "%s: %d (n: %d, avg: %.2f)\n", i, l[i], n[i], l[i]/n[i]
+ for (i in s)
+ printf "%s: unknown (n: %d, unknown)\n", i, s[i]
+}
diff --git a/storage/bdb/db_printlog/pgno.awk b/storage/bdb/db_printlog/pgno.awk
new file mode 100644
index 00000000000..289fa853bc4
--- /dev/null
+++ b/storage/bdb/db_printlog/pgno.awk
@@ -0,0 +1,47 @@
+# $Id: pgno.awk,v 10.3 2000/07/17 22:07:17 ubell Exp $
+#
+# Take a comma-separated list of page numbers and spit out all the
+# log records that affect those page numbers.
+
+NR == 1 {
+ npages = 0
+ while ((ndx = index(PGNO, ",")) != 0) {
+ pgno[npages] = substr(PGNO, 1, ndx - 1);
+ PGNO = substr(PGNO, ndx + 1, length(PGNO) - ndx);
+ npages++
+ }
+ pgno[npages] = PGNO;
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ rec = $0
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+/pgno/{
+ for (i = 0; i <= npages; i++)
+ if ($2 == pgno[i])
+ printme = 1
+}
+/right/{
+ for (i = 0; i <= npages; i++)
+ if ($2 == pgno[i])
+ printme = 1
+}
+/left/{
+ for (i = 0; i <= npages; i++)
+ if ($2 == pgno[i])
+ printme = 1
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/storage/bdb/db_printlog/range.awk b/storage/bdb/db_printlog/range.awk
new file mode 100644
index 00000000000..7abb410b40f
--- /dev/null
+++ b/storage/bdb/db_printlog/range.awk
@@ -0,0 +1,27 @@
+# $Id: range.awk,v 10.2 1999/11/21 18:01:42 bostic Exp $
+#
+# Print out a range of the log
+
+/^\[/{
+ l = length($1) - 1;
+ i = index($1, "]");
+ file = substr($1, 2, i - 2);
+ file += 0;
+ start = i + 2;
+ offset = substr($1, start, l - start + 1);
+ i = index(offset, "]");
+ offset = substr($1, start, i - 1);
+ offset += 0;
+
+ if ((file == START_FILE && offset >= START_OFFSET || file > START_FILE)\
+ && (file < END_FILE || (file == END_FILE && offset < END_OFFSET)))
+ printme = 1
+ else if (file == END_FILE && offset > END_OFFSET || file > END_FILE)
+ exit
+ else
+ printme = 0
+}
+{
+ if (printme == 1)
+ print $0
+}
diff --git a/storage/bdb/db_printlog/rectype.awk b/storage/bdb/db_printlog/rectype.awk
new file mode 100644
index 00000000000..7f7b2f5ee15
--- /dev/null
+++ b/storage/bdb/db_printlog/rectype.awk
@@ -0,0 +1,27 @@
+# $Id: rectype.awk,v 11.3 2000/07/17 22:00:49 ubell Exp $
+#
+# Print out a range of the log
+# Command line should set RECTYPE to the a comma separated list
+# of the rectypes (or partial strings of rectypes) sought.
+NR == 1 {
+ ntypes = 0
+ while ((ndx = index(RECTYPE, ",")) != 0) {
+ types[ntypes] = substr(RECTYPE, 1, ndx - 1);
+ RECTYPE = substr(RECTYPE, ndx + 1, length(RECTYPE) - ndx);
+ ntypes++
+ }
+ types[ntypes] = RECTYPE;
+}
+
+/^\[/{
+ printme = 0
+ for (i = 0; i <= ntypes; i++)
+ if (index($1, types[i]) != 0) {
+ printme = 1
+ break;
+ }
+}
+{
+ if (printme == 1)
+ print $0
+}
diff --git a/storage/bdb/db_printlog/status.awk b/storage/bdb/db_printlog/status.awk
new file mode 100644
index 00000000000..13df0b6194a
--- /dev/null
+++ b/storage/bdb/db_printlog/status.awk
@@ -0,0 +1,46 @@
+# $Id: status.awk,v 10.3 2002/04/11 01:35:24 margo Exp $
+#
+# Read through db_printlog output and list all the transactions encountered
+# and whether they commited or aborted.
+#
+# 1 = started
+# 2 = commited
+# 3 = explicitly aborted
+# 4 = other
+BEGIN {
+ cur_txn = 0
+}
+/^\[/{
+ in_regop = 0
+ if (status[$5] == 0) {
+ status[$5] = 1;
+ txns[cur_txn] = $5;
+ cur_txn++;
+ }
+}
+/txn_regop/ {
+ txnid = $5
+ in_regop = 1
+}
+/opcode:/ {
+ if (in_regop == 1) {
+ if ($2 == 1)
+ status[txnid] = 2
+ else if ($2 == 3)
+ status[txnid] = 3
+ else
+ status[txnid] = 4
+ }
+}
+END {
+ for (i = 0; i < cur_txn; i++) {
+ if (status[txns[i]] == 1)
+ printf("%s\tABORT\n", txns[i]);
+ if (status[txns[i]] == 2)
+ printf("%s\tCOMMIT\n", txns[i]);
+ if (status[txns[i]] == 3)
+ printf("%s\tABORT\n", txns[i]);
+ if (status[txns[i]] == 4)
+ printf("%s\tOTHER\n", txns[i]);
+ }
+}
diff --git a/storage/bdb/db_printlog/txn.awk b/storage/bdb/db_printlog/txn.awk
new file mode 100644
index 00000000000..be8c44e1092
--- /dev/null
+++ b/storage/bdb/db_printlog/txn.awk
@@ -0,0 +1,34 @@
+# $Id: txn.awk,v 10.3 2000/07/17 22:07:17 ubell Exp $
+#
+# Print out all the records for a comma-separated list of transaction ids.
+NR == 1 {
+ ntxns = 0
+ while ((ndx = index(TXN, ",")) != 0) {
+ txn[ntxns] = substr(TXN, 1, ndx - 1);
+ TXN = substr(TXN, ndx + 1, length(TXN) - ndx);
+ ntxns++
+ }
+ txn[ntxns] = TXN;
+}
+
+/^\[/{
+ if (printme == 1) {
+ printf("%s\n", rec);
+ printme = 0
+ }
+ rec = "";
+
+ for (i = 0; i <= ntxns; i++)
+ if (txn[i] == $5) {
+ rec = $0
+ printme = 1
+ }
+}
+/^ /{
+ rec = sprintf("%s\n%s", rec, $0);
+}
+
+END {
+ if (printme == 1)
+ printf("%s\n", rec);
+}
diff --git a/storage/bdb/db_recover/db_recover.c b/storage/bdb/db_recover/db_recover.c
new file mode 100644
index 00000000000..b6414267f93
--- /dev/null
+++ b/storage/bdb/db_recover/db_recover.c
@@ -0,0 +1,313 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_recover.c,v 11.33 2002/03/28 20:13:42 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+int main __P((int, char *[]));
+int read_timestamp __P((const char *, char *, time_t *));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_recover";
+ DB_ENV *dbenv;
+ DB_TXNREGION *region;
+ time_t now, timestamp;
+ u_int32_t flags;
+ int ch, exitval, fatal_recover, ret, retain_env, verbose;
+ char *home, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ home = passwd = NULL;
+ timestamp = 0;
+ exitval = fatal_recover = retain_env = verbose = 0;
+ while ((ch = getopt(argc, argv, "ceh:P:t:Vv")) != EOF)
+ switch (ch) {
+ case 'c':
+ fatal_recover = 1;
+ break;
+ case 'e':
+ retain_env = 1;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 't':
+ if ((ret =
+ read_timestamp(progname, optarg, &timestamp)) != 0)
+ return (ret);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ }
+ if (timestamp &&
+ (ret = dbenv->set_tx_timestamp(dbenv, &timestamp)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->set_timestamp");
+ goto shutdown;
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * Initialize the environment -- we don't actually do anything
+ * else, that all that's needed to run recovery.
+ *
+ * Note that unless the caller specified the -e option, we use a
+ * private environment, as we're about to create a region, and we
+ * don't want to to leave it around. If we leave the region around,
+ * the application that should create it will simply join it instead,
+ * and will then be running with incorrectly sized (and probably
+ * terribly small) caches. Applications that use -e should almost
+ * certainly use DB_CONFIG files in the directory.
+ */
+ flags = 0;
+ LF_SET(DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_USE_ENVIRON);
+ LF_SET(fatal_recover ? DB_RECOVER_FATAL : DB_RECOVER);
+ LF_SET(retain_env ? 0 : DB_PRIVATE);
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ goto shutdown;
+ }
+
+ if (verbose) {
+ (void)time(&now);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ dbenv->errx(dbenv, "Recovery complete at %.24s", ctime(&now));
+ dbenv->errx(dbenv, "%s %lx %s [%lu][%lu]",
+ "Maximum transaction id", (u_long)region->last_txnid,
+ "Recovery checkpoint", (u_long)region->last_ckp.file,
+ (u_long)region->last_ckp.offset);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the environment. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+#define ATOI2(ar) ((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2;
+
+/*
+ * read_timestamp --
+ * Convert a time argument to Epoch seconds.
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+int
+read_timestamp(progname, arg, timep)
+ const char *progname;
+ char *arg;
+ time_t *timep;
+{
+ struct tm *t;
+ time_t now;
+ int yearset;
+ char *p;
+ /* Start with the current time. */
+ (void)time(&now);
+ if ((t = localtime(&now)) == NULL) {
+ fprintf(stderr,
+ "%s: localtime: %s\n", progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ /* [[CC]YY]MMDDhhmm[.SS] */
+ if ((p = strchr(arg, '.')) == NULL)
+ t->tm_sec = 0; /* Seconds defaults to 0. */
+ else {
+ if (strlen(p + 1) != 2)
+ goto terr;
+ *p++ = '\0';
+ t->tm_sec = ATOI2(p);
+ }
+
+ yearset = 0;
+ switch(strlen(arg)) {
+ case 12: /* CCYYMMDDhhmm */
+ t->tm_year = ATOI2(arg);
+ t->tm_year *= 100;
+ yearset = 1;
+ /* FALLTHROUGH */
+ case 10: /* YYMMDDhhmm */
+ if (yearset) {
+ yearset = ATOI2(arg);
+ t->tm_year += yearset;
+ } else {
+ yearset = ATOI2(arg);
+ if (yearset < 69)
+ t->tm_year = yearset + 2000;
+ else
+ t->tm_year = yearset + 1900;
+ }
+ t->tm_year -= 1900; /* Convert to UNIX time. */
+ /* FALLTHROUGH */
+ case 8: /* MMDDhhmm */
+ t->tm_mon = ATOI2(arg);
+ --t->tm_mon; /* Convert from 01-12 to 00-11 */
+ t->tm_mday = ATOI2(arg);
+ t->tm_hour = ATOI2(arg);
+ t->tm_min = ATOI2(arg);
+ break;
+ default:
+ goto terr;
+ }
+
+ t->tm_isdst = -1; /* Figure out DST. */
+
+ *timep = mktime(t);
+ if (*timep == -1) {
+terr: fprintf(stderr,
+ "%s: out of range or illegal time specification: [[CC]YY]MMDDhhmm[.SS]",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+int
+usage()
+{
+ (void)fprintf(stderr, "%s\n",
+"usage: db_recover [-ceVv] [-h home] [-P password] [-t [[CC]YY]MMDDhhmm[.SS]]");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/storage/bdb/db_stat/db_stat.c b/storage/bdb/db_stat/db_stat.c
new file mode 100644
index 00000000000..a2b01b71e0a
--- /dev/null
+++ b/storage/bdb/db_stat/db_stat.c
@@ -0,0 +1,1267 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_stat.c,v 11.125 2002/08/08 15:26:15 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+
+#define PCT(f, t, pgsize) \
+ ((t) == 0 ? 0 : \
+ (((double)(((t) * (pgsize)) - (f)) / ((t) * (pgsize))) * 100))
+
+typedef enum { T_NOTSET,
+ T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_REP, T_TXN } test_t;
+
+int argcheck __P((char *, const char *));
+int btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *, int));
+int db_init __P((DB_ENV *, char *, test_t, u_int32_t, int *));
+void dl __P((const char *, u_long));
+void dl_bytes __P((const char *, u_long, u_long, u_long));
+int env_stats __P((DB_ENV *, u_int32_t));
+int hash_stats __P((DB_ENV *, DB *, int));
+int lock_stats __P((DB_ENV *, char *, u_int32_t));
+int log_stats __P((DB_ENV *, u_int32_t));
+int main __P((int, char *[]));
+int mpool_stats __P((DB_ENV *, char *, u_int32_t));
+void prflags __P((u_int32_t, const FN *));
+int queue_stats __P((DB_ENV *, DB *, int));
+int rep_stats __P((DB_ENV *, u_int32_t));
+int txn_compare __P((const void *, const void *));
+int txn_stats __P((DB_ENV *, u_int32_t));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_stat";
+ DB_ENV *dbenv;
+ DB_BTREE_STAT *sp;
+ DB *alt_dbp, *dbp;
+ test_t ttype;
+ u_int32_t cache;
+ int ch, checked, d_close, e_close, exitval, fast, flags;
+ int nflag, private, resize, ret;
+ char *db, *home, *internal, *passwd, *subdb;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ ttype = T_NOTSET;
+ cache = MEGABYTE;
+ d_close = e_close = exitval = fast = flags = nflag = private = 0;
+ db = home = internal = passwd = subdb = NULL;
+
+ while ((ch = getopt(argc, argv, "C:cd:efh:lM:mNP:rs:tVZ")) != EOF)
+ switch (ch) {
+ case 'C':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ if (!argcheck(internal = optarg, "Aclmop"))
+ return (usage());
+ break;
+ case 'c':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ break;
+ case 'd':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ db = optarg;
+ break;
+ case 'e':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_ENV;
+ break;
+ case 'f':
+ fast = DB_FAST_STAT;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOG;
+ break;
+ case 'M':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ if (!argcheck(internal = optarg, "Ahm"))
+ return (usage());
+ break;
+ case 'm':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'r':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_REP;
+ break;
+ case 's':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ subdb = optarg;
+ break;
+ case 't':
+ if (ttype != T_NOTSET) {
+argcombo: fprintf(stderr,
+ "%s: illegal option combination\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ ttype = T_TXN;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'Z':
+ flags |= DB_STAT_CLEAR;
+ break;
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ switch (ttype) {
+ case T_DB:
+ if (db == NULL)
+ return (usage());
+ break;
+ case T_NOTSET:
+ return (usage());
+ /* NOTREACHED */
+ default:
+ if (fast != 0)
+ return (usage());
+ break;
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /* Initialize the environment. */
+ if (db_init(dbenv, home, ttype, cache, &private) != 0)
+ goto shutdown;
+
+ switch (ttype) {
+ case T_DB:
+ /* Create the DB object and open the file. */
+ if (flags != 0)
+ return (usage());
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ d_close = 1;
+
+ if ((ret = dbp->open(dbp,
+ NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", db);
+ goto shutdown;
+ }
+
+ /* Check if cache is too small for this DB's pagesize. */
+ if (private) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+ goto shutdown;
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+
+ /*
+ * See if we can open this db read/write to update counts.
+ * If its a master-db then we cannot. So check to see,
+ * if its btree then it might be.
+ */
+ checked = 0;
+ if (subdb == NULL && dbp->type == DB_BTREE) {
+ if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto shutdown;
+ }
+ checked = 1;
+ }
+
+ if (subdb != NULL ||
+ dbp->type != DB_BTREE ||
+ (sp->bt_metaflags & BTM_SUBDB) == 0) {
+ if ((ret = db_create(&alt_dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ if ((ret = dbp->open(alt_dbp, NULL,
+ db, subdb, DB_UNKNOWN, 0, 0)) != 0) {
+ dbenv->err(dbenv,
+ ret, "DB->open: %s:%s", db, subdb);
+ (void)alt_dbp->close(alt_dbp, 0);
+ goto shutdown;
+ }
+
+ (void)dbp->close(dbp, 0);
+ dbp = alt_dbp;
+
+ /* Need to run again to update counts */
+ checked = 0;
+ }
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if (btree_stats(
+ dbenv, dbp, checked == 1 ? sp : NULL, fast))
+ goto shutdown;
+ break;
+ case DB_HASH:
+ if (hash_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_QUEUE:
+ if (queue_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_UNKNOWN:
+ dbenv->errx(dbenv, "Unknown database type.");
+ goto shutdown;
+ }
+ break;
+ case T_ENV:
+ if (env_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_LOCK:
+ if (lock_stats(dbenv, internal, flags))
+ goto shutdown;
+ break;
+ case T_LOG:
+ if (log_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_MPOOL:
+ if (mpool_stats(dbenv, internal, flags))
+ goto shutdown;
+ break;
+ case T_REP:
+ if (rep_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_TXN:
+ if (txn_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_NOTSET:
+ dbenv->errx(dbenv, "Unknown statistics flag.");
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * env_stats --
+ *	Display environment statistics: version/magic/panic state, the
+ *	reference count, and a per-region stanza for every shared region.
+ *
+ * Returns 0 on success, 1 on error (error already reported).
+ */
+int
+env_stats(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	REGENV renv;
+	REGION *rp, regs[1024];
+	int n, ret;
+	const char *label;
+
+	/* __db_e_stat fills at most n entries and resets n to the count. */
+	n = sizeof(regs) / sizeof(regs[0]);
+	if ((ret = __db_e_stat(dbenv, &renv, regs, &n, flags)) != 0) {
+		dbenv->err(dbenv, ret, "__db_e_stat");
+		return (1);
+	}
+
+	printf("%d.%d.%d\tEnvironment version.\n",
+	    renv.majver, renv.minver, renv.patch);
+	printf("%lx\tMagic number.\n", (u_long)renv.magic);
+	printf("%d\tPanic value.\n", renv.envpanic);
+
+	/* Adjust the reference count for us... */
+	printf("%d\tReferences.\n", renv.refcnt - 1);
+
+	dl("Locks granted without waiting.\n",
+	    (u_long)renv.mutex.mutex_set_nowait);
+	dl("Locks granted after waiting.\n",
+	    (u_long)renv.mutex.mutex_set_wait);
+
+	/* Walk the region list (in reverse fill order), one stanza each. */
+	while (n > 0) {
+		printf("%s\n", DB_LINE);
+		rp = &regs[--n];
+		switch (rp->type) {
+		case REGION_TYPE_ENV:
+			label = "Environment";
+			break;
+		case REGION_TYPE_LOCK:
+			label = "Lock";
+			break;
+		case REGION_TYPE_LOG:
+			label = "Log";
+			break;
+		case REGION_TYPE_MPOOL:
+			label = "Mpool";
+			break;
+		case REGION_TYPE_MUTEX:
+			label = "Mutex";
+			break;
+		case REGION_TYPE_TXN:
+			label = "Txn";
+			break;
+		case INVALID_REGION_TYPE:
+		default:
+			label = "Invalid";
+			break;
+		}
+		printf("%s Region: %d.\n", label, rp->id);
+		dl_bytes("Size", (u_long)0, (u_long)0, (u_long)rp->size);
+		printf("%ld\tSegment ID.\n", rp->segid);
+		dl("Locks granted without waiting.\n",
+		    (u_long)rp->mutex.mutex_set_nowait);
+		dl("Locks granted after waiting.\n",
+		    (u_long)rp->mutex.mutex_set_wait);
+	}
+
+	return (0);
+}
+
+/*
+ * btree_stats --
+ *	Display btree/recno statistics.
+ *
+ * dbenv: environment handle (unused; kept for interface symmetry).
+ * dbp: open btree or recno database handle.
+ * msp: statistics already fetched by the caller, or NULL to fetch here.
+ * fast: flag value handed to DB->stat (e.g. DB_FAST_STAT).
+ *
+ * Returns 0 on success, 1 on error.  NOTE(review): the statistics
+ * buffer is freed on the way out even when the caller supplied it via
+ * msp -- callers pass ownership; confirm against the call sites.
+ */
+int
+btree_stats(dbenv, dbp, msp, fast)
+	DB_ENV *dbenv;
+	DB *dbp;
+	DB_BTREE_STAT *msp;
+	int fast;
+{
+	/* Printable names for the btree metadata flag bits. */
+	static const FN fn[] = {
+		{ BTM_DUP,	"duplicates" },
+		{ BTM_FIXEDLEN,	"fixed-length" },
+		{ BTM_RECNO,	"recno" },
+		{ BTM_RECNUM,	"record-numbers" },
+		{ BTM_RENUMBER,	"renumber" },
+		{ BTM_SUBDB,	"multiple-databases" },
+		{ 0,		NULL }
+	};
+	DB_BTREE_STAT *sp;
+	int ret;
+
+	COMPQUIET(dbenv, NULL);
+
+	/* Use the caller's statistics if supplied, else fetch our own. */
+	if (msp != NULL)
+		sp = msp;
+	else if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+		dbp->err(dbp, ret, "DB->stat");
+		return (1);
+	}
+
+	printf("%lx\tBtree magic number.\n", (u_long)sp->bt_magic);
+	printf("%lu\tBtree version number.\n", (u_long)sp->bt_version);
+	prflags(sp->bt_metaflags, fn);
+	if (dbp->type == DB_BTREE) {
+#ifdef NOT_IMPLEMENTED
+		dl("Maximum keys per-page.\n", (u_long)sp->bt_maxkey);
+#endif
+		dl("Minimum keys per-page.\n", (u_long)sp->bt_minkey);
+	}
+	if (dbp->type == DB_RECNO) {
+		dl("Fixed-length record size.\n", (u_long)sp->bt_re_len);
+		/* Show the pad byte literally if printable, else in hex. */
+		if (isprint(sp->bt_re_pad) && !isspace(sp->bt_re_pad))
+			printf("%c\tFixed-length record pad.\n",
+			    (int)sp->bt_re_pad);
+		else
+			printf("0x%x\tFixed-length record pad.\n",
+			    (int)sp->bt_re_pad);
+	}
+	dl("Underlying database page size.\n", (u_long)sp->bt_pagesize);
+	dl("Number of levels in the tree.\n", (u_long)sp->bt_levels);
+	dl(dbp->type == DB_BTREE ?
+	    "Number of unique keys in the tree.\n" :
+	    "Number of records in the tree.\n", (u_long)sp->bt_nkeys);
+	dl("Number of data items in the tree.\n", (u_long)sp->bt_ndata);
+
+	/* Per page type: page count, free bytes, and fill factor. */
+	dl("Number of tree internal pages.\n", (u_long)sp->bt_int_pg);
+	dl("Number of bytes free in tree internal pages",
+	    (u_long)sp->bt_int_pgfree);
+	printf(" (%.0f%% ff).\n",
+	    PCT(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize));
+
+	dl("Number of tree leaf pages.\n", (u_long)sp->bt_leaf_pg);
+	dl("Number of bytes free in tree leaf pages",
+	    (u_long)sp->bt_leaf_pgfree);
+	printf(" (%.0f%% ff).\n",
+	    PCT(sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize));
+
+	dl("Number of tree duplicate pages.\n", (u_long)sp->bt_dup_pg);
+	dl("Number of bytes free in tree duplicate pages",
+	    (u_long)sp->bt_dup_pgfree);
+	printf(" (%.0f%% ff).\n",
+	    PCT(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize));
+
+	dl("Number of tree overflow pages.\n", (u_long)sp->bt_over_pg);
+	dl("Number of bytes free in tree overflow pages",
+	    (u_long)sp->bt_over_pgfree);
+	printf(" (%.0f%% ff).\n",
+	    PCT(sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize));
+
+	dl("Number of pages on the free list.\n", (u_long)sp->bt_free);
+
+	free(sp);
+
+	return (0);
+}
+
+/*
+ * hash_stats --
+ *	Display hash statistics.
+ *
+ * dbenv: environment handle (unused; kept for interface symmetry).
+ * dbp: open hash database handle.
+ * fast: flag value handed to DB->stat.
+ *
+ * Returns 0 on success, 1 on error; frees the statistics buffer.
+ */
+int
+hash_stats(dbenv, dbp, fast)
+	DB_ENV *dbenv;
+	DB *dbp;
+	int fast;
+{
+	/* Printable names for the hash metadata flag bits. */
+	static const FN fn[] = {
+		{ DB_HASH_DUP,	"duplicates" },
+		{ DB_HASH_SUBDB,"multiple-databases" },
+		{ 0,		NULL }
+	};
+	DB_HASH_STAT *sp;
+	int ret;
+
+	COMPQUIET(dbenv, NULL);
+
+	if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+		dbp->err(dbp, ret, "DB->stat");
+		return (1);
+	}
+
+	printf("%lx\tHash magic number.\n", (u_long)sp->hash_magic);
+	printf("%lu\tHash version number.\n", (u_long)sp->hash_version);
+	prflags(sp->hash_metaflags, fn);
+	dl("Underlying database page size.\n", (u_long)sp->hash_pagesize);
+	dl("Specified fill factor.\n", (u_long)sp->hash_ffactor);
+	dl("Number of keys in the database.\n", (u_long)sp->hash_nkeys);
+	dl("Number of data items in the database.\n", (u_long)sp->hash_ndata);
+
+	/* Per page type: page count, free bytes, and fill factor. */
+	dl("Number of hash buckets.\n", (u_long)sp->hash_buckets);
+	dl("Number of bytes free on bucket pages", (u_long)sp->hash_bfree);
+	printf(" (%.0f%% ff).\n",
+	    PCT(sp->hash_bfree, sp->hash_buckets, sp->hash_pagesize));
+
+	dl("Number of overflow pages.\n", (u_long)sp->hash_bigpages);
+	dl("Number of bytes free in overflow pages",
+	    (u_long)sp->hash_big_bfree);
+	printf(" (%.0f%% ff).\n",
+	    PCT(sp->hash_big_bfree, sp->hash_bigpages, sp->hash_pagesize));
+
+	dl("Number of bucket overflow pages.\n", (u_long)sp->hash_overflows);
+	dl("Number of bytes free in bucket overflow pages",
+	    (u_long)sp->hash_ovfl_free);
+	printf(" (%.0f%% ff).\n",
+	    PCT(sp->hash_ovfl_free, sp->hash_overflows, sp->hash_pagesize));
+
+	dl("Number of duplicate pages.\n", (u_long)sp->hash_dup);
+	dl("Number of bytes free in duplicate pages",
+	    (u_long)sp->hash_dup_free);
+	printf(" (%.0f%% ff).\n",
+	    PCT(sp->hash_dup_free, sp->hash_dup, sp->hash_pagesize));
+
+	dl("Number of pages on the free list.\n", (u_long)sp->hash_free);
+
+	free(sp);
+
+	return (0);
+}
+
+/*
+ * queue_stats --
+ *	Display queue statistics.
+ *
+ * dbenv: environment handle (unused; kept for interface symmetry).
+ * dbp: open queue database handle.
+ * fast: flag value handed to DB->stat.
+ *
+ * Returns 0 on success, 1 on error; frees the statistics buffer.
+ */
+int
+queue_stats(dbenv, dbp, fast)
+	DB_ENV *dbenv;
+	DB *dbp;
+	int fast;
+{
+	DB_QUEUE_STAT *sp;
+	int ret;
+
+	COMPQUIET(dbenv, NULL);
+
+	if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+		dbp->err(dbp, ret, "DB->stat");
+		return (1);
+	}
+
+	printf("%lx\tQueue magic number.\n", (u_long)sp->qs_magic);
+	printf("%lu\tQueue version number.\n", (u_long)sp->qs_version);
+	dl("Fixed-length record size.\n", (u_long)sp->qs_re_len);
+	/* Show the pad byte literally if printable, else in hex. */
+	if (isprint(sp->qs_re_pad) && !isspace(sp->qs_re_pad))
+		printf("%c\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+	else
+		printf("0x%x\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+	dl("Underlying database page size.\n", (u_long)sp->qs_pagesize);
+	/* Extent size is only meaningful if the queue uses extents. */
+	if (sp->qs_extentsize != 0)
+		dl("Underlying database extent size.\n",
+		    (u_long)sp->qs_extentsize);
+	dl("Number of records in the database.\n", (u_long)sp->qs_nkeys);
+	dl("Number of database pages.\n", (u_long)sp->qs_pages);
+	dl("Number of bytes free in database pages", (u_long)sp->qs_pgfree);
+	printf(" (%.0f%% ff).\n",
+	    PCT(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize));
+	printf("%lu\tFirst undeleted record.\n", (u_long)sp->qs_first_recno);
+	printf(
+	    "%lu\tNext available record number.\n", (u_long)sp->qs_cur_recno);
+
+	free(sp);
+
+	return (0);
+}
+
+/*
+ * lock_stats --
+ *	Display lock statistics.
+ *
+ * dbenv: open environment handle.
+ * internal: if non-NULL, a lock-region specifier; the raw region is
+ *	dumped to stdout instead of printing the statistics report.
+ * flags: flag value handed to DB_ENV->lock_stat.
+ *
+ * Returns 0 on success, 1 on error; frees the statistics buffer.
+ */
+int
+lock_stats(dbenv, internal, flags)
+	DB_ENV *dbenv;
+	char *internal;
+	u_int32_t flags;
+{
+	DB_LOCK_STAT *sp;
+	int ret;
+
+	/* Raw region dump requested: do that and nothing else. */
+	if (internal != NULL) {
+		if ((ret =
+		    dbenv->lock_dump_region(dbenv, internal, stdout)) != 0) {
+			dbenv->err(dbenv, ret, NULL);
+			return (1);
+		}
+		return (0);
+	}
+
+	if ((ret = dbenv->lock_stat(dbenv, &sp, flags)) != 0) {
+		dbenv->err(dbenv, ret, NULL);
+		return (1);
+	}
+
+	dl("Last allocated locker ID.\n", (u_long)sp->st_id);
+	dl("Current maximum unused locker ID.\n", (u_long)sp->st_cur_maxid);
+	dl("Number of lock modes.\n", (u_long)sp->st_nmodes);
+	dl("Maximum number of locks possible.\n", (u_long)sp->st_maxlocks);
+	dl("Maximum number of lockers possible.\n", (u_long)sp->st_maxlockers);
+	dl("Maximum number of lock objects possible.\n",
+	    (u_long)sp->st_maxobjects);
+	dl("Number of current locks.\n", (u_long)sp->st_nlocks);
+	dl("Maximum number of locks at any one time.\n",
+	    (u_long)sp->st_maxnlocks);
+	dl("Number of current lockers.\n", (u_long)sp->st_nlockers);
+	dl("Maximum number of lockers at any one time.\n",
+	    (u_long)sp->st_maxnlockers);
+	dl("Number of current lock objects.\n", (u_long)sp->st_nobjects);
+	dl("Maximum number of lock objects at any one time.\n",
+	    (u_long)sp->st_maxnobjects);
+	dl("Total number of locks requested.\n", (u_long)sp->st_nrequests);
+	dl("Total number of locks released.\n", (u_long)sp->st_nreleases);
+	dl(
+    "Total number of lock requests failing because DB_LOCK_NOWAIT was set.\n",
+	    (u_long)sp->st_nnowaits);
+	dl(
+    "Total number of locks not immediately available due to conflicts.\n",
+	    (u_long)sp->st_nconflicts);
+	dl("Number of deadlocks.\n", (u_long)sp->st_ndeadlocks);
+	dl("Lock timeout value.\n", (u_long)sp->st_locktimeout);
+	dl("Number of locks that have timed out.\n",
+	    (u_long)sp->st_nlocktimeouts);
+	dl("Transaction timeout value.\n", (u_long)sp->st_txntimeout);
+	dl("Number of transactions that have timed out.\n",
+	    (u_long)sp->st_ntxntimeouts);
+
+	dl_bytes("The size of the lock region.",
+	    (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+	dl("The number of region locks granted after waiting.\n",
+	    (u_long)sp->st_region_wait);
+	dl("The number of region locks granted without waiting.\n",
+	    (u_long)sp->st_region_nowait);
+
+	free(sp);
+
+	return (0);
+}
+
+/*
+ * log_stats --
+ *	Display log statistics.
+ *
+ * dbenv: open environment handle.
+ * flags: flag value handed to DB_ENV->log_stat.
+ *
+ * Returns 0 on success, 1 on error; frees the statistics buffer.
+ */
+int
+log_stats(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	DB_LOG_STAT *sp;
+	int ret;
+
+	if ((ret = dbenv->log_stat(dbenv, &sp, flags)) != 0) {
+		dbenv->err(dbenv, ret, NULL);
+		return (1);
+	}
+
+	printf("%lx\tLog magic number.\n", (u_long)sp->st_magic);
+	printf("%lu\tLog version number.\n", (u_long)sp->st_version);
+	dl_bytes("Log record cache size",
+	    (u_long)0, (u_long)0, (u_long)sp->st_lg_bsize);
+	printf("%#o\tLog file mode.\n", sp->st_mode);
+	/* Report the log file size in Mb or Kb when it divides evenly. */
+	if (sp->st_lg_size % MEGABYTE == 0)
+		printf("%luMb\tCurrent log file size.\n",
+		    (u_long)sp->st_lg_size / MEGABYTE);
+	else if (sp->st_lg_size % 1024 == 0)
+		printf("%luKb\tCurrent log file size.\n",
+		    (u_long)sp->st_lg_size / 1024);
+	else
+		printf("%lu\tCurrent log file size.\n",
+		    (u_long)sp->st_lg_size);
+	dl_bytes("Log bytes written",
+	    (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes);
+	dl_bytes("Log bytes written since last checkpoint",
+	    (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes);
+	dl("Total log file writes.\n", (u_long)sp->st_wcount);
+	dl("Total log file write due to overflow.\n",
+	    (u_long)sp->st_wcount_fill);
+	dl("Total log file flushes.\n", (u_long)sp->st_scount);
+	printf("%lu\tCurrent log file number.\n", (u_long)sp->st_cur_file);
+	printf("%lu\tCurrent log file offset.\n", (u_long)sp->st_cur_offset);
+	printf("%lu\tOn-disk log file number.\n", (u_long)sp->st_disk_file);
+	printf("%lu\tOn-disk log file offset.\n", (u_long)sp->st_disk_offset);
+
+	dl("Max commits in a log flush.\n", (u_long)sp->st_maxcommitperflush);
+	dl("Min commits in a log flush.\n", (u_long)sp->st_mincommitperflush);
+
+	dl_bytes("Log region size",
+	    (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+	dl("The number of region locks granted after waiting.\n",
+	    (u_long)sp->st_region_wait);
+	dl("The number of region locks granted without waiting.\n",
+	    (u_long)sp->st_region_nowait);
+
+	free(sp);
+
+	return (0);
+}
+
+/*
+ * mpool_stats --
+ *	Display mpool statistics: the global cache report followed by a
+ *	per-file stanza for every file in the pool.
+ *
+ * dbenv: open environment handle.
+ * internal: if non-NULL, an mpool-region specifier; the raw region is
+ *	dumped to stdout instead of printing the statistics report.
+ * flags: flag value handed to DB_ENV->memp_stat.
+ *
+ * Returns 0 on success, 1 on error; frees both statistics buffers.
+ */
+int
+mpool_stats(dbenv, internal, flags)
+	DB_ENV *dbenv;
+	char *internal;
+	u_int32_t flags;
+{
+	DB_MPOOL_FSTAT **fsp, **tfsp;
+	DB_MPOOL_STAT *gsp;
+	int ret;
+
+	/* Raw region dump requested: do that and nothing else. */
+	if (internal != NULL) {
+		if ((ret =
+		    dbenv->memp_dump_region(dbenv, internal, stdout)) != 0) {
+			dbenv->err(dbenv, ret, NULL);
+			return (1);
+		}
+		return (0);
+	}
+
+	if ((ret = dbenv->memp_stat(dbenv, &gsp, &fsp, flags)) != 0) {
+		dbenv->err(dbenv, ret, NULL);
+		return (1);
+	}
+
+	dl_bytes("Total cache size",
+	    (u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes);
+	dl("Number of caches.\n", (u_long)gsp->st_ncache);
+	dl_bytes("Pool individual cache size",
+	    (u_long)0, (u_long)0, (u_long)gsp->st_regsize);
+	dl("Requested pages mapped into the process' address space.\n",
+	    (u_long)gsp->st_map);
+	dl("Requested pages found in the cache", (u_long)gsp->st_cache_hit);
+	/* Only print a hit rate when there was at least one request. */
+	if (gsp->st_cache_hit + gsp->st_cache_miss != 0)
+		printf(" (%.0f%%)", ((double)gsp->st_cache_hit /
+		    (gsp->st_cache_hit + gsp->st_cache_miss)) * 100);
+	printf(".\n");
+	dl("Requested pages not found in the cache.\n",
+	    (u_long)gsp->st_cache_miss);
+	dl("Pages created in the cache.\n", (u_long)gsp->st_page_create);
+	dl("Pages read into the cache.\n", (u_long)gsp->st_page_in);
+	dl("Pages written from the cache to the backing file.\n",
+	    (u_long)gsp->st_page_out);
+	dl("Clean pages forced from the cache.\n",
+	    (u_long)gsp->st_ro_evict);
+	dl("Dirty pages forced from the cache.\n",
+	    (u_long)gsp->st_rw_evict);
+	dl("Dirty pages written by trickle-sync thread.\n",
+	    (u_long)gsp->st_page_trickle);
+	dl("Current total page count.\n",
+	    (u_long)gsp->st_pages);
+	dl("Current clean page count.\n",
+	    (u_long)gsp->st_page_clean);
+	dl("Current dirty page count.\n",
+	    (u_long)gsp->st_page_dirty);
+	dl("Number of hash buckets used for page location.\n",
+	    (u_long)gsp->st_hash_buckets);
+	dl("Total number of times hash chains searched for a page.\n",
+	    (u_long)gsp->st_hash_searches);
+	dl("The longest hash chain searched for a page.\n",
+	    (u_long)gsp->st_hash_longest);
+	dl("Total number of hash buckets examined for page location.\n",
+	    (u_long)gsp->st_hash_examined);
+	dl("The number of hash bucket locks granted without waiting.\n",
+	    (u_long)gsp->st_hash_nowait);
+	dl("The number of hash bucket locks granted after waiting.\n",
+	    (u_long)gsp->st_hash_wait);
+	dl("The maximum number of times any hash bucket lock was waited for.\n",
+	    (u_long)gsp->st_hash_max_wait);
+	dl("The number of region locks granted without waiting.\n",
+	    (u_long)gsp->st_region_nowait);
+	dl("The number of region locks granted after waiting.\n",
+	    (u_long)gsp->st_region_wait);
+	dl("The number of page allocations.\n",
+	    (u_long)gsp->st_alloc);
+	dl("The number of hash buckets examined during allocations\n",
+	    (u_long)gsp->st_alloc_buckets);
+	dl("The max number of hash buckets examined for an allocation\n",
+	    (u_long)gsp->st_alloc_max_buckets);
+	dl("The number of pages examined during allocations\n",
+	    (u_long)gsp->st_alloc_pages);
+	dl("The max number of pages examined for an allocation\n",
+	    (u_long)gsp->st_alloc_max_pages);
+
+	/*
+	 * Walk the NULL-terminated per-file array with a temporary
+	 * pointer so the array memp_stat allocated can still be freed
+	 * afterward -- advancing fsp itself leaked it.
+	 */
+	for (tfsp = fsp; tfsp != NULL && *tfsp != NULL; ++tfsp) {
+		printf("%s\n", DB_LINE);
+		printf("Pool File: %s\n", (*tfsp)->file_name);
+		dl("Page size.\n", (u_long)(*tfsp)->st_pagesize);
+		dl("Requested pages mapped into the process' address space.\n",
+		    (u_long)(*tfsp)->st_map);
+		dl("Requested pages found in the cache",
+		    (u_long)(*tfsp)->st_cache_hit);
+		if ((*tfsp)->st_cache_hit + (*tfsp)->st_cache_miss != 0)
+			printf(" (%.0f%%)", ((double)(*tfsp)->st_cache_hit /
+			    ((*tfsp)->st_cache_hit + (*tfsp)->st_cache_miss)) *
+			    100);
+		printf(".\n");
+		dl("Requested pages not found in the cache.\n",
+		    (u_long)(*tfsp)->st_cache_miss);
+		dl("Pages created in the cache.\n",
+		    (u_long)(*tfsp)->st_page_create);
+		dl("Pages read into the cache.\n",
+		    (u_long)(*tfsp)->st_page_in);
+		dl("Pages written from the cache to the backing file.\n",
+		    (u_long)(*tfsp)->st_page_out);
+	}
+
+	/* free(NULL) is a no-op, so no guard is needed on fsp. */
+	free(fsp);
+	free(gsp);
+
+	return (0);
+}
+
+/*
+ * rep_stats --
+ *	Display replication statistics.
+ *
+ * dbenv: open environment handle.
+ * flags: flag value handed to DB_ENV->rep_stat.
+ *
+ * Returns 0 on success, 1 on error; frees the statistics buffer.
+ */
+int
+rep_stats(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	DB_REP_STAT *sp;
+	int is_client, ret;
+	const char *p;
+
+	if ((ret = dbenv->rep_stat(dbenv, &sp, flags)) != 0) {
+		dbenv->err(dbenv, ret, NULL);
+		return (1);
+	}
+
+	/* Describe this site's role; clients phrase the LSN differently. */
+	is_client = 0;
+	switch (sp->st_status) {
+	case DB_REP_MASTER:
+		printf("Environment configured as a replication master.\n");
+		break;
+	case DB_REP_CLIENT:
+		printf("Environment configured as a replication client.\n");
+		is_client = 1;
+		break;
+	case DB_REP_LOGSONLY:
+		printf("Environment configured as a logs-only replica.\n");
+		is_client = 1;
+		break;
+	default:
+		printf("Environment not configured for replication.\n");
+		break;
+	}
+
+	printf("%lu/%lu\t%s\n",
+	    (u_long)sp->st_next_lsn.file, (u_long)sp->st_next_lsn.offset,
+	    is_client ? "Next LSN expected." : "Next LSN to be used.");
+	/* A zero file number means no log records are outstanding. */
+	p = sp->st_waiting_lsn.file == 0 ?
+	    "Not waiting for any missed log records." :
+	    "LSN of first missed log record being waited for.";
+	printf("%lu/%lu\t%s\n",
+	    (u_long)sp->st_waiting_lsn.file, (u_long)sp->st_waiting_lsn.offset,
+	    p);
+
+	dl("Number of duplicate master conditions detected.\n",
+	    (u_long)sp->st_dupmasters);
+	if (sp->st_env_id != DB_EID_INVALID)
+		dl("Current environment ID.\n", (u_long)sp->st_env_id);
+	else
+		printf("No current environment ID.\n");
+	dl("Current environment priority.\n", (u_long)sp->st_env_priority);
+	dl("Current generation number.\n", (u_long)sp->st_gen);
+	dl("Number of duplicate log records received.\n",
+	    (u_long)sp->st_log_duplicated);
+	dl("Number of log records currently queued.\n",
+	    (u_long)sp->st_log_queued);
+	dl("Maximum number of log records ever queued at once.\n",
+	    (u_long)sp->st_log_queued_max);
+	dl("Total number of log records queued.\n",
+	    (u_long)sp->st_log_queued_total);
+	dl("Number of log records received and appended to the log.\n",
+	    (u_long)sp->st_log_records);
+	dl("Number of log records missed and requested.\n",
+	    (u_long)sp->st_log_requested);
+	if (sp->st_master != DB_EID_INVALID)
+		dl("Current master ID.\n", (u_long)sp->st_master);
+	else
+		printf("No current master ID.\n");
+	dl("Number of times the master has changed.\n",
+	    (u_long)sp->st_master_changes);
+	dl("Number of messages received with a bad generation number.\n",
+	    (u_long)sp->st_msgs_badgen);
+	dl("Number of messages received and processed.\n",
+	    (u_long)sp->st_msgs_processed);
+	dl("Number of messages ignored due to pending recovery.\n",
+	    (u_long)sp->st_msgs_recover);
+	dl("Number of failed message sends.\n",
+	    (u_long)sp->st_msgs_send_failures);
+	dl("Number of messages sent.\n", (u_long)sp->st_msgs_sent);
+	dl("Number of new site messages received.\n", (u_long)sp->st_newsites);
+	dl("Transmission limited.\n", (u_long)sp->st_nthrottles);
+	dl("Number of outdated conditions detected.\n",
+	    (u_long)sp->st_outdated);
+	dl("Number of transactions applied.\n", (u_long)sp->st_txns_applied);
+
+	dl("Number of elections held.\n", (u_long)sp->st_elections);
+	dl("Number of elections won.\n", (u_long)sp->st_elections_won);
+
+	/* Election details only make sense while an election is running. */
+	if (sp->st_election_status == 0)
+		printf("No election in progress.\n");
+	else {
+		dl("Current election phase.\n", (u_long)sp->st_election_status);
+		dl("Election winner.\n",
+		    (u_long)sp->st_election_cur_winner);
+		dl("Election generation number.\n",
+		    (u_long)sp->st_election_gen);
+		printf("%lu/%lu\tMaximum LSN of election winner.\n",
+		    (u_long)sp->st_election_lsn.file,
+		    (u_long)sp->st_election_lsn.offset);
+		dl("Number of sites expected to participate in elections.\n",
+		    (u_long)sp->st_election_nsites);
+		dl("Election priority.\n", (u_long)sp->st_election_priority);
+		dl("Election tiebreaker value.\n",
+		    (u_long)sp->st_election_tiebreaker);
+		dl("Votes received this election round.\n",
+		    (u_long)sp->st_election_votes);
+	}
+
+	free(sp);
+
+	return (0);
+}
+
+/*
+ * txn_stats --
+ *	Display transaction statistics.
+ *
+ * dbenv: open environment handle.
+ * flags: flag value handed to DB_ENV->txn_stat.
+ *
+ * Returns 0 on success, 1 on error; frees the statistics buffer.
+ */
+int
+txn_stats(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	DB_TXN_STAT *sp;
+	u_int32_t i;
+	int ret;
+	const char *p;
+
+	if ((ret = dbenv->txn_stat(dbenv, &sp, flags)) != 0) {
+		dbenv->err(dbenv, ret, NULL);
+		return (1);
+	}
+
+	/* A zero file number means no checkpoint has been taken yet. */
+	p = sp->st_last_ckp.file == 0 ?
+	    "No checkpoint LSN." : "File/offset for last checkpoint LSN.";
+	printf("%lu/%lu\t%s\n",
+	    (u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, p);
+	if (sp->st_time_ckp == 0)
+		printf("0\tNo checkpoint timestamp.\n");
+	else
+		/* %.24s drops the trailing newline ctime() appends. */
+		printf("%.24s\tCheckpoint timestamp.\n",
+		    ctime(&sp->st_time_ckp));
+	printf("%lx\tLast transaction ID allocated.\n",
+	    (u_long)sp->st_last_txnid);
+	dl("Maximum number of active transactions possible.\n",
+	    (u_long)sp->st_maxtxns);
+	dl("Active transactions.\n", (u_long)sp->st_nactive);
+	dl("Maximum active transactions.\n", (u_long)sp->st_maxnactive);
+	dl("Number of transactions begun.\n", (u_long)sp->st_nbegins);
+	dl("Number of transactions aborted.\n", (u_long)sp->st_naborts);
+	dl("Number of transactions committed.\n", (u_long)sp->st_ncommits);
+	dl("Number of transactions restored.\n", (u_long)sp->st_nrestores);
+
+	dl_bytes("Transaction region size",
+	    (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+	dl("The number of region locks granted after waiting.\n",
+	    (u_long)sp->st_region_wait);
+	dl("The number of region locks granted without waiting.\n",
+	    (u_long)sp->st_region_nowait);
+
+	/* List the active transactions in ascending transaction-ID order. */
+	qsort(sp->st_txnarray,
+	    sp->st_nactive, sizeof(sp->st_txnarray[0]), txn_compare);
+	for (i = 0; i < sp->st_nactive; ++i) {
+		printf("\tid: %lx; begin LSN: file/offset %lu/%lu",
+		    (u_long)sp->st_txnarray[i].txnid,
+		    (u_long)sp->st_txnarray[i].lsn.file,
+		    (u_long)sp->st_txnarray[i].lsn.offset);
+		if (sp->st_txnarray[i].parentid == 0)
+			printf("\n");
+		else
+			printf(" parent: %lx\n",
+			    (u_long)sp->st_txnarray[i].parentid);
+	}
+
+	free(sp);
+
+	return (0);
+}
+
+/*
+ * txn_compare --
+ *	qsort comparator ordering active transactions by ascending
+ *	transaction ID.
+ */
+int
+txn_compare(a1, b1)
+	const void *a1, *b1;
+{
+	u_int32_t ta, tb;
+
+	ta = ((const DB_TXN_ACTIVE *)a1)->txnid;
+	tb = ((const DB_TXN_ACTIVE *)b1)->txnid;
+
+	return (ta < tb ? -1 : (ta > tb ? 1 : 0));
+}
+
+/*
+ * dl --
+ *	Display a big value: values under 10 million are printed
+ *	verbatim, larger ones abbreviated as millions with an "M"
+ *	suffix.  The caller's msg supplies any trailing newline.
+ */
+void
+dl(msg, value)
+	const char *msg;
+	u_long value;
+{
+	if (value >= 10000000)
+		printf("%luM\t%s", value / 1000000, msg);
+	else
+		printf("%lu\t%s", value, msg);
+}
+
+/*
+ * dl_bytes --
+ *	Display a byte total given as separate gigabyte, megabyte and
+ *	byte counts, printed as "NGB NMB NKB NB" with zero components
+ *	omitted, followed by a tab and the message.
+ */
+void
+dl_bytes(msg, gbytes, mbytes, bytes)
+	const char *msg;
+	u_long gbytes, mbytes, bytes;
+{
+	const char *sep;
+
+	/*
+	 * Normalize the values: carry whole megabytes out of the byte
+	 * count, then whole gigabytes out of the megabyte count.  Use
+	 * division rather than repeated subtraction so the cost does
+	 * not depend on the magnitude of the counts.
+	 */
+	mbytes += bytes / MEGABYTE;
+	bytes %= MEGABYTE;
+	gbytes += mbytes / (GIGABYTE / MEGABYTE);
+	mbytes %= GIGABYTE / MEGABYTE;
+
+	sep = "";
+	if (gbytes > 0) {
+		printf("%luGB", gbytes);
+		sep = " ";
+	}
+	if (mbytes > 0) {
+		printf("%s%luMB", sep, mbytes);
+		sep = " ";
+	}
+	if (bytes >= 1024) {
+		printf("%s%luKB", sep, bytes / 1024);
+		bytes %= 1024;
+		sep = " ";
+	}
+	if (bytes > 0)
+		printf("%s%luB", sep, bytes);
+
+	printf("\t%s.\n", msg);
+}
+
+/*
+ * prflags --
+ *	Print the names of the flag bits set in "flags", as a
+ *	comma-separated list on a single "Flags:" line.
+ */
+void
+prflags(flags, fnp)
+	u_int32_t flags;
+	const FN *fnp;
+{
+	const char *sep;
+
+	sep = "\t";
+	printf("Flags:");
+	while (fnp->mask != 0) {
+		if (flags & fnp->mask) {
+			printf("%s%s", sep, fnp->name);
+			sep = ", ";
+		}
+		++fnp;
+	}
+	printf("\n");
+}
+
+/*
+ * db_init --
+ *	Initialize the environment: join an existing one if possible,
+ *	otherwise (for database or log statistics only) create a
+ *	private, in-memory one.
+ *
+ * dbenv: environment handle to open.
+ * home: environment home directory, or NULL.
+ * ttype: statistics subject requested (T_DB, T_LOG, T_ENV, ...).
+ * cache: cache size in bytes for a private environment.
+ * is_private: output -- set to 1 when a private environment is used.
+ *
+ * Returns 0 on success, 1 on failure (error already reported).
+ */
+int
+db_init(dbenv, home, ttype, cache, is_private)
+	DB_ENV *dbenv;
+	char *home;
+	test_t ttype;
+	u_int32_t cache;
+	int *is_private;
+{
+	u_int32_t oflags;
+	int ret;
+
+	/*
+	 * If our environment open fails, and we're trying to look at a
+	 * shared region, it's a hard failure.
+	 *
+	 * We will probably just drop core if the environment we join does
+	 * not include a memory pool.  This is probably acceptable; trying
+	 * to use an existing environment that does not contain a memory
+	 * pool to look at a database can be safely construed as operator
+	 * error, I think.
+	 */
+	*is_private = 0;
+	if ((ret =
+	    dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0)
+		return (0);
+	if (ttype != T_DB && ttype != T_LOG) {
+		dbenv->err(dbenv, ret, "DB_ENV->open%s%s",
+		    home == NULL ? "" : ": ", home == NULL ? "" : home);
+		return (1);
+	}
+
+	/*
+	 * We're looking at a database or set of log files and no environment
+	 * exists.  Create one, but make it private so no files are actually
+	 * created.  Declare a reasonably large cache so that we don't fail
+	 * when reporting statistics on large databases.
+	 *
+	 * An environment is required to look at databases because we may be
+	 * trying to look at databases in directories other than the current
+	 * one.
+	 */
+	if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+		dbenv->err(dbenv, ret, "set_cachesize");
+		return (1);
+	}
+	*is_private = 1;
+	/* Only initialize the subsystem the request actually needs. */
+	oflags = DB_CREATE | DB_PRIVATE | DB_USE_ENVIRON;
+	if (ttype == T_DB)
+		oflags |= DB_INIT_MPOOL;
+	if (ttype == T_LOG)
+		oflags |= DB_INIT_LOG;
+	if ((ret = dbenv->open(dbenv, home, oflags, 0)) == 0)
+		return (0);
+
+	/* An environment is required. */
+	dbenv->err(dbenv, ret, "open");
+	return (1);
+}
+
+/*
+ * argcheck --
+ *	Return 1 if every character of "arg" appears in the set of
+ *	permitted option characters "ok_args", 0 otherwise.
+ */
+int
+argcheck(arg, ok_args)
+	char *arg;
+	const char *ok_args;
+{
+	while (*arg != '\0')
+		if (strchr(ok_args, *arg++) == NULL)
+			return (0);
+	return (1);
+}
+
+int
+usage()
+{
+	/* Print the db_stat usage summary and return failure for main(). */
+	fprintf(stderr, "%s\n\t%s\n",
+	    "usage: db_stat [-celmNrtVZ] [-C Aclmop]",
+	    "[-d file [-f] [-s database]] [-h home] [-M Ahlm] [-P password]");
+	return (EXIT_FAILURE);
+}
+
+/*
+ * version_check --
+ *	Fail unless the linked DB library exactly matches the headers
+ *	we were compiled against; returns 0 on match, EXIT_FAILURE on
+ *	mismatch (with a diagnostic on stderr).
+ */
+int
+version_check(progname)
+	const char *progname;
+{
+	int v_major, v_minor, v_patch;
+
+	/* Make sure we're loaded with the right version of the DB library. */
+	(void)db_version(&v_major, &v_minor, &v_patch);
+	if (v_major != DB_VERSION_MAJOR ||
+	    v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+		fprintf(stderr,
+	"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+		    progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+		    DB_VERSION_PATCH, v_major, v_minor, v_patch);
+		return (EXIT_FAILURE);
+	}
+	return (0);
+}
diff --git a/storage/bdb/db_upgrade/db_upgrade.c b/storage/bdb/db_upgrade/db_upgrade.c
new file mode 100644
index 00000000000..f46b5eabc4e
--- /dev/null
+++ b/storage/bdb/db_upgrade/db_upgrade.c
@@ -0,0 +1,190 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_upgrade.c,v 1.31 2002/03/28 20:13:47 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+/*
+ * main --
+ *	db_upgrade entry point: parse options, open (or create a
+ *	private) environment, and upgrade each named database file in
+ *	place.  Returns EXIT_SUCCESS or EXIT_FAILURE.
+ */
+int
+main(argc, argv)
+	int argc;
+	char *argv[];
+{
+	extern char *optarg;
+	extern int optind;
+	const char *progname = "db_upgrade";
+	DB *dbp;
+	DB_ENV *dbenv;
+	u_int32_t flags;
+	int ch, e_close, exitval, nflag, ret, t_ret;
+	char *home, *passwd;
+
+	if ((ret = version_check(progname)) != 0)
+		return (ret);
+
+	dbenv = NULL;
+	flags = nflag = 0;
+	e_close = exitval = 0;
+	home = passwd = NULL;
+	while ((ch = getopt(argc, argv, "h:NP:sV")) != EOF)
+		switch (ch) {
+		case 'h':
+			home = optarg;
+			break;
+		case 'N':
+			nflag = 1;
+			break;
+		case 'P':
+			/* Copy the password, then wipe it from the arglist. */
+			passwd = strdup(optarg);
+			memset(optarg, 0, strlen(optarg));
+			if (passwd == NULL) {
+				fprintf(stderr, "%s: strdup: %s\n",
+				    progname, strerror(errno));
+				return (EXIT_FAILURE);
+			}
+			break;
+		case 's':
+			/* -s: upgrade databases with sorted duplicates. */
+			LF_SET(DB_DUPSORT);
+			break;
+		case 'V':
+			printf("%s\n", db_version(NULL, NULL, NULL));
+			return (EXIT_SUCCESS);
+		case '?':
+		default:
+			return (usage());
+		}
+	argc -= optind;
+	argv += optind;
+
+	if (argc <= 0)
+		return (usage());
+
+	/* Handle possible interruptions. */
+	__db_util_siginit();
+
+	/*
+	 * Create an environment object and initialize it for error
+	 * reporting.
+	 */
+	if ((ret = db_env_create(&dbenv, 0)) != 0) {
+		fprintf(stderr, "%s: db_env_create: %s\n",
+		    progname, db_strerror(ret));
+		goto shutdown;
+	}
+	e_close = 1;
+
+	dbenv->set_errfile(dbenv, stderr);
+	dbenv->set_errpfx(dbenv, progname);
+
+	if (nflag) {
+		if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+			dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+			goto shutdown;
+		}
+		if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+			dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+			goto shutdown;
+		}
+	}
+
+	if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+	    passwd, DB_ENCRYPT_AES)) != 0) {
+		dbenv->err(dbenv, ret, "set_passwd");
+		goto shutdown;
+	}
+
+	/*
+	 * If attaching to a pre-existing environment fails, create a
+	 * private one and try again.
+	 */
+	if ((ret = dbenv->open(dbenv,
+	    home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+	    (ret = dbenv->open(dbenv, home,
+	    DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+		dbenv->err(dbenv, ret, "open");
+		goto shutdown;
+	}
+
+	/* Upgrade each file on the command line, stopping on error. */
+	for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+		if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+			fprintf(stderr,
+			    "%s: db_create: %s\n", progname, db_strerror(ret));
+			goto shutdown;
+		}
+		dbp->set_errfile(dbp, stderr);
+		dbp->set_errpfx(dbp, progname);
+		if ((ret = dbp->upgrade(dbp, argv[0], flags)) != 0)
+			dbp->err(dbp, ret, "DB->upgrade: %s", argv[0]);
+		/*
+		 * Report t_ret, not ret: ret is known to be 0 in this
+		 * branch, so the original code logged error code 0 for
+		 * DB->close failures.
+		 */
+		if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+			dbenv->err(dbenv, t_ret, "DB->close: %s", argv[0]);
+			ret = t_ret;
+		}
+		if (ret != 0)
+			goto shutdown;
+	}
+
+	if (0) {
+shutdown:	exitval = 1;
+	}
+	if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+		exitval = 1;
+		fprintf(stderr,
+		    "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+	}
+
+	/* Resend any caught signal. */
+	__db_util_sigresend();
+
+	return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+	/* Print the db_upgrade usage summary; return failure for main(). */
+	fprintf(stderr, "%s\n",
+	    "usage: db_upgrade [-NsV] [-h home] [-P password] db_file ...");
+	return (EXIT_FAILURE);
+}
+
+/*
+ * version_check --
+ *	Fail unless the linked DB library exactly matches the headers
+ *	we were compiled against; returns 0 on match, EXIT_FAILURE on
+ *	mismatch (with a diagnostic on stderr).
+ */
+int
+version_check(progname)
+	const char *progname;
+{
+	int v_major, v_minor, v_patch;
+
+	/* Make sure we're loaded with the right version of the DB library. */
+	(void)db_version(&v_major, &v_minor, &v_patch);
+	if (v_major != DB_VERSION_MAJOR ||
+	    v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+		fprintf(stderr,
+	"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+		    progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+		    DB_VERSION_PATCH, v_major, v_minor, v_patch);
+		return (EXIT_FAILURE);
+	}
+	return (0);
+}
diff --git a/storage/bdb/db_verify/db_verify.c b/storage/bdb/db_verify/db_verify.c
new file mode 100644
index 00000000000..8d63a20e7bc
--- /dev/null
+++ b/storage/bdb/db_verify/db_verify.c
@@ -0,0 +1,248 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_verify.c,v 1.38 2002/08/08 03:51:38 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ const char *progname = "db_verify";
+ DB *dbp, *dbp1;
+ DB_ENV *dbenv;
+ u_int32_t cache;
+ int ch, d_close, e_close, exitval, nflag, oflag, private;
+ int quiet, resize, ret, t_ret;
+ char *home, *passwd;
+
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
+
+ dbenv = NULL;
+ cache = MEGABYTE;
+ d_close = e_close = exitval = nflag = oflag = quiet = 0;
+ home = passwd = NULL;
+ while ((ch = getopt(argc, argv, "h:NoP:qV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'o':
+ oflag = 1;
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ return (usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ if (!quiet) {
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ }
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /*
+ * Attach to an mpool if it exists, but if that fails, attach to a
+ * private region. In the latter case, declare a reasonably large
+ * cache so that we don't fail when verifying large databases.
+ */
+ private = 0;
+ if ((ret =
+ dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0) {
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ goto shutdown;
+ }
+ private = 1;
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "%s: db_create", progname);
+ goto shutdown;
+ }
+ d_close = 1;
+
+ /*
+ * We create a 2nd dbp to this database to get its pagesize
+ * because the dbp we're using for verify cannot be opened.
+ */
+ if (private) {
+ if ((ret = db_create(&dbp1, dbenv, 0)) != 0) {
+ dbenv->err(
+ dbenv, ret, "%s: db_create", progname);
+ goto shutdown;
+ }
+
+ if ((ret = dbp1->open(dbp1, NULL,
+ argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: %s", argv[0]);
+ (void)dbp1->close(dbp1, 0);
+ goto shutdown;
+ }
+ /*
+ * If we get here, we can check the cache/page.
+ * !!!
+ * If we have to retry with an env with a larger
+ * cache, we jump out of this loop. However, we
+ * will still be working on the same argv when we
+ * get back into the for-loop.
+ */
+ ret = __db_util_cache(dbenv, dbp1, &cache, &resize);
+ (void)dbp1->close(dbp1, 0);
+ if (ret != 0)
+ goto shutdown;
+
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+ if ((ret = dbp->verify(dbp,
+ argv[0], NULL, NULL, oflag ? DB_NOORDERCHK : 0)) != 0)
+ dbp->err(dbp, ret, "DB->verify: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+ dbenv->err(dbenv, ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ d_close = 0;
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_verify [-NoqV] [-h home] [-P password] db_file ...");
+ return (EXIT_FAILURE);
+}
+
+int
+version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/storage/bdb/dbinc/btree.h b/storage/bdb/dbinc/btree.h
new file mode 100644
index 00000000000..54da9c5b208
--- /dev/null
+++ b/storage/bdb/dbinc/btree.h
@@ -0,0 +1,320 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995, 1996
+ * Keith Bostic. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Olson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: btree.h,v 11.45 2002/08/06 06:11:21 bostic Exp $
+ */
+#ifndef _DB_BTREE_H_
+#define _DB_BTREE_H_
+
+/* Forward structure declarations. */
+struct __btree; typedef struct __btree BTREE;
+struct __cursor; typedef struct __cursor BTREE_CURSOR;
+struct __epg; typedef struct __epg EPG;
+struct __recno; typedef struct __recno RECNO;
+
+#define DEFMINKEYPAGE (2)
+
+/*
+ * A recno order of 0 indicates that we don't have an order, not that we've
+ * an order less than 1.
+ */
+#define INVALID_ORDER 0
+
+#define ISINTERNAL(p) (TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO)
+#define ISLEAF(p) (TYPE(p) == P_LBTREE || \
+ TYPE(p) == P_LRECNO || TYPE(p) == P_LDUP)
+
+/* Flags for __bam_cadjust_log(). */
+#define CAD_UPDATEROOT 0x01 /* Root page count was updated. */
+
+/* Flags for __bam_split_log(). */
+#define SPL_NRECS 0x01 /* Split tree has record count. */
+
+/* Flags for __bam_iitem(). */
+#define BI_DELETED 0x01 /* Key/data pair only placeholder. */
+
+/* Flags for __bam_stkrel(). */
+#define STK_CLRDBC 0x01 /* Clear dbc->page reference. */
+#define STK_NOLOCK 0x02 /* Don't retain locks. */
+
+/* Flags for __ram_ca(). These get logged, so make the values explicit. */
+typedef enum {
+ CA_DELETE = 0, /* Delete the current record. */
+	CA_IAFTER = 1,			/* Insert after the current record. */
+	CA_IBEFORE = 2,			/* Insert before the current record. */
+ CA_ICURRENT = 3 /* Overwrite the current record. */
+} ca_recno_arg;
+
+/*
+ * Flags for __bam_search() and __bam_rsearch().
+ *
+ * Note, internal page searches must find the largest record less than key in
+ * the tree so that descents work. Leaf page searches must find the smallest
+ * record greater than key so that the returned index is the record's correct
+ * position for insertion.
+ *
+ * The flags parameter to the search routines describes three aspects of the
+ * search: the type of locking required (including if we're locking a pair of
+ * pages), the item to return in the presence of duplicates and whether or not
+ * to return deleted entries. To simplify both the mnemonic representation
+ * and the code that checks for various cases, we construct a set of bitmasks.
+ */
+#define S_READ 0x00001 /* Read locks. */
+#define S_WRITE 0x00002 /* Write locks. */
+
+#define S_APPEND 0x00040 /* Append to the tree. */
+#define S_DELNO 0x00080 /* Don't return deleted items. */
+#define S_DUPFIRST 0x00100 /* Return first duplicate. */
+#define S_DUPLAST 0x00200 /* Return last duplicate. */
+#define S_EXACT 0x00400 /* Exact items only. */
+#define S_PARENT 0x00800 /* Lock page pair. */
+#define S_STACK 0x01000 /* Need a complete stack. */
+#define S_PAST_EOF 0x02000 /* If doing insert search (or keyfirst
+ * or keylast operations), or a split
+ * on behalf of an insert, it's okay to
+ * return an entry one past end-of-page.
+ */
+#define S_STK_ONLY 0x04000 /* Just return info in the stack */
+
+#define S_DELETE (S_WRITE | S_DUPFIRST | S_DELNO | S_EXACT | S_STACK)
+#define S_FIND (S_READ | S_DUPFIRST | S_DELNO)
+#define S_FIND_WR (S_WRITE | S_DUPFIRST | S_DELNO)
+#define S_INSERT (S_WRITE | S_DUPLAST | S_PAST_EOF | S_STACK)
+#define S_KEYFIRST (S_WRITE | S_DUPFIRST | S_PAST_EOF | S_STACK)
+#define S_KEYLAST (S_WRITE | S_DUPLAST | S_PAST_EOF | S_STACK)
+#define S_WRPAIR (S_WRITE | S_DUPLAST | S_PAST_EOF | S_PARENT)
+
+/*
+ * Various routines pass around page references. A page reference is
+ * a pointer to the page, and the indx indicates an item on the page.
+ * Each page reference may include a lock.
+ */
+struct __epg {
+ PAGE *page; /* The page. */
+ db_indx_t indx; /* The index on the page. */
+ db_indx_t entries; /* The number of entries on page */
+ DB_LOCK lock; /* The page's lock. */
+ db_lockmode_t lock_mode; /* The lock mode. */
+};
+
+/*
+ * We maintain a stack of the pages that we're locking in the tree. Grow
+ * the stack as necessary.
+ *
+ * XXX
+ * Temporary fix for #3243 -- clear the page and lock from the stack entry.
+ * The correct fix is to never release a stack that doesn't hold items.
+ */
+#define BT_STK_CLR(c) do { \
+ (c)->csp = (c)->sp; \
+ (c)->csp->page = NULL; \
+ LOCK_INIT((c)->csp->lock); \
+} while (0)
+
+#define BT_STK_ENTER(dbenv, c, pagep, page_indx, l, mode, ret) do { \
+ if ((ret = \
+ (c)->csp == (c)->esp ? __bam_stkgrow(dbenv, c) : 0) == 0) { \
+ (c)->csp->page = pagep; \
+ (c)->csp->indx = page_indx; \
+ (c)->csp->entries = NUM_ENT(pagep); \
+ (c)->csp->lock = l; \
+ (c)->csp->lock_mode = mode; \
+ } \
+} while (0)
+
+#define BT_STK_PUSH(dbenv, c, pagep, page_indx, lock, mode, ret) do { \
+ BT_STK_ENTER(dbenv, c, pagep, page_indx, lock, mode, ret); \
+ ++(c)->csp; \
+} while (0)
+
+#define BT_STK_NUM(dbenv, c, pagep, page_indx, ret) do { \
+ if ((ret = \
+ (c)->csp == (c)->esp ? __bam_stkgrow(dbenv, c) : 0) == 0) { \
+ (c)->csp->page = NULL; \
+ (c)->csp->indx = page_indx; \
+ (c)->csp->entries = NUM_ENT(pagep); \
+ LOCK_INIT((c)->csp->lock); \
+ (c)->csp->lock_mode = DB_LOCK_NG; \
+ } \
+} while (0)
+
+#define BT_STK_NUMPUSH(dbenv, c, pagep, page_indx, ret) do { \
+ BT_STK_NUM(dbenv, cp, pagep, page_indx, ret); \
+ ++(c)->csp; \
+} while (0)
+
+#define BT_STK_POP(c) \
+ ((c)->csp == (c)->sp ? NULL : --(c)->csp)
+
+/* Btree/Recno cursor. */
+struct __cursor {
+ /* struct __dbc_internal */
+ __DBC_INTERNAL
+
+ /* btree private part */
+ EPG *sp; /* Stack pointer. */
+ EPG *csp; /* Current stack entry. */
+ EPG *esp; /* End stack pointer. */
+ EPG stack[5];
+
+ db_indx_t ovflsize; /* Maximum key/data on-page size. */
+
+ db_recno_t recno; /* Current record number. */
+ u_int32_t order; /* Relative order among deleted curs. */
+
+ /*
+ * Btree:
+ * We set a flag in the cursor structure if the underlying object has
+ * been deleted. It's not strictly necessary, we could get the same
+ * information by looking at the page itself, but this method doesn't
+ * require us to retrieve the page on cursor delete.
+ *
+ * Recno:
+ * When renumbering recno databases during deletes, cursors referencing
+ * "deleted" records end up positioned between two records, and so must
+ * be specially adjusted on the next operation.
+ */
+#define C_DELETED 0x0001 /* Record was deleted. */
+ /*
+ * There are three tree types that require maintaining record numbers.
+ * Recno AM trees, Btree AM trees for which the DB_RECNUM flag was set,
+ * and Btree off-page duplicate trees.
+ */
+#define C_RECNUM 0x0002 /* Tree requires record counts. */
+ /*
+ * Recno trees have immutable record numbers by default, but optionally
+ * support mutable record numbers. Off-page duplicate Recno trees have
+ * mutable record numbers. All Btrees with record numbers (including
+ * off-page duplicate trees) are mutable by design, no flag is needed.
+ */
+#define C_RENUMBER 0x0004 /* Tree records are mutable. */
+ u_int32_t flags;
+};
+
+/*
+ * Threshold value, as a function of bt_minkey, of the number of
+ * bytes a key/data pair can use before being placed on an overflow
+ * page. Assume every item requires the maximum alignment for
+ * padding, out of sheer paranoia.
+ */
+#define B_MINKEY_TO_OVFLSIZE(dbp, minkey, pgsize) \
+ ((u_int16_t)(((pgsize) - P_OVERHEAD(dbp)) / ((minkey) * P_INDX) -\
+ (BKEYDATA_PSIZE(0) + ALIGN(1, sizeof(int32_t)))))
+
+/*
+ * The maximum space that a single item can ever take up on one page.
+ * Used by __bam_split to determine whether a split is still necessary.
+ */
+#define B_MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define B_MAXSIZEONPAGE(ovflsize) \
+ (B_MAX(BOVERFLOW_PSIZE, BKEYDATA_PSIZE(ovflsize)))
+
+/*
+ * The in-memory, per-tree btree/recno data structure.
+ */
+struct __btree { /* Btree access method. */
+ /*
+ * !!!
+ * These fields are write-once (when the structure is created) and
+ * so are ignored as far as multi-threading is concerned.
+ */
+ db_pgno_t bt_meta; /* Database meta-data page. */
+ db_pgno_t bt_root; /* Database root page. */
+
+ u_int32_t bt_maxkey; /* Maximum keys per page. */
+ u_int32_t bt_minkey; /* Minimum keys per page. */
+
+ /* Btree comparison function. */
+ int (*bt_compare) __P((DB *, const DBT *, const DBT *));
+ /* Btree prefix function. */
+ size_t (*bt_prefix) __P((DB *, const DBT *, const DBT *));
+
+ /* Recno access method. */
+ int re_pad; /* Fixed-length padding byte. */
+ int re_delim; /* Variable-length delimiting byte. */
+ u_int32_t re_len; /* Length for fixed-length records. */
+ char *re_source; /* Source file name. */
+
+ /*
+ * !!!
+ * The bt_lpgno field is NOT protected by any mutex, and for this
+ * reason must be advisory only, so, while it is read/written by
+ * multiple threads, DB is completely indifferent to the quality
+ * of its information.
+ */
+ db_pgno_t bt_lpgno; /* Last insert location. */
+
+ /*
+ * !!!
+ * The re_modified field is NOT protected by any mutex, and for this
+ * reason cannot be anything more complicated than a zero/non-zero
+ * value. The actual writing of the backing source file cannot be
+ * threaded, so clearing the flag isn't a problem.
+ */
+ int re_modified; /* If the tree was modified. */
+
+ /*
+ * !!!
+ * These fields are ignored as far as multi-threading is concerned.
+ * There are no transaction semantics associated with backing files,
+ * nor is there any thread protection.
+ */
+ FILE *re_fp; /* Source file handle. */
+ int re_eof; /* Backing source file EOF reached. */
+ db_recno_t re_last; /* Last record number read. */
+};
+
+/*
+ * Modes for the __bam_curadj recovery records (btree_curadj).
+ * These appear in log records, so we wire the values and
+ * do not leave it up to the compiler.
+ */
+typedef enum {
+ DB_CA_DI = 1,
+ DB_CA_DUP = 2,
+ DB_CA_RSPLIT = 3,
+ DB_CA_SPLIT = 4
+} db_ca_mode;
+
+#include "dbinc_auto/btree_auto.h"
+#include "dbinc_auto/btree_ext.h"
+#include "dbinc/db_am.h"
+#endif /* !_DB_BTREE_H_ */
diff --git a/storage/bdb/dbinc/crypto.h b/storage/bdb/dbinc/crypto.h
new file mode 100644
index 00000000000..92fad098a4a
--- /dev/null
+++ b/storage/bdb/dbinc/crypto.h
@@ -0,0 +1,78 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: crypto.h,v 1.9 2002/08/06 06:37:07 bostic Exp $
+ */
+
+#ifndef _DB_CRYPTO_H_
+#define _DB_CRYPTO_H_
+
+/*
+ * !!!
+ * These are the internal representations of the algorithm flags.
+ * They are used in both the DB_CIPHER structure and the CIPHER
+ * structure so we can tell if users specified both passwd and alg
+ * correctly.
+ *
+ * CIPHER_ANY is used when an app joins an existing env but doesn't
+ * know the algorithm originally used. This is only valid in the
+ * DB_CIPHER structure until we open and can set the alg.
+ */
+/*
+ * We store the algorithm in an 8-bit field on the meta-page. So we
+ * use a numeric value, not bit fields.
+ * With bit fields we would be limited to only 8 algorithms before
+ * needing numeric values anyway.  That should be plenty.  It is okay for the
+ * CIPHER_ANY flag to go beyond that since that is never stored on disk.
+ */
+
+/*
+ * This structure is per-process, not in shared memory.
+ */
+struct __db_cipher {
+ int (*adj_size) __P((size_t));
+ int (*close) __P((DB_ENV *, void *));
+ int (*decrypt) __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+ int (*encrypt) __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+ int (*init) __P((DB_ENV *, DB_CIPHER *));
+
+ u_int8_t mac_key[DB_MAC_KEY]; /* MAC key. */
+ void *data; /* Algorithm-specific information */
+
+#define CIPHER_AES 1 /* AES algorithm */
+ u_int8_t alg; /* Algorithm used - See above */
+ u_int8_t spare[3]; /* Spares */
+
+#define CIPHER_ANY 0x00000001 /* Only for DB_CIPHER */
+ u_int32_t flags; /* Other flags */
+};
+
+#ifdef HAVE_CRYPTO
+
+#include "crypto/rijndael/rijndael-api-fst.h"
+
+/*
+ * Shared ciphering structure
+ * No DB_MUTEX needed because all information is read-only after creation.
+ */
+typedef struct __cipher {
+ roff_t passwd; /* Offset to shared passwd */
+ size_t passwd_len; /* Length of passwd */
+ u_int32_t flags; /* Algorithm used - see above */
+} CIPHER;
+
+#define DB_AES_KEYLEN 128 /* AES key length */
+#define DB_AES_CHUNK 16 /* AES byte unit size */
+
+typedef struct __aes_cipher {
+ keyInstance decrypt_ki; /* Decryption key instance */
+ keyInstance encrypt_ki; /* Encryption key instance */
+ u_int32_t flags; /* AES-specific flags */
+} AES_CIPHER;
+
+#include "dbinc_auto/crypto_ext.h"
+#endif /* HAVE_CRYPTO */
+#endif /* !_DB_CRYPTO_H_ */
diff --git a/storage/bdb/dbinc/cxx_common.h b/storage/bdb/dbinc/cxx_common.h
new file mode 100644
index 00000000000..e5cb3a9aef4
--- /dev/null
+++ b/storage/bdb/dbinc/cxx_common.h
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: cxx_common.h,v 11.2 2002/01/11 15:52:23 bostic Exp $
+ */
+
+#ifndef _CXX_COMMON_H_
+#define _CXX_COMMON_H_
+
+//
+// Common definitions used by all of Berkeley DB's C++ include files.
+//
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Mechanisms for declaring classes
+//
+
+//
+// Every class defined in this file has an _exported next to the class name.
+// This is needed for WinTel machines so that the class methods can
+// be exported or imported in a DLL as appropriate. Users of the DLL
+// use the define DB_USE_DLL. When the DLL is built, DB_CREATE_DLL
+// must be defined.
+//
+#if defined(_MSC_VER)
+
+# if defined(DB_CREATE_DLL)
+# define _exported __declspec(dllexport) // creator of dll
+# elif defined(DB_USE_DLL)
+# define _exported __declspec(dllimport) // user of dll
+# else
+# define _exported // static lib creator or user
+# endif
+
+#else /* _MSC_VER */
+
+# define _exported
+
+#endif /* _MSC_VER */
+#endif /* !_CXX_COMMON_H_ */
diff --git a/storage/bdb/dbinc/cxx_except.h b/storage/bdb/dbinc/cxx_except.h
new file mode 100644
index 00000000000..f9bf4f859f8
--- /dev/null
+++ b/storage/bdb/dbinc/cxx_except.h
@@ -0,0 +1,141 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: cxx_except.h,v 11.5 2002/08/01 23:32:34 mjc Exp $
+ */
+
+#ifndef _CXX_EXCEPT_H_
+#define _CXX_EXCEPT_H_
+
+#include "cxx_common.h"
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Forward declarations
+//
+
+class DbDeadlockException; // forward
+class DbException; // forward
+class DbLockNotGrantedException; // forward
+class DbLock; // forward
+class DbMemoryException; // forward
+class DbRunRecoveryException; // forward
+class Dbt; // forward
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Exception classes
+//
+
+// Almost any error in the DB library throws a DbException.
+// Every exception should be considered an abnormality
+// (e.g. bug, misuse of DB, file system error).
+//
+// NOTE: We would like to inherit from class exception and
+// let it handle what(), but there are
+// MSVC++ problems when <exception> is included.
+//
+class _exported DbException
+{
+public:
+ virtual ~DbException();
+ DbException(int err);
+ DbException(const char *description);
+ DbException(const char *prefix, int err);
+ DbException(const char *prefix1, const char *prefix2, int err);
+ int get_errno() const;
+ virtual const char *what() const;
+
+ DbException(const DbException &);
+ DbException &operator = (const DbException &);
+
+private:
+ char *what_;
+ int err_; // errno
+};
+
+//
+// A specific sort of exception that occurs when
+// an operation is aborted to resolve a deadlock.
+//
+class _exported DbDeadlockException : public DbException
+{
+public:
+ virtual ~DbDeadlockException();
+ DbDeadlockException(const char *description);
+
+ DbDeadlockException(const DbDeadlockException &);
+ DbDeadlockException &operator = (const DbDeadlockException &);
+};
+
+//
+// A specific sort of exception that occurs when
+// a lock is not granted, e.g. by lock_get or lock_vec.
+// Note that the Dbt is only live as long as the Dbt used
+// in the offending call.
+//
+class _exported DbLockNotGrantedException : public DbException
+{
+public:
+ virtual ~DbLockNotGrantedException();
+ DbLockNotGrantedException(const char *prefix, db_lockop_t op,
+ db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index);
+ DbLockNotGrantedException(const DbLockNotGrantedException &);
+ DbLockNotGrantedException &operator =
+ (const DbLockNotGrantedException &);
+
+ db_lockop_t get_op() const;
+ db_lockmode_t get_mode() const;
+ const Dbt* get_obj() const;
+ DbLock *get_lock() const;
+ int get_index() const;
+
+private:
+ db_lockop_t op_;
+ db_lockmode_t mode_;
+ const Dbt *obj_;
+ DbLock *lock_;
+ int index_;
+};
+
+//
+// A specific sort of exception that occurs when
+// user declared memory is insufficient in a Dbt.
+//
+class _exported DbMemoryException : public DbException
+{
+public:
+ virtual ~DbMemoryException();
+ DbMemoryException(Dbt *dbt);
+ DbMemoryException(const char *description);
+ DbMemoryException(const char *prefix, Dbt *dbt);
+ DbMemoryException(const char *prefix1, const char *prefix2, Dbt *dbt);
+ Dbt *get_dbt() const;
+
+ DbMemoryException(const DbMemoryException &);
+ DbMemoryException &operator = (const DbMemoryException &);
+
+private:
+ Dbt *dbt_;
+};
+
+//
+// A specific sort of exception that occurs when
+// recovery is required before continuing DB activity.
+//
+class _exported DbRunRecoveryException : public DbException
+{
+public:
+ virtual ~DbRunRecoveryException();
+ DbRunRecoveryException(const char *description);
+
+ DbRunRecoveryException(const DbRunRecoveryException &);
+ DbRunRecoveryException &operator = (const DbRunRecoveryException &);
+};
+
+#endif /* !_CXX_EXCEPT_H_ */
diff --git a/storage/bdb/dbinc/cxx_int.h b/storage/bdb/dbinc/cxx_int.h
new file mode 100644
index 00000000000..9af3979d9f1
--- /dev/null
+++ b/storage/bdb/dbinc/cxx_int.h
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: cxx_int.h,v 11.20 2002/01/11 15:52:23 bostic Exp $
+ */
+
+#ifndef _CXX_INT_H_
+#define _CXX_INT_H_
+
+// private data structures known to the implementation only
+
+//
+// Using FooImp classes will allow the implementation to change in the
+// future without any modification to user code or even to header files
+// that the user includes. FooImp * is just like void * except that it
+// provides a little extra protection, since you cannot randomly assign
+// any old pointer to a FooImp* as you can with void *. Currently, a
+// pointer to such an opaque class is always just a pointer to the
+// appropriate underlying implementation struct. These are converted
+// back and forth using the various overloaded wrap()/unwrap() methods.
+// This is essentially a use of the "Bridge" Design Pattern.
+//
+// WRAPPED_CLASS implements the appropriate wrap() and unwrap() methods
+// for a wrapper class that has an underlying pointer representation.
+//
+#define WRAPPED_CLASS(_WRAPPER_CLASS, _IMP_CLASS, _WRAPPED_TYPE) \
+ \
+ class _IMP_CLASS {}; \
+ \
+ inline _WRAPPED_TYPE unwrap(_WRAPPER_CLASS *val) \
+ { \
+ if (!val) return (0); \
+ return ((_WRAPPED_TYPE)((void *)(val->imp()))); \
+ } \
+ \
+ inline const _WRAPPED_TYPE unwrapConst(const _WRAPPER_CLASS *val) \
+ { \
+ if (!val) return (0); \
+ return ((const _WRAPPED_TYPE)((void *)(val->constimp()))); \
+ } \
+ \
+ inline _IMP_CLASS *wrap(_WRAPPED_TYPE val) \
+ { \
+ return ((_IMP_CLASS*)((void *)val)); \
+ }
+
+WRAPPED_CLASS(DbMpoolFile, DbMpoolFileImp, DB_MPOOLFILE*)
+WRAPPED_CLASS(Db, DbImp, DB*)
+WRAPPED_CLASS(DbEnv, DbEnvImp, DB_ENV*)
+WRAPPED_CLASS(DbTxn, DbTxnImp, DB_TXN*)
+
+// A tristate integer value used by the DB_ERROR macro below.
+// We chose not to make this an enumerated type so it can
+// be kept private, even though methods that return the
+// tristate int can be declared in db_cxx.h .
+//
+#define ON_ERROR_THROW 1
+#define ON_ERROR_RETURN 0
+#define ON_ERROR_UNKNOWN (-1)
+
+// Macros that handle detected errors, in case we want to
+// change the default behavior. The 'policy' is one of
+// the tristate values given above. If UNKNOWN is specified,
+// the behavior is taken from the last initialized DbEnv.
+//
+#define DB_ERROR(caller, ecode, policy) \
+ DbEnv::runtime_error(caller, ecode, policy)
+
+#define DB_ERROR_DBT(caller, dbt, policy) \
+ DbEnv::runtime_error_dbt(caller, dbt, policy)
+
+#define DB_OVERFLOWED_DBT(dbt) \
+ (F_ISSET(dbt, DB_DBT_USERMEM) && dbt->size > dbt->ulen)
+
+/* values for Db::flags_ */
+#define DB_CXX_PRIVATE_ENV 0x00000001
+
+#endif /* !_CXX_INT_H_ */
diff --git a/storage/bdb/dbinc/db.in b/storage/bdb/dbinc/db.in
new file mode 100644
index 00000000000..208de3bd622
--- /dev/null
+++ b/storage/bdb/dbinc/db.in
@@ -0,0 +1,1883 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db.in,v 11.323 2002/09/03 17:27:16 bostic Exp $
+ *
+ * db.h include file layout:
+ * General.
+ * Database Environment.
+ * Locking subsystem.
+ * Logging subsystem.
+ * Shared buffer cache (mpool) subsystem.
+ * Transaction subsystem.
+ * Access methods.
+ * Access method cursors.
+ * Dbm/Ndbm, Hsearch historic interfaces.
+ */
+
+#ifndef _DB_H_
+#define _DB_H_
+
+#ifndef __NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * XXX
+ * Handle function prototypes and the keyword "const". This steps on name
+ * space that DB doesn't control, but all of the other solutions are worse.
+ *
+ * XXX
+ * While Microsoft's compiler is ANSI C compliant, it doesn't have __STDC__
+ * defined by default, you specify a command line flag or #pragma to turn
+ * it on. Don't do that, however, because some of Microsoft's own header
+ * files won't compile.
+ */
+#undef __P
+#if defined(__STDC__) || defined(__cplusplus) || defined(_MSC_VER)
+#define __P(protos) protos /* ANSI C prototypes */
+#else
+#define const
+#define __P(protos) () /* K&R C preprocessor */
+#endif
+
+/*
+ * Berkeley DB version information.
+ */
+#define DB_VERSION_MAJOR @DB_VERSION_MAJOR@
+#define DB_VERSION_MINOR @DB_VERSION_MINOR@
+#define DB_VERSION_PATCH @DB_VERSION_PATCH@
+#define DB_VERSION_STRING @DB_VERSION_STRING@
+
+/*
+ * !!!
+ * Berkeley DB uses specifically sized types. If they're not provided by
+ * the system, typedef them here.
+ *
+ * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__,
+ * as does BIND and Kerberos, since we don't know for sure what #include
+ * files the user is using.
+ *
+ * !!!
+ * We also provide the standard u_int, u_long etc., if they're not provided
+ * by the system.
+ */
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+@u_int8_decl@
+@int16_decl@
+@u_int16_decl@
+@int32_decl@
+@u_int32_decl@
+#endif
+
+@u_char_decl@
+@u_short_decl@
+@u_int_decl@
+@u_long_decl@
+@ssize_t_decl@
+
+/* Basic types that are exported or quasi-exported. */
+typedef u_int32_t db_pgno_t; /* Page number type. */
+typedef u_int16_t db_indx_t; /* Page offset type. */
+#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */
+
+typedef u_int32_t db_recno_t; /* Record number type. */
+#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */
+
+typedef u_int32_t db_timeout_t; /* Type of a timeout. */
+
+/*
+ * Region offsets are currently limited to 32-bits. I expect that's going
+ * to have to be fixed in the not-too-distant future, since we won't want to
+ * split 100Gb memory pools into that many different regions.
+ */
+typedef u_int32_t roff_t;
+
+/*
+ * Forward structure declarations, so we can declare pointers and
+ * applications can get type checking.
+ */
+struct __db; typedef struct __db DB;
+struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT;
+struct __db_cipher; typedef struct __db_cipher DB_CIPHER;
+struct __db_dbt; typedef struct __db_dbt DBT;
+struct __db_env; typedef struct __db_env DB_ENV;
+struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT;
+struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK;
+struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT;
+struct __db_lock_u; typedef struct __db_lock_u DB_LOCK;
+struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ;
+struct __db_log_cursor; typedef struct __db_log_cursor DB_LOGC;
+struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT;
+struct __db_lsn; typedef struct __db_lsn DB_LSN;
+struct __db_mpool; typedef struct __db_mpool DB_MPOOL;
+struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT;
+struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT;
+struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE;
+struct __db_preplist; typedef struct __db_preplist DB_PREPLIST;
+struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT;
+struct __db_rep; typedef struct __db_rep DB_REP;
+struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT;
+struct __db_txn; typedef struct __db_txn DB_TXN;
+struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE;
+struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT;
+struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR;
+struct __dbc; typedef struct __dbc DBC;
+struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL;
+struct __fh_t; typedef struct __fh_t DB_FH;
+struct __fname; typedef struct __fname FNAME;
+struct __key_range; typedef struct __key_range DB_KEY_RANGE;
+struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE;
+struct __mutex_t; typedef struct __mutex_t DB_MUTEX;
+
+/* Key/data structure -- a Data-Base Thang. */
+struct __db_dbt {
+ /*
+ * data/size must be fields 1 and 2 for DB 1.85 compatibility.
+ */
+ void *data; /* Key/data */
+ u_int32_t size; /* key/data length */
+
+ u_int32_t ulen; /* RO: length of user buffer. */
+ u_int32_t dlen; /* RO: get/put record length. */
+ u_int32_t doff; /* RO: get/put record offset. */
+
+ void *app_private; /* Application-private handle. */
+
+#define DB_DBT_APPMALLOC 0x001 /* Callback allocated memory. */
+#define DB_DBT_ISSET 0x002 /* Lower level calls set value. */
+#define DB_DBT_MALLOC 0x004 /* Return in malloc'd memory. */
+#define DB_DBT_PARTIAL 0x008 /* Partial put/get. */
+#define DB_DBT_REALLOC 0x010 /* Return in realloc'd memory. */
+#define DB_DBT_USERMEM 0x020 /* Return in user's memory. */
+#define DB_DBT_DUPOK 0x040 /* Insert if duplicate. */
+ u_int32_t flags;
+};
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ */
+#define DB_CREATE 0x000001 /* Create file as necessary. */
+#define DB_CXX_NO_EXCEPTIONS 0x000002 /* C++: return error values. */
+#define DB_FORCE 0x000004 /* Force (anything). */
+#define DB_NOMMAP 0x000008 /* Don't mmap underlying file. */
+#define DB_RDONLY 0x000010 /* Read-only (O_RDONLY). */
+#define DB_RECOVER 0x000020 /* Run normal recovery. */
+#define DB_THREAD 0x000040 /* Applications are threaded. */
+#define DB_TRUNCATE 0x000080 /* Discard existing DB (O_TRUNC). */
+#define DB_TXN_NOSYNC 0x000100 /* Do not sync log on commit. */
+#define DB_USE_ENVIRON 0x000200 /* Use the environment. */
+#define DB_USE_ENVIRON_ROOT 0x000400 /* Use the environment if root. */
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ *
+ * DB_AUTO_COMMIT:
+ * DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open,
+ * DB->remove, DB->rename, DB->truncate
+ * DB_DIRTY_READ:
+ * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get,
+ * DB_ENV->txn_begin
+ *
+ * Shared flags up to 0x000400 */
+#define DB_AUTO_COMMIT 0x00800000 /* Implied transaction. */
+#define DB_DIRTY_READ 0x01000000 /* Dirty Read. */
+
+/*
+ * Flags private to db_env_create.
+ */
+#define DB_CLIENT 0x000001 /* Open for a client environment. */
+
+/*
+ * Flags private to db_create.
+ */
+#define DB_XA_CREATE 0x000001 /* Open in an XA environment. */
+
+/*
+ * Flags private to DB_ENV->open.
+ * Shared flags up to 0x000400 */
+#define DB_INIT_CDB 0x000800 /* Concurrent Access Methods. */
+#define DB_INIT_LOCK 0x001000 /* Initialize locking. */
+#define DB_INIT_LOG 0x002000 /* Initialize logging. */
+#define DB_INIT_MPOOL 0x004000 /* Initialize mpool. */
+#define DB_INIT_TXN 0x008000 /* Initialize transactions. */
+#define DB_JOINENV 0x010000 /* Initialize all subsystems present. */
+#define DB_LOCKDOWN 0x020000 /* Lock memory into physical core. */
+#define DB_PRIVATE 0x040000 /* DB_ENV is process local. */
+#define DB_RECOVER_FATAL 0x080000 /* Run catastrophic recovery. */
+#define DB_SYSTEM_MEM 0x100000 /* Use system-backed memory. */
+
+/*
+ * Flags private to DB->open.
+ * Shared flags up to 0x000400 */
+#define DB_EXCL 0x000800 /* Exclusive open (O_EXCL). */
+#define DB_FCNTL_LOCKING 0x001000 /* UNDOC: fcntl(2) locking. */
+#define DB_RDWRMASTER 0x002000 /* UNDOC: allow subdb master open R/W */
+#define DB_WRITEOPEN 0x004000 /* UNDOC: open with write lock. */
+
+/*
+ * Flags private to DB_ENV->txn_begin.
+ * Shared flags up to 0x000400 */
+#define DB_TXN_NOWAIT 0x000800 /* Do not wait for locks in this TXN. */
+#define DB_TXN_SYNC 0x001000 /* Always sync log on commit. */
+
+/*
+ * Flags private to DB_ENV->set_encrypt.
+ */
+#define DB_ENCRYPT_AES 0x000001 /* AES, assumes SHA1 checksum */
+
+/*
+ * Flags private to DB_ENV->set_flags.
+ * Shared flags up to 0x000400 */
+#define DB_CDB_ALLDB 0x000800 /* Set CDB locking per environment. */
+#define DB_DIRECT_DB 0x001000 /* Don't buffer databases in the OS. */
+#define DB_DIRECT_LOG 0x002000 /* Don't buffer log files in the OS. */
+#define DB_NOLOCKING 0x004000 /* Set locking/mutex behavior. */
+#define DB_NOPANIC 0x008000 /* Set panic state per DB_ENV. */
+#define DB_OVERWRITE 0x010000 /* Overwrite unlinked region files. */
+#define DB_PANIC_ENVIRONMENT 0x020000 /* Set panic state per environment. */
+#define DB_REGION_INIT 0x040000 /* Page-fault regions on open. */
+#define DB_TXN_WRITE_NOSYNC 0x080000 /* Write, don't sync, on txn commit. */
+#define DB_YIELDCPU 0x100000 /* Yield the CPU (a lot). */
+
+/*
+ * Flags private to DB->set_feedback's callback.
+ */
+#define DB_UPGRADE 0x000001 /* Upgrading. */
+#define DB_VERIFY 0x000002 /* Verifying. */
+
+/*
+ * Flags private to DB_MPOOLFILE->open.
+ * Shared flags up to 0x000400 */
+#define DB_DIRECT 0x000800 /* Don't buffer the file in the OS. */
+#define DB_EXTENT 0x001000 /* UNDOC: dealing with an extent. */
+#define DB_ODDFILESIZE 0x002000 /* Truncate file to N * pgsize. */
+
+/*
+ * Flags private to DB->set_flags.
+ */
+#define DB_CHKSUM_SHA1 0x000001 /* Use SHA1 checksumming */
+#define DB_DUP 0x000002 /* Btree, Hash: duplicate keys. */
+#define DB_DUPSORT 0x000004 /* Btree, Hash: duplicate keys. */
+#define DB_ENCRYPT 0x000008 /* Btree, Hash: use encryption. */
+#define DB_RECNUM 0x000010 /* Btree: record numbers. */
+#define DB_RENUMBER 0x000020 /* Recno: renumber on insert/delete. */
+#define DB_REVSPLITOFF 0x000040 /* Btree: turn off reverse splits. */
+#define DB_SNAPSHOT 0x000080 /* Recno: snapshot the input. */
+
+/*
+ * Flags private to the DB->stat methods.
+ */
+#define DB_STAT_CLEAR 0x000001 /* Clear stat after returning values. */
+
+/*
+ * Flags private to DB->join.
+ */
+#define DB_JOIN_NOSORT 0x000001 /* Don't try to optimize join. */
+
+/*
+ * Flags private to DB->verify.
+ */
+#define DB_AGGRESSIVE 0x000001 /* Salvage whatever could be data.*/
+#define DB_NOORDERCHK 0x000002 /* Skip sort order/hashing check. */
+#define DB_ORDERCHKONLY 0x000004 /* Only perform the order check. */
+#define DB_PR_PAGE 0x000008 /* Show page contents (-da). */
+#define DB_PR_RECOVERYTEST 0x000010 /* Recovery test (-dr). */
+#define DB_PRINTABLE 0x000020 /* Use printable format for salvage. */
+#define DB_SALVAGE 0x000040 /* Salvage what looks like data. */
+/*
+ * !!!
+ * These must not go over 0x8000, or they will collide with the flags
+ * used by __bam_vrfy_subtree.
+ */
+
+/*
+ * Flags private to DB->set_rep_transport's send callback.
+ */
+#define DB_REP_PERMANENT 0x0001 /* Important--app. may want to flush. */
+
+/*******************************************************
+ * Locking.
+ *******************************************************/
+#define DB_LOCKVERSION 1
+
+#define DB_FILE_ID_LEN 20 /* Unique file ID length. */
+
+/*
+ * Deadlock detector modes; used in the DB_ENV structure to configure the
+ * locking subsystem.
+ */
+#define DB_LOCK_NORUN 0
+#define DB_LOCK_DEFAULT 1 /* Default policy. */
+#define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */
+#define DB_LOCK_MAXLOCKS 3 /* Abort txn with maximum # of locks. */
+#define DB_LOCK_MINLOCKS 4 /* Abort txn with minimum # of locks. */
+#define DB_LOCK_MINWRITE 5 /* Abort txn with minimum writelocks. */
+#define DB_LOCK_OLDEST 6 /* Abort oldest transaction. */
+#define DB_LOCK_RANDOM 7 /* Abort random transaction. */
+#define DB_LOCK_YOUNGEST 8 /* Abort youngest transaction. */
+
+/* Flag values for lock_vec(), lock_get(). */
+#define DB_LOCK_FREE_LOCKER 0x001 /* Internal: Free locker as well. */
+#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */
+#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */
+#define DB_LOCK_REMOVE 0x008 /* Internal: flag object removed. */
+#define DB_LOCK_SET_TIMEOUT 0x010 /* Internal: set lock timeout. */
+#define DB_LOCK_SWITCH 0x020 /* Internal: switch existing lock. */
+#define DB_LOCK_UPGRADE 0x040 /* Internal: upgrade existing lock. */
+
+/*
+ * Simple R/W lock modes and for multi-granularity intention locking.
+ *
+ * !!!
+ * These values are NOT random, as they are used as an index into the lock
+ * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 3, and DB_LOCK_IREAD
+ * must be == 4.
+ */
+typedef enum {
+ DB_LOCK_NG=0, /* Not granted. */
+ DB_LOCK_READ=1, /* Shared/read. */
+ DB_LOCK_WRITE=2, /* Exclusive/write. */
+ DB_LOCK_WAIT=3, /* Wait for event */
+ DB_LOCK_IWRITE=4, /* Intent exclusive/write. */
+ DB_LOCK_IREAD=5, /* Intent to share/read. */
+ DB_LOCK_IWR=6, /* Intent to read and write. */
+ DB_LOCK_DIRTY=7, /* Dirty Read. */
+ DB_LOCK_WWRITE=8 /* Was Written. */
+} db_lockmode_t;
+
+/*
+ * Request types.
+ */
+typedef enum {
+ DB_LOCK_DUMP=0, /* Display held locks. */
+ DB_LOCK_GET=1, /* Get the lock. */
+ DB_LOCK_GET_TIMEOUT=2, /* Get lock with a timeout. */
+ DB_LOCK_INHERIT=3, /* Pass locks to parent. */
+ DB_LOCK_PUT=4, /* Release the lock. */
+ DB_LOCK_PUT_ALL=5, /* Release locker's locks. */
+ DB_LOCK_PUT_OBJ=6, /* Release locker's locks on obj. */
+ DB_LOCK_PUT_READ=7, /* Release locker's read locks. */
+ DB_LOCK_TIMEOUT=8, /* Force a txn to timeout. */
+ DB_LOCK_TRADE=9, /* Trade locker ids on a lock. */
+ DB_LOCK_UPGRADE_WRITE=10 /* Upgrade writes for dirty reads. */
+} db_lockop_t;
+
+/*
+ * Status of a lock.
+ */
+typedef enum {
+ DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */
+ DB_LSTAT_ERR=2, /* Lock is bad. */
+ DB_LSTAT_EXPIRED=3, /* Lock has expired. */
+ DB_LSTAT_FREE=4, /* Lock is unallocated. */
+ DB_LSTAT_HELD=5, /* Lock is currently held. */
+ DB_LSTAT_NOTEXIST=6, /* Object on which lock was waiting
+ * was removed */
+ DB_LSTAT_PENDING=7, /* Lock was waiting and has been
+ * promoted; waiting for the owner
+ * to run and upgrade it to held. */
+ DB_LSTAT_WAITING=8 /* Lock is on the wait queue. */
+}db_status_t;
+
+/* Lock statistics structure. */
+struct __db_lock_stat {
+ u_int32_t st_id; /* Last allocated locker ID. */
+ u_int32_t st_cur_maxid; /* Current maximum unused ID. */
+ u_int32_t st_maxlocks; /* Maximum number of locks in table. */
+ u_int32_t st_maxlockers; /* Maximum num of lockers in table. */
+ u_int32_t st_maxobjects; /* Maximum num of objects in table. */
+ u_int32_t st_nmodes; /* Number of lock modes. */
+ u_int32_t st_nlocks; /* Current number of locks. */
+ u_int32_t st_maxnlocks; /* Maximum number of locks so far. */
+ u_int32_t st_nlockers; /* Current number of lockers. */
+ u_int32_t st_maxnlockers; /* Maximum number of lockers so far. */
+ u_int32_t st_nobjects; /* Current number of objects. */
+ u_int32_t st_maxnobjects; /* Maximum number of objects so far. */
+ u_int32_t st_nconflicts; /* Number of lock conflicts. */
+ u_int32_t st_nrequests; /* Number of lock gets. */
+ u_int32_t st_nreleases; /* Number of lock puts. */
+ u_int32_t st_nnowaits; /* Number of requests that would have
+ waited, but NOWAIT was set. */
+ u_int32_t st_ndeadlocks; /* Number of lock deadlocks. */
+ db_timeout_t st_locktimeout; /* Lock timeout. */
+ u_int32_t st_nlocktimeouts; /* Number of lock timeouts. */
+ db_timeout_t st_txntimeout; /* Transaction timeout. */
+ u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * DB_LOCK_ILOCK --
+ * Internal DB access method lock.
+ */
+struct __db_ilock {
+ db_pgno_t pgno; /* Page being locked. */
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File id. */
+#define DB_HANDLE_LOCK 1
+#define DB_RECORD_LOCK 2
+#define DB_PAGE_LOCK 3
+#define DB_TXN_LOCK 4
+ u_int32_t type; /* Type of lock. */
+};
+
+/*
+ * DB_LOCK --
+ * The structure is allocated by the caller and filled in during a
+ * lock_get request (or a lock_vec/DB_LOCK_GET).
+ */
+struct __db_lock_u {
+ size_t off; /* Offset of the lock in the region */
+ u_int32_t ndx; /* Index of the object referenced by
+ * this lock; used for locking. */
+ u_int32_t gen; /* Generation number of this lock. */
+ db_lockmode_t mode; /* mode of this lock. */
+};
+
+/* Lock request structure. */
+struct __db_lockreq {
+ db_lockop_t op; /* Operation. */
+ db_lockmode_t mode; /* Requested mode. */
+ db_timeout_t timeout; /* Time to expire lock. */
+ DBT *obj; /* Object being locked. */
+ DB_LOCK lock; /* Lock returned. */
+};
+
+/*******************************************************
+ * Logging.
+ *******************************************************/
+#define DB_LOGVERSION 7 /* Current log version. */
+#define DB_LOGOLDVER 7 /* Oldest log version supported. */
+#define DB_LOGMAGIC 0x040988
+
+/* Flag values for log_archive(). */
+#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */
+#define DB_ARCH_DATA 0x002 /* Data files. */
+#define DB_ARCH_LOG 0x004 /* Log files. */
+
+/*
+ * A DB_LSN has two parts, a fileid which identifies a specific file, and an
+ * offset within that file. The fileid is an unsigned 4-byte quantity that
+ * uniquely identifies a file within the log directory -- currently a simple
+ * counter inside the log. The offset is also an unsigned 4-byte value. The
+ * log manager guarantees the offset is never more than 4 bytes by switching
+ * to a new log file before the maximum length imposed by an unsigned 4-byte
+ * offset is reached.
+ */
+struct __db_lsn {
+ u_int32_t file; /* File ID. */
+ u_int32_t offset; /* File offset. */
+};
+
+/*
+ * DB_LOGC --
+ * Log cursor.
+ */
+struct __db_log_cursor {
+ DB_ENV *dbenv; /* Enclosing dbenv. */
+
+ DB_FH *c_fh; /* File handle. */
+ DB_LSN c_lsn; /* Cursor: LSN */
+ u_int32_t c_len; /* Cursor: record length */
+ u_int32_t c_prev; /* Cursor: previous record's offset */
+
+ DBT c_dbt; /* Return DBT. */
+
+#define DB_LOGC_BUF_SIZE (32 * 1024)
+ u_int8_t *bp; /* Allocated read buffer. */
+ u_int32_t bp_size; /* Read buffer length in bytes. */
+ u_int32_t bp_rlen; /* Read buffer valid data length. */
+ DB_LSN bp_lsn; /* Read buffer first byte LSN. */
+
+ u_int32_t bp_maxrec; /* Max record length in the log file. */
+
+ /* Methods. */
+ int (*close) __P((DB_LOGC *, u_int32_t));
+ int (*get) __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+
+#define DB_LOG_DISK 0x01 /* Log record came from disk. */
+#define DB_LOG_LOCKED 0x02 /* Log region already locked */
+#define DB_LOG_SILENT_ERR 0x04 /* Turn-off error messages. */
+ u_int32_t flags;
+};
+
+/* Log statistics structure. */
+struct __db_log_stat {
+ u_int32_t st_magic; /* Log file magic number. */
+ u_int32_t st_version; /* Log file version number. */
+ int st_mode; /* Log file mode. */
+ u_int32_t st_lg_bsize; /* Log buffer size. */
+ u_int32_t st_lg_size; /* Log file size. */
+ u_int32_t st_w_bytes; /* Bytes to log. */
+ u_int32_t st_w_mbytes; /* Megabytes to log. */
+ u_int32_t st_wc_bytes; /* Bytes to log since checkpoint. */
+ u_int32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */
+ u_int32_t st_wcount; /* Total writes to the log. */
+ u_int32_t st_wcount_fill; /* Overflow writes to the log. */
+ u_int32_t st_scount; /* Total syncs to the log. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_cur_file; /* Current log file number. */
+ u_int32_t st_cur_offset; /* Current log file offset. */
+ u_int32_t st_disk_file; /* Known on disk log file number. */
+ u_int32_t st_disk_offset; /* Known on disk log file offset. */
+ u_int32_t st_regsize; /* Region size. */
+ u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */
+ u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */
+};
+
+/*******************************************************
+ * Shared buffer cache (mpool).
+ *******************************************************/
+/* Flag values for DB_MPOOLFILE->get. */
+#define DB_MPOOL_CREATE 0x001 /* Create a page. */
+#define DB_MPOOL_LAST 0x002 /* Return the last page. */
+#define DB_MPOOL_NEW 0x004 /* Create a new page. */
+
+/* Flag values for DB_MPOOLFILE->put, DB_MPOOLFILE->set. */
+#define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */
+#define DB_MPOOL_DIRTY 0x002 /* Page is modified. */
+#define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */
+
+/* Priority values for DB_MPOOLFILE->set_priority. */
+typedef enum {
+ DB_PRIORITY_VERY_LOW=1,
+ DB_PRIORITY_LOW=2,
+ DB_PRIORITY_DEFAULT=3,
+ DB_PRIORITY_HIGH=4,
+ DB_PRIORITY_VERY_HIGH=5
+} DB_CACHE_PRIORITY;
+
+/* Per-process DB_MPOOLFILE information. */
+struct __db_mpoolfile {
+ /* These fields need to be protected for multi-threaded support. */
+ DB_MUTEX *mutexp; /* Structure thread lock. */
+
+ DB_FH *fhp; /* Underlying file handle. */
+
+ u_int32_t ref; /* Reference count. */
+
+ /*
+ * !!!
+ * The pinref and q fields are protected by the region lock, not the
+ * DB_MPOOLFILE structure mutex. We don't use the structure mutex
+ * because then I/O (which holds the structure lock held because of
+ * the race between the seek and write of the file descriptor) would
+ * block any other put/get calls using this DB_MPOOLFILE structure.
+ */
+ u_int32_t pinref; /* Pinned block reference count. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_mpoolfile) q;
+ */
+ struct {
+ struct __db_mpoolfile *tqe_next;
+ struct __db_mpoolfile **tqe_prev;
+ } q; /* Linked list of DB_MPOOLFILE's. */
+
+ /*
+ * These fields are not thread-protected because they are initialized
+ * when the file is opened and never modified.
+ */
+ int ftype; /* File type. */
+ DBT *pgcookie; /* Byte-string passed to pgin/pgout. */
+ u_int8_t *fileid; /* Unique file ID. */
+ int32_t lsn_offset; /* LSN offset in page. */
+ u_int32_t clear_len; /* Cleared length on created pages. */
+
+ DB_MPOOL *dbmp; /* Overlying DB_MPOOL. */
+ MPOOLFILE *mfp; /* Underlying MPOOLFILE. */
+
+ void *addr; /* Address of mmap'd region. */
+ size_t len; /* Length of mmap'd region. */
+
+ /* Methods. */
+ int (*close) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+ void (*get_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ void (*last_pgno) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*open)__P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t));
+ int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ void (*refcnt) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ int (*set_clear_len) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*set_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ int (*set_ftype) __P((DB_MPOOLFILE *, int));
+ int (*set_lsn_offset) __P((DB_MPOOLFILE *, int32_t));
+ int (*set_pgcookie) __P((DB_MPOOLFILE *, DBT *));
+ int (*set_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
+ void (*set_unlink) __P((DB_MPOOLFILE *, int));
+ int (*sync) __P((DB_MPOOLFILE *));
+
+ /*
+ * MP_OPEN_CALLED and MP_READONLY do not need to be thread protected
+ * because they are initialized when the file is opened, and never
+ * modified.
+ *
+ * MP_FLUSH, MP_UPGRADE and MP_UPGRADE_FAIL are thread protected
+ * because they are potentially read by multiple threads of control.
+ */
+#define MP_FLUSH 0x001 /* Was opened to flush a buffer. */
+#define MP_OPEN_CALLED 0x002 /* File opened. */
+#define MP_READONLY 0x004 /* File is readonly. */
+#define MP_UPGRADE 0x008 /* File descriptor is readwrite. */
+#define MP_UPGRADE_FAIL 0x010 /* Upgrade wasn't possible. */
+ u_int32_t flags;
+};
+
+/*
+ * Mpool statistics structure.
+ */
+struct __db_mpool_stat {
+ u_int32_t st_gbytes; /* Total cache size: GB. */
+ u_int32_t st_bytes; /* Total cache size: B. */
+ u_int32_t st_ncache; /* Number of caches. */
+ u_int32_t st_regsize; /* Cache size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+ u_int32_t st_ro_evict; /* Clean pages forced from the cache. */
+ u_int32_t st_rw_evict; /* Dirty pages forced from the cache. */
+ u_int32_t st_page_trickle; /* Pages written by memp_trickle. */
+ u_int32_t st_pages; /* Total number of pages. */
+ u_int32_t st_page_clean; /* Clean pages. */
+ u_int32_t st_page_dirty; /* Dirty pages. */
+ u_int32_t st_hash_buckets; /* Number of hash buckets. */
+ u_int32_t st_hash_searches; /* Total hash chain searches. */
+ u_int32_t st_hash_longest; /* Longest hash chain searched. */
+ u_int32_t st_hash_examined; /* Total hash entries searched. */
+ u_int32_t st_hash_nowait; /* Hash lock granted with nowait. */
+ u_int32_t st_hash_wait; /* Hash lock granted after wait. */
+ u_int32_t st_hash_max_wait; /* Max hash lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted with nowait. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_alloc; /* Number of page allocations. */
+ u_int32_t st_alloc_buckets; /* Buckets checked during allocation. */
+ u_int32_t st_alloc_max_buckets; /* Max checked during allocation. */
+ u_int32_t st_alloc_pages; /* Pages checked during allocation. */
+ u_int32_t st_alloc_max_pages; /* Max checked during allocation. */
+};
+
+/* Mpool file statistics structure. */
+struct __db_mpool_fstat {
+ char *file_name; /* File name. */
+ size_t st_pagesize; /* Page size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+};
+
+/*******************************************************
+ * Transactions and recovery.
+ *******************************************************/
+#define DB_TXNVERSION 1
+
+typedef enum {
+ DB_TXN_ABORT=0, /* Public. */
+ DB_TXN_APPLY=1, /* Public. */
+ DB_TXN_BACKWARD_ALLOC=2, /* Internal. */
+ DB_TXN_BACKWARD_ROLL=3, /* Public. */
+ DB_TXN_FORWARD_ROLL=4, /* Public. */
+ DB_TXN_GETPGNOS=5, /* Internal. */
+ DB_TXN_OPENFILES=6, /* Internal. */
+ DB_TXN_POPENFILES=7, /* Internal. */
+ DB_TXN_PRINT=8 /* Public. */
+} db_recops;
+
+/*
+ * BACKWARD_ALLOC is used during the forward pass to pick up any aborted
+ * allocations for files that were created during the forward pass.
+ * The main difference between _ALLOC and _ROLL is that the entry for
+ * the file may not exist during the rollforward pass.
+ */
+#define DB_UNDO(op) ((op) == DB_TXN_ABORT || \
+ (op) == DB_TXN_BACKWARD_ROLL || (op) == DB_TXN_BACKWARD_ALLOC)
+#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL || (op) == DB_TXN_APPLY)
+
+struct __db_txn {
+ DB_TXNMGR *mgrp; /* Pointer to transaction manager. */
+ DB_TXN *parent; /* Pointer to transaction's parent. */
+ DB_LSN last_lsn; /* Lsn of last log write. */
+ u_int32_t txnid; /* Unique transaction id. */
+ roff_t off; /* Detail structure within region. */
+ db_timeout_t lock_timeout; /* Timeout for locks for this txn. */
+ db_timeout_t expire; /* Time this txn expires. */
+ void *txn_list; /* Undo information for parent. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) links;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } links; /* Links transactions off manager. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__events, __txn_event) events;
+ */
+ struct {
+ struct __txn_event *tqh_first;
+ struct __txn_event **tqh_last;
+ } events;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__kids, __db_txn) kids;
+ */
+ struct __kids {
+ struct __db_txn *tqh_first;
+ struct __db_txn **tqh_last;
+ } kids;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) klinks;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } klinks;
+
+ /* API-private structure: used by C++ */
+ void *api_internal;
+
+ u_int32_t cursors; /* Number of cursors open for txn */
+
+ /* Methods. */
+ int (*abort) __P((DB_TXN *));
+ int (*commit) __P((DB_TXN *, u_int32_t));
+ int (*discard) __P((DB_TXN *, u_int32_t));
+ u_int32_t (*id) __P((DB_TXN *));
+ int (*prepare) __P((DB_TXN *, u_int8_t *));
+ int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t));
+
+#define TXN_CHILDCOMMIT 0x01 /* Transaction that has committed. */
+#define TXN_COMPENSATE 0x02 /* Compensating transaction. */
+#define TXN_DIRTY_READ 0x04 /* Transaction does dirty reads. */
+#define TXN_LOCKTIMEOUT 0x08 /* Transaction has a lock timeout. */
+#define TXN_MALLOC 0x10 /* Structure allocated by TXN system. */
+#define TXN_NOSYNC 0x20 /* Do not sync on prepare and commit. */
+#define TXN_NOWAIT 0x40 /* Do not wait on locks. */
+#define TXN_SYNC 0x80 /* Sync on prepare and commit. */
+ u_int32_t flags;
+};
+
+/* Transaction statistics structure. */
+struct __db_txn_active {
+ u_int32_t txnid; /* Transaction ID */
+ u_int32_t parentid; /* Transaction ID of parent */
+ DB_LSN lsn; /* LSN when transaction began */
+};
+
+struct __db_txn_stat {
+ DB_LSN st_last_ckp; /* lsn of the last checkpoint */
+ time_t st_time_ckp; /* time of last checkpoint */
+ u_int32_t st_last_txnid; /* last transaction id given out */
+ u_int32_t st_maxtxns; /* maximum txns possible */
+ u_int32_t st_naborts; /* number of aborted transactions */
+ u_int32_t st_nbegins; /* number of begun transactions */
+ u_int32_t st_ncommits; /* number of committed transactions */
+ u_int32_t st_nactive; /* number of active transactions */
+ u_int32_t st_nrestores; /* number of restored transactions
+ after recovery. */
+ u_int32_t st_maxnactive; /* maximum active transactions */
+ DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * Structure used for two phase commit interface. Berkeley DB support for two
+ * phase commit is compatible with the X/open XA interface. The xa #define
+ * XIDDATASIZE defines the size of a global transaction ID. We have our own
+ * version here which must have the same value.
+ */
+#define DB_XIDDATASIZE 128
+struct __db_preplist {
+ DB_TXN *txn;
+ u_int8_t gid[DB_XIDDATASIZE];
+};
+
+/*******************************************************
+ * Replication.
+ *******************************************************/
+/* Special, out-of-band environment IDs. */
+#define DB_EID_BROADCAST -1
+#define DB_EID_INVALID -2
+
+/* rep_start flags values */
+#define DB_REP_CLIENT 0x001
+#define DB_REP_LOGSONLY 0x002
+#define DB_REP_MASTER 0x004
+
+/* Replication statistics (counters for the replication subsystem). */
+struct __db_rep_stat {
+ /* !!!
+ * Many replication statistics fields cannot be protected by a mutex
+ * without an unacceptable performance penalty, since most message
+ * processing is done without the need to hold a region-wide lock.
+ * Fields whose comments end with a '+' may be updated without holding
+ * the replication or log mutexes (as appropriate), and thus may be
+ * off somewhat (or, on unreasonable architectures under unlucky
+ * circumstances, garbaged).
+ */
+ u_int32_t st_status; /* Current replication status. */
+ DB_LSN st_next_lsn; /* Next LSN to use or expect. */
+ DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */
+
+ u_int32_t st_dupmasters; /* # of times a duplicate master
+ condition was detected.+ */
+ int st_env_id; /* Current environment ID. */
+ int st_env_priority; /* Current environment priority. */
+ u_int32_t st_gen; /* Current generation number. */
+ u_int32_t st_log_duplicated; /* Log records received multiply.+ */
+ u_int32_t st_log_queued; /* Log records currently queued.+ */
+ u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */
+ u_int32_t st_log_queued_total; /* Total # of log recs. ever queued.+ */
+ u_int32_t st_log_records; /* Log records received and put.+ */
+ u_int32_t st_log_requested; /* Log recs. missed and requested.+ */
+ int st_master; /* Env. ID of the current master. */
+ u_int32_t st_master_changes; /* # of times we've switched masters. */
+ u_int32_t st_msgs_badgen; /* Messages with a bad generation #.+ */
+ u_int32_t st_msgs_processed; /* Messages received and processed.+ */
+ u_int32_t st_msgs_recover; /* Messages ignored because this site
+ was a client in recovery.+ */
+ u_int32_t st_msgs_send_failures;/* # of failed message sends.+ */
+ u_int32_t st_msgs_sent; /* # of successful message sends.+ */
+ u_int32_t st_newsites; /* # of NEWSITE msgs. received.+ */
+ int st_nsites; /* Current number of sites we will
+ assume during elections. */
+ u_int32_t st_nthrottles; /* # of times we were throttled. */
+ u_int32_t st_outdated; /* # of times we detected and returned
+ an OUTDATED condition.+ */
+ u_int32_t st_txns_applied; /* # of transactions applied.+ */
+
+ /* Elections generally. */
+ u_int32_t st_elections; /* # of elections held.+ */
+ u_int32_t st_elections_won; /* # of elections won by this site.+ */
+
+ /* Statistics about an in-progress election. */
+ int st_election_cur_winner; /* Current front-runner. */
+ u_int32_t st_election_gen; /* Election generation number. */
+ DB_LSN st_election_lsn; /* Max. LSN of current winner. */
+ int st_election_nsites; /* # of "registered voters". */
+ int st_election_priority; /* Current election priority. */
+ int st_election_status; /* Current election status. */
+ int st_election_tiebreaker; /* Election tiebreaker value. */
+ int st_election_votes; /* Votes received in this round. */
+};
+
+/*******************************************************
+ * Access methods.
+ *******************************************************/
+/* Database access-method types (kept in the DB handle's "type" field). */
+typedef enum {
+ DB_BTREE=1, /* B-tree access method. */
+ DB_HASH=2, /* Hash access method. */
+ DB_RECNO=3, /* Record-number access method. */
+ DB_QUEUE=4, /* Queue access method. */
+ DB_UNKNOWN=5 /* Figure it out on open. */
+} DBTYPE;
+
+#define DB_RENAMEMAGIC 0x030800 /* File has been renamed. */
+
+/*
+ * Per-access-method version and magic numbers. The *OLDVER values are the
+ * oldest versions still supported by the current code.
+ */
+#define DB_BTREEVERSION 9 /* Current btree version. */
+#define DB_BTREEOLDVER 8 /* Oldest btree version supported. */
+#define DB_BTREEMAGIC 0x053162 /* Btree magic number. */
+
+#define DB_HASHVERSION 8 /* Current hash version. */
+#define DB_HASHOLDVER 7 /* Oldest hash version supported. */
+#define DB_HASHMAGIC 0x061561 /* Hash magic number. */
+
+#define DB_QAMVERSION 4 /* Current queue version. */
+#define DB_QAMOLDVER 3 /* Oldest queue version supported. */
+#define DB_QAMMAGIC 0x042253 /* Queue magic number. */
+
+/*
+ * DB access method and cursor operation values. Each value is an operation
+ * code to which additional bit flags are added.
+ */
+#define DB_AFTER 1 /* c_put() */
+#define DB_APPEND 2 /* put() */
+#define DB_BEFORE 3 /* c_put() */
+#define DB_CACHED_COUNTS 4 /* stat() */
+#define DB_COMMIT 5 /* log_put() (internal) */
+#define DB_CONSUME 6 /* get() */
+#define DB_CONSUME_WAIT 7 /* get() */
+#define DB_CURRENT 8 /* c_get(), c_put(), DB_LOGC->get() */
+#define DB_FAST_STAT 9 /* stat() */
+#define DB_FIRST 10 /* c_get(), DB_LOGC->get() */
+#define DB_GET_BOTH 11 /* get(), c_get() */
+#define DB_GET_BOTHC 12 /* c_get() (internal) */
+#define DB_GET_BOTH_RANGE 13 /* get(), c_get() */
+#define DB_GET_RECNO 14 /* c_get() */
+#define DB_JOIN_ITEM 15 /* c_get(); do not do primary lookup */
+#define DB_KEYFIRST 16 /* c_put() */
+#define DB_KEYLAST 17 /* c_put() */
+#define DB_LAST 18 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT 19 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT_DUP 20 /* c_get() */
+#define DB_NEXT_NODUP 21 /* c_get() */
+#define DB_NODUPDATA 22 /* put(), c_put() */
+#define DB_NOOVERWRITE 23 /* put() */
+#define DB_NOSYNC 24 /* close() */
+#define DB_POSITION 25 /* c_dup() */
+#define DB_POSITIONI 26 /* c_dup() (internal) */
+#define DB_PREV 27 /* c_get(), DB_LOGC->get() */
+#define DB_PREV_NODUP 28 /* c_get(), DB_LOGC->get() */
+#define DB_RECORDCOUNT 29 /* stat() */
+#define DB_SET 30 /* c_get(), DB_LOGC->get() */
+#define DB_SET_LOCK_TIMEOUT 31 /* set_timeout() */
+#define DB_SET_RANGE 32 /* c_get() */
+#define DB_SET_RECNO 33 /* get(), c_get() */
+#define DB_SET_TXN_NOW 34 /* set_timeout() (internal) */
+#define DB_SET_TXN_TIMEOUT 35 /* set_timeout() */
+#define DB_UPDATE_SECONDARY 36 /* c_get(), c_del() (internal) */
+#define DB_WRITECURSOR 37 /* cursor() */
+#define DB_WRITELOCK 38 /* cursor() (internal) */
+
+/* This has to change when the max opcode hits 255. */
+#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */
+/* DB_DIRTY_READ 0x01000000 Dirty Read. */
+#define DB_FLUSH 0x02000000 /* Flush data to disk. */
+#define DB_MULTIPLE 0x04000000 /* Return multiple data values. */
+#define DB_MULTIPLE_KEY 0x08000000 /* Return multiple data/key pairs. */
+#define DB_NOCOPY 0x10000000 /* Don't copy data */
+#define DB_PERMANENT 0x20000000 /* Flag record with REP_PERMANENT. */
+#define DB_RMW 0x40000000 /* Acquire write flag immediately. */
+#define DB_WRNOSYNC 0x80000000 /* Private: write, don't sync log_put */
+
+/*
+ * DB (user visible) error return codes.
+ *
+ * !!!
+ * For source compatibility with DB 2.X deadlock return (EAGAIN), use the
+ * following:
+ * #include <errno.h>
+ * #define DB_LOCK_DEADLOCK EAGAIN
+ *
+ * !!!
+ * We don't want our error returns to conflict with other packages where
+ * possible, so pick a base error value that's hopefully not common. We
+ * document that we own the error name space from -30,800 to -30,999.
+ */
+/* DB (public) error return codes. */
+#define DB_DONOTINDEX (-30999)/* "Null" return from 2ndary callbk. */
+#define DB_KEYEMPTY (-30998)/* Key/data deleted or never created. */
+#define DB_KEYEXIST (-30997)/* The key/data pair already exists. */
+#define DB_LOCK_DEADLOCK (-30996)/* Deadlock. */
+#define DB_LOCK_NOTGRANTED (-30995)/* Lock unavailable. */
+#define DB_NOSERVER (-30994)/* Server panic return. */
+#define DB_NOSERVER_HOME (-30993)/* Bad home sent to server. */
+#define DB_NOSERVER_ID (-30992)/* Bad ID sent to server. */
+#define DB_NOTFOUND (-30991)/* Key/data pair not found (EOF). */
+#define DB_OLD_VERSION (-30990)/* Out-of-date version. */
+#define DB_PAGE_NOTFOUND (-30989)/* Requested page not found. */
+#define DB_REP_DUPMASTER (-30988)/* There are two masters. */
+#define DB_REP_HOLDELECTION (-30987)/* Time to hold an election. */
+#define DB_REP_NEWMASTER (-30986)/* We have learned of a new master. */
+#define DB_REP_NEWSITE (-30985)/* New site entered system. */
+#define DB_REP_OUTDATED (-30984)/* Site is too far behind master. */
+#define DB_REP_UNAVAIL (-30983)/* Site cannot currently be reached. */
+#define DB_RUNRECOVERY (-30982)/* Panic return. */
+#define DB_SECONDARY_BAD (-30981)/* Secondary index corrupt. */
+#define DB_VERIFY_BAD (-30980)/* Verify failed; bad format. */
+
+/* DB (private) error return codes. */
+#define DB_ALREADY_ABORTED (-30899)/* Transaction already aborted. */
+#define DB_DELETED (-30898)/* Recovery file marked deleted. */
+#define DB_JAVA_CALLBACK (-30897)/* Exception during a java callback. */
+#define DB_LOCK_NOTEXIST (-30896)/* Object to lock is gone. */
+#define DB_NEEDSPLIT (-30895)/* Page needs to be split. */
+#define DB_SURPRISE_KID (-30894)/* Child commit where parent
+ didn't know it was a parent. */
+#define DB_SWAPBYTES (-30893)/* Database needs byte swapping. */
+#define DB_TIMEOUT (-30892)/* Timed out waiting for election. */
+#define DB_TXN_CKP (-30891)/* Encountered ckp record in log. */
+#define DB_VERIFY_FATAL (-30890)/* DB->verify cannot proceed. */
+
+/* Database handle. */
+struct __db {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ u_int32_t pgsize; /* Database logical page size. */
+
+ /* Callbacks. */
+ int (*db_append_recno) __P((DB *, DBT *, db_recno_t));
+ void (*db_feedback) __P((DB *, int, int));
+ int (*dup_compare) __P((DB *, const DBT *, const DBT *));
+
+ void *app_private; /* Application-private handle. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ DB_ENV *dbenv; /* Backing environment. */
+
+ DBTYPE type; /* DB access method type. */
+
+ DB_MPOOLFILE *mpf; /* Backing buffer pool. */
+ DB_CACHE_PRIORITY priority; /* Priority in the buffer pool. */
+
+ DB_MUTEX *mutexp; /* Synchronization for free threading */
+
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */
+
+ u_int32_t adj_fileid; /* File's unique ID for curs. adj. */
+
+#define DB_LOGFILEID_INVALID -1 /* No valid log file ID. */
+ FNAME *log_filename; /* File's naming info for logging. */
+
+ db_pgno_t meta_pgno; /* Meta page number */
+ u_int32_t lid; /* Locker id for handle locking. */
+ u_int32_t cur_lid; /* Current handle lock holder. */
+ u_int32_t associate_lid; /* Locker id for DB->associate call. */
+ DB_LOCK handle_lock; /* Lock held on this handle. */
+
+ long cl_id; /* RPC: remote client id. */
+
+ /*
+ * Returned data memory for DB->get() and friends.
+ */
+ DBT my_rskey; /* Secondary key. */
+ DBT my_rkey; /* [Primary] key. */
+ DBT my_rdata; /* Data. */
+
+ /*
+ * !!!
+ * Some applications use DB but implement their own locking outside of
+ * DB. If they're using fcntl(2) locking on the underlying database
+ * file, and we open and close a file descriptor for that file, we will
+ * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an
+ * undocumented interface to support this usage which leaves any file
+ * descriptors we open until DB->close. This will only work with the
+ * DB->open interface and simple caches, e.g., creating a transaction
+ * thread may open/close file descriptors this flag doesn't protect.
+ * Locking with fcntl(2) on a file that you don't own is a very, very
+ * unsafe thing to do. 'Nuff said.
+ */
+ DB_FH *saved_open_fhp; /* Saved file handle. */
+
+ /*
+ * Linked list of DBP's, linked from the DB_ENV, used to keep track
+ * of all open db handles for cursor adjustment.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) dblistlinks;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } dblistlinks;
+
+ /*
+ * Cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__cq_fq, __dbc) free_queue;
+ * TAILQ_HEAD(__cq_aq, __dbc) active_queue;
+ * TAILQ_HEAD(__cq_jq, __dbc) join_queue;
+ */
+ struct __cq_fq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } free_queue;
+ struct __cq_aq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } active_queue;
+ struct __cq_jq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } join_queue;
+
+ /*
+ * Secondary index support.
+ *
+ * Linked list of secondary indices -- set in the primary.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_HEAD(s_secondaries, __db);
+ */
+ struct {
+ struct __db *lh_first;
+ } s_secondaries;
+
+ /*
+ * List entries for secondaries, and reference count of how
+ * many threads are updating this secondary (see __db_c_put).
+ *
+ * !!!
+ * Note that these are synchronized by the primary's mutex, but
+ * filled in in the secondaries.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) s_links;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } s_links;
+ u_int32_t s_refcnt;
+
+ /* Secondary callback and free functions -- set in the secondary. */
+ int (*s_callback) __P((DB *, const DBT *, const DBT *, DBT *));
+
+ /* Reference to primary -- set in the secondary. */
+ DB *s_primary;
+
+ /* API-private structure: used by DB 1.85, C++, Java, Perl and Tcl */
+ void *api_internal;
+
+ /* Subsystem-private structure. */
+ void *bt_internal; /* Btree/Recno access method. */
+ void *h_internal; /* Hash access method. */
+ void *q_internal; /* Queue access method. */
+ void *xa_internal; /* XA. */
+
+ /* Methods. */
+ int (*associate) __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *,
+ const DBT *, DBT *), u_int32_t));
+ int (*close) __P((DB *, u_int32_t));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ void (*err) __P((DB *, int, const char *, ...));
+ void (*errx) __P((DB *, const char *, ...));
+ int (*fd) __P((DB *, int *));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*get_byteswapped) __P((DB *, int *));
+ int (*get_type) __P((DB *, DBTYPE *));
+ int (*join) __P((DB *, DBC **, DBC **, u_int32_t));
+ int (*key_range) __P((DB *,
+ DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+ int (*open) __P((DB *, DB_TXN *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*remove) __P((DB *, const char *, const char *, u_int32_t));
+ int (*rename) __P((DB *,
+ const char *, const char *, const char *, u_int32_t));
+ int (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+ int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+ int (*set_alloc) __P((DB *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int));
+ int (*set_cache_priority) __P((DB *, DB_CACHE_PRIORITY));
+ int (*set_dup_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_encrypt) __P((DB *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB *, FILE *));
+ void (*set_errpfx) __P((DB *, const char *));
+ int (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
+ int (*set_flags) __P((DB *, u_int32_t));
+ int (*set_lorder) __P((DB *, int));
+ int (*set_pagesize) __P((DB *, u_int32_t));
+ int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
+ int (*stat) __P((DB *, void *, u_int32_t));
+ int (*sync) __P((DB *, u_int32_t));
+ int (*upgrade) __P((DB *, const char *, u_int32_t));
+ int (*verify) __P((DB *,
+ const char *, const char *, FILE *, u_int32_t));
+
+ int (*set_bt_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_bt_maxkey) __P((DB *, u_int32_t));
+ int (*set_bt_minkey) __P((DB *, u_int32_t));
+ int (*set_bt_prefix) __P((DB *,
+ size_t (*)(DB *, const DBT *, const DBT *)));
+
+ int (*set_h_ffactor) __P((DB *, u_int32_t));
+ int (*set_h_hash) __P((DB *,
+ u_int32_t (*)(DB *, const void *, u_int32_t)));
+ int (*set_h_nelem) __P((DB *, u_int32_t));
+
+ int (*set_re_delim) __P((DB *, int));
+ int (*set_re_len) __P((DB *, u_int32_t));
+ int (*set_re_pad) __P((DB *, int));
+ int (*set_re_source) __P((DB *, const char *));
+ int (*set_q_extentsize) __P((DB *, u_int32_t));
+
+ int (*db_am_remove) __P((DB *,
+ DB_TXN *, const char *, const char *, DB_LSN *));
+ int (*db_am_rename) __P((DB *, DB_TXN *,
+ const char *, const char *, const char *));
+
+ /*
+ * Never called; these are a place to save function pointers
+ * so that we can undo an associate.
+ */
+ int (*stored_get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*stored_close) __P((DB *, u_int32_t));
+
+#define DB_OK_BTREE 0x01
+#define DB_OK_HASH 0x02
+#define DB_OK_QUEUE 0x04
+#define DB_OK_RECNO 0x08
+ u_int32_t am_ok; /* Legal AM choices. */
+
+#define DB_AM_CHKSUM 0x00000001 /* Checksumming. */
+#define DB_AM_CL_WRITER 0x00000002 /* Allow writes in client replica. */
+#define DB_AM_COMPENSATE 0x00000004 /* Created by compensating txn. */
+#define DB_AM_CREATED 0x00000008 /* Database was created upon open. */
+#define DB_AM_CREATED_MSTR 0x00000010 /* Encompassing file was created. */
+#define DB_AM_DBM_ERROR 0x00000020 /* Error in DBM/NDBM database. */
+#define DB_AM_DELIMITER 0x00000040 /* Variable length delimiter set. */
+#define DB_AM_DIRTY 0x00000080 /* Support Dirty Reads. */
+#define DB_AM_DISCARD 0x00000100 /* Discard any cached pages. */
+#define DB_AM_DUP 0x00000200 /* DB_DUP. */
+#define DB_AM_DUPSORT 0x00000400 /* DB_DUPSORT. */
+#define DB_AM_ENCRYPT 0x00000800 /* Encryption. */
+#define DB_AM_FIXEDLEN 0x00001000 /* Fixed-length records. */
+#define DB_AM_INMEM 0x00002000 /* In-memory; no sync on close. */
+#define DB_AM_IN_RENAME 0x00004000 /* File is being renamed. */
+#define DB_AM_OPEN_CALLED 0x00008000 /* DB->open called. */
+#define DB_AM_PAD 0x00010000 /* Fixed-length record pad. */
+#define DB_AM_PGDEF 0x00020000 /* Page size was defaulted. */
+#define DB_AM_RDONLY 0x00040000 /* Database is readonly. */
+#define DB_AM_RECNUM 0x00080000 /* DB_RECNUM. */
+#define DB_AM_RECOVER 0x00100000 /* DB opened by recovery routine. */
+#define DB_AM_RENUMBER 0x00200000 /* DB_RENUMBER. */
+#define DB_AM_REVSPLITOFF 0x00400000 /* DB_REVSPLITOFF. */
+#define DB_AM_SECONDARY 0x00800000 /* Database is a secondary index. */
+#define DB_AM_SNAPSHOT 0x01000000 /* DB_SNAPSHOT. */
+#define DB_AM_SUBDB 0x02000000 /* Subdatabases supported. */
+#define DB_AM_SWAP 0x04000000 /* Pages need to be byte-swapped. */
+#define DB_AM_TXN 0x08000000 /* Opened in a transaction. */
+#define DB_AM_VERIFYING 0x10000000 /* DB handle is in the verifier. */
+ u_int32_t flags; /* DB_AM_* flag bits. */
+};
+
+/*
+ * Macros for bulk get. Note that wherever we use a DBT *, we explicitly
+ * cast it; this allows the same macros to work with C++ Dbt *'s, as Dbt
+ * is a subclass of struct DBT in C++.
+ *
+ * Buffer layout, as consumed by these macros: returned items live at the
+ * front of dbt->data, while an array of u_int32_t entries grows backward
+ * from dbt->data + dbt->ulen. DB_MULTIPLE_INIT points "pointer" at the
+ * last u_int32_t slot. DB_MULTIPLE_NEXT then walks offset/length pairs,
+ * DB_MULTIPLE_KEY_NEXT walks key-offset/key-length/data-offset/data-length
+ * quadruples, and DB_MULTIPLE_RECNO_NEXT walks recno/offset/length
+ * triples. A (u_int32_t)-1 entry (a 0 recno for the RECNO variant)
+ * terminates the list, at which point retdata (and retkey/pointer) are
+ * set to NULL.
+ */
+#define DB_MULTIPLE_INIT(pointer, dbt) \
+ (pointer = (u_int8_t *)((DBT *)(dbt))->data + \
+ ((DBT *)(dbt))->ulen - sizeof(u_int32_t))
+#define DB_MULTIPLE_NEXT(pointer, dbt, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ if (retdlen == 0 && \
+ retdata == (u_int8_t *)((DBT *)(dbt))->data) \
+ retdata = NULL; \
+ } while (0)
+#define DB_MULTIPLE_KEY_NEXT(pointer, dbt, retkey, retklen, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ retkey = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retkey = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retklen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
+
+#define DB_MULTIPLE_RECNO_NEXT(pointer, dbt, recno, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)0) { \
+ recno = 0; \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ recno = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
+
+/*******************************************************
+ * Access method cursors.
+ *******************************************************/
+/* Cursor handle. */
+struct __dbc {
+ DB *dbp; /* Related DB access method. */
+ DB_TXN *txn; /* Associated transaction. */
+
+ /*
+ * Active/free cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__dbc) links;
+ */
+ struct {
+ DBC *tqe_next;
+ DBC **tqe_prev;
+ } links;
+
+ /*
+ * The DBT *'s below are used by the cursor routines to return
+ * data to the user when DBT flags indicate that DB should manage
+ * the returned memory. They point at a DBT containing the buffer
+ * and length that will be used, and "belonging" to the handle that
+ * should "own" this memory. This may be a "my_*" field of this
+ * cursor--the default--or it may be the corresponding field of
+ * another cursor, a DB handle, a join cursor, etc. In general, it
+ * will be whatever handle the user originally used for the current
+ * DB interface call.
+ */
+ DBT *rskey; /* Returned secondary key. */
+ DBT *rkey; /* Returned [primary] key. */
+ DBT *rdata; /* Returned data. */
+
+ DBT my_rskey; /* Space for returned secondary key. */
+ DBT my_rkey; /* Space for returned [primary] key. */
+ DBT my_rdata; /* Space for returned data. */
+
+ u_int32_t lid; /* Default process' locker id. */
+ u_int32_t locker; /* Locker for this operation. */
+ DBT lock_dbt; /* DBT referencing lock. */
+ DB_LOCK_ILOCK lock; /* Object to be locked. */
+ DB_LOCK mylock; /* Lock held on this cursor. */
+
+ long cl_id; /* Remote client id. */
+
+ DBTYPE dbtype; /* Cursor type. */
+
+ DBC_INTERNAL *internal; /* Access method private. */
+
+ int (*c_close) __P((DBC *)); /* Methods: public. */
+ int (*c_count) __P((DBC *, db_recno_t *, u_int32_t));
+ int (*c_del) __P((DBC *, u_int32_t));
+ int (*c_dup) __P((DBC *, DBC **, u_int32_t));
+ int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+ int (*c_pget) __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+ /* Methods: private. */
+ int (*c_am_bulk) __P((DBC *, DBT *, u_int32_t));
+ int (*c_am_close) __P((DBC *, db_pgno_t, int *));
+ int (*c_am_del) __P((DBC *));
+ int (*c_am_destroy) __P((DBC *));
+ int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_writelock) __P((DBC *));
+
+ /* Private: for secondary indices. */
+ int (*c_real_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+#define DBC_ACTIVE 0x0001 /* Cursor in use. */
+#define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */
+#define DBC_DIRTY_READ 0x0004 /* Cursor supports dirty reads. */
+#define DBC_OPD 0x0008 /* Cursor references off-page dups. */
+#define DBC_RECOVER 0x0010 /* Recovery cursor; don't log/lock. */
+#define DBC_RMW 0x0020 /* Acquire write flag in read op. */
+#define DBC_TRANSIENT 0x0040 /* Cursor is transient. */
+#define DBC_WRITECURSOR 0x0080 /* Cursor may be used to write (CDB). */
+#define DBC_WRITEDUP 0x0100 /* idup'ed DBC_WRITECURSOR (CDB). */
+#define DBC_WRITER 0x0200 /* Cursor immediately writing (CDB). */
+#define DBC_MULTIPLE 0x0400 /* Return Multiple data. */
+#define DBC_MULTIPLE_KEY 0x0800 /* Return Multiple keys and data. */
+#define DBC_OWN_LID 0x1000 /* Free lock id on destroy. */
+ u_int32_t flags; /* DBC_* flag bits. */
+};
+
+/* Key range statistics structure, filled in by DB->key_range(). */
+struct __key_range {
+ double less; /* Proportion of keys less than the key. */
+ double equal; /* Proportion of keys equal to the key. */
+ double greater; /* Proportion of keys greater than the key. */
+};
+
+/* Btree/Recno statistics structure (returned via DB->stat()). */
+struct __db_bt_stat {
+ u_int32_t bt_magic; /* Magic number. */
+ u_int32_t bt_version; /* Version number. */
+ u_int32_t bt_metaflags; /* Metadata flags. */
+ u_int32_t bt_nkeys; /* Number of unique keys. */
+ u_int32_t bt_ndata; /* Number of data items. */
+ u_int32_t bt_pagesize; /* Page size. */
+ u_int32_t bt_maxkey; /* Maxkey value. */
+ u_int32_t bt_minkey; /* Minkey value. */
+ u_int32_t bt_re_len; /* Fixed-length record length. */
+ u_int32_t bt_re_pad; /* Fixed-length record pad. */
+ u_int32_t bt_levels; /* Tree levels. */
+ u_int32_t bt_int_pg; /* Internal pages. */
+ u_int32_t bt_leaf_pg; /* Leaf pages. */
+ u_int32_t bt_dup_pg; /* Duplicate pages. */
+ u_int32_t bt_over_pg; /* Overflow pages. */
+ u_int32_t bt_free; /* Pages on the free list. */
+ u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */
+ u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */
+ u_int32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */
+ u_int32_t bt_over_pgfree; /* Bytes free in overflow pages. */
+};
+
+/* Hash statistics structure (returned via DB->stat()). */
+struct __db_h_stat {
+ u_int32_t hash_magic; /* Magic number. */
+ u_int32_t hash_version; /* Version number. */
+ u_int32_t hash_metaflags; /* Metadata flags. */
+ u_int32_t hash_nkeys; /* Number of unique keys. */
+ u_int32_t hash_ndata; /* Number of data items. */
+ u_int32_t hash_pagesize; /* Page size. */
+ u_int32_t hash_ffactor; /* Fill factor specified at create. */
+ u_int32_t hash_buckets; /* Number of hash buckets. */
+ u_int32_t hash_free; /* Pages on the free list. */
+ u_int32_t hash_bfree; /* Bytes free on bucket pages. */
+ u_int32_t hash_bigpages; /* Number of big key/data pages. */
+ u_int32_t hash_big_bfree; /* Bytes free on big item pages. */
+ u_int32_t hash_overflows; /* Number of overflow pages. */
+ u_int32_t hash_ovfl_free; /* Bytes free on ovfl pages. */
+ u_int32_t hash_dup; /* Number of dup pages. */
+ u_int32_t hash_dup_free; /* Bytes free on duplicate pages. */
+};
+
+/* Queue statistics structure (returned via DB->stat()). */
+struct __db_qam_stat {
+ u_int32_t qs_magic; /* Magic number. */
+ u_int32_t qs_version; /* Version number. */
+ u_int32_t qs_metaflags; /* Metadata flags. */
+ u_int32_t qs_nkeys; /* Number of unique keys. */
+ u_int32_t qs_ndata; /* Number of data items. */
+ u_int32_t qs_pagesize; /* Page size. */
+ u_int32_t qs_extentsize; /* Pages per extent. */
+ u_int32_t qs_pages; /* Data pages. */
+ u_int32_t qs_re_len; /* Fixed-length record length. */
+ u_int32_t qs_re_pad; /* Fixed-length record pad. */
+ u_int32_t qs_pgfree; /* Bytes free in data pages. */
+ u_int32_t qs_first_recno; /* First not deleted record. */
+ u_int32_t qs_cur_recno; /* Next available record number. */
+};
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */
+
+/* Event notices passed to the DB_ENV db_noticecall callback. */
+typedef enum {
+ DB_NOTICE_LOGFILE_CHANGED /* The active log file changed. */
+} db_notices;
+
+/* Database Environment handle. */
+struct __db_env {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ FILE *db_errfile; /* Error message file stream. */
+ const char *db_errpfx; /* Error message prefix. */
+ /* Callbacks. */
+ void (*db_errcall) __P((const char *, char *));
+ void (*db_feedback) __P((DB_ENV *, int, int));
+ void (*db_paniccall) __P((DB_ENV *, int));
+ void (*db_noticecall) __P((DB_ENV *, db_notices));
+
+ /* App-specified alloc functions. */
+ void *(*db_malloc) __P((size_t));
+ void *(*db_realloc) __P((void *, size_t));
+ void (*db_free) __P((void *));
+
+ /*
+ * Currently, the verbose list is a bit field with room for 32
+ * entries. There's no reason that it needs to be limited, if
+ * there are ever more than 32 entries, convert to a bit array.
+ */
+#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */
+#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */
+#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */
+#define DB_VERB_REPLICATION 0x0008 /* Replication information. */
+#define DB_VERB_WAITSFOR 0x0010 /* Dump waits-for table. */
+ u_int32_t verbose; /* Verbose output. */
+
+ void *app_private; /* Application-private handle. */
+
+ int (*app_dispatch) /* User-specified recovery dispatch. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+
+ /* Locking. */
+ u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */
+ u_int32_t lk_modes; /* Number of lock modes in table. */
+ u_int32_t lk_max; /* Maximum number of locks. */
+ u_int32_t lk_max_lockers;/* Maximum number of lockers. */
+ u_int32_t lk_max_objects;/* Maximum number of locked objects. */
+ u_int32_t lk_detect; /* Deadlock detect on all conflicts. */
+ db_timeout_t lk_timeout; /* Lock timeout period. */
+
+ /* Logging. */
+ u_int32_t lg_bsize; /* Buffer size. */
+ u_int32_t lg_size; /* Log file size. */
+ u_int32_t lg_regionmax; /* Region size. */
+
+ /* Memory pool. */
+ u_int32_t mp_gbytes; /* Cachesize: GB. */
+ u_int32_t mp_bytes; /* Cachesize: Bytes. */
+ size_t mp_size; /* DEPRECATED: Cachesize: bytes. */
+ int mp_ncache; /* Number of cache regions. */
+ size_t mp_mmapsize; /* Maximum file size for mmap. */
+
+ int rep_eid; /* environment id. */
+
+ /* Transactions. */
+ u_int32_t tx_max; /* Maximum number of transactions. */
+ time_t tx_timestamp; /* Recover to specific timestamp. */
+ db_timeout_t tx_timeout; /* Timeout for transactions. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ int panic_errval; /* Panic causing errno. */
+
+ /* User files, paths. */
+ char *db_home; /* Database home. */
+ char *db_log_dir; /* Database log file directory. */
+ char *db_tmp_dir; /* Database tmp file directory. */
+
+ char **db_data_dir; /* Database data file directories. */
+ int data_cnt; /* Database data file slots. */
+ int data_next; /* Next Database data file slot. */
+
+ int db_mode; /* Default open permissions. */
+
+ void *reginfo; /* REGINFO structure reference. */
+ DB_FH *lockfhp; /* fcntl(2) locking file handle. */
+
+ int (**recover_dtab) /* Dispatch table for recover funcs. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t recover_dtab_size;
+ /* Slots in the dispatch table. */
+
+ void *cl_handle; /* RPC: remote client handle. */
+ long cl_id; /* RPC: remote client env id. */
+
+ int db_ref; /* DB reference count. */
+
+ long shm_key; /* shmget(2) key. */
+ u_int32_t tas_spins; /* test-and-set spins. */
+
+ /*
+ * List of open DB handles for this DB_ENV, used for cursor
+ * adjustment. Must be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the
+ * mutex may need to be stored elsewhere on architectures unable
+ * to support mutexes in heap memory, e.g. HP/UX 9.
+ *
+ * !!!
+ * Explicit representation of structure in queue.h.
+ * LIST_HEAD(dblist, __db);
+ */
+ DB_MUTEX *dblist_mutexp; /* Mutex. */
+ struct {
+ struct __db *lh_first;
+ } dblist;
+
+ /*
+ * XA support.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_env) links;
+ */
+ struct {
+ struct __db_env *tqe_next;
+ struct __db_env **tqe_prev;
+ } links;
+ int xa_rmid; /* XA Resource Manager ID. */
+ DB_TXN *xa_txn; /* XA Current transaction. */
+
+ /* API-private structure. */
+ void *api1_internal; /* C++, Perl API private */
+ void *api2_internal; /* Java API private */
+
+ char *passwd; /* Cryptography support. */
+ size_t passwd_len;
+ void *crypto_handle; /* Primary handle. */
+ DB_MUTEX *mt_mutexp; /* Mersenne Twister mutex. */
+ int mti; /* Mersenne Twister index. */
+ u_long *mt; /* Mersenne Twister state vector. */
+
+ /* DB_ENV Methods. */
+ int (*close) __P((DB_ENV *, u_int32_t));
+ int (*dbremove) __P((DB_ENV *,
+ DB_TXN *, const char *, const char *, u_int32_t));
+ int (*dbrename) __P((DB_ENV *, DB_TXN *,
+ const char *, const char *, const char *, u_int32_t));
+ void (*err) __P((const DB_ENV *, int, const char *, ...));
+ void (*errx) __P((const DB_ENV *, const char *, ...));
+ int (*open) __P((DB_ENV *, const char *, u_int32_t, int));
+ int (*remove) __P((DB_ENV *, const char *, u_int32_t));
+ int (*set_data_dir) __P((DB_ENV *, const char *));
+ int (*set_alloc) __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_app_dispatch) __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+ int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB_ENV *, FILE *));
+ void (*set_errpfx) __P((DB_ENV *, const char *));
+ int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+ int (*set_flags) __P((DB_ENV *, u_int32_t, int));
+ void (*set_noticecall) __P((DB_ENV *, void (*)(DB_ENV *, db_notices)));
+ int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
+ int (*set_rpc_server) __P((DB_ENV *,
+ void *, const char *, long, long, u_int32_t));
+ int (*set_shm_key) __P((DB_ENV *, long));
+ int (*set_tas_spins) __P((DB_ENV *, u_int32_t));
+ int (*set_tmp_dir) __P((DB_ENV *, const char *));
+ int (*set_verbose) __P((DB_ENV *, u_int32_t, int));
+
+ void *lg_handle; /* Log handle and methods. */
+ int (*set_lg_bsize) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_dir) __P((DB_ENV *, const char *));
+ int (*set_lg_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_regionmax) __P((DB_ENV *, u_int32_t));
+ int (*log_archive) __P((DB_ENV *, char **[], u_int32_t));
+ int (*log_cursor) __P((DB_ENV *, DB_LOGC **, u_int32_t));
+ int (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t));
+ int (*log_flush) __P((DB_ENV *, const DB_LSN *));
+ int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+ int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+
+ void *lk_handle; /* Lock handle and methods. */
+ int (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int));
+ int (*set_lk_detect) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_locks) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t));
+ int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+ int (*lock_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*lock_get) __P((DB_ENV *,
+ u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+ int (*lock_put) __P((DB_ENV *, DB_LOCK *));
+ int (*lock_id) __P((DB_ENV *, u_int32_t *));
+ int (*lock_id_free) __P((DB_ENV *, u_int32_t));
+ int (*lock_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+ int (*lock_vec) __P((DB_ENV *,
+ u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+ int (*lock_downgrade) __P((DB_ENV *,
+ DB_LOCK *, db_lockmode_t, u_int32_t));
+
+ void *mp_handle; /* Mpool handle and methods. */
+ int (*set_mp_mmapsize) __P((DB_ENV *, size_t));
+ int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int));
+ int (*memp_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ int (*memp_nameop) __P((DB_ENV *,
+ u_int8_t *, const char *, const char *, const char *));
+ int (*memp_register) __P((DB_ENV *, int,
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+ int (*memp_stat) __P((DB_ENV *,
+ DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+ int (*memp_sync) __P((DB_ENV *, DB_LSN *));
+ int (*memp_trickle) __P((DB_ENV *, int, int *));
+
+ void *rep_handle; /* Replication handle and methods. */
+ int (*rep_elect) __P((DB_ENV *, int, int, u_int32_t, int *));
+ int (*rep_flush) __P((DB_ENV *));
+ int (*rep_process_message) __P((DB_ENV *, DBT *, DBT *, int *));
+ int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t));
+ int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+ int (*set_rep_election) __P((DB_ENV *,
+ u_int32_t, u_int32_t, u_int32_t, u_int32_t));
+ int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_timeout) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_transport) __P((DB_ENV *, int,
+ int (*) (DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+
+ void *tx_handle; /* Txn handle and methods. */
+ int (*set_tx_max) __P((DB_ENV *, u_int32_t));
+ int (*set_tx_timestamp) __P((DB_ENV *, time_t *));
+ int (*txn_begin) __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ int (*txn_checkpoint) __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+ int (*txn_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*txn_recover) __P((DB_ENV *,
+ DB_PREPLIST *, long, long *, u_int32_t));
+ int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+ int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t));
+
+#define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */
+#define DB_TEST_ELECTSEND 2 /* after REP_ELECT msgnit */
+#define DB_TEST_ELECTVOTE1 3 /* after __rep_send_vote 1 */
+#define DB_TEST_ELECTVOTE2 4 /* after __rep_wait */
+#define DB_TEST_ELECTWAIT1 5 /* after REP_VOTE2 */
+#define DB_TEST_ELECTWAIT2 6 /* after __rep_wait 2 */
+#define DB_TEST_PREDESTROY 7 /* before destroy op */
+#define DB_TEST_PREOPEN 8 /* before __os_open */
+#define DB_TEST_POSTDESTROY 9 /* after destroy op */
+#define DB_TEST_POSTLOG 10 /* after logging all pages */
+#define DB_TEST_POSTLOGMETA 11 /* after logging meta in btree */
+#define DB_TEST_POSTOPEN 12 /* after __os_open */
+#define DB_TEST_POSTSYNC 13 /* after syncing the log */
+#define DB_TEST_SUBDB_LOCKS 14 /* subdb locking tests */
+ int test_abort; /* Abort value for testing. */
+ int test_copy; /* Copy value for testing. */
+
+#define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */
+#define DB_ENV_CDB 0x0000002 /* DB_INIT_CDB. */
+#define DB_ENV_CDB_ALLDB 0x0000004 /* CDB environment wide locking. */
+#define DB_ENV_CREATE 0x0000008 /* DB_CREATE set. */
+#define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */
+#define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */
+#define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */
+#define DB_ENV_FATAL 0x0000080 /* Doing fatal recovery in env. */
+#define DB_ENV_LOCKDOWN 0x0000100 /* DB_LOCKDOWN set. */
+#define DB_ENV_NOLOCKING 0x0000200 /* DB_NOLOCKING set. */
+#define DB_ENV_NOMMAP 0x0000400 /* DB_NOMMAP set. */
+#define DB_ENV_NOPANIC 0x0000800 /* Okay if panic set. */
+#define DB_ENV_OPEN_CALLED 0x0001000 /* DB_ENV->open called. */
+#define DB_ENV_OVERWRITE 0x0002000 /* DB_OVERWRITE set. */
+#define DB_ENV_PRIVATE 0x0004000 /* DB_PRIVATE set. */
+#define DB_ENV_REGION_INIT 0x0008000 /* DB_REGION_INIT set. */
+#define DB_ENV_REP_CLIENT 0x0010000 /* Replication client. */
+#define DB_ENV_REP_LOGSONLY 0x0020000 /* Log files only replication site. */
+#define DB_ENV_REP_MASTER 0x0040000 /* Replication master. */
+#define DB_ENV_RPCCLIENT 0x0080000 /* DB_CLIENT set. */
+#define DB_ENV_RPCCLIENT_GIVEN 0x0100000 /* User-supplied RPC client struct */
+#define DB_ENV_SYSTEM_MEM 0x0200000 /* DB_SYSTEM_MEM set. */
+#define DB_ENV_THREAD 0x0400000 /* DB_THREAD set. */
+#define DB_ENV_TXN_NOSYNC 0x0800000 /* DB_TXN_NOSYNC set. */
+#define DB_ENV_TXN_WRITE_NOSYNC 0x1000000 /* DB_TXN_WRITE_NOSYNC set. */
+#define DB_ENV_YIELDCPU 0x2000000 /* DB_YIELDCPU set. */
+ u_int32_t flags;
+};
+
#ifndef DB_DBM_HSEARCH
#define	DB_DBM_HSEARCH	0	/* No historic interfaces by default. */
#endif
#if DB_DBM_HSEARCH != 0
/*******************************************************
 * Dbm/Ndbm historic interfaces.
 *******************************************************/
/*
 * An ndbm "database" handle is an alias for the DB access-method handle;
 * the compatibility layer keeps its state inside the DB structure.
 */
typedef struct __db DBM;

#define	DBM_INSERT	0		/* Flags to dbm_store(). */
#define	DBM_REPLACE	1

/*
 * The DB support for ndbm(3) always appends this suffix to the
 * file name to avoid overwriting the user's original database.
 */
#define	DBM_SUFFIX	".db"

/*
 * The ndbm datum type: X/Open (XPG4.2) declares dsize as size_t, the
 * historic 4BSD interface used int -- match whichever this platform uses.
 */
#if defined(_XPG4_2)
typedef struct {
	char *dptr;
	size_t dsize;
} datum;
#else
typedef struct {
	char *dptr;
	int dsize;
} datum;
#endif

/*
 * Translate NDBM calls into DB calls so that DB doesn't step on the
 * application's name space.
 *
 * @DB_VERSION_UNIQUE_NAME@ is an autoconf substitution, allowing several
 * DB versions to coexist in one address space.
 */
#define	dbm_clearerr(a)		__db_ndbm_clearerr@DB_VERSION_UNIQUE_NAME@(a)
#define	dbm_close(a)		__db_ndbm_close@DB_VERSION_UNIQUE_NAME@(a)
#define	dbm_delete(a, b)	__db_ndbm_delete@DB_VERSION_UNIQUE_NAME@(a, b)
#define	dbm_dirfno(a)		__db_ndbm_dirfno@DB_VERSION_UNIQUE_NAME@(a)
#define	dbm_error(a)		__db_ndbm_error@DB_VERSION_UNIQUE_NAME@(a)
#define	dbm_fetch(a, b)		__db_ndbm_fetch@DB_VERSION_UNIQUE_NAME@(a, b)
#define	dbm_firstkey(a)		__db_ndbm_firstkey@DB_VERSION_UNIQUE_NAME@(a)
#define	dbm_nextkey(a)		__db_ndbm_nextkey@DB_VERSION_UNIQUE_NAME@(a)
#define	dbm_open(a, b, c)	__db_ndbm_open@DB_VERSION_UNIQUE_NAME@(a, b, c)
#define	dbm_pagfno(a)		__db_ndbm_pagfno@DB_VERSION_UNIQUE_NAME@(a)
#define	dbm_rdonly(a)		__db_ndbm_rdonly@DB_VERSION_UNIQUE_NAME@(a)
#define	dbm_store(a, b, c, d) \
	__db_ndbm_store@DB_VERSION_UNIQUE_NAME@(a, b, c, d)

/*
 * Translate DBM calls into DB calls so that DB doesn't step on the
 * application's name space.
 *
 * The global variables dbrdonly, dirf and pagf were not retained when 4BSD
 * replaced the dbm interface with ndbm, and are not supported here.
 */
#define	dbminit(a)	__db_dbm_init@DB_VERSION_UNIQUE_NAME@(a)
#define	dbmclose	__db_dbm_close@DB_VERSION_UNIQUE_NAME@
/* "delete" is a C++ keyword; only remap it when compiling as C. */
#if !defined(__cplusplus)
#define	delete(a)	__db_dbm_delete@DB_VERSION_UNIQUE_NAME@(a)
#endif
#define	fetch(a)	__db_dbm_fetch@DB_VERSION_UNIQUE_NAME@(a)
#define	firstkey	__db_dbm_firstkey@DB_VERSION_UNIQUE_NAME@
#define	nextkey(a)	__db_dbm_nextkey@DB_VERSION_UNIQUE_NAME@(a)
#define	store(a, b)	__db_dbm_store@DB_VERSION_UNIQUE_NAME@(a, b)

/*******************************************************
 * Hsearch historic interface.
 *******************************************************/
typedef enum {
	FIND, ENTER
} ACTION;

typedef struct entry {
	char *key;
	char *data;
} ENTRY;

#define	hcreate(a)	__db_hcreate@DB_VERSION_UNIQUE_NAME@(a)
#define	hdestroy	__db_hdestroy@DB_VERSION_UNIQUE_NAME@
#define	hsearch(a, b)	__db_hsearch@DB_VERSION_UNIQUE_NAME@(a, b)

#endif /* DB_DBM_HSEARCH */

#if defined(__cplusplus)
}
#endif
#endif /* !_DB_H_ */
diff --git a/storage/bdb/dbinc/db_185.in b/storage/bdb/dbinc/db_185.in
new file mode 100644
index 00000000000..86e2290c304
--- /dev/null
+++ b/storage/bdb/dbinc/db_185.in
@@ -0,0 +1,169 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: db_185.in,v 11.8 2002/01/11 15:52:24 bostic Exp $
+ */
+
#ifndef _DB_185_H_
#define	_DB_185_H_

#include <sys/types.h>

#include <limits.h>

/*
 * XXX
 * Handle function prototypes and the keyword "const".  This steps on name
 * space that DB doesn't control, but all of the other solutions are worse.
 */
#undef	__P
#if defined(__STDC__) || defined(__cplusplus)
#define	__P(protos)	protos		/* ANSI C prototypes */
#else
#define	const
#define	__P(protos)	()		/* K&R C preprocessor */
#endif

/* Return values common to all DB 1.85 access-method calls. */
#define	RET_ERROR	-1		/* Return values. */
#define	RET_SUCCESS	 0
#define	RET_SPECIAL	 1

/*
 * Fixed-width integer declarations; the @...@ markers are autoconf
 * substitutions filled in at configure time for this platform.
 */
#ifndef	__BIT_TYPES_DEFINED__
#define	__BIT_TYPES_DEFINED__
@u_int8_decl@
@int16_decl@
@u_int16_decl@
@int32_decl@
@u_int32_decl@
#endif

/*
 * XXX
 * SGI/IRIX already has a pgno_t.
 */
#ifdef	sgi
#define	pgno_t	db_pgno_t
#endif

/* Page number, in-page index, and record number types with their limits. */
#define	MAX_PAGE_NUMBER	0xffffffff	/* >= # of pages in a file */
typedef u_int32_t	pgno_t;
#define	MAX_PAGE_OFFSET	65535		/* >= # of bytes in a page */
typedef u_int16_t	indx_t;
#define	MAX_REC_NUMBER	0xffffffff	/* >= # of records in a tree */
typedef u_int32_t	recno_t;
/* Key/data structure -- a Data-Base Thang. */
typedef struct {
	void *data;			/* data */
	size_t size;			/* data length */
} DBT;

/* Routine flags. */
#define	R_CURSOR	1		/* del, put, seq */
#define	__R_UNUSED	2		/* UNUSED */
#define	R_FIRST		3		/* seq */
#define	R_IAFTER	4		/* put (RECNO) */
#define	R_IBEFORE	5		/* put (RECNO) */
#define	R_LAST		6		/* seq (BTREE, RECNO) */
#define	R_NEXT		7		/* seq */
#define	R_NOOVERWRITE	8		/* put */
#define	R_PREV		9		/* seq (BTREE, RECNO) */
#define	R_SETCURSOR	10		/* put (RECNO) */
#define	R_RECNOSYNC	11		/* sync (RECNO) */

/* Underlying access methods available through this interface. */
typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE;

/* Access method description structure. */
typedef struct __db {
	DBTYPE type;			/* Underlying db type. */
	/* Method table; each function takes the handle as first argument. */
	int (*close)	__P((struct __db *));
	int (*del)	__P((const struct __db *, const DBT *, u_int));
	int (*get)	__P((const struct __db *, const DBT *, DBT *, u_int));
	int (*put)	__P((const struct __db *, DBT *, const DBT *, u_int));
	int (*seq)	__P((const struct __db *, DBT *, DBT *, u_int));
	int (*sync)	__P((const struct __db *, u_int));
	void *internal;			/* Access method private. */
	int (*fd)	__P((const struct __db *));
} DB;
+
#define	BTREEMAGIC	0x053162	/* On-disk btree magic number. */
#define	BTREEVERSION	3

/* Structure used to pass parameters to the btree routines. */
typedef struct {
#define	R_DUP		0x01		/* duplicate keys */
	u_int32_t flags;
	u_int32_t cachesize;		/* bytes to cache */
	u_int32_t maxkeypage;		/* maximum keys per page */
	u_int32_t minkeypage;		/* minimum keys per page */
	u_int32_t psize;		/* page size */
	int (*compare)			/* comparison function */
	    __P((const DBT *, const DBT *));
	size_t (*prefix)		/* prefix function */
	    __P((const DBT *, const DBT *));
	int lorder;			/* byte order */
} BTREEINFO;
+
#define	HASHMAGIC	0x061561	/* On-disk hash magic number. */
#define	HASHVERSION	2

/* Structure used to pass parameters to the hashing routines. */
typedef struct {
	u_int32_t bsize;		/* bucket size */
	u_int32_t ffactor;		/* fill factor */
	u_int32_t nelem;		/* number of elements */
	u_int32_t cachesize;		/* bytes to cache */
	u_int32_t			/* hash function */
		(*hash) __P((const void *, size_t));
	int lorder;			/* byte order */
} HASHINFO;
+
/* Structure used to pass parameters to the record routines. */
typedef struct {
#define	R_FIXEDLEN	0x01		/* fixed-length records */
#define	R_NOKEY		0x02		/* key not required */
#define	R_SNAPSHOT	0x04		/* snapshot the input */
	u_int32_t flags;
	u_int32_t cachesize;		/* bytes to cache */
	u_int32_t psize;		/* page size */
	int lorder;			/* byte order */
	size_t reclen;			/* record length (fixed-length records) */
	u_char bval;			/* delimiting byte (variable-length records) */
	char *bfname;			/* btree file name */
} RECNOINFO;

/* Re-define the user's dbopen calls. */
#define	dbopen	__db185_open@DB_VERSION_UNIQUE_NAME@

#endif /* !_DB_185_H_ */
diff --git a/storage/bdb/dbinc/db_am.h b/storage/bdb/dbinc/db_am.h
new file mode 100644
index 00000000000..c5aa424255d
--- /dev/null
+++ b/storage/bdb/dbinc/db_am.h
@@ -0,0 +1,127 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_am.h,v 11.61 2002/08/08 03:20:46 bostic Exp $
+ */
+#ifndef _DB_AM_H_
+#define _DB_AM_H_
+
+/*
+ * IS_AUTO_COMMIT --
+ * Test for local auto-commit flag or global flag with no local DbTxn
+ * handle.
+ */
+#define IS_AUTO_COMMIT(dbenv, txn, flags) \
+ (LF_ISSET(DB_AUTO_COMMIT) || \
+ ((txn) == NULL && F_ISSET((dbenv), DB_ENV_AUTO_COMMIT)))
+
+/* DB recovery operation codes. */
+#define DB_ADD_DUP 1
+#define DB_REM_DUP 2
+#define DB_ADD_BIG 3
+#define DB_REM_BIG 4
+#define DB_ADD_PAGE 5
+#define DB_REM_PAGE 6
+
+/*
+ * Standard initialization and shutdown macros for all recovery functions.
+ */
+#define REC_INTRO(func, inc_count) { \
+ argp = NULL; \
+ dbc = NULL; \
+ file_dbp = NULL; \
+ mpf = NULL; \
+ if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
+ goto out; \
+ if ((ret = __dbreg_id_to_db(dbenv, argp->txnid, \
+ &file_dbp, argp->fileid, inc_count)) != 0) { \
+ if (ret == DB_DELETED) { \
+ ret = 0; \
+ goto done; \
+ } \
+ goto out; \
+ } \
+ if ((ret = file_dbp->cursor(file_dbp, NULL, &dbc, 0)) != 0) \
+ goto out; \
+ F_SET(dbc, DBC_RECOVER); \
+ mpf = file_dbp->mpf; \
+}
+
+#define REC_CLOSE { \
+ int __t_ret; \
+ if (argp != NULL) \
+ __os_free(dbenv, argp); \
+ if (dbc != NULL && \
+ (__t_ret = dbc->c_close(dbc)) != 0 && ret == 0) \
+ ret = __t_ret; \
+ return (ret); \
+}
+
+/*
+ * No-op versions of the same macros.
+ */
+#define REC_NOOP_INTRO(func) { \
+ argp = NULL; \
+ if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
+ return (ret); \
+}
+#define REC_NOOP_CLOSE \
+ if (argp != NULL) \
+ __os_free(dbenv, argp); \
+ return (ret); \
+
+/*
+ * Standard debugging macro for all recovery functions.
+ */
+#ifdef DEBUG_RECOVER
+#define REC_PRINT(func) \
+ (void)func(dbenv, dbtp, lsnp, op, info);
+#else
+#define REC_PRINT(func)
+#endif
+
+/*
+ * Actions to __db_lget
+ */
+#define LCK_ALWAYS 1 /* Lock even for off page dup cursors */
+#define LCK_COUPLE 2 /* Lock Couple */
+#define LCK_COUPLE_ALWAYS 3 /* Lock Couple even in txn. */
+#define LCK_DOWNGRADE 4 /* Downgrade the lock. (internal) */
+#define LCK_ROLLBACK 5 /* Lock even if in rollback */
+
+/*
+ * If doing transactions we have to hold the locks associated with a data item
+ * from a page for the entire transaction. However, we don't have to hold the
+ * locks associated with walking the tree. Distinguish between the two so that
+ * we don't tie up the internal pages of the tree longer than necessary.
+ */
+#define __LPUT(dbc, lock) \
+ (LOCK_ISSET(lock) ? \
+ (dbc)->dbp->dbenv->lock_put((dbc)->dbp->dbenv, &(lock)) : 0)
+
+/*
+ * __TLPUT -- transactional lock put
+ * If the lock is valid then
+ * If we are not in a transaction put the lock.
+ * Else if the cursor is doing dirty reads and this was a read then
+ * put the lock.
+ * Else if the db is supporting dirty reads and this is a write then
+ * downgrade it.
+ * Else do nothing.
+ */
+#define __TLPUT(dbc, lock) \
+ (LOCK_ISSET(lock) ? __db_lput(dbc, &(lock)) : 0)
+
+typedef struct {
+ DBC *dbc;
+ int count;
+} db_trunc_param;
+
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc_auto/db_ext.h"
+#endif /* !_DB_AM_H_ */
diff --git a/storage/bdb/dbinc/db_cxx.in b/storage/bdb/dbinc/db_cxx.in
new file mode 100644
index 00000000000..6752b36ec42
--- /dev/null
+++ b/storage/bdb/dbinc/db_cxx.in
@@ -0,0 +1,795 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_cxx.in,v 11.113 2002/08/23 13:02:27 mjc Exp $
+ */
+
+#ifndef _DB_CXX_H_
+#define _DB_CXX_H_
+//
+// C++ assumptions:
+//
+// To ensure portability to many platforms, both new and old, we make
+// few assumptions about the C++ compiler and library. For example,
+// we do not expect STL, templates or namespaces to be available. The
+// "newest" C++ feature used is exceptions, which are used liberally
+// to transmit error information. Even the use of exceptions can be
+// disabled at runtime, to do so, use the DB_CXX_NO_EXCEPTIONS flags
+// with the DbEnv or Db constructor.
+//
+// C++ naming conventions:
+//
+// - All top level class names start with Db.
+// - All class members start with lower case letter.
+// - All private data members are suffixed with underscore.
+// - Use underscores to divide names into multiple words.
+// - Simple data accessors are named with get_ or set_ prefix.
+// - All method names are taken from names of functions in the C
+// layer of db (usually by dropping a prefix like "db_").
+// These methods have the same argument types and order,
+// other than dropping the explicit arg that acts as "this".
+//
+// As a rule, each DbFoo object has exactly one underlying DB_FOO struct
+// (defined in db.h) associated with it. In some cases, we inherit directly
+// from the DB_FOO structure to make this relationship explicit. Often,
+// the underlying C layer allocates and deallocates these structures, so
+// there is no easy way to add any data to the DbFoo class. When you see
+// a comment about whether data is permitted to be added, this is what
+// is going on. Of course, if we need to add data to such C++ classes
+// in the future, we will arrange to have an indirect pointer to the
+// DB_FOO struct (as some of the classes already have).
+//
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Forward declarations
+//
+
+#include <stdarg.h>
+
+@cxx_have_stdheaders@
+#ifdef HAVE_CXX_STDHEADERS
+#include <iostream>
+#define __DB_OSTREAMCLASS std::ostream
+#else
+#include <iostream.h>
+#define __DB_OSTREAMCLASS ostream
+#endif
+
+#include "db.h"
+#include "cxx_common.h"
+#include "cxx_except.h"
+
+class Db; // forward
+class Dbc; // forward
+class DbEnv; // forward
+class DbInfo; // forward
+class DbLock; // forward
+class DbLogc; // forward
+class DbLsn; // forward
+class DbMpoolFile; // forward
+class DbPreplist; // forward
+class Dbt; // forward
+class DbTxn; // forward
+
+// These classes are not defined here and should be invisible
+// to the user, but some compilers require forward references.
+// There is one for each use of the DEFINE_DB_CLASS macro.
+
+class DbImp;
+class DbEnvImp;
+class DbMpoolFileImp;
+class DbTxnImp;
+
+// DEFINE_DB_CLASS defines an imp_ data member and imp() accessor.
+// The underlying type is a pointer to an opaque *Imp class, that
+// gets converted to the correct implementation class by the implementation.
+//
+// Since these defines use "private/public" labels, and leave the access
+// being "private", we always use these by convention before any data
+// members in the private section of a class. Keeping them in the
+// private section also emphasizes that they are off limits to user code.
+//
/* Declares the opaque imp_ member and its imp()/constimp() accessors;
 * leaves the enclosing class's access level at "private". */
#define	DEFINE_DB_CLASS(name) \
	public: class name##Imp* imp() { return (imp_); } \
	public: const class name##Imp* constimp() const { return (imp_); } \
	private: class name##Imp* imp_
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Turn off inappropriate compiler warnings
+//
+
+#ifdef _MSC_VER
+
+// These are level 4 warnings that are explicitly disabled.
+// With Visual C++, by default you do not see above level 3 unless
+// you use /W4. But we like to compile with the highest level
+// warnings to catch other errors.
+//
+// 4201: nameless struct/union
+// triggered by standard include file <winnt.h>
+//
+// 4514: unreferenced inline function has been removed
+// certain include files in MSVC define methods that are not called
+//
+#pragma warning(disable: 4201 4514)
+
+#endif
+
+// Some interfaces can be customized by allowing users to define
+// callback functions. For performance and logistical reasons, some
+// callback functions must be declared in extern "C" blocks. For others,
+// we allow you to declare the callbacks in C++ or C (or an extern "C"
+// block) as you wish. See the set methods for the callbacks for
+// the choices.
+//
// Callback typedefs that must have C linkage; the ones marked
// "C++ version available" also have C++-callable variants (see the
// corresponding set_* methods).
extern "C" {
	typedef void * (*db_malloc_fcn_type)
		(size_t);
	typedef void * (*db_realloc_fcn_type)
		(void *, size_t);
	typedef void (*db_free_fcn_type)
		(void *);
	typedef int (*bt_compare_fcn_type)	/*C++ version available*/
		(DB *, const DBT *, const DBT *);
	typedef size_t (*bt_prefix_fcn_type)	/*C++ version available*/
		(DB *, const DBT *, const DBT *);
	typedef int (*dup_compare_fcn_type)	/*C++ version available*/
		(DB *, const DBT *, const DBT *);
	typedef u_int32_t (*h_hash_fcn_type)	/*C++ version available*/
		(DB *, const void *, u_int32_t);
	typedef int (*pgin_fcn_type)
		(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
	typedef int (*pgout_fcn_type)
		(DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Lock classes
+//
+
// Value-semantics wrapper around the C-layer DB_LOCK; copyable and
// assignable.  Locks are acquired/released through DbEnv methods.
class _exported DbLock
{
	friend class DbEnv;

public:
	DbLock();
	DbLock(const DbLock &);
	DbLock &operator = (const DbLock &);

protected:
	// We can add data to this class if needed
	// since its contained class is not allocated by db.
	// (see comment at top)

	DbLock(DB_LOCK);		// construct from the C structure
	DB_LOCK lock_;			// underlying C-layer lock
};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Log classes
+//
+
// Log sequence number: inherits the DB_LSN layout directly and adds no
// data, so a DbLsn can be cast to DB_LSN by its friends.
class _exported DbLsn : protected DB_LSN
{
	friend class DbEnv;	// friendship needed to cast to base class
	friend class DbLogc;	// friendship needed to cast to base class
};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Memory pool classes
+//
+
// Wrapper for the C-layer DB_MPOOLFILE (memory-pool file) handle,
// implemented via the opaque imp_ pointer.  Instances are produced by
// DbEnv::memp_fcreate() and released with close(), never deleted.
class _exported DbMpoolFile
{
	friend class DbEnv;

private:
	// Put this first to allow inlining with some C++ compilers (g++-2.95)
	DEFINE_DB_CLASS(DbMpoolFile);

public:
	// Methods mirroring the C DB_MPOOLFILE interface.
	int close(u_int32_t flags);
	int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep);
	void last_pgno(db_pgno_t *pgnoaddr);
	int open(const char *file, u_int32_t flags, int mode, size_t pagesize);
	int put(void *pgaddr, u_int32_t flags);
	void refcnt(db_pgno_t *pgnoaddr);
	int set(void *pgaddr, u_int32_t flags);
	int set_clear_len(u_int32_t len);
	int set_fileid(u_int8_t *fileid);
	int set_ftype(int ftype);
	int set_lsn_offset(int32_t offset);
	int set_pgcookie(DBT *dbt);
	void set_unlink(int);
	int sync();

	// Access the underlying C structure (imp_ cast to DB_MPOOLFILE).
	virtual DB_MPOOLFILE *get_DB_MPOOLFILE()
	{
		return (DB_MPOOLFILE *)imp();
	}

	virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const
	{
		return (const DB_MPOOLFILE *)constimp();
	}

private:
	// We can add data to this class if needed
	// since it is implemented via a pointer.
	// (see comment at top)

	// Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile,
	// and call DbMpoolFile::close() rather than delete to release them.
	//
	DbMpoolFile();

	// Shut g++ up.
protected:
	virtual ~DbMpoolFile();

private:
	// no copying
	DbMpoolFile(const DbMpoolFile &);
	void operator = (const DbMpoolFile &);
};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// This is filled in and returned by the DbEnv::txn_recover() method.
+//
+
// One prepared-but-unresolved transaction, as returned by
// DbEnv::txn_recover(): the transaction handle plus its XA global id.
class _exported DbPreplist
{
public:
	DbTxn *txn;			// transaction to commit or abort
	u_int8_t gid[DB_XIDDATASIZE];	// XA global transaction id
};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Transaction classes
+//
+
// Wrapper for the C-layer DB_TXN handle, implemented via the opaque
// imp_ pointer.  Instances are produced by DbEnv::txn_begin() and
// released by abort() or commit(), never deleted directly.
class _exported DbTxn
{
	friend class DbEnv;

private:
	// Put this first to allow inlining with some C++ compilers (g++-2.95)
	DEFINE_DB_CLASS(DbTxn);

public:
	// Methods mirroring the C DB_TXN interface.
	int abort();
	int commit(u_int32_t flags);
	int discard(u_int32_t flags);
	u_int32_t id();
	int prepare(u_int8_t *gid);
	int set_timeout(db_timeout_t timeout, u_int32_t flags);

	// Access the underlying C structure (imp_ cast to DB_TXN).
	virtual DB_TXN *get_DB_TXN()
	{
		return (DB_TXN *)imp();
	}

	virtual const DB_TXN *get_const_DB_TXN() const
	{
		return (const DB_TXN *)constimp();
	}

	// Recover the C++ wrapper stashed in the C handle's api_internal slot.
	static DbTxn* get_DbTxn(DB_TXN *txn)
	{
		return (DbTxn *)txn->api_internal;
	}

	static const DbTxn* get_const_DbTxn(const DB_TXN *txn)
	{
		return (const DbTxn *)txn->api_internal;
	}

	// For internal use only.
	static DbTxn* wrap_DB_TXN(DB_TXN *txn);

private:
	// We can add data to this class if needed
	// since it is implemented via a pointer.
	// (see comment at top)

	// Note: use DbEnv::txn_begin() to get pointers to a DbTxn,
	// and call DbTxn::abort() or DbTxn::commit rather than
	// delete to release them.
	//
	DbTxn();
	// For internal use only.
	DbTxn(DB_TXN *txn);
	virtual ~DbTxn();

	// no copying
	DbTxn(const DbTxn &);
	void operator = (const DbTxn &);
};
+
+//
+// Berkeley DB environment class. Provides functions for opening databases.
+// User of this library can use this class as a starting point for
+// developing a DB application - derive their application class from
+// this one, add application control logic.
+//
+// Note that if you use the default constructor, you must explicitly
+// call appinit() before any other db activity (e.g. opening files)
+//
+class _exported DbEnv
+{
+ friend class Db;
+ friend class DbLock;
+ friend class DbMpoolFile;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbEnv);
+
+public:
+ // After using this constructor, you can set any needed
+ // parameters for the environment using the set_* methods.
+ // Then call open() to finish initializing the environment
+ // and attaching it to underlying files.
+ //
+ DbEnv(u_int32_t flags);
+
+ virtual ~DbEnv();
+
+ // These methods match those in the C interface.
+ //
+ virtual int close(u_int32_t);
+ virtual int dbremove(DbTxn *txn, const char *name, const char *subdb,
+ u_int32_t flags);
+ virtual int dbrename(DbTxn *txn, const char *name, const char *subdb,
+ const char *newname, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual void *get_app_private() const;
+ virtual int open(const char *, u_int32_t, int);
+ virtual int remove(const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_data_dir(const char *);
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_flags(u_int32_t, int);
+ virtual int set_feedback(void (*)(DbEnv *, int, int));
+ virtual int set_lg_bsize(u_int32_t);
+ virtual int set_lg_dir(const char *);
+ virtual int set_lg_max(u_int32_t);
+ virtual int set_lg_regionmax(u_int32_t);
+ virtual int set_lk_conflicts(u_int8_t *, int);
+ virtual int set_lk_detect(u_int32_t);
+ virtual int set_lk_max(u_int32_t);
+ virtual int set_lk_max_lockers(u_int32_t);
+ virtual int set_lk_max_locks(u_int32_t);
+ virtual int set_lk_max_objects(u_int32_t);
+ virtual int set_mp_mmapsize(size_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_rpc_server(void *, char *, long, long, u_int32_t);
+ virtual int set_shm_key(long);
+ virtual int set_timeout(db_timeout_t timeout, u_int32_t flags);
+ virtual int set_tmp_dir(const char *);
+ virtual int set_tas_spins(u_int32_t);
+ virtual int set_tx_max(u_int32_t);
+ virtual int set_app_dispatch(int (*)(DbEnv *,
+ Dbt *, DbLsn *, db_recops));
+ virtual int set_tx_timestamp(time_t *);
+ virtual int set_verbose(u_int32_t which, int onoff);
+
+ // Version information. A static method so it can be obtained anytime.
+ //
+ static char *version(int *major, int *minor, int *patch);
+
+ // Convert DB errors to strings
+ static char *strerror(int);
+
+ // If an error is detected and the error call function
+ // or stream is set, a message is dispatched or printed.
+ // If a prefix is set, each message is prefixed.
+ //
+ // You can use set_errcall() or set_errfile() above to control
+ // error functionality. Alternatively, you can call
+ // set_error_stream() to force all errors to a C++ stream.
+ // It is unwise to mix these approaches.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ // used internally
+ static void runtime_error(const char *caller, int err,
+ int error_policy);
+ static void runtime_error_dbt(const char *caller, Dbt *dbt,
+ int error_policy);
+ static void runtime_error_lock_get(const char *caller, int err,
+ db_lockop_t op, db_lockmode_t mode,
+ const Dbt *obj, DbLock lock, int index,
+ int error_policy);
+
+ // Lock functions
+ //
+ virtual int lock_detect(u_int32_t flags, u_int32_t atype, int *aborted);
+ virtual int lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock);
+ virtual int lock_id(u_int32_t *idp);
+ virtual int lock_id_free(u_int32_t id);
+ virtual int lock_put(DbLock *lock);
+ virtual int lock_stat(DB_LOCK_STAT **statp, u_int32_t flags);
+ virtual int lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elistp);
+
+ // Log functions
+ //
+ virtual int log_archive(char **list[], u_int32_t flags);
+ static int log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
+ virtual int log_cursor(DbLogc **cursorp, u_int32_t flags);
+ virtual int log_file(DbLsn *lsn, char *namep, size_t len);
+ virtual int log_flush(const DbLsn *lsn);
+ virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
+
+ virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags);
+
+ // Mpool functions
+ //
+ virtual int memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags);
+ virtual int memp_register(int ftype,
+ pgin_fcn_type pgin_fcn,
+ pgout_fcn_type pgout_fcn);
+ virtual int memp_stat(DB_MPOOL_STAT
+ **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags);
+ virtual int memp_sync(DbLsn *lsn);
+ virtual int memp_trickle(int pct, int *nwrotep);
+
+ // Transaction functions
+ //
+ virtual int txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags);
+ virtual int txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags);
+ virtual int txn_recover(DbPreplist *preplist, long count,
+ long *retp, u_int32_t flags);
+ virtual int txn_stat(DB_TXN_STAT **statp, u_int32_t flags);
+
+ // Replication functions
+ //
+ virtual int rep_elect(int, int, u_int32_t, int *);
+ virtual int rep_process_message(Dbt *, Dbt *, int *);
+ virtual int rep_start(Dbt *, u_int32_t);
+ virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags);
+ virtual int set_rep_limit(u_int32_t, u_int32_t);
+ virtual int set_rep_transport(u_int32_t,
+ int (*)(DbEnv *, const Dbt *, const Dbt *, int, u_int32_t));
+
+ // Conversion functions
+ //
+ virtual DB_ENV *get_DB_ENV()
+ {
+ return (DB_ENV *)imp();
+ }
+
+ virtual const DB_ENV *get_const_DB_ENV() const
+ {
+ return (const DB_ENV *)constimp();
+ }
+
+ static DbEnv* get_DbEnv(DB_ENV *dbenv)
+ {
+ return (DbEnv *)dbenv->api1_internal;
+ }
+
+ static const DbEnv* get_const_DbEnv(const DB_ENV *dbenv)
+ {
+ return (const DbEnv *)dbenv->api1_internal;
+ }
+
+ // For internal use only.
+ static DbEnv* wrap_DB_ENV(DB_ENV *dbenv);
+
+ // These are public only because they need to be called
+ // via C functions. They should never be called by users
+ // of this class.
+ //
+ static void _stream_error_function(const char *, char *);
+ static int _app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn,
+ db_recops op);
+ static void _paniccall_intercept(DB_ENV *env, int errval);
+ static void _feedback_intercept(DB_ENV *env, int opcode, int pct);
+ static int _rep_send_intercept(DB_ENV *env,
+ const DBT *cntrl, const DBT *data,
+ int id, u_int32_t flags);
+
+private:
+ void cleanup();
+ int initialize(DB_ENV *env);
+ int error_policy();
+
+ // For internal use only.
+ DbEnv(DB_ENV *, u_int32_t flags);
+
+ // no copying
+ DbEnv(const DbEnv &);
+ void operator = (const DbEnv &);
+
+ // instance data
+ int construct_error_;
+ u_int32_t construct_flags_;
+ int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops);
+ void (*feedback_callback_)(DbEnv *, int, int);
+ void (*paniccall_callback_)(DbEnv *, int);
+ int (*pgin_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*pgout_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*rep_send_callback_)(DbEnv *,
+ const Dbt *, const Dbt *, int, u_int32_t);
+
+ // class data
+ static __DB_OSTREAMCLASS *error_stream_;
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Table access classes
+//
+
+//
+// Represents a database table = a set of keys with associated values.
+//
+class _exported Db
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(Db);
+
+public:
+ Db(DbEnv*, u_int32_t); // create a Db object, then call open()
+ virtual ~Db(); // does *not* call close.
+
+ // These methods exactly match those in the C interface.
+ //
+ virtual int associate(DbTxn *txn, Db *secondary,
+ int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *),
+ u_int32_t flags);
+ virtual int close(u_int32_t flags);
+ virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
+ virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual int fd(int *fdp);
+ virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+ virtual void *get_app_private() const;
+ virtual int get_byteswapped(int *);
+ virtual int get_type(DBTYPE *);
+ virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags);
+ virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t);
+ virtual int open(DbTxn *txnid,
+ const char *, const char *subname, DBTYPE, u_int32_t, int);
+ virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data,
+ u_int32_t flags);
+ virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t);
+ virtual int remove(const char *, const char *, u_int32_t);
+ virtual int rename(const char *, const char *, const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t));
+ virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/
+ virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_bt_maxkey(u_int32_t);
+ virtual int set_bt_minkey(u_int32_t);
+ virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/
+ virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_cache_priority(DB_CACHE_PRIORITY);
+ virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/
+ virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_feedback(void (*)(Db *, int, int));
+ virtual int set_flags(u_int32_t);
+ virtual int set_h_ffactor(u_int32_t);
+ virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/
+ virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t));
+ virtual int set_h_nelem(u_int32_t);
+ virtual int set_lorder(int);
+ virtual int set_pagesize(u_int32_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_re_delim(int);
+ virtual int set_re_len(u_int32_t);
+ virtual int set_re_pad(int);
+ virtual int set_re_source(char *);
+ virtual int set_q_extentsize(u_int32_t);
+ virtual int stat(void *sp, u_int32_t flags);
+ virtual int sync(u_int32_t flags);
+ virtual int truncate(DbTxn *, u_int32_t *, u_int32_t);
+ virtual int upgrade(const char *name, u_int32_t flags);
+ virtual int verify(const char *, const char *, __DB_OSTREAMCLASS *, u_int32_t);
+
+ // These additional methods are not in the C interface, and
+ // are only available for C++.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ virtual DB *get_DB()
+ {
+ return (DB *)imp();
+ }
+
+ virtual const DB *get_const_DB() const
+ {
+ return (const DB *)constimp();
+ }
+
+ static Db* get_Db(DB *db)
+ {
+ return (Db *)db->api_internal;
+ }
+
+ static const Db* get_const_Db(const DB *db)
+ {
+ return (const Db *)db->api_internal;
+ }
+
+private:
+ // no copying
+ Db(const Db &);
+ Db &operator = (const Db &);
+
+ void cleanup();
+ int initialize();
+ int error_policy();
+
+ // instance data
+ DbEnv *env_;
+ int construct_error_;
+ u_int32_t flags_;
+ u_int32_t construct_flags_;
+
+public:
+ // These are public only because they need to be called
+ // via C callback functions. They should never be used by
+ // external users of this class.
+ //
+ int (*append_recno_callback_)(Db *, Dbt *, db_recno_t);
+ int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *);
+ int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *);
+ int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ void (*feedback_callback_)(Db *, int, int);
+ u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t);
+};
+
+//
+// A chunk of data, maybe a key or value.
+//
+class _exported Dbt : private DBT
+{
+ friend class Dbc;
+ friend class Db;
+ friend class DbEnv;
+ friend class DbLogc;
+
+public:
+
+ // key/data
+ void *get_data() const { return data; }
+ void set_data(void *value) { data = value; }
+
+ // key/data length
+ u_int32_t get_size() const { return size; }
+ void set_size(u_int32_t value) { size = value; }
+
+ // RO: length of user buffer.
+ u_int32_t get_ulen() const { return ulen; }
+ void set_ulen(u_int32_t value) { ulen = value; }
+
+ // RO: get/put record length.
+ u_int32_t get_dlen() const { return dlen; }
+ void set_dlen(u_int32_t value) { dlen = value; }
+
+ // RO: get/put record offset.
+ u_int32_t get_doff() const { return doff; }
+ void set_doff(u_int32_t value) { doff = value; }
+
+ // flags
+ u_int32_t get_flags() const { return flags; }
+ void set_flags(u_int32_t value) { flags = value; }
+
+ // Conversion functions
+ DBT *get_DBT() { return (DBT *)this; }
+ const DBT *get_const_DBT() const { return (const DBT *)this; }
+
+ static Dbt* get_Dbt(DBT *dbt) { return (Dbt *)dbt; }
+ static const Dbt* get_const_Dbt(const DBT *dbt)
+ { return (const Dbt *)dbt; }
+
+ Dbt(void *data, u_int32_t size);
+ Dbt();
+ ~Dbt();
+ Dbt(const Dbt &);
+ Dbt &operator = (const Dbt &);
+
+private:
+ // Note: no extra data appears in this class (other than
+ // inherited from DBT) since we need DBT and Dbt objects
+ // to have interchangeable pointers.
+ //
+ // When subclassing this class, remember that callback
+ // methods like bt_compare, bt_prefix, dup_compare may
+ // internally manufacture DBT objects (which later are
+ // cast to Dbt), so such callbacks might receive objects
+ // not of your subclassed type.
+};
+
+class _exported Dbc : protected DBC
+{
+ friend class Db;
+
+public:
+ int close();
+ int count(db_recno_t *countp, u_int32_t flags);
+ int del(u_int32_t flags);
+ int dup(Dbc** cursorp, u_int32_t flags);
+ int get(Dbt* key, Dbt *data, u_int32_t flags);
+ int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags);
+ int put(Dbt* key, Dbt *data, u_int32_t flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+ // Note: use Db::cursor() to get pointers to a Dbc,
+ // and call Dbc::close() rather than delete to release them.
+ //
+ Dbc();
+ ~Dbc();
+
+ // no copying
+ Dbc(const Dbc &);
+ Dbc &operator = (const Dbc &);
+};
+
+class _exported DbLogc : protected DB_LOGC
+{
+	friend class DbEnv;
+
+public:
+	int close(u_int32_t _flags);
+	int get(DbLsn *lsn, Dbt *data, u_int32_t _flags);
+
+private:
+	// No data is permitted in this class (see comment at top)
+
+	// Note: use DbEnv::log_cursor() to get pointers to a DbLogc,
+	// and call DbLogc::close() rather than delete to release them.
+	//
+	DbLogc();
+	~DbLogc();
+
+	// no copying: private and undefined, so copies fail to compile/link
+	DbLogc(const DbLogc &);
+	DbLogc &operator = (const DbLogc &);
+};
+#endif /* !_DB_CXX_H_ */
diff --git a/storage/bdb/dbinc/db_dispatch.h b/storage/bdb/dbinc/db_dispatch.h
new file mode 100644
index 00000000000..283eb1e95de
--- /dev/null
+++ b/storage/bdb/dbinc/db_dispatch.h
@@ -0,0 +1,105 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: db_dispatch.h,v 11.30 2002/06/20 19:34:03 margo Exp $
+ */
+
+#ifndef _DB_DISPATCH_H_
+#define _DB_DISPATCH_H_
+
+/*
+ * Declarations and typedefs for the list of transaction IDs used during
+ * recovery. This is a generic list used to pass along whatever information
+ * we need during recovery.
+ */
+typedef enum {
+ TXNLIST_DELETE,
+ TXNLIST_LSN,
+ TXNLIST_PGNO,
+ TXNLIST_TXNID
+} db_txnlist_type;
+
+#define DB_TXNLIST_MASK(hp, n) (n % hp->nslots)
+struct __db_txnhead {
+ u_int32_t maxid; /* Maximum transaction id. */
+ DB_LSN maxlsn; /* Maximum commit lsn. */
+ DB_LSN ckplsn; /* LSN of last retained checkpoint. */
+ DB_LSN trunc_lsn; /* Lsn to which we are going to truncate;
+ * make sure we abort anyone after this. */
+ int32_t generation; /* Current generation number. */
+ int32_t gen_alloc; /* Number of generations allocated. */
+ struct {
+ int32_t generation;
+ u_int32_t txn_min;
+ u_int32_t txn_max;
+ } *gen_array; /* Array of txnids associated with a gen. */
+ int nslots;
+ LIST_HEAD(__db_headlink, __db_txnlist) head[1];
+};
+
+struct __db_txnlist {
+ db_txnlist_type type;
+ LIST_ENTRY(__db_txnlist) links;
+ union {
+ struct {
+ u_int32_t txnid;
+ int32_t generation;
+ int32_t status;
+ } t;
+ struct {
+ int32_t ntxns;
+ int32_t maxn;
+ DB_LSN *lsn_array;
+ } l;
+ struct {
+ int32_t nentries;
+ int32_t maxentry;
+ int32_t locked;
+ char *fname;
+ int32_t fileid;
+ db_pgno_t *pgno_array;
+ u_int8_t uid[DB_FILE_ID_LEN];
+ } p;
+ } u;
+};
+
+/*
+ * Flag value for __db_txnlist_lsnadd. Distinguish whether we are replacing
+ * an entry in the transaction list or adding a new one.
+ */
+#define TXNLIST_NEW 0x1
+
+#define DB_user_BEGIN 10000
+
+#endif /* !_DB_DISPATCH_H_ */
diff --git a/storage/bdb/dbinc/db_int.in b/storage/bdb/dbinc/db_int.in
new file mode 100644
index 00000000000..2f46293a65d
--- /dev/null
+++ b/storage/bdb/dbinc/db_int.in
@@ -0,0 +1,473 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_int.in,v 11.106 2002/09/10 02:48:08 bostic Exp $
+ */
+
+#ifndef _DB_INTERNAL_H_
+#define _DB_INTERNAL_H_
+
+/*******************************************************
+ * System includes, db.h, a few general DB includes. The DB includes are
+ * here because it's OK if db_int.h includes queue structure declarations.
+ *******************************************************/
+#ifndef NO_SYSTEM_INCLUDES
+#if defined(__STDC__) || defined(__cplusplus)
+#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
+#include <errno.h>
+#endif
+
+#include "db.h"
+
+#include "dbinc/queue.h"
+#include "dbinc/shqueue.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*******************************************************
+ * General purpose constants and macros.
+ *******************************************************/
+#define UINT16_T_MAX 0xffff /* Maximum 16 bit unsigned. */
+#define UINT32_T_MAX 0xffffffff /* Maximum 32 bit unsigned. */
+
+#define MEGABYTE 1048576
+#define GIGABYTE 1073741824
+
+#define MS_PER_SEC 1000 /* Milliseconds in a second. */
+#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */
+
+#define RECNO_OOB 0 /* Illegal record number. */
+
+/* Test for a power-of-two (tests true for zero, which doesn't matter here). */
+#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
+
+/* Test for valid page sizes. */
+#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */
+#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */
+#define IS_VALID_PAGESIZE(x) \
+ (POWER_OF_TWO(x) && (x) >= DB_MIN_PGSIZE && ((x) <= DB_MAX_PGSIZE))
+
+/* Minimum number of pages cached, by default. */
+#define DB_MINPAGECACHE 16
+
+/*
+ * If we are unable to determine the underlying filesystem block size, use
+ * 8K on the grounds that most OS's use less than 8K for a VM page size.
+ */
+#define DB_DEF_IOSIZE (8 * 1024)
+
+/*
+ * Aligning items to particular sizes or in pages or memory.
+ *
+ * db_align_t --
+ * Largest integral type, used to align structures in memory. We don't store
+ * floating point types in structures, so integral types should be sufficient
+ * (and we don't have to worry about systems that store floats in other than
+ * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite
+ * structure assignments and ANSI C memcpy calls to be in-line instructions
+ * that happen to require alignment. Note: this alignment isn't sufficient for
+ * mutexes, which depend on things like cache line alignment. Mutex alignment
+ * is handled separately, in mutex.h.
+ *
+ * db_alignp_t --
+ * Integral type that's the same size as a pointer. There are places where
+ * DB modifies pointers by discarding the bottom bits to guarantee alignment.
+ * We can't use db_align_t, it may be larger than the pointer, and compilers
+ * get upset about that. So far we haven't run on any machine where there
+ * isn't an integral type the same size as a pointer -- here's hoping.
+ */
+@db_align_t_decl@
+@db_alignp_t_decl@
+
+/* Align an integer to a specific boundary. */
+#undef ALIGN
+#define ALIGN(v, bound) (((v) + (bound) - 1) & ~(((db_align_t)bound) - 1))
+
+/*
+ * Print an address as a u_long (a u_long is the largest type we can print
+ * portably). Most 64-bit systems have made longs 64-bits, so this should
+ * work.
+ */
+#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))
+
+/*
+ * Convert a pointer to a small integral value.
+ *
+ * The (u_int16_t)(db_alignp_t) cast avoids warnings: the (db_alignp_t) cast
+ * converts the value to an integral type, and the (u_int16_t) cast converts
+ * it to a small integral type so we don't get complaints when we assign the
+ * final result to an integral type smaller than db_alignp_t.
+ */
+#define P_TO_UINT32(p) ((u_int32_t)(db_alignp_t)(p))
+#define P_TO_UINT16(p) ((u_int16_t)(db_alignp_t)(p))
+
+/*
+ * There are several on-page structures that are declared to have a number of
+ * fields followed by a variable length array of items. The structure size
+ * without including the variable length array or the address of the first of
+ * those elements can be found using SSZ.
+ *
+ * This macro can also be used to find the offset of a structure element in a
+ * structure. This is used in various places to copy structure elements from
+ * unaligned memory references, e.g., pointers into a packed page.
+ *
+ * There are two versions because compilers object if you take the address of
+ * an array.
+ */
+#undef SSZ
+#define SSZ(name, field) P_TO_UINT16(&(((name *)0)->field))
+
+#undef SSZA
+#define SSZA(name, field) P_TO_UINT16(&(((name *)0)->field[0]))
+
+/* Structure used to print flag values. */
+typedef struct __fn {
+ u_int32_t mask; /* Flag value. */
+ const char *name; /* Flag name. */
+} FN;
+
+/* Set, clear and test flags. */
+#define FLD_CLR(fld, f) (fld) &= ~(f)
+#define FLD_ISSET(fld, f) ((fld) & (f))
+#define FLD_SET(fld, f) (fld) |= (f)
+#define F_CLR(p, f) (p)->flags &= ~(f)
+#define F_ISSET(p, f) ((p)->flags & (f))
+#define F_SET(p, f) (p)->flags |= (f)
+#define LF_CLR(f) ((flags) &= ~(f))
+#define LF_ISSET(f) ((flags) & (f))
+#define LF_SET(f) ((flags) |= (f))
+
+/* Display separator string. */
+#undef DB_LINE
+#define DB_LINE "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
+
+/* Unused, or not-used-yet variable. "Shut that bloody compiler up!" */
+#define COMPQUIET(n, v) (n) = (v)
+
+/*******************************************************
+ * API return values
+ *******************************************************/
+ /*
+ * Return values that are OK for each different call. Most calls have
+ * a standard 'return of 0 is only OK value', but some, like db->get
+ * have DB_NOTFOUND as a return value, but it really isn't an error.
+ */
+#define DB_RETOK_STD(ret) ((ret) == 0)
+#define DB_RETOK_DBCDEL(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBCGET(ret) DB_RETOK_DBGET(ret)
+#define DB_RETOK_DBCPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBDEL(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBGET(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST)
+#define DB_RETOK_LGGET(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_MPGET(ret) ((ret) == 0 || (ret) == DB_PAGE_NOTFOUND)
+#define DB_RETOK_REPPMSG(ret) ((ret) == 0 || (ret) == DB_REP_NEWMASTER || \
+ (ret) == DB_REP_NEWSITE)
+
+/*******************************************************
+ * Files.
+ *******************************************************/
+ /*
+ * We use 1024 as the maximum path length. It's too hard to figure out what
+ * the real path length is, as it was traditionally stored in <sys/param.h>,
+ * and that file isn't always available.
+ */
+#undef MAXPATHLEN
+#define MAXPATHLEN 1024
+
+#define PATH_DOT "." /* Current working directory. */
+#define PATH_SEPARATOR "/" /* Path separator character(s). */
+
+/*
+ * Flags understood by __os_open.
+ */
+#define DB_OSO_CREATE 0x0001 /* POSIX: O_CREAT */
+#define DB_OSO_DIRECT 0x0002 /* Don't buffer the file in the OS. */
+#define DB_OSO_EXCL 0x0004 /* POSIX: O_EXCL */
+#define DB_OSO_LOG 0x0008 /* Opening a log file. */
+#define DB_OSO_RDONLY 0x0010 /* POSIX: O_RDONLY */
+#define DB_OSO_REGION 0x0020 /* Opening a region file. */
+#define DB_OSO_SEQ 0x0040 /* Expected sequential access. */
+#define DB_OSO_TEMP 0x0080 /* Remove after last close. */
+#define DB_OSO_TRUNC 0x0100 /* POSIX: O_TRUNC */
+
+/*
+ * Seek options understood by __os_seek.
+ */
+typedef enum {
+ DB_OS_SEEK_CUR, /* POSIX: SEEK_CUR */
+ DB_OS_SEEK_END, /* POSIX: SEEK_END */
+ DB_OS_SEEK_SET /* POSIX: SEEK_SET */
+} DB_OS_SEEK;
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+/* Type passed to __db_appname(). */
+typedef enum {
+ DB_APP_NONE=0, /* No type (region). */
+ DB_APP_DATA, /* Data file. */
+ DB_APP_LOG, /* Log file. */
+ DB_APP_TMP /* Temporary file. */
+} APPNAME;
+
+/*
+ * CDB_LOCKING CDB product locking.
+ * CRYPTO_ON Security has been configured.
+ * LOCKING_ON Locking has been configured.
+ * LOGGING_ON Logging has been configured.
+ * MPOOL_ON Memory pool has been configured.
+ * RPC_ON RPC has been configured.
+ * TXN_ON Transactions have been configured.
+ */
+#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB)
+#define CRYPTO_ON(dbenv) ((dbenv)->crypto_handle != NULL)
+#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL)
+#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL)
+#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL)
+#define RPC_ON(dbenv) ((dbenv)->cl_handle != NULL)
+#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL)
+
+/*
+ * STD_LOCKING Standard locking, that is, locking was configured and CDB
+ * was not. We do not do locking in off-page duplicate trees,
+ * so we check for that in the cursor first.
+ */
+#define STD_LOCKING(dbc) \
+ (!F_ISSET(dbc, DBC_OPD) && \
+ !CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv))
+
+/*
+ * IS_RECOVERING: The system is running recovery.
+ */
+#define IS_RECOVERING(dbenv) \
+ (LOGGING_ON(dbenv) && \
+ F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER))
+
+/* Initialization methods are often illegal before/after open is called. */
+#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \
+ if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 1));
+#define ENV_ILLEGAL_BEFORE_OPEN(dbenv, name) \
+ if (!F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 0));
+
+/* We're not actually user hostile, honest. */
+#define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \
+ if (handle == NULL) \
+ return (__db_env_config(dbenv, i, flags));
+
+/*******************************************************
+ * Database Access Methods.
+ *******************************************************/
+/*
+ * DB_IS_THREADED --
+ * The database handle is free-threaded (was opened with DB_THREAD).
+ */
+#define DB_IS_THREADED(dbp) \
+ ((dbp)->mutexp != NULL)
+
+/* Initialization methods are often illegal before/after open is called. */
+#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \
+ if (F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 1));
+#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \
+ if (!F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 0));
+/* Some initialization methods are illegal if environment isn't local. */
+#define DB_ILLEGAL_IN_ENV(dbp, name) \
+ if (!F_ISSET((dbp)->dbenv, DB_ENV_DBLOCAL)) \
+ return (__db_mi_env((dbp)->dbenv, name));
+#define DB_ILLEGAL_METHOD(dbp, flags) { \
+ int __ret; \
+ if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \
+ return (__ret); \
+}
+
+/*
+ * Common DBC->internal fields. Each access method adds additional fields
+ * to this list, but the initial fields are common.
+ */
+#define __DBC_INTERNAL \
+ DBC *opd; /* Off-page duplicate cursor. */\
+ \
+ void *page; /* Referenced page. */ \
+ db_pgno_t root; /* Tree root. */ \
+ db_pgno_t pgno; /* Referenced page number. */ \
+ db_indx_t indx; /* Referenced key item index. */\
+ \
+ DB_LOCK lock; /* Cursor lock. */ \
+ db_lockmode_t lock_mode; /* Lock mode. */
+
+struct __dbc_internal {
+ __DBC_INTERNAL
+};
+
+/* Actions that __db_master_update can take. */
+typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action;
+
+/*
+ * Access-method-common macro for determining whether a cursor
+ * has been initialized.
+ */
+#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID)
+
+/* Free the callback-allocated buffer, if necessary, hanging off of a DBT. */
+#define FREE_IF_NEEDED(sdbp, dbt) \
+ if (F_ISSET((dbt), DB_DBT_APPMALLOC)) { \
+ __os_ufree((sdbp)->dbenv, (dbt)->data); \
+ F_CLR((dbt), DB_DBT_APPMALLOC); \
+ }
+
+/*
+ * Use memory belonging to object "owner" to return the results of
+ * any no-DBT-flag get ops on cursor "dbc".
+ */
+#define SET_RET_MEM(dbc, owner) \
+ do { \
+ (dbc)->rskey = &(owner)->my_rskey; \
+ (dbc)->rkey = &(owner)->my_rkey; \
+ (dbc)->rdata = &(owner)->my_rdata; \
+ } while (0)
+
+/* Use the return-data memory src is currently set to use in dest as well. */
+#define COPY_RET_MEM(src, dest) \
+ do { \
+ (dest)->rskey = (src)->rskey; \
+ (dest)->rkey = (src)->rkey; \
+ (dest)->rdata = (src)->rdata; \
+ } while (0)
+
+/* Reset the returned-memory pointers to their defaults. */
+#define RESET_RET_MEM(dbc) \
+ do { \
+ (dbc)->rskey = &(dbc)->my_rskey; \
+ (dbc)->rkey = &(dbc)->my_rkey; \
+ (dbc)->rdata = &(dbc)->my_rdata; \
+ } while (0)
+
+/*******************************************************
+ * Mpool.
+ *******************************************************/
+/*
+ * File types for DB access methods. Negative numbers are reserved to DB.
+ */
+#define DB_FTYPE_SET -1 /* Call pgin/pgout functions. */
+#define DB_FTYPE_NOTSET 0 /* Don't call... */
+
+/* Structure used as the DB pgin/pgout pgcookie. */
+typedef struct __dbpginfo {
+ size_t db_pagesize; /* Underlying page size. */
+ u_int32_t flags; /* Some DB_AM flags needed. */
+ DBTYPE type; /* DB type */
+} DB_PGINFO;
+
+/*******************************************************
+ * Log.
+ *******************************************************/
+/* Initialize an LSN to 'zero'. */
+#define ZERO_LSN(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 0; \
+} while (0)
+#define IS_ZERO_LSN(LSN) ((LSN).file == 0)
+
+#define IS_INIT_LSN(LSN) ((LSN).file == 1 && (LSN).offset == 0)
+#define INIT_LSN(LSN) do { \
+ (LSN).file = 1; \
+ (LSN).offset = 0; \
+} while (0)
+
+#define MAX_LSN(LSN) do { \
+ (LSN).file = UINT32_T_MAX; \
+ (LSN).offset = UINT32_T_MAX; \
+} while (0)
+#define IS_MAX_LSN(LSN) \
+ ((LSN).file == UINT32_T_MAX && (LSN).offset == UINT32_T_MAX)
+
+/* If logging is turned off, smash the lsn. */
+#define LSN_NOT_LOGGED(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 1; \
+} while (0)
+#define IS_NOT_LOGGED_LSN(LSN) \
+ ((LSN).file == 0 && (LSN).offset == 1)
+
+/*
+ * Test if the environment is currently logging changes. If we're in
+ * recovery or we're a replication client, we don't need to log changes
+ * because they're already in the log, even though we have a fully functional
+ * log system.
+ */
+#define DBENV_LOGGING(dbenv) \
+ (LOGGING_ON(dbenv) && !F_ISSET((dbenv), DB_ENV_REP_CLIENT) && \
+ (!IS_RECOVERING(dbenv)))
+
+/*
+ * Test if we need to log a change. Note that the DBC_RECOVER flag is set
+ * when we're in abort, as well as during recovery; thus DBC_LOGGING may be
+ * false for a particular dbc even when DBENV_LOGGING is true.
+ *
+ * We explicitly use LOGGING_ON/DB_ENV_REP_CLIENT here because we don't
+ * want to have to pull in the log headers, which IS_RECOVERING (and thus
+ * DBENV_LOGGING) rely on, and because DBC_RECOVER should be set anytime
+ * IS_RECOVERING would be true.
+ */
+#define DBC_LOGGING(dbc) \
+ (LOGGING_ON((dbc)->dbp->dbenv) && !F_ISSET((dbc), DBC_RECOVER) && \
+ !F_ISSET((dbc)->dbp->dbenv, DB_ENV_REP_CLIENT))
+
+/*******************************************************
+ * Txn.
+ *******************************************************/
+#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT))
+#define IS_SUBTRANSACTION(txn) \
+ ((txn) != NULL && (txn)->parent != NULL)
+
+/*******************************************************
+ * Crypto.
+ *******************************************************/
+#define DB_IV_BYTES 16 /* Bytes per IV */
+#define DB_MAC_KEY 20 /* Bytes per MAC checksum */
+
+/*******************************************************
+ * Forward structure declarations.
+ *******************************************************/
+struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO;
+struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
+struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
+struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
+struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO;
+struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
+
+#if defined(__cplusplus)
+}
+#endif
+
+/*******************************************************
+ * Remaining general DB includes.
+ *******************************************************/
+@db_int_def@
+
+#include "dbinc/globals.h"
+#include "dbinc/debug.h"
+#include "dbinc/mutex.h"
+#include "dbinc/region.h"
+#include "dbinc_auto/mutex_ext.h" /* XXX: Include after region.h. */
+#include "dbinc_auto/env_ext.h"
+#include "dbinc/os.h"
+#include "dbinc_auto/clib_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+#endif /* !_DB_INTERNAL_H_ */
diff --git a/storage/bdb/dbinc/db_join.h b/storage/bdb/dbinc/db_join.h
new file mode 100644
index 00000000000..487ce3eebbb
--- /dev/null
+++ b/storage/bdb/dbinc/db_join.h
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * @(#)db_join.h 11.1 (Sleepycat) 7/25/99
+ */
+
+#ifndef _DB_JOIN_H_
+#define _DB_JOIN_H_
+
+/*
+ * Joins use a join cursor that is similar to a regular DB cursor except
+ * that it only supports c_get and c_close functionality. Also, it does
+ * not support the full range of flags for get.
+ */
+typedef struct __join_cursor {
+	u_int8_t *j_exhausted;	/* Array of flags; is cursor i exhausted? */
+	DBC **j_curslist;	/* Array of cursors in the join: constant. */
+	DBC **j_fdupcurs;	/* Cursors w/ first instances of current dup. */
+	DBC **j_workcurs;	/* Scratch cursor copies to muck with. */
+	DB *j_primary;		/* Primary dbp. */
+	DBT j_key;		/* Used to do lookups. */
+	DBT j_rdata;		/* Memory used for data return. */
+	u_int32_t j_ncurs;	/* How many cursors do we have? */
+#define JOIN_RETRY	0x01	/* Error on primary get; re-return same key. */
+	u_int32_t flags;
+} JOIN_CURSOR;
+
+#endif /* !_DB_JOIN_H_ */
diff --git a/storage/bdb/dbinc/db_page.h b/storage/bdb/dbinc/db_page.h
new file mode 100644
index 00000000000..97497556fd9
--- /dev/null
+++ b/storage/bdb/dbinc/db_page.h
@@ -0,0 +1,651 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_page.h,v 11.52 2002/09/13 21:24:04 bostic Exp $
+ */
+
+#ifndef _DB_PAGE_H_
+#define _DB_PAGE_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * DB page formats.
+ *
+ * !!!
+ * This implementation requires that values within the following structures
+ * NOT be padded -- note, ANSI C permits random padding within structures.
+ * If your compiler pads randomly you can just forget ever making DB run on
+ * your system. In addition, no data type can require larger alignment than
+ * its own size, e.g., a 4-byte data element may not require 8-byte alignment.
+ *
+ * Note that key/data lengths are often stored in db_indx_t's -- this is
+ * not accidental, nor does it limit the key/data size. If the key/data
+ * item fits on a page, it's guaranteed to be small enough to fit into a
+ * db_indx_t, and storing it in one saves space.
+ */
+
+#define PGNO_INVALID 0 /* Invalid page number in any database. */
+#define PGNO_BASE_MD 0 /* Base database: metadata page number. */
+
+/* Page types. */
+#define P_INVALID 0 /* Invalid page type. */
+#define __P_DUPLICATE 1 /* Duplicate. DEPRECATED in 3.1 */
+#define P_HASH 2 /* Hash. */
+#define P_IBTREE 3 /* Btree internal. */
+#define P_IRECNO 4 /* Recno internal. */
+#define P_LBTREE 5 /* Btree leaf. */
+#define P_LRECNO 6 /* Recno leaf. */
+#define P_OVERFLOW 7 /* Overflow. */
+#define P_HASHMETA 8 /* Hash metadata page. */
+#define P_BTREEMETA 9 /* Btree metadata page. */
+#define P_QAMMETA 10 /* Queue metadata page. */
+#define P_QAMDATA 11 /* Queue data page. */
+#define P_LDUP 12 /* Off-page duplicate leaf. */
+#define P_PAGETYPE_MAX 13
+
+/*
+ * When we create pages in mpool, we ask mpool to clear some number of bytes
+ * in the header. This number must be at least as big as the regular page
+ * headers and cover enough of the btree and hash meta-data pages to obliterate
+ * the page type.
+ */
+#define DB_PAGE_DB_LEN 32
+#define DB_PAGE_QUEUE_LEN 0
+
+/************************************************************************
+ GENERIC METADATA PAGE HEADER
+ *
+ * !!!
+ * The magic and version numbers have to be in the same place in all versions
+ * of the metadata page as the application may not have upgraded the database.
+ *
+ * Every field sits at the fixed byte offset noted in its comment: this is
+ * an on-disk format, so fields must never be reordered, resized or padded
+ * (see the structure-padding warning at the top of this file).
+ ************************************************************************/
+typedef struct _dbmeta33 {
+	DB_LSN	  lsn;		/* 00-07: LSN. */
+	db_pgno_t pgno;		/* 08-11: Current page number. */
+	u_int32_t magic;	/* 12-15: Magic number. */
+	u_int32_t version;	/* 16-19: Version. */
+	u_int32_t pagesize;	/* 20-23: Pagesize. */
+	u_int8_t  encrypt_alg;	/*    24: Encryption algorithm. */
+	u_int8_t  type;		/*    25: Page type. */
+#define	DBMETA_CHKSUM		0x01
+	u_int8_t  metaflags;	/* 26: Meta-only flags */
+	u_int8_t  unused1;	/* 27: Unused. */
+	u_int32_t free;		/* 28-31: Free list page number. */
+	db_pgno_t last_pgno;	/* 32-35: Page number of last page in db. */
+	u_int32_t unused3;	/* 36-39: Unused. */
+	u_int32_t key_count;	/* 40-43: Cached key count. */
+	u_int32_t record_count;	/* 44-47: Cached record count. */
+	u_int32_t flags;	/* 48-51: Flags: unique to each AM. */
+				/* 52-71: Unique file ID. */
+	u_int8_t  uid[DB_FILE_ID_LEN];
+} DBMETA33, DBMETA;
+
+/************************************************************************
+ BTREE METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _btmeta33 {
+#define BTM_DUP 0x001 /* Duplicates. */
+#define BTM_RECNO 0x002 /* Recno tree. */
+#define BTM_RECNUM 0x004 /* Btree: maintain record count. */
+#define BTM_FIXEDLEN 0x008 /* Recno: fixed length records. */
+#define BTM_RENUMBER 0x010 /* Recno: renumber on insert/delete. */
+#define BTM_SUBDB 0x020 /* Subdatabases. */
+#define BTM_DUPSORT 0x040 /* Duplicates are sorted. */
+#define BTM_MASK 0x07f
+ DBMETA dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t maxkey; /* 72-75: Btree: Maxkey. */
+ u_int32_t minkey; /* 76-79: Btree: Minkey. */
+ u_int32_t re_len; /* 80-83: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Recno: fixed-length record pad. */
+ u_int32_t root; /* 88-91: Root page. */
+ u_int32_t unused[92]; /* 92-459: Unused space */
+ u_int32_t crypto_magic; /* 460-463: Crypto magic number */
+ u_int32_t trash[3]; /* 464-475: Trash space - Do not use */
+ u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */
+ u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */
+
+ /*
+ * Minimum page size is 512.
+ */
+} BTMETA33, BTMETA;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _hashmeta33 {
+#define DB_HASH_DUP 0x01 /* Duplicates. */
+#define DB_HASH_SUBDB 0x02 /* Subdatabases. */
+#define DB_HASH_DUPSORT 0x04 /* Duplicates are sorted. */
+ DBMETA dbmeta; /* 00-71: Generic meta-data page header. */
+
+ u_int32_t max_bucket; /* 72-75: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 76-79: Modulo mask into table */
+ u_int32_t low_mask; /* 80-83: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 84-87: Fill factor */
+ u_int32_t nelem; /* 88-91: Number of keys in hash table */
+ u_int32_t h_charkey; /* 92-95: Value of hash(CHARKEY) */
+#define NCACHED 32 /* number of spare points */
+ /* 96-223: Spare pages for overflow */
+ u_int32_t spares[NCACHED];
+ u_int32_t unused[59]; /* 224-459: Unused space */
+ u_int32_t crypto_magic; /* 460-463: Crypto magic number */
+ u_int32_t trash[3]; /* 464-475: Trash space - Do not use */
+ u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */
+ u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */
+
+ /*
+ * Minimum page size is 512.
+ */
+} HMETA33, HMETA;
+
+/************************************************************************
+ QUEUE METADATA PAGE LAYOUT
+ ************************************************************************/
+/*
+ * QAM Meta data page structure
+ *
+ */
+typedef struct _qmeta33 {
+ DBMETA dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t first_recno; /* 72-75: First not deleted record. */
+ u_int32_t cur_recno; /* 76-79: Next recno to be allocated. */
+ u_int32_t re_len; /* 80-83: Fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Fixed-length record pad. */
+ u_int32_t rec_page; /* 88-91: Records Per Page. */
+ u_int32_t page_ext; /* 92-95: Pages per extent */
+
+ u_int32_t unused[91]; /* 96-459: Unused space */
+ u_int32_t crypto_magic; /* 460-463: Crypto magic number */
+ u_int32_t trash[3]; /* 464-475: Trash space - Do not use */
+ u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */
+ u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */
+ /*
+ * Minimum page size is 512.
+ */
+} QMETA33, QMETA;
+
+/*
+ * DBMETASIZE is a constant used by __db_file_setup and DB->verify
+ * as a buffer which is guaranteed to be larger than any possible
+ * metadata page size and smaller than any disk sector.
+ */
+#define DBMETASIZE 512
+
+/************************************************************************
+ BTREE/HASH MAIN PAGE LAYOUT
+ ************************************************************************/
+/*
+ * +-----------------------------------+
+ * | lsn | pgno | prev pgno |
+ * +-----------------------------------+
+ * | next pgno | entries | hf offset |
+ * +-----------------------------------+
+ * | level | type | chksum |
+ * +-----------------------------------+
+ * | iv | index | free --> |
+ * +-----------+-----------------------+
+ * | F R E E A R E A |
+ * +-----------------------------------+
+ * | <-- free | item |
+ * +-----------------------------------+
+ * | item | item | item |
+ * +-----------------------------------+
+ *
+ * sizeof(PAGE) == 26 bytes + possibly 20 bytes of checksum and possibly
+ * 16 bytes of IV (+ 2 bytes for alignment), and the following indices
+ * are guaranteed to be two-byte aligned. If we aren't doing crypto or
+ * checksumming the bytes are reclaimed for data storage.
+ *
+ * For hash and btree leaf pages, index items are paired, e.g., inp[0] is the
+ * key for inp[1]'s data. All other types of pages only contain single items.
+ */
+typedef struct __pg_chksum {
+ u_int8_t unused[2]; /* 26-27: For alignment */
+ u_int8_t chksum[4]; /* 28-31: Checksum */
+} PG_CHKSUM;
+
+typedef struct __pg_crypto {
+ u_int8_t unused[2]; /* 26-27: For alignment */
+ u_int8_t chksum[DB_MAC_KEY]; /* 28-47: Checksum */
+ u_int8_t iv[DB_IV_BYTES]; /* 48-63: IV */
+ /* !!!
+ * Must be 16-byte aligned for crypto
+ */
+} PG_CRYPTO;
+
+typedef struct _db_page {
+ DB_LSN lsn; /* 00-07: Log sequence number. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ db_pgno_t prev_pgno; /* 12-15: Previous page number. */
+ db_pgno_t next_pgno; /* 16-19: Next page number. */
+ db_indx_t entries; /* 20-21: Number of items on the page. */
+ db_indx_t hf_offset; /* 22-23: High free byte page offset. */
+
+ /*
+ * The btree levels are numbered from the leaf to the root, starting
+ * with 1, so the leaf is level 1, its parent is level 2, and so on.
+ * We maintain this level on all btree pages, but the only place that
+ * we actually need it is on the root page. It would not be difficult
+ * to hide the byte on the root page once it becomes an internal page,
+ * so we could get this byte back if we needed it for something else.
+ */
+#define LEAFLEVEL 1
+#define MAXBTREELEVEL 255
+ u_int8_t level; /* 24: Btree tree level. */
+ u_int8_t type; /* 25: Page type. */
+} PAGE;
+
+#define SIZEOF_PAGE 26
+/*
+ * !!!
+ * DB_AM_ENCRYPT always implies DB_AM_CHKSUM so that must come first.
+ */
+#define P_INP(dbp, pg) \
+ ((db_indx_t *)((u_int8_t *)(pg) + SIZEOF_PAGE + \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? sizeof(PG_CRYPTO) : \
+ (F_ISSET((dbp), DB_AM_CHKSUM) ? sizeof(PG_CHKSUM) : 0))))
+
+#define P_IV(dbp, pg) \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? ((u_int8_t *)(pg) + \
+ SIZEOF_PAGE + SSZA(PG_CRYPTO, iv)) \
+ : NULL)
+
+#define P_CHKSUM(dbp, pg) \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? ((u_int8_t *)(pg) + \
+ SIZEOF_PAGE + SSZA(PG_CRYPTO, chksum)) : \
+ (F_ISSET((dbp), DB_AM_CHKSUM) ? ((u_int8_t *)(pg) + \
+ SIZEOF_PAGE + SSZA(PG_CHKSUM, chksum)) \
+ : NULL))
+
+/* PAGE element macros. */
+#define LSN(p) (((PAGE *)p)->lsn)
+#define PGNO(p) (((PAGE *)p)->pgno)
+#define PREV_PGNO(p) (((PAGE *)p)->prev_pgno)
+#define NEXT_PGNO(p) (((PAGE *)p)->next_pgno)
+#define NUM_ENT(p) (((PAGE *)p)->entries)
+#define HOFFSET(p) (((PAGE *)p)->hf_offset)
+#define LEVEL(p) (((PAGE *)p)->level)
+#define TYPE(p) (((PAGE *)p)->type)
+
+/************************************************************************
+ QUEUE MAIN PAGE LAYOUT
+ ************************************************************************/
+/*
+ * Sizes of page below. Used to reclaim space if not doing
+ * crypto or checksumming. If you change the QPAGE below you
+ * MUST adjust this too.
+ */
+#define QPAGE_NORMAL 28
+#define QPAGE_CHKSUM 48
+#define QPAGE_SEC 64
+
+typedef struct _qpage {
+ DB_LSN lsn; /* 00-07: Log sequence number. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t unused0[3]; /* 12-23: Unused. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int8_t chksum[DB_MAC_KEY]; /* 28-47: Checksum */
+ u_int8_t iv[DB_IV_BYTES]; /* 48-63: IV */
+} QPAGE;
+
+#define QPAGE_SZ(dbp) \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? QPAGE_SEC : \
+ F_ISSET((dbp), DB_AM_CHKSUM) ? QPAGE_CHKSUM : QPAGE_NORMAL)
+/*
+ * !!!
+ * The next_pgno and prev_pgno fields are not maintained for btree and recno
+ * internal pages. Doing so only provides a minor performance improvement,
+ * it's hard to do when deleting internal pages, and it increases the chance
+ * of deadlock during deletes and splits because we have to re-link pages at
+ * more than the leaf level.
+ *
+ * !!!
+ * The btree/recno access method needs db_recno_t bytes of space on the root
+ * page to specify how many records are stored in the tree. (The alternative
+ * is to store the number of records in the meta-data page, which will create
+ * a second hot spot in trees being actively modified, or recalculate it from
+ * the BINTERNAL fields on each access.) Overload the PREV_PGNO field.
+ */
+#define	RE_NREC(p)							\
+	((TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO) ? PREV_PGNO(p) :	\
+	(db_pgno_t)(TYPE(p) == P_LBTREE ? NUM_ENT(p) / 2 : NUM_ENT(p)))
+/*
+ * NOTE(review): the two macros below expand to statements that already end
+ * in ';'.  Invoking them with a trailing semicolon therefore produces an
+ * extra empty statement, which breaks unbraced if/else bodies.  Call sites
+ * may rely on the embedded semicolon -- confirm before changing this.
+ */
+#define	RE_NREC_ADJ(p, adj)						\
+	PREV_PGNO(p) += adj;
+#define	RE_NREC_SET(p, num)						\
+	PREV_PGNO(p) = num;
+
+/*
+ * Initialize a page.
+ *
+ * !!!
+ * Don't modify the page's LSN, code depends on it being unchanged after a
+ * P_INIT call.
+ */
+#define P_INIT(pg, pg_size, n, pg_prev, pg_next, btl, pg_type) do { \
+ PGNO(pg) = n; \
+ PREV_PGNO(pg) = pg_prev; \
+ NEXT_PGNO(pg) = pg_next; \
+ NUM_ENT(pg) = 0; \
+ HOFFSET(pg) = pg_size; \
+ LEVEL(pg) = btl; \
+ TYPE(pg) = pg_type; \
+} while (0)
+
+/* Page header length (offset to first index). */
+#define P_OVERHEAD(dbp) P_TO_UINT16(P_INP(dbp, 0))
+
+/* First free byte. */
+#define LOFFSET(dbp, pg) \
+ (P_OVERHEAD(dbp) + NUM_ENT(pg) * sizeof(db_indx_t))
+
+/* Free space on a regular page. */
+#define P_FREESPACE(dbp, pg) (HOFFSET(pg) - LOFFSET(dbp, pg))
+
+/* Get a pointer to the bytes at a specific index. */
+#define P_ENTRY(dbp, pg, indx) ((u_int8_t *)pg + P_INP(dbp, pg)[indx])
+
+/************************************************************************
+ OVERFLOW PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Overflow items are referenced by HOFFPAGE and BOVERFLOW structures, which
+ * store a page number (the first page of the overflow item) and a length
+ * (the total length of the overflow item). The overflow item consists of
+ * some number of overflow pages, linked by the next_pgno field of the page.
+ * A next_pgno field of PGNO_INVALID flags the end of the overflow item.
+ *
+ * Overflow page overloads:
+ * The amount of overflow data stored on each page is stored in the
+ * hf_offset field.
+ *
+ * The implementation reference counts overflow items as it's possible
+ * for them to be promoted onto btree internal pages. The reference
+ * count is stored in the entries field.
+ */
+#define OV_LEN(p) (((PAGE *)p)->hf_offset)
+#define OV_REF(p) (((PAGE *)p)->entries)
+
+/* Maximum number of bytes that you can put on an overflow page. */
+#define P_MAXSPACE(dbp, psize) ((psize) - P_OVERHEAD(dbp))
+
+/* Free space on an overflow page. */
+#define P_OVFLSPACE(dbp, psize, pg) (P_MAXSPACE(dbp, psize) - HOFFSET(pg))
+
+/************************************************************************
+ HASH PAGE LAYOUT
+ ************************************************************************/
+
+/* Each index references a group of bytes on the page. */
+#define H_KEYDATA 1 /* Key/data item. */
+#define H_DUPLICATE 2 /* Duplicate key/data item. */
+#define H_OFFPAGE 3 /* Overflow key/data item. */
+#define H_OFFDUP 4 /* Overflow page of duplicates. */
+
+/*
+ * !!!
+ * Items on hash pages are (potentially) unaligned, so we can never cast the
+ * (page + offset) pointer to an HKEYDATA, HOFFPAGE or HOFFDUP structure, as
+ * we do with B+tree on-page structures. Because we frequently want the type
+ * field, it requires no alignment, and it's in the same location in all three
+ * structures, there's a pair of macros.
+ */
+#define HPAGE_PTYPE(p) (*(u_int8_t *)p)
+#define HPAGE_TYPE(dbp, pg, indx) (*P_ENTRY(dbp, pg, indx))
+
+/*
+ * The first and second types are H_KEYDATA and H_DUPLICATE, represented
+ * by the HKEYDATA structure:
+ *
+ * +-----------------------------------+
+ * | type | key/data ... |
+ * +-----------------------------------+
+ *
+ * For duplicates, the data field encodes duplicate elements in the data
+ * field:
+ *
+ * +---------------------------------------------------------------+
+ * | type | len1 | element1 | len1 | len2 | element2 | len2 |
+ * +---------------------------------------------------------------+
+ *
+ * Thus, by keeping track of the offset in the element, we can do both
+ * backward and forward traversal.
+ */
+typedef struct _hkeydata {
+ u_int8_t type; /* 00: Page type. */
+ u_int8_t data[1]; /* Variable length key/data item. */
+} HKEYDATA;
+#define HKEYDATA_DATA(p) (((u_int8_t *)p) + SSZA(HKEYDATA, data))
+
+/*
+ * The length of any HKEYDATA item. Note that indx is an element index,
+ * not a PAIR index.
+ */
+#define LEN_HITEM(dbp, pg, pgsize, indx) \
+ (((indx) == 0 ? pgsize : \
+ (P_INP(dbp, pg)[indx - 1])) - (P_INP(dbp, pg)[indx]))
+
+#define LEN_HKEYDATA(dbp, pg, psize, indx) \
+ (db_indx_t)(LEN_HITEM(dbp, pg, psize, indx) - HKEYDATA_SIZE(0))
+
+/*
+ * Page space required to add a new HKEYDATA item to the page, with and
+ * without the index value.
+ */
+#define HKEYDATA_SIZE(len) \
+ ((len) + SSZA(HKEYDATA, data))
+#define HKEYDATA_PSIZE(len) \
+ (HKEYDATA_SIZE(len) + sizeof(db_indx_t))
+
+/* Put a HKEYDATA item at the location referenced by a page entry. */
+#define PUT_HKEYDATA(pe, kd, len, type) { \
+ ((HKEYDATA *)pe)->type = type; \
+ memcpy((u_int8_t *)pe + sizeof(u_int8_t), kd, len); \
+}
+
+/*
+ * Macros that describe the page layout in terms of key-data pairs.
+ */
+#define H_NUMPAIRS(pg) (NUM_ENT(pg) / 2)
+#define H_KEYINDEX(indx) (indx)
+#define H_DATAINDEX(indx) ((indx) + 1)
+#define H_PAIRKEY(dbp, pg, indx) P_ENTRY(dbp, pg, H_KEYINDEX(indx))
+#define H_PAIRDATA(dbp, pg, indx) P_ENTRY(dbp, pg, H_DATAINDEX(indx))
+#define H_PAIRSIZE(dbp, pg, psize, indx) \
+ (LEN_HITEM(dbp, pg, psize, H_KEYINDEX(indx)) + \
+ LEN_HITEM(dbp, pg, psize, H_DATAINDEX(indx)))
+#define LEN_HDATA(dbp, p, psize, indx) \
+ LEN_HKEYDATA(dbp, p, psize, H_DATAINDEX(indx))
+#define LEN_HKEY(dbp, p, psize, indx) \
+ LEN_HKEYDATA(dbp, p, psize, H_KEYINDEX(indx))
+
+/*
+ * The third type is the H_OFFPAGE, represented by the HOFFPAGE structure:
+ */
+typedef struct _hoffpage {
+ u_int8_t type; /* 00: Page type and delete flag. */
+ u_int8_t unused[3]; /* 01-03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Offpage page number. */
+ u_int32_t tlen; /* 08-11: Total length of item. */
+} HOFFPAGE;
+
+#define HOFFPAGE_PGNO(p) (((u_int8_t *)p) + SSZ(HOFFPAGE, pgno))
+#define HOFFPAGE_TLEN(p) (((u_int8_t *)p) + SSZ(HOFFPAGE, tlen))
+
+/*
+ * Page space required to add a new HOFFPAGE item to the page, with and
+ * without the index value.
+ */
+#define HOFFPAGE_SIZE (sizeof(HOFFPAGE))
+#define HOFFPAGE_PSIZE (HOFFPAGE_SIZE + sizeof(db_indx_t))
+
+/*
+ * The fourth type is H_OFFDUP represented by the HOFFDUP structure:
+ */
+typedef struct _hoffdup {
+ u_int8_t type; /* 00: Page type and delete flag. */
+ u_int8_t unused[3]; /* 01-03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Offpage page number. */
+} HOFFDUP;
+#define HOFFDUP_PGNO(p) (((u_int8_t *)p) + SSZ(HOFFDUP, pgno))
+
+/*
+ * Page space required to add a new HOFFDUP item to the page, with and
+ * without the index value.
+ */
+#define HOFFDUP_SIZE (sizeof(HOFFDUP))
+
+/************************************************************************
+ BTREE PAGE LAYOUT
+ ************************************************************************/
+
+/* Each index references a group of bytes on the page. */
+#define B_KEYDATA 1 /* Key/data item. */
+#define B_DUPLICATE 2 /* Duplicate key/data item. */
+#define B_OVERFLOW 3 /* Overflow key/data item. */
+
+/*
+ * We have to store a deleted entry flag in the page. The reason is complex,
+ * but the simple version is that we can't delete on-page items referenced by
+ * a cursor -- the return order of subsequent insertions might be wrong. The
+ * delete flag is an overload of the top bit of the type byte.
+ */
+#define B_DELETE (0x80)
+#define B_DCLR(t) (t) &= ~B_DELETE
+#define B_DSET(t) (t) |= B_DELETE
+#define B_DISSET(t) ((t) & B_DELETE)
+
+#define B_TYPE(t) ((t) & ~B_DELETE)
+#define B_TSET(t, type, deleted) { \
+ (t) = (type); \
+ if (deleted) \
+ B_DSET(t); \
+}
+
+/*
+ * The first type is B_KEYDATA, represented by the BKEYDATA structure:
+ */
+typedef struct _bkeydata {
+ db_indx_t len; /* 00-01: Key/data item length. */
+ u_int8_t type; /* 02: Page type AND DELETE FLAG. */
+ u_int8_t data[1]; /* Variable length key/data item. */
+} BKEYDATA;
+
+/* Get a BKEYDATA item for a specific index. */
+#define GET_BKEYDATA(dbp, pg, indx) \
+ ((BKEYDATA *)P_ENTRY(dbp, pg, indx))
+
+/*
+ * Page space required to add a new BKEYDATA item to the page, with and
+ * without the index value.
+ */
+#define BKEYDATA_SIZE(len) \
+ ALIGN((len) + SSZA(BKEYDATA, data), sizeof(u_int32_t))
+#define BKEYDATA_PSIZE(len) \
+ (BKEYDATA_SIZE(len) + sizeof(db_indx_t))
+
+/*
+ * The second and third types are B_DUPLICATE and B_OVERFLOW, represented
+ * by the BOVERFLOW structure.
+ */
+typedef struct _boverflow {
+ db_indx_t unused1; /* 00-01: Padding, unused. */
+ u_int8_t type; /* 02: Page type AND DELETE FLAG. */
+ u_int8_t unused2; /* 03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Next page number. */
+ u_int32_t tlen; /* 08-11: Total length of item. */
+} BOVERFLOW;
+
+/* Get a BOVERFLOW item for a specific index. */
+#define GET_BOVERFLOW(dbp, pg, indx) \
+ ((BOVERFLOW *)P_ENTRY(dbp, pg, indx))
+
+/*
+ * Page space required to add a new BOVERFLOW item to the page, with and
+ * without the index value. The (u_int16_t) cast avoids warnings: ALIGN
+ * casts to db_align_t, the cast converts it to a small integral type so
+ * we don't get complaints when we assign the final result to an integral
+ * type smaller than db_align_t.
+ */
+#define BOVERFLOW_SIZE \
+ ((u_int16_t)ALIGN(sizeof(BOVERFLOW), sizeof(u_int32_t)))
+#define BOVERFLOW_PSIZE \
+ (BOVERFLOW_SIZE + sizeof(db_indx_t))
+
+/*
+ * Btree leaf and hash page layouts group indices in sets of two, one for the
+ * key and one for the data. Everything else does it in sets of one to save
+ * space. Use the following macros so that it's real obvious what's going on.
+ */
+#define O_INDX 1
+#define P_INDX 2
+
+/************************************************************************
+ BTREE INTERNAL PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Btree internal entry.
+ */
+typedef struct _binternal {
+ db_indx_t len; /* 00-01: Key/data item length. */
+ u_int8_t type; /* 02: Page type AND DELETE FLAG. */
+ u_int8_t unused; /* 03: Padding, unused. */
+ db_pgno_t pgno; /* 04-07: Page number of referenced page. */
+ db_recno_t nrecs; /* 08-11: Subtree record count. */
+ u_int8_t data[1]; /* Variable length key item. */
+} BINTERNAL;
+
+/* Get a BINTERNAL item for a specific index. */
+#define GET_BINTERNAL(dbp, pg, indx) \
+ ((BINTERNAL *)P_ENTRY(dbp, pg, indx))
+
+/*
+ * Page space required to add a new BINTERNAL item to the page, with and
+ * without the index value.
+ */
+#define BINTERNAL_SIZE(len) \
+ ALIGN((len) + SSZA(BINTERNAL, data), sizeof(u_int32_t))
+#define BINTERNAL_PSIZE(len) \
+ (BINTERNAL_SIZE(len) + sizeof(db_indx_t))
+
+/************************************************************************
+ RECNO INTERNAL PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * The recno internal entry.
+ */
+typedef struct _rinternal {
+ db_pgno_t pgno; /* 00-03: Page number of referenced page. */
+ db_recno_t nrecs; /* 04-07: Subtree record count. */
+} RINTERNAL;
+
+/* Get a RINTERNAL item for a specific index. */
+#define GET_RINTERNAL(dbp, pg, indx) \
+ ((RINTERNAL *)P_ENTRY(dbp, pg, indx))
+
+/*
+ * Page space required to add a new RINTERNAL item to the page, with and
+ * without the index value.
+ */
+#define RINTERNAL_SIZE \
+ ALIGN(sizeof(RINTERNAL), sizeof(u_int32_t))
+#define RINTERNAL_PSIZE \
+ (RINTERNAL_SIZE + sizeof(db_indx_t))
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* !_DB_PAGE_H_ */
diff --git a/storage/bdb/dbinc/db_server_int.h b/storage/bdb/dbinc/db_server_int.h
new file mode 100644
index 00000000000..efec539b2f8
--- /dev/null
+++ b/storage/bdb/dbinc/db_server_int.h
@@ -0,0 +1,148 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_server_int.h,v 1.23 2002/02/12 15:01:24 sue Exp $
+ */
+
+#ifndef _DB_SERVER_INT_H_
+#define _DB_SERVER_INT_H_
+
+#define DB_SERVER_TIMEOUT 300 /* 5 minutes */
+#define DB_SERVER_MAXTIMEOUT 1200 /* 20 minutes */
+#define DB_SERVER_IDLETIMEOUT 86400 /* 1 day */
+
+/*
+ * Ignore/mask off the following env->open flags:
+ * Most are illegal for a client to specify as they would control
+ * server resource usage. We will just ignore them.
+ * DB_LOCKDOWN
+ * DB_PRIVATE
+ * DB_RECOVER
+ * DB_RECOVER_FATAL
+ * DB_SYSTEM_MEM
+ * DB_USE_ENVIRON, DB_USE_ENVIRON_ROOT - handled on client
+ */
+#define DB_SERVER_FLAGMASK ( \
+DB_LOCKDOWN | DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL | \
+DB_SYSTEM_MEM | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT)
+
+#define CT_CURSOR 0x001 /* Cursor */
+#define CT_DB 0x002 /* Database */
+#define CT_ENV 0x004 /* Env */
+#define CT_TXN 0x008 /* Txn */
+
+#define CT_JOIN 0x10000000 /* Join cursor component */
+#define CT_JOINCUR 0x20000000 /* Join cursor */
+
+typedef struct home_entry home_entry;
+struct home_entry {
+ LIST_ENTRY(home_entry) entries;
+ char *home;
+ char *dir;
+ char *name;
+ char *passwd;
+};
+
+/*
+ * Data needed for sharing handles.
+ * To share an env handle, on the open call, they must have matching
+ * env flags, and matching set_flags.
+ *
+ * To share a db handle on the open call, the db, subdb and flags must
+ * all be the same.
+ */
+#define DB_SERVER_ENVFLAGS ( \
+DB_INIT_CDB | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | \
+DB_INIT_TXN | DB_JOINENV)
+
+#define DB_SERVER_DBFLAGS (DB_DIRTY_READ | DB_NOMMAP | DB_RDONLY)
+#define DB_SERVER_DBNOSHARE (DB_EXCL | DB_TRUNCATE)
+
+typedef struct ct_envdata ct_envdata;
+typedef struct ct_dbdata ct_dbdata;
+struct ct_envdata {
+ u_int32_t envflags;
+ u_int32_t onflags;
+ u_int32_t offflags;
+ home_entry *home;
+};
+
+struct ct_dbdata {
+ u_int32_t dbflags;
+ u_int32_t setflags;
+ char *db;
+ char *subdb;
+ DBTYPE type;
+};
+
+/*
+ * We maintain an activity timestamp for each handle. However, we
+ * set it to point, possibly to the ct_active field of its own handle
+ * or it may point to the ct_active field of a parent. In the case
+ * of nested transactions and any cursors within transactions it must
+ * point to the ct_active field of the ultimate parent of the transaction
+ * no matter how deeply it is nested.
+ */
+typedef struct ct_entry ct_entry;
+struct ct_entry {
+ LIST_ENTRY(ct_entry) entries; /* List of entries */
+ union {
+#ifdef __cplusplus
+ DbEnv *envp; /* H_ENV */
+ DbTxn *txnp; /* H_TXN */
+ Db *dbp; /* H_DB */
+ Dbc *dbc; /* H_CURSOR */
+#else
+ DB_ENV *envp; /* H_ENV */
+ DB_TXN *txnp; /* H_TXN */
+ DB *dbp; /* H_DB */
+ DBC *dbc; /* H_CURSOR */
+#endif
+ void *anyp;
+ } handle_u;
+ union { /* Private data per type */
+ ct_envdata envdp; /* Env info */
+ ct_dbdata dbdp; /* Db info */
+ } private_u;
+ long ct_id; /* Client ID */
+ long *ct_activep; /* Activity timestamp pointer*/
+ long *ct_origp; /* Original timestamp pointer*/
+ long ct_active; /* Activity timestamp */
+ long ct_timeout; /* Resource timeout */
+ long ct_idle; /* Idle timeout */
+ u_int32_t ct_refcount; /* Ref count for sharing */
+ u_int32_t ct_type; /* This entry's type */
+ struct ct_entry *ct_parent; /* Its parent */
+ struct ct_entry *ct_envparent; /* Its environment */
+};
+
+#define ct_envp handle_u.envp
+#define ct_txnp handle_u.txnp
+#define ct_dbp handle_u.dbp
+#define ct_dbc handle_u.dbc
+#define ct_anyp handle_u.anyp
+
+#define ct_envdp private_u.envdp
+#define ct_dbdp private_u.dbdp
+
+extern int __dbsrv_verbose;
+
+/*
+ * Get ctp and activate it.
+ * Assumes local variable 'replyp'.
+ * NOTE: May 'return' from macro.
+ */
+#define ACTIVATE_CTP(ctp, id, type) { \
+ (ctp) = get_tableent(id); \
+ if ((ctp) == NULL) { \
+ replyp->status = DB_NOSERVER_ID;\
+ return; \
+ } \
+ DB_ASSERT((ctp)->ct_type & (type)); \
+ __dbsrv_active(ctp); \
+}
+
+#endif /* !_DB_SERVER_INT_H_ */
diff --git a/storage/bdb/dbinc/db_shash.h b/storage/bdb/dbinc/db_shash.h
new file mode 100644
index 00000000000..2c54d6145c5
--- /dev/null
+++ b/storage/bdb/dbinc/db_shash.h
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_shash.h,v 11.11 2002/01/11 15:52:26 bostic Exp $
+ */
+
+#ifndef _DB_SHASH_H_
+#define _DB_SHASH_H_
+
+/* Hash Headers */
+typedef SH_TAILQ_HEAD(__hash_head) DB_HASHTAB;
+
+/*
+ * HASHLOOKUP --
+ *
+ * Look up something in a shared memory hash table. The "elt" argument
+ * should be a key, and cmp_func must know how to compare a key to whatever
+ * structure it is that appears in the hash table. The comparison function
+ * is invoked as cmp(elt, table_elt) and returns nonzero when they match.
+ *
+ * begin: address of the beginning of the hash table.
+ * ndx: index into table for this item.
+ * type: the structure type of the elements that are linked in each bucket.
+ * field: the name of the field by which the "type" structures are linked.
+ * elt: the item for which we are searching in the hash table.
+ * res: the variable into which we'll store the element if we find it.
+ * cmp: called as: cmp(lookup_elt, table_elt).
+ *
+ * If the element is not in the hash table, this macro exits with res set
+ * to NULL.
+ */
+#define HASHLOOKUP(begin, ndx, type, field, elt, res, cmp) do { \
+ DB_HASHTAB *__bucket; \
+ \
+ __bucket = &begin[ndx]; \
+ for (res = SH_TAILQ_FIRST(__bucket, type); \
+ res != NULL; res = SH_TAILQ_NEXT(res, field, type)) \
+ if (cmp(elt, res)) \
+ break; \
+} while (0)
+
+/*
+ * HASHINSERT --
+ *
+ * Insert a new entry into the hash table. This assumes that you already
+ * have the bucket locked and that lookup has failed; don't call it if you
+ * haven't already called HASHLOOKUP. If you do, you could get duplicate
+ * entries.
+ *
+ * begin: the beginning address of the hash table.
+ * ndx: the index for this element.
+ * type: the structure type of the elements that are linked in each bucket.
+ * field: the name of the field by which the "type" structures are linked.
+ * elt: the item to be inserted.
+ */
+#define HASHINSERT(begin, ndx, type, field, elt) do { \
+ DB_HASHTAB *__bucket; \
+ \
+ __bucket = &begin[ndx]; \
+ SH_TAILQ_INSERT_HEAD(__bucket, elt, field, type); \
+} while (0)
+
+/*
+ * HASHREMOVE_EL --
+ * Given the object "obj" in the table, remove it.
+ *
+ * begin: address of the beginning of the hash table.
+ * ndx: index into hash table of where this element belongs.
+ * type: the structure type of the elements that are linked in each bucket.
+ * field: the name of the field by which the "type" structures are linked.
+ * obj: the object in the table that we wish to delete.
+ */
+#define HASHREMOVE_EL(begin, ndx, type, field, obj) { \
+ DB_HASHTAB *__bucket; \
+ \
+ __bucket = &begin[ndx]; \
+ SH_TAILQ_REMOVE(__bucket, obj, field, type); \
+}
+#endif /* !_DB_SHASH_H_ */
diff --git a/storage/bdb/dbinc/db_swap.h b/storage/bdb/dbinc/db_swap.h
new file mode 100644
index 00000000000..d5aad65385e
--- /dev/null
+++ b/storage/bdb/dbinc/db_swap.h
@@ -0,0 +1,116 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: db_swap.h,v 11.8 2002/01/11 15:52:26 bostic Exp $
+ */
+
+#ifndef _DB_SWAP_H_
+#define _DB_SWAP_H_
+
+/*
+ * Little endian <==> big endian 32-bit swap macros.
+ * M_32_SWAP swap a memory location
+ * P_32_COPY copy potentially unaligned 4 byte quantities
+ * P_32_SWAP swap a referenced memory location
+ */
+#define M_32_SWAP(a) { \
+ u_int32_t _tmp; \
+ _tmp = a; \
+ ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[3]; \
+ ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[2]; \
+ ((u_int8_t *)&a)[2] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)&a)[3] = ((u_int8_t *)&_tmp)[0]; \
+}
+#define P_32_COPY(a, b) { \
+ ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \
+ ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \
+ ((u_int8_t *)b)[2] = ((u_int8_t *)a)[2]; \
+ ((u_int8_t *)b)[3] = ((u_int8_t *)a)[3]; \
+}
+#define P_32_SWAP(a) { \
+ u_int32_t _tmp; \
+ P_32_COPY(a, &_tmp); \
+ ((u_int8_t *)a)[0] = ((u_int8_t *)&_tmp)[3]; \
+ ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[2]; \
+ ((u_int8_t *)a)[2] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)a)[3] = ((u_int8_t *)&_tmp)[0]; \
+}
+
+/*
+ * Little endian <==> big endian 16-bit swap macros.
+ * M_16_SWAP swap a memory location
+ * P_16_COPY copy potentially unaligned 2 byte quantities
+ * P_16_SWAP swap a referenced memory location
+ */
+#define M_16_SWAP(a) { \
+ u_int16_t _tmp; \
+ _tmp = (u_int16_t)a; \
+ ((u_int8_t *)&a)[0] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)&a)[1] = ((u_int8_t *)&_tmp)[0]; \
+}
+#define P_16_COPY(a, b) { \
+ ((u_int8_t *)b)[0] = ((u_int8_t *)a)[0]; \
+ ((u_int8_t *)b)[1] = ((u_int8_t *)a)[1]; \
+}
+#define P_16_SWAP(a) { \
+ u_int16_t _tmp; \
+ P_16_COPY(a, &_tmp); \
+ ((u_int8_t *)a)[0] = ((u_int8_t *)&_tmp)[1]; \
+ ((u_int8_t *)a)[1] = ((u_int8_t *)&_tmp)[0]; \
+}
+
+#define SWAP32(p) { \
+ P_32_SWAP(p); \
+ (p) += sizeof(u_int32_t); \
+}
+#define SWAP16(p) { \
+ P_16_SWAP(p); \
+ (p) += sizeof(u_int16_t); \
+}
+
+/*
+ * Berkeley DB has local versions of htonl() and ntohl() that operate on
+ * pointers to the right size memory locations; the portability magic for
+ * finding the real system functions isn't worth the effort.
+ */
+#define DB_HTONL(p) do { \
+ if (!__db_isbigendian()) \
+ P_32_SWAP(p); \
+} while (0)
+#define DB_NTOHL(p) do { \
+ if (!__db_isbigendian()) \
+ P_32_SWAP(p); \
+} while (0)
+
+#endif /* !_DB_SWAP_H_ */
diff --git a/storage/bdb/dbinc/db_upgrade.h b/storage/bdb/dbinc/db_upgrade.h
new file mode 100644
index 00000000000..3ccba810889
--- /dev/null
+++ b/storage/bdb/dbinc/db_upgrade.h
@@ -0,0 +1,242 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_upgrade.h,v 1.10 2002/01/11 15:52:26 bostic Exp $
+ */
+
+#ifndef _DB_UPGRADE_H_
+#define _DB_UPGRADE_H_
+
+/*
+ * This file defines the metadata pages from the previous release.
+ * These structures are only used to upgrade old versions of databases.
+ */
+
+/* Structures from the 3.1 release */
+typedef struct _dbmeta31 {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int32_t free; /* 28-31: Free list page number. */
+ DB_LSN unused3; /* 36-39: Unused. */
+ u_int32_t key_count; /* 40-43: Cached key count. */
+ u_int32_t record_count; /* 44-47: Cached record count. */
+ u_int32_t flags; /* 48-51: Flags: unique to each AM. */
+ /* 52-71: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} DBMETA31;
+
+typedef struct _btmeta31 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t maxkey; /* 72-75: Btree: Maxkey. */
+ u_int32_t minkey; /* 76-79: Btree: Minkey. */
+ u_int32_t re_len; /* 80-83: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Recno: fixed-length record pad. */
+ u_int32_t root; /* 88-92: Root page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} BTMETA31;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _hashmeta31 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data page header. */
+
+ u_int32_t max_bucket; /* 72-75: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 76-79: Modulo mask into table */
+ u_int32_t low_mask; /* 80-83: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 84-87: Fill factor */
+ u_int32_t nelem; /* 88-91: Number of keys in hash table */
+ u_int32_t h_charkey; /* 92-95: Value of hash(CHARKEY) */
+#define NCACHED 32 /* number of spare points */
+ /* 96-223: Spare pages for overflow */
+ u_int32_t spares[NCACHED];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HMETA31;
+
+/*
+ * QAM Meta data page structure
+ *
+ */
+typedef struct _qmeta31 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t start; /* 72-75: Start offset. */
+ u_int32_t first_recno; /* 76-79: First not deleted record. */
+ u_int32_t cur_recno; /* 80-83: Last recno allocated. */
+ u_int32_t re_len; /* 84-87: Fixed-length record length. */
+ u_int32_t re_pad; /* 88-91: Fixed-length record pad. */
+ u_int32_t rec_page; /* 92-95: Records Per Page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA31;
+/* Structures from the 3.2 release */
+typedef struct _qmeta32 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t first_recno; /* 72-75: First not deleted record. */
+ u_int32_t cur_recno; /* 76-79: Last recno allocated. */
+ u_int32_t re_len; /* 80-83: Fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Fixed-length record pad. */
+ u_int32_t rec_page; /* 88-91: Records Per Page. */
+ u_int32_t page_ext; /* 92-95: Pages per extent */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA32;
+
+/* Structures from the 3.0 release */
+
+typedef struct _dbmeta30 {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int32_t free; /* 28-31: Free list page number. */
+ u_int32_t flags; /* 32-35: Flags: unique to each AM. */
+ /* 36-55: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} DBMETA30;
+
+/************************************************************************
+ BTREE METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _btmeta30 {
+ DBMETA30 dbmeta; /* 00-55: Generic meta-data header. */
+
+ u_int32_t maxkey; /* 56-59: Btree: Maxkey. */
+ u_int32_t minkey; /* 60-63: Btree: Minkey. */
+ u_int32_t re_len; /* 64-67: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 68-71: Recno: fixed-length record pad. */
+ u_int32_t root; /* 72-75: Root page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} BTMETA30;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _hashmeta30 {
+ DBMETA30 dbmeta; /* 00-55: Generic meta-data page header. */
+
+ u_int32_t max_bucket; /* 56-59: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 60-63: Modulo mask into table */
+ u_int32_t low_mask; /* 64-67: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 68-71: Fill factor */
+ u_int32_t nelem; /* 72-75: Number of keys in hash table */
+ u_int32_t h_charkey; /* 76-79: Value of hash(CHARKEY) */
+#define NCACHED30 32 /* number of spare points */
+ /* 80-207: Spare pages for overflow */
+ u_int32_t spares[NCACHED30];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HMETA30;
+
+/************************************************************************
+ QUEUE METADATA PAGE LAYOUT
+ ************************************************************************/
+/*
+ * QAM Meta data page structure
+ *
+ */
+typedef struct _qmeta30 {
+ DBMETA30 dbmeta; /* 00-55: Generic meta-data header. */
+
+ u_int32_t start; /* 56-59: Start offset. */
+ u_int32_t first_recno; /* 60-63: First not deleted record. */
+ u_int32_t cur_recno; /* 64-67: Last recno allocated. */
+ u_int32_t re_len; /* 68-71: Fixed-length record length. */
+ u_int32_t re_pad; /* 72-75: Fixed-length record pad. */
+ u_int32_t rec_page; /* 76-79: Records Per Page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA30;
+
+/* Structures from Release 2.x */
+
+/************************************************************************
+ BTREE METADATA PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Btree metadata page layout:
+ */
+typedef struct _btmeta2X {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int32_t maxkey; /* 24-27: Btree: Maxkey. */
+ u_int32_t minkey; /* 28-31: Btree: Minkey. */
+ u_int32_t free; /* 32-35: Free list page number. */
+ u_int32_t flags; /* 36-39: Flags. */
+ u_int32_t re_len; /* 40-43: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 44-47: Recno: fixed-length record pad. */
+ /* 48-67: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} BTMETA2X;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+
+/*
+ * Hash metadata page layout:
+ */
+/* Hash Table Information */
+typedef struct hashhdr { /* Disk resident portion */
+ DB_LSN lsn; /* 00-07: LSN of the header page */
+ db_pgno_t pgno; /* 08-11: Page number (btree compatibility). */
+ u_int32_t magic; /* 12-15: Magic NO for hash tables */
+ u_int32_t version; /* 16-19: Version ID */
+ u_int32_t pagesize; /* 20-23: Bucket/Page Size */
+ u_int32_t ovfl_point; /* 24-27: Overflow page allocation location */
+ u_int32_t last_freed; /* 28-31: Last freed overflow page pgno */
+ u_int32_t max_bucket; /* 32-35: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 36-39: Modulo mask into table */
+ u_int32_t low_mask; /* 40-43: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 44-47: Fill factor */
+ u_int32_t nelem; /* 48-51: Number of keys in hash table */
+ u_int32_t h_charkey; /* 52-55: Value of hash(CHARKEY) */
+ u_int32_t flags; /* 56-59: Allow duplicates. */
+#define NCACHED2X 32 /* number of spare points */
+ /* 60-187: Spare pages for overflow */
+ u_int32_t spares[NCACHED2X];
+ /* 188-207: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HASHHDR;
+
+#endif /* !_DB_UPGRADE_H_ */
diff --git a/storage/bdb/dbinc/db_verify.h b/storage/bdb/dbinc/db_verify.h
new file mode 100644
index 00000000000..949c9a2a6a1
--- /dev/null
+++ b/storage/bdb/dbinc/db_verify.h
@@ -0,0 +1,205 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_verify.h,v 1.26 2002/08/06 06:37:08 bostic Exp $
+ */
+
+#ifndef _DB_VERIFY_H_
+#define _DB_VERIFY_H_
+
+/*
+ * Structures and macros for the storage and retrieval of all information
+ * needed for inter-page verification of a database.
+ */
+
+/*
+ * EPRINT is the macro for error printing. Takes as an arg the arg set
+ * for DB->err.
+ */
+#define EPRINT(x) \
+ do { \
+ if (!LF_ISSET(DB_SALVAGE)) \
+ __db_err x; \
+ } while (0)
+
+/* For fatal type errors--i.e., verifier bugs. */
+#define TYPE_ERR_PRINT(dbenv, func, pgno, ptype) \
+ EPRINT(((dbenv), "Page %lu: %s called on nonsensical page of type %lu", \
+ (u_long)(pgno), (func), (u_long)(ptype)));
+
+/* Complain about a totally zeroed page where we don't expect one. */
+#define ZEROPG_ERR_PRINT(dbenv, pgno, str) \
+ do { \
+ EPRINT(((dbenv), "Page %lu: %s is of inappropriate type %lu", \
+ (u_long)(pgno), str, (u_long)P_INVALID)); \
+ EPRINT(((dbenv), "Page %lu: totally zeroed page", \
+ (u_long)(pgno))); \
+ } while (0)
+
+/*
+ * Note that 0 is, in general, a valid pgno, despite equalling PGNO_INVALID;
+ * we have to test it separately where it's not appropriate.
+ */
+#define IS_VALID_PGNO(x) ((x) <= vdp->last_pgno)
+
+/*
+ * Flags understood by the btree structure checks (esp. __bam_vrfy_subtree).
+ * These share the same space as the global flags to __db_verify, and must not
+ * dip below 0x00010000.
+ */
+#define ST_DUPOK 0x00010000 /* Duplicates are acceptable. */
+#define ST_DUPSET 0x00020000 /* Subtree is in a duplicate tree. */
+#define ST_DUPSORT 0x00040000 /* Duplicates are sorted. */
+#define ST_IS_RECNO 0x00080000 /* Subtree is a recno. */
+#define ST_OVFL_LEAF 0x00100000 /* Overflow reffed from leaf page. */
+#define ST_RECNUM 0x00200000 /* Subtree has record numbering on. */
+#define ST_RELEN 0x00400000 /* Subtree has fixed-length records. */
+#define ST_TOPLEVEL 0x00800000 /* Subtree == entire tree */
+
+/*
+ * Flags understood by __bam_salvage and __db_salvage. These need not share
+ * the same space with the __bam_vrfy_subtree flags, but must share with
+ * __db_verify.
+ */
+#define SA_SKIPFIRSTKEY 0x00080000
+
+/*
+ * VRFY_DBINFO is the fundamental structure; it either represents the database
+ * of subdatabases, or the sole database if there are no subdatabases.
+ */
+struct __vrfy_dbinfo {
+ /* Info about this database in particular. */
+ DBTYPE type;
+
+ /* List of subdatabase meta pages, if any. */
+ LIST_HEAD(__subdbs, __vrfy_childinfo) subdbs;
+
+ /* File-global info--stores VRFY_PAGEINFOs for each page. */
+ DB *pgdbp;
+
+ /* Child database--stores VRFY_CHILDINFOs of each page. */
+ DB *cdbp;
+
+ /* Page info structures currently in use. */
+ LIST_HEAD(__activepips, __vrfy_pageinfo) activepips;
+
+ /*
+ * DB we use to keep track of which pages are linked somehow
+ * during verification. 0 is the default, "unseen"; 1 is seen.
+ */
+ DB *pgset;
+
+ /*
+ * This is a database we use during salvaging to keep track of which
+ * overflow and dup pages we need to come back to at the end and print
+ * with key "UNKNOWN". Pages which print with a good key get set
+ * to SALVAGE_IGNORE; others get set, as appropriate, to SALVAGE_LDUP,
+ * SALVAGE_LRECNODUP, SALVAGE_OVERFLOW for normal db overflow pages,
+ * and SALVAGE_BTREE, SALVAGE_LRECNO, and SALVAGE_HASH for subdb
+ * pages.
+ */
+#define SALVAGE_INVALID 0
+#define SALVAGE_IGNORE 1
+#define SALVAGE_LDUP 2
+#define SALVAGE_LRECNODUP 3
+#define SALVAGE_OVERFLOW 4
+#define SALVAGE_LBTREE 5
+#define SALVAGE_HASH 6
+#define SALVAGE_LRECNO 7
+ DB *salvage_pages;
+
+ db_pgno_t last_pgno;
+ db_pgno_t pgs_remaining; /* For dbp->db_feedback(). */
+
+ /*
+ * These are used during __bam_vrfy_subtree to keep track, while
+ * walking up and down the Btree structure, of the prev- and next-page
+ * chain of leaf pages and verify that it's intact. Also, make sure
+ * that this chain contains pages of only one type.
+ */
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+ u_int8_t leaf_type;
+
+ /* Queue needs these to verify data pages in the first pass. */
+ u_int32_t re_len;
+ u_int32_t rec_page;
+
+#define SALVAGE_PRINTABLE 0x01 /* Output printable chars literally. */
+#define SALVAGE_PRINTHEADER 0x02 /* Print the unknown-key header. */
+#define SALVAGE_PRINTFOOTER 0x04 /* Print the unknown-key footer. */
+ u_int32_t flags;
+}; /* VRFY_DBINFO */
+
+/*
+ * The amount of state information we need per-page is small enough that
+ * it's not worth the trouble to define separate structures for each
+ * possible type of page, and since we're doing verification with these we
+ * have to be open to the possibility that page N will be of a completely
+ * unexpected type anyway. So we define one structure here with all the
+ * info we need for inter-page verification.
+ */
+struct __vrfy_pageinfo {
+ u_int8_t type;
+ u_int8_t bt_level;
+ u_int8_t unused1;
+ u_int8_t unused2;
+ db_pgno_t pgno;
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+
+ /* meta pages */
+ db_pgno_t root;
+ db_pgno_t free; /* Free list head. */
+
+ db_indx_t entries; /* Actual number of entries. */
+ u_int16_t unused;
+ db_recno_t rec_cnt; /* Record count. */
+ u_int32_t re_len; /* Record length. */
+ u_int32_t bt_minkey;
+ u_int32_t bt_maxkey;
+ u_int32_t h_ffactor;
+ u_int32_t h_nelem;
+
+ /* overflow pages */
+ /*
+ * Note that refcount is the refcount for an overflow page; pi_refcount
+ * is this structure's own refcount!
+ */
+ u_int32_t refcount;
+ u_int32_t olen;
+
+#define VRFY_DUPS_UNSORTED 0x0001 /* Have to flag the negative! */
+#define VRFY_HAS_DUPS 0x0002
+#define VRFY_HAS_DUPSORT 0x0004 /* Has the flag set. */
+#define VRFY_HAS_SUBDBS 0x0008
+#define VRFY_HAS_RECNUMS 0x0010
+#define VRFY_INCOMPLETE 0x0020 /* Meta or item order checks incomp. */
+#define VRFY_IS_ALLZEROES 0x0040 /* Hash page we haven't touched? */
+#define VRFY_IS_FIXEDLEN 0x0080
+#define VRFY_IS_RECNO 0x0100
+#define VRFY_IS_RRECNO 0x0200
+#define VRFY_OVFL_LEAFSEEN 0x0400
+ u_int32_t flags;
+
+ LIST_ENTRY(__vrfy_pageinfo) links;
+ u_int32_t pi_refcount;
+}; /* VRFY_PAGEINFO */
+
+struct __vrfy_childinfo {
+ db_pgno_t pgno;
+
+#define V_DUPLICATE 1 /* off-page dup metadata */
+#define V_OVERFLOW 2 /* overflow page */
+#define V_RECNO 3 /* btree internal or leaf page */
+ u_int32_t type;
+ db_recno_t nrecs; /* record count on a btree subtree */
+ u_int32_t tlen; /* ovfl. item total size */
+
+ LIST_ENTRY(__vrfy_childinfo) links;
+}; /* VRFY_CHILDINFO */
+
+#endif /* !_DB_VERIFY_H_ */
diff --git a/storage/bdb/dbinc/debug.h b/storage/bdb/dbinc/debug.h
new file mode 100644
index 00000000000..21f80387ccc
--- /dev/null
+++ b/storage/bdb/dbinc/debug.h
@@ -0,0 +1,198 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: debug.h,v 11.31 2002/08/06 06:37:08 bostic Exp $
+ */
+
+#ifndef _DB_DEBUG_H_
+#define _DB_DEBUG_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * When running with #DIAGNOSTIC defined, we smash memory and do memory
+ * guarding with a special byte value.
+ */
+#define CLEAR_BYTE 0xdb
+#define GUARD_BYTE 0xdc
+
+/*
+ * DB assertions.
+ */
+#if defined(DIAGNOSTIC) && defined(__STDC__)
+#define DB_ASSERT(e) ((e) ? (void)0 : __db_assert(#e, __FILE__, __LINE__))
+#else
+#define DB_ASSERT(e)
+#endif
+
+/*
+ * Purify and other run-time tools complain about uninitialized reads/writes
+ * of structure fields whose only purpose is padding, as well as when heap
+ * memory that was never initialized is written to disk.
+ */
+#ifdef UMRW
+#define UMRW_SET(v) (v) = 0
+#else
+#define UMRW_SET(v)
+#endif
+
+/*
+ * Error message handling. Use a macro instead of a function because va_list
+ * references to variadic arguments cannot be reset to the beginning of the
+ * variadic argument list (and then rescanned), by functions other than the
+ * original routine that took the variadic list of arguments.
+ */
+#if defined(__STDC__) || defined(__cplusplus)
+#define DB_REAL_ERR(env, error, error_set, stderr_default, fmt) { \
+ va_list ap; \
+ \
+ /* Call the user's callback function, if specified. */ \
+ va_start(ap, fmt); \
+ if ((env) != NULL && (env)->db_errcall != NULL) \
+ __db_errcall(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* Write to the user's file descriptor, if specified. */ \
+ va_start(ap, fmt); \
+ if ((env) != NULL && (env)->db_errfile != NULL) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* \
+ * If we have a default and we didn't do either of the above, \
+ * write to the default. \
+ */ \
+ va_start(ap, fmt); \
+ if ((stderr_default) && ((env) == NULL || \
+ ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+}
+#else
+#define DB_REAL_ERR(env, error, error_set, stderr_default, fmt) { \
+ va_list ap; \
+ \
+ /* Call the user's callback function, if specified. */ \
+ va_start(ap); \
+ if ((env) != NULL && (env)->db_errcall != NULL) \
+ __db_errcall(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* Write to the user's file descriptor, if specified. */ \
+ va_start(ap); \
+ if ((env) != NULL && (env)->db_errfile != NULL) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* \
+ * If we have a default and we didn't do either of the above, \
+ * write to the default. \
+ */ \
+ va_start(ap); \
+ if ((stderr_default) && ((env) == NULL || \
+ ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+}
+#endif
+
+/*
+ * Debugging macro to log operations.
+ * If DEBUG_WOP is defined, log operations that modify the database.
+ * If DEBUG_ROP is defined, log operations that read the database.
+ *
+ * D dbp
+ * T txn
+ * O operation (string)
+ * K key
+ * A data
+ * F flags
+ */
+#define LOG_OP(C, T, O, K, A, F) { \
+ DB_LSN __lsn; \
+ DBT __op; \
+ if (DBC_LOGGING((C))) { \
+ memset(&__op, 0, sizeof(__op)); \
+ __op.data = O; \
+ __op.size = strlen(O) + 1; \
+ (void)__db_debug_log((C)->dbp->dbenv, T, &__lsn, 0, \
+ &__op, (C)->dbp->log_filename->id, K, A, F); \
+ } \
+}
+#ifdef DEBUG_ROP
+#define DEBUG_LREAD(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
+#else
+#define DEBUG_LREAD(C, T, O, K, A, F)
+#endif
+#ifdef DEBUG_WOP
+#define DEBUG_LWRITE(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
+#else
+#define DEBUG_LWRITE(C, T, O, K, A, F)
+#endif
+
+/*
+ * Hook for testing recovery at various places in the create/delete paths.
+ * Hook for testing subdb locks.
+ */
+#if CONFIG_TEST
+#define DB_TEST_SUBLOCKS(env, flags) \
+do { \
+ if ((env)->test_abort == DB_TEST_SUBDB_LOCKS) \
+ (flags) |= DB_LOCK_NOWAIT; \
+} while (0)
+
+#define DB_ENV_TEST_RECOVERY(env, val, ret, name) \
+do { \
+ int __ret; \
+ PANIC_CHECK((env)); \
+ if ((env)->test_copy == (val)) { \
+ /* COPY the FILE */ \
+ if ((__ret = __db_testcopy((env), NULL, (name))) != 0) \
+ (ret) = __db_panic((env), __ret); \
+ } \
+ if ((env)->test_abort == (val)) { \
+ /* ABORT the TXN */ \
+ (env)->test_abort = 0; \
+ (ret) = EINVAL; \
+ goto db_tr_err; \
+ } \
+} while (0)
+
+#define DB_TEST_RECOVERY(dbp, val, ret, name) \
+do { \
+ int __ret; \
+ PANIC_CHECK((dbp)->dbenv); \
+ if ((dbp)->dbenv->test_copy == (val)) { \
+ /* Copy the file. */ \
+ if (F_ISSET((dbp), \
+ DB_AM_OPEN_CALLED) && (dbp)->mpf != NULL) \
+ (void)(dbp)->sync((dbp), 0); \
+ if ((__ret = \
+ __db_testcopy((dbp)->dbenv, (dbp), (name))) != 0) \
+ (ret) = __db_panic((dbp)->dbenv, __ret); \
+ } \
+ if ((dbp)->dbenv->test_abort == (val)) { \
+ /* Abort the transaction. */ \
+ (dbp)->dbenv->test_abort = 0; \
+ (ret) = EINVAL; \
+ goto db_tr_err; \
+ } \
+} while (0)
+
+#define DB_TEST_RECOVERY_LABEL db_tr_err:
+#else
+#define DB_TEST_SUBLOCKS(env, flags)
+#define DB_ENV_TEST_RECOVERY(env, val, ret, name)
+#define DB_TEST_RECOVERY(dbp, val, ret, name)
+#define DB_TEST_RECOVERY_LABEL
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_DEBUG_H_ */
diff --git a/storage/bdb/dbinc/fop.h b/storage/bdb/dbinc/fop.h
new file mode 100644
index 00000000000..c438ef7ef40
--- /dev/null
+++ b/storage/bdb/dbinc/fop.h
@@ -0,0 +1,16 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: fop.h,v 11.3 2002/03/27 04:34:54 bostic Exp $
+ */
+
+#ifndef _FOP_H_
+#define _FOP_H_
+
+#include "dbinc_auto/fileops_auto.h"
+#include "dbinc_auto/fileops_ext.h"
+
+#endif /* !_FOP_H_ */
diff --git a/storage/bdb/dbinc/globals.h b/storage/bdb/dbinc/globals.h
new file mode 100644
index 00000000000..3441ade2ea9
--- /dev/null
+++ b/storage/bdb/dbinc/globals.h
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: globals.h,v 11.1 2002/07/12 18:56:41 bostic Exp $
+ */
+
+/*******************************************************
+ * Global variables.
+ *
+ * Held in a single structure to minimize the name-space pollution.
+ *******************************************************/
+#ifdef HAVE_VXWORKS
+#include "semLib.h"
+#endif
+
+typedef struct __db_globals {
+ u_int32_t no_write_errors; /* write error testing disallowed */
+#ifdef HAVE_VXWORKS
+ u_int32_t db_global_init; /* VxWorks: inited */
+ SEM_ID db_global_lock; /* VxWorks: global semaphore */
+#endif
+ /* XA: list of opened environments. */
+ TAILQ_HEAD(__db_envq, __db_env) db_envq;
+
+ int (*j_close) __P((int)); /* Underlying OS interface jump table.*/
+ void (*j_dirfree) __P((char **, int));
+ int (*j_dirlist) __P((const char *, char ***, int *));
+ int (*j_exists) __P((const char *, int *));
+ void (*j_free) __P((void *));
+ int (*j_fsync) __P((int));
+ int (*j_ioinfo) __P((const char *,
+ int, u_int32_t *, u_int32_t *, u_int32_t *));
+ void *(*j_malloc) __P((size_t));
+ int (*j_map) __P((char *, size_t, int, int, void **));
+ int (*j_open) __P((const char *, int, ...));
+ ssize_t (*j_read) __P((int, void *, size_t));
+ void *(*j_realloc) __P((void *, size_t));
+ int (*j_rename) __P((const char *, const char *));
+ int (*j_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int));
+ int (*j_sleep) __P((u_long, u_long));
+ int (*j_unlink) __P((const char *));
+ int (*j_unmap) __P((void *, size_t));
+ ssize_t (*j_write) __P((int, const void *, size_t));
+ int (*j_yield) __P((void));
+} DB_GLOBALS;
+
+#ifdef DB_INITIALIZE_DB_GLOBALS
+DB_GLOBALS __db_global_values = {
+ 0, /* write error testing disallowed */
+#ifdef HAVE_VXWORKS
+ 0, /* VxWorks: initialized */
+ NULL, /* VxWorks: global semaphore */
+#endif
+ /* XA: list of opened environments. */
+ {NULL, &__db_global_values.db_envq.tqh_first},
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+#else
+extern DB_GLOBALS __db_global_values;
+#endif
+
+#define DB_GLOBAL(v) __db_global_values.v
diff --git a/storage/bdb/dbinc/hash.h b/storage/bdb/dbinc/hash.h
new file mode 100644
index 00000000000..98289735fc4
--- /dev/null
+++ b/storage/bdb/dbinc/hash.h
@@ -0,0 +1,147 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: hash.h,v 11.26 2002/03/27 04:34:54 bostic Exp $
+ */
+
+#ifndef _DB_HASH_H_
+#define _DB_HASH_H_
+
+/* Hash internal structure. */
+typedef struct hash_t {
+ db_pgno_t meta_pgno; /* Page number of the meta data page. */
+ u_int32_t h_ffactor; /* Fill factor. */
+ u_int32_t h_nelem; /* Number of elements. */
+ /* Hash function. */
+ u_int32_t (*h_hash) __P((DB *, const void *, u_int32_t));
+} HASH;
+
+/* Cursor structure definitions. */
+typedef struct cursor_t {
+ /* struct __dbc_internal */
+ __DBC_INTERNAL
+
+ /* Hash private part */
+
+ /* Per-thread information */
+ DB_LOCK hlock; /* Metadata page lock. */
+ HMETA *hdr; /* Pointer to meta-data page. */
+ PAGE *split_buf; /* Temporary buffer for splits. */
+
+ /* Hash cursor information */
+ db_pgno_t bucket; /* Bucket we are traversing. */
+ db_pgno_t lbucket; /* Bucket for which we are locked. */
+ db_indx_t dup_off; /* Offset within a duplicate set. */
+ db_indx_t dup_len; /* Length of current duplicate. */
+ db_indx_t dup_tlen; /* Total length of duplicate entry. */
+ u_int32_t seek_size; /* Number of bytes we need for add. */
+ db_pgno_t seek_found_page;/* Page on which we can insert. */
+ u_int32_t order; /* Relative order among deleted curs. */
+
+#define H_CONTINUE 0x0001 /* Join--search strictly fwd for data */
+#define H_DELETED 0x0002 /* Cursor item is deleted. */
+#define H_DIRTY 0x0004 /* Meta-data page needs to be written */
+#define H_DUPONLY 0x0008 /* Dups only; do not change key. */
+#define H_EXPAND 0x0010 /* Table expanded. */
+#define H_ISDUP 0x0020 /* Cursor is within duplicate set. */
+#define H_NEXT_NODUP 0x0040 /* Get next non-dup entry. */
+#define H_NOMORE 0x0080 /* No more entries in bucket. */
+#define H_OK 0x0100 /* Request succeeded. */
+ u_int32_t flags;
+} HASH_CURSOR;
+
+/* Test string. */
+#define CHARKEY "%$sniglet^&"
+
+/* Overflow management */
+/*
+ * The spares table indicates the page number at which each doubling begins.
+ * From this page number we subtract the number of buckets already allocated
+ * so that we can do a simple addition to calculate the page number here.
+ */
+#define BS_TO_PAGE(bucket, spares) \
+ ((bucket) + (spares)[__db_log2((bucket) + 1)])
+#define BUCKET_TO_PAGE(I, B) (BS_TO_PAGE((B), (I)->hdr->spares))
+
+/* Constraints about much data goes on a page. */
+
+#define MINFILL 4
+#define ISBIG(I, N) (((N) > ((I)->hdr->dbmeta.pagesize / MINFILL)) ? 1 : 0)
+
+/* Shorthands for accessing structure */
+#define NDX_INVALID 0xFFFF
+#define BUCKET_INVALID 0xFFFFFFFF
+
+/* On page duplicates are stored as a string of size-data-size triples. */
+#define DUP_SIZE(len) ((len) + 2 * sizeof(db_indx_t))
+
+/* Log messages types (these are subtypes within a record type) */
+#define PAIR_KEYMASK 0x1
+#define PAIR_DATAMASK 0x2
+#define PAIR_DUPMASK 0x4
+#define PAIR_MASK 0xf
+#define PAIR_ISKEYBIG(N) (N & PAIR_KEYMASK)
+#define PAIR_ISDATABIG(N) (N & PAIR_DATAMASK)
+#define PAIR_ISDATADUP(N) (N & PAIR_DUPMASK)
+#define OPCODE_OF(N) (N & ~PAIR_MASK)
+
+#define PUTPAIR 0x20
+#define DELPAIR 0x30
+#define PUTOVFL 0x40
+#define DELOVFL 0x50
+#define HASH_UNUSED1 0x60
+#define HASH_UNUSED2 0x70
+#define SPLITOLD 0x80
+#define SPLITNEW 0x90
+
+typedef enum {
+ DB_HAM_CHGPG = 1,
+ DB_HAM_DELFIRSTPG = 2,
+ DB_HAM_DELMIDPG = 3,
+ DB_HAM_DELLASTPG = 4,
+ DB_HAM_DUP = 5,
+ DB_HAM_SPLIT = 6
+} db_ham_mode;
+
+#include "dbinc_auto/hash_auto.h"
+#include "dbinc_auto/hash_ext.h"
+#include "dbinc/db_am.h"
+#endif /* !_DB_HASH_H_ */
diff --git a/storage/bdb/dbinc/hmac.h b/storage/bdb/dbinc/hmac.h
new file mode 100644
index 00000000000..16f61fb58ad
--- /dev/null
+++ b/storage/bdb/dbinc/hmac.h
@@ -0,0 +1,32 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: hmac.h,v 1.3 2002/08/06 06:37:08 bostic Exp $
+ */
+
+#ifndef _DB_HMAC_H_
+#define _DB_HMAC_H_
+
+/*
+ * Algorithm specific information.
+ */
+/*
+ * SHA1 checksumming
+ */
+typedef struct {
+ u_int32_t state[5];
+ u_int32_t count[2];
+ unsigned char buffer[64];
+} SHA1_CTX;
+
+/*
+ * AES assumes the SHA1 checksumming (also called MAC)
+ */
+#define DB_MAC_MAGIC "mac derivation key magic value"
+#define DB_ENC_MAGIC "encryption and decryption key value magic"
+
+#include "dbinc_auto/hmac_ext.h"
+#endif /* !_DB_HMAC_H_ */
diff --git a/storage/bdb/dbinc/lock.h b/storage/bdb/dbinc/lock.h
new file mode 100644
index 00000000000..7ddc9ce9988
--- /dev/null
+++ b/storage/bdb/dbinc/lock.h
@@ -0,0 +1,212 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: lock.h,v 11.42 2002/05/18 01:34:13 bostic Exp $
+ */
+
+#ifndef _DB_LOCK_H_
+#define _DB_LOCK_H_
+
+#define DB_LOCK_DEFAULT_N 1000 /* Default # of locks in region. */
+
+/*
+ * The locker id space is divided between the transaction manager and the lock
+ * manager. Lock IDs start at 1 and go to DB_LOCK_MAXID. Txn IDs start at
+ * DB_LOCK_MAXID + 1 and go up to TXN_MAXIMUM.
+ */
+#define DB_LOCK_INVALIDID 0
+#define DB_LOCK_MAXID 0x7fffffff
+
+/*
+ * Out of band value for a lock. Locks contain an offset into a lock region,
+ * so we use an invalid region offset to indicate an invalid or unset lock.
+ */
+#define LOCK_INVALID INVALID_ROFF
+#define LOCK_ISSET(lock) ((lock).off != LOCK_INVALID)
+#define LOCK_INIT(lock) ((lock).off = LOCK_INVALID)
+
+/*
+ * Macro to identify a write lock for the purpose of counting locks
+ * for the NUMWRITES option to deadlock detection.
+ */
+#define IS_WRITELOCK(m) \
+ ((m) == DB_LOCK_WRITE || (m) == DB_LOCK_IWRITE || (m) == DB_LOCK_IWR)
+
+/*
+ * Lock timers.
+ */
+typedef struct {
+ u_int32_t tv_sec; /* Seconds. */
+ u_int32_t tv_usec; /* Microseconds. */
+} db_timeval_t;
+
+#define LOCK_TIME_ISVALID(time) ((time)->tv_sec != 0)
+#define LOCK_SET_TIME_INVALID(time) ((time)->tv_sec = 0)
+#define LOCK_TIME_EQUAL(t1, t2) \
+ ((t1)->tv_sec == (t2)->tv_sec && (t1)->tv_usec == (t2)->tv_usec)
+
+/*
+ * DB_LOCKREGION --
+ * The lock shared region.
+ */
+typedef struct __db_lockregion {
+ u_int32_t need_dd; /* flag for deadlock detector */
+ u_int32_t detect; /* run dd on every conflict */
+ /* free lock header */
+ SH_TAILQ_HEAD(__flock) free_locks;
+ /* free obj header */
+ SH_TAILQ_HEAD(__fobj) free_objs;
+ /* free locker header */
+ SH_TAILQ_HEAD(__flocker) free_lockers;
+ SH_TAILQ_HEAD(__dobj) dd_objs; /* objects with waiters */
+ SH_TAILQ_HEAD(__lkrs) lockers; /* list of lockers */
+
+ db_timeout_t lk_timeout; /* timeout for locks. */
+ db_timeout_t tx_timeout; /* timeout for txns. */
+
+ u_int32_t locker_t_size; /* size of locker hash table */
+ u_int32_t object_t_size; /* size of object hash table */
+
+ roff_t conf_off; /* offset of conflicts array */
+ roff_t obj_off; /* offset of object hash table */
+ roff_t osynch_off; /* offset of the object mutex table */
+ roff_t locker_off; /* offset of locker hash table */
+ roff_t lsynch_off; /* offset of the locker mutex table */
+
+ DB_LOCK_STAT stat; /* stats about locking. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ roff_t maint_off; /* offset of region maintenance info */
+#endif
+} DB_LOCKREGION;
+
+/*
+ * Since we will store DBTs in shared memory, we need the equivalent of a
+ * DBT that will work in shared memory.
+ */
+typedef struct __sh_dbt {
+ u_int32_t size; /* Byte length. */
+ ssize_t off; /* Region offset. */
+} SH_DBT;
+
+#define SH_DBT_PTR(p) ((void *)(((u_int8_t *)(p)) + (p)->off))
+
+/*
+ * Object structures; these live in the object hash table.
+ */
+typedef struct __db_lockobj {
+ SH_DBT lockobj; /* Identifies object locked. */
+ SH_TAILQ_ENTRY links; /* Links for free list or hash list. */
+ SH_TAILQ_ENTRY dd_links; /* Links for dd list. */
+ SH_TAILQ_HEAD(__wait) waiters; /* List of waiting locks. */
+ SH_TAILQ_HEAD(__hold) holders; /* List of held locks. */
+ /* Declare room in the object to hold
+ * typical DB lock structures so that
+ * we do not have to allocate them from
+ * shalloc at run-time. */
+ u_int8_t objdata[sizeof(struct __db_ilock)];
+} DB_LOCKOBJ;
+
+/*
+ * Locker structures; these live in the locker hash table.
+ */
+typedef struct __db_locker {
+ u_int32_t id; /* Locker id. */
+ u_int32_t dd_id; /* Deadlock detector id. */
+ u_int32_t nlocks; /* Number of locks held. */
+ u_int32_t nwrites; /* Number of write locks held. */
+ size_t master_locker; /* Locker of master transaction. */
+ size_t parent_locker; /* Parent of this child. */
+ SH_LIST_HEAD(_child) child_locker; /* List of descendant txns;
+ only used in a "master"
+ txn. */
+ SH_LIST_ENTRY child_link; /* Links transactions in the family;
+ elements of the child_locker
+ list. */
+ SH_TAILQ_ENTRY links; /* Links for free and hash list. */
+ SH_TAILQ_ENTRY ulinks; /* Links in-use list. */
+ SH_LIST_HEAD(_held) heldby; /* Locks held by this locker. */
+ db_timeval_t lk_expire; /* When current lock expires. */
+ db_timeval_t tx_expire; /* When this txn expires. */
+ db_timeout_t lk_timeout; /* How long do we let locks live. */
+
+#define DB_LOCKER_DELETED 0x0001
+#define DB_LOCKER_DIRTY 0x0002
+#define DB_LOCKER_INABORT 0x0004
+#define DB_LOCKER_TIMEOUT 0x0008
+ u_int32_t flags;
+} DB_LOCKER;
+
+/*
+ * DB_LOCKTAB --
+ * The primary library lock data structure (i.e., the one referenced
+ * by the environment, as opposed to the internal one laid out in the region.)
+ */
+typedef struct __db_locktab {
+ DB_ENV *dbenv; /* Environment. */
+ REGINFO reginfo; /* Region information. */
+ u_int8_t *conflicts; /* Pointer to conflict matrix. */
+ DB_HASHTAB *obj_tab; /* Beginning of object hash table. */
+ DB_HASHTAB *locker_tab; /* Beginning of locker hash table. */
+} DB_LOCKTAB;
+
+/* Test for conflicts. */
+#define CONFLICTS(T, R, HELD, WANTED) \
+ (T)->conflicts[(HELD) * (R)->stat.st_nmodes + (WANTED)]
+
+#define OBJ_LINKS_VALID(L) ((L)->links.stqe_prev != -1)
+
+struct __db_lock {
+ /*
+ * Wait on mutex to wait on lock. You reference your own mutex with
+ * ID 0 and others reference your mutex with ID 1.
+ */
+ DB_MUTEX mutex;
+
+ u_int32_t holder; /* Who holds this lock. */
+ u_int32_t gen; /* Generation count. */
+ SH_TAILQ_ENTRY links; /* Free or holder/waiter list. */
+ SH_LIST_ENTRY locker_links; /* List of locks held by a locker. */
+ u_int32_t refcount; /* Reference count the lock. */
+ db_lockmode_t mode; /* What sort of lock. */
+ ssize_t obj; /* Relative offset of object struct. */
+ db_status_t status; /* Status of this lock. */
+};
+
+/*
+ * Flag values for __lock_put_internal:
+ * DB_LOCK_DOALL: Unlock all references in this lock (instead of only 1).
+ * DB_LOCK_FREE: Free the lock (used in checklocker).
+ * DB_LOCK_IGNOREDEL: Remove from the locker hash table even if already
+ deleted (used in checklocker).
+ * DB_LOCK_NOPROMOTE: Don't bother running promotion when releasing locks
+ * (used by __lock_put_internal).
+ * DB_LOCK_UNLINK: Remove from the locker links (used in checklocker).
+ * Make sure that these do not conflict with the interface flags because
+ * we pass some of those around (i.e., DB_LOCK_REMOVE).
+ */
+#define DB_LOCK_DOALL 0x010000
+#define DB_LOCK_FREE 0x020000
+#define DB_LOCK_IGNOREDEL 0x040000
+#define DB_LOCK_NOPROMOTE 0x080000
+#define DB_LOCK_UNLINK 0x100000
+#define DB_LOCK_NOWAITERS 0x200000
+
+/*
+ * Macros to get/release different types of mutexes.
+ */
+#define OBJECT_LOCK(lt, reg, obj, ndx) \
+ ndx = __lock_ohash(obj) % (reg)->object_t_size
+#define SHOBJECT_LOCK(lt, reg, shobj, ndx) \
+ ndx = __lock_lhash(shobj) % (reg)->object_t_size
+#define LOCKER_LOCK(lt, reg, locker, ndx) \
+ ndx = __lock_locker_hash(locker) % (reg)->locker_t_size;
+
+#define LOCKREGION(dbenv, lt) R_LOCK((dbenv), &(lt)->reginfo)
+#define UNLOCKREGION(dbenv, lt) R_UNLOCK((dbenv), &(lt)->reginfo)
+
+#include "dbinc_auto/lock_ext.h"
+#endif /* !_DB_LOCK_H_ */
diff --git a/storage/bdb/dbinc/log.h b/storage/bdb/dbinc/log.h
new file mode 100644
index 00000000000..434994528ea
--- /dev/null
+++ b/storage/bdb/dbinc/log.h
@@ -0,0 +1,273 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: log.h,v 11.60 2002/08/06 06:37:08 bostic Exp $
+ */
+
+#ifndef _LOG_H_
+#define _LOG_H_
+
+struct __db_log; typedef struct __db_log DB_LOG;
+struct __hdr; typedef struct __hdr HDR;
+struct __log; typedef struct __log LOG;
+struct __log_persist; typedef struct __log_persist LOGP;
+
+#define LFPREFIX "log." /* Log file name prefix. */
+#define LFNAME "log.%010d" /* Log file name template. */
+#define LFNAME_V1 "log.%05d" /* Log file name template, rev 1. */
+
+#define LG_MAX_DEFAULT (10 * MEGABYTE) /* 10 MB. */
+#define LG_BSIZE_DEFAULT (32 * 1024) /* 32 KB. */
+#define LG_BASE_REGION_SIZE (60 * 1024) /* 60 KB. */
+
+/*
+ * The per-process table that maps log file-id's to DB structures.
+ */
+typedef struct __db_entry {
+ DB *dbp; /* Open dbp for this file id. */
+ int deleted; /* File was not found during open. */
+} DB_ENTRY;
+
+/*
+ * DB_LOG
+ * Per-process log structure.
+ */
+struct __db_log {
+/*
+ * These fields need to be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the mutex may need
+ * to be stored elsewhere on architectures unable to support mutexes in heap
+ * memory, e.g., HP/UX 9.
+ */
+ DB_MUTEX *mutexp; /* Mutex for thread protection. */
+
+ DB_ENTRY *dbentry; /* Recovery file-id mapping. */
+#define DB_GROW_SIZE 64
+ int32_t dbentry_cnt; /* Entries. Grows by DB_GROW_SIZE. */
+
+/*
+ * These fields are always accessed while the region lock is held, so they do
+ * not have to be protected by the thread lock as well, OR, they are only used
+ * when threads are not being used, i.e. most cursor operations are disallowed
+ * on threaded logs.
+ */
+ u_int32_t lfname; /* Log file "name". */
+ DB_FH lfh; /* Log file handle. */
+
+ u_int8_t *bufp; /* Region buffer. */
+
+/* These fields are not protected. */
+ DB_ENV *dbenv; /* Reference to error information. */
+ REGINFO reginfo; /* Region information. */
+
+#define DBLOG_RECOVER 0x01 /* We are in recovery. */
+#define DBLOG_FORCE_OPEN 0x02 /* Force the DB open even if it appears
+ * to be deleted.
+ */
+ u_int32_t flags;
+};
+
+/*
+ * HDR --
+ * Log record header.
+ */
+struct __hdr {
+ u_int32_t prev; /* Previous offset. */
+ u_int32_t len; /* Current length. */
+ u_int8_t chksum[DB_MAC_KEY]; /* Current checksum. */
+ u_int8_t iv[DB_IV_BYTES]; /* IV */
+ u_int32_t orig_size; /* Original size of log record */
+ /* !!! - 'size' is not written to log, must be last in hdr */
+ size_t size; /* Size of header to use */
+};
+
+/*
+ * We use HDR internally, and then when we write out, we write out
+ * prev, len, and then a 4-byte checksum if normal operation or
+ * a crypto-checksum and IV and original size if running in crypto
+ * mode. We must store the original size in case we pad. Set the
+ * size when we set up the header. We compute a DB_MAC_KEY size
+ * checksum regardless, but we can safely just use the first 4 bytes.
+ */
+#define HDR_NORMAL_SZ 12
+#define HDR_CRYPTO_SZ 12 + DB_MAC_KEY + DB_IV_BYTES
+
+struct __log_persist {
+ u_int32_t magic; /* DB_LOGMAGIC */
+ u_int32_t version; /* DB_LOGVERSION */
+
+ u_int32_t log_size; /* Log file size. */
+ u_int32_t mode; /* Log file mode. */
+};
+
+/*
+ * LOG --
+ * Shared log region. One of these is allocated in shared memory,
+ * and describes the log.
+ */
+struct __log {
+ /*
+ * Due to alignment constraints on some architectures (e.g. HP-UX),
+ * DB_MUTEXes must be the first element of shalloced structures,
+ * and as a corollary there can be only one per structure. Thus,
+ * flush_mutex_off points to a mutex in a separately-allocated chunk.
+ */
+ DB_MUTEX fq_mutex; /* Mutex guarding file name list. */
+
+ LOGP persist; /* Persistent information. */
+
+ SH_TAILQ_HEAD(__fq1) fq; /* List of file names. */
+ int32_t fid_max; /* Max fid allocated. */
+ roff_t free_fid_stack; /* Stack of free file ids. */
+ int free_fids; /* Height of free fid stack. */
+ int free_fids_alloced; /* Number of free fid slots alloc'ed. */
+
+ /*
+ * The lsn LSN is the file offset that we're about to write and which
+ * we will return to the user.
+ */
+ DB_LSN lsn; /* LSN at current file offset. */
+
+ /*
+ * The f_lsn LSN is the LSN (returned to the user) that "owns" the
+ * first byte of the buffer. If the record associated with the LSN
+ * spans buffers, it may not reflect the physical file location of
+ * the first byte of the buffer.
+ */
+ DB_LSN f_lsn; /* LSN of first byte in the buffer. */
+ size_t b_off; /* Current offset in the buffer. */
+ u_int32_t w_off; /* Current write offset in the file. */
+ u_int32_t len; /* Length of the last record. */
+
+ /*
+ * The s_lsn LSN is the last LSN that we know is on disk, not just
+ * written, but synced. This field is protected by the flush mutex
+ * rather than by the region mutex.
+ */
+ int in_flush; /* Log flush in progress. */
+ roff_t flush_mutex_off; /* Mutex guarding flushing. */
+ DB_LSN s_lsn; /* LSN of the last sync. */
+
+ DB_LOG_STAT stat; /* Log statistics. */
+
+ /*
+ * The waiting_lsn is used by the replication system. It is the
+ * first LSN that we are holding without putting in the log, because
+ * we received one or more log records out of order. Associated with
+ * the waiting_lsn is the number of log records that we still have to
+ * receive before we decide that we should request it again.
+ */
+ DB_LSN waiting_lsn; /* First log record after a gap. */
+ DB_LSN verify_lsn; /* LSN we are waiting to verify. */
+ u_int32_t wait_recs; /* Records to wait before requesting. */
+ u_int32_t rcvd_recs; /* Records received while waiting. */
+
+ /*
+ * The ready_lsn is also used by the replication system. It is the
+ * next LSN we expect to receive. It's normally equal to "lsn",
+ * except at the beginning of a log file, at which point it's set
+ * to the LSN of the first record of the new file (after the
+ * header), rather than to 0.
+ */
+ DB_LSN ready_lsn;
+
+ /*
+ * During initialization, the log system walks forward through the
+ * last log file to find its end. If it runs into a checkpoint
+ * while it's doing so, it caches it here so that the transaction
+ * system doesn't need to walk through the file again on its
+ * initialization.
+ */
+ DB_LSN cached_ckp_lsn;
+
+ roff_t buffer_off; /* Log buffer offset in the region. */
+ u_int32_t buffer_size; /* Log buffer size. */
+
+ u_int32_t log_size; /* Log file's size. */
+ u_int32_t log_nsize; /* Next log file's size. */
+
+ u_int32_t ncommit; /* Number of txns waiting to commit. */
+
+ DB_LSN t_lsn; /* LSN of first commit */
+ SH_TAILQ_HEAD(__commit) commits;/* list of txns waiting to commit. */
+ SH_TAILQ_HEAD(__free) free_commits;/* free list of commit structs. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+#define LG_MAINT_SIZE (sizeof(roff_t) * DB_MAX_HANDLES)
+
+ roff_t maint_off; /* offset of region maintenance info */
+#endif
+};
+
+/*
+ * __db_commit structure --
+ * One of these is allocated for each transaction waiting
+ * to commit.
+ */
+struct __db_commit {
+ DB_MUTEX mutex; /* Mutex for txn to wait on. */
+ DB_LSN lsn; /* LSN of commit record. */
+ SH_TAILQ_ENTRY links; /* Either on free or waiting list. */
+
+#define DB_COMMIT_FLUSH 0x0001 /* Flush the log when you wake up. */
+ u_int32_t flags;
+};
+
+/*
+ * FNAME --
+ * File name and id.
+ */
+struct __fname {
+ SH_TAILQ_ENTRY q; /* File name queue. */
+
+ int32_t id; /* Logging file id. */
+ DBTYPE s_type; /* Saved DB type. */
+
+ roff_t name_off; /* Name offset. */
+ db_pgno_t meta_pgno; /* Page number of the meta page. */
+ u_int8_t ufid[DB_FILE_ID_LEN]; /* Unique file id. */
+
+ u_int32_t create_txnid; /*
+ * Txn ID of the DB create, stored so
+ * we can log it at register time.
+ */
+};
+
+/* File open/close register log record opcodes. */
+#define LOG_CHECKPOINT 1 /* Checkpoint: file name/id dump. */
+#define LOG_CLOSE 2 /* File close. */
+#define LOG_OPEN 3 /* File open. */
+#define LOG_RCLOSE 4 /* File close after recovery. */
+
+#define CHECK_LSN(redo, cmp, lsn, prev) \
+ DB_ASSERT(!DB_REDO(redo) || \
+ (cmp) >= 0 || IS_NOT_LOGGED_LSN(*lsn)); \
+ if (DB_REDO(redo) && (cmp) < 0 && !IS_NOT_LOGGED_LSN(*(lsn))) { \
+ __db_err(dbenv, \
+ "Log sequence error: page LSN %lu %lu; previous LSN %lu %lu", \
+ (u_long)(lsn)->file, (u_long)(lsn)->offset, \
+ (u_long)(prev)->file, (u_long)(prev)->offset); \
+ goto out; \
+ }
+
+/*
+ * Status codes indicating the validity of a log file examined by
+ * __log_valid().
+ */
+typedef enum {
+ DB_LV_INCOMPLETE,
+ DB_LV_NONEXISTENT,
+ DB_LV_NORMAL,
+ DB_LV_OLD_READABLE,
+ DB_LV_OLD_UNREADABLE
+} logfile_validity;
+
+#include "dbinc_auto/dbreg_auto.h"
+#include "dbinc_auto/dbreg_ext.h"
+#include "dbinc_auto/log_ext.h"
+#endif /* !_LOG_H_ */
diff --git a/storage/bdb/dbinc/mp.h b/storage/bdb/dbinc/mp.h
new file mode 100644
index 00000000000..5c805b92364
--- /dev/null
+++ b/storage/bdb/dbinc/mp.h
@@ -0,0 +1,293 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: mp.h,v 11.44 2002/08/06 06:11:21 bostic Exp $
+ */
+
+#ifndef _DB_MP_H_
+#define _DB_MP_H_
+
+struct __bh; typedef struct __bh BH;
+struct __db_mpool_hash; typedef struct __db_mpool_hash DB_MPOOL_HASH;
+struct __db_mpreg; typedef struct __db_mpreg DB_MPREG;
+struct __mpool; typedef struct __mpool MPOOL;
+
+ /* We require at least 20KB of cache. */
+#define DB_CACHESIZE_MIN (20 * 1024)
+
+typedef enum {
+ DB_SYNC_ALLOC, /* Flush for allocation. */
+ DB_SYNC_CACHE, /* Checkpoint or flush entire cache. */
+ DB_SYNC_FILE, /* Flush file. */
+ DB_SYNC_TRICKLE /* Trickle sync. */
+} db_sync_op;
+
+/*
+ * DB_MPOOL --
+ * Per-process memory pool structure.
+ */
+struct __db_mpool {
+ /* These fields need to be protected for multi-threaded support. */
+ DB_MUTEX *mutexp; /* Structure thread lock. */
+
+ /* List of pgin/pgout routines. */
+ LIST_HEAD(__db_mpregh, __db_mpreg) dbregq;
+
+ /* List of DB_MPOOLFILE's. */
+ TAILQ_HEAD(__db_mpoolfileh, __db_mpoolfile) dbmfq;
+
+ /*
+ * The dbenv, nreg and reginfo fields are not thread protected,
+ * as they are initialized during mpool creation, and not modified
+ * again.
+ */
+ DB_ENV *dbenv; /* Enclosing environment. */
+
+ u_int32_t nreg; /* N underlying cache regions. */
+ REGINFO *reginfo; /* Underlying cache regions. */
+};
+
+/*
+ * DB_MPREG --
+ * DB_MPOOL registry of pgin/pgout functions.
+ */
+struct __db_mpreg {
+ LIST_ENTRY(__db_mpreg) q; /* Linked list. */
+
+ int32_t ftype; /* File type. */
+ /* Pgin, pgout routines. */
+ int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+};
+
+/*
+ * NCACHE --
+ * Select a cache based on the file and the page number. Assumes accesses
+ * are uniform across pages, which is probably OK. What we really want to
+ * avoid is anything that puts all pages from any single file in the same
+ * cache, as we expect that file access will be bursty, and to avoid
+ * putting all page number N pages in the same cache as we expect access
+ * to the metapages (page 0) and the root of a btree (page 1) to be much
+ * more frequent than a random data page.
+ */
+#define NCACHE(mp, mf_offset, pgno) \
+ (((pgno) ^ ((mf_offset) >> 3)) % ((MPOOL *)mp)->nreg)
+
+/*
+ * NBUCKET --
+ * We make the assumption that early pages of the file are more likely
+ * to be retrieved than the later pages, which means the top bits will
+ * be more interesting for hashing as they're less likely to collide.
+ * That said, 512 8K pages represents a 4MB file, so only reasonably
+ * large files will have page numbers with any other than the bottom 9
+ * bits set. We XOR in the MPOOL offset of the MPOOLFILE that backs the
+ * page, since that should also be unique for the page. We don't want
+ * to do anything very fancy -- speed is more important to us than using
+ * good hashing.
+ */
+#define NBUCKET(mc, mf_offset, pgno) \
+ (((pgno) ^ ((mf_offset) << 9)) % (mc)->htab_buckets)
+
+/*
+ * MPOOL --
+ * Shared memory pool region.
+ */
+struct __mpool {
+ /*
+ * The memory pool can be broken up into individual pieces/files.
+ * Not what we would have liked, but on Solaris you can allocate
+ * only a little more than 2GB of memory in a contiguous chunk,
+ * and I expect to see more systems with similar issues.
+ *
+ * While this structure is duplicated in each piece of the cache,
+ * the first of these pieces/files describes the entire pool, the
+ * second only describes a piece of the cache.
+ */
+
+ /*
+ * The lsn field and list of underlying MPOOLFILEs are thread protected
+ * by the region lock.
+ */
+ DB_LSN lsn; /* Maximum checkpoint LSN. */
+
+ SH_TAILQ_HEAD(__mpfq) mpfq; /* List of MPOOLFILEs. */
+
+ /*
+ * The nreg, regids and maint_off fields are not thread protected,
+ * as they are initialized during mpool creation, and not modified
+ * again.
+ */
+ u_int32_t nreg; /* Number of underlying REGIONS. */
+ roff_t regids; /* Array of underlying REGION Ids. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ roff_t maint_off; /* Maintenance information offset */
+#endif
+
+ /*
+ * The following structure fields only describe the per-cache portion
+ * of the region.
+ *
+ * The htab and htab_buckets fields are not thread protected as they
+ * are initialized during mpool creation, and not modified again.
+ *
+ * The last_checked and lru_count fields are thread protected by
+ * the region lock.
+ */
+ int htab_buckets; /* Number of hash table entries. */
+ roff_t htab; /* Hash table offset. */
+ u_int32_t last_checked; /* Last bucket checked for free. */
+ u_int32_t lru_count; /* Counter for buffer LRU */
+
+ /*
+ * The stat fields are generally not thread protected, and cannot be
+ * trusted. Note that st_pages is an exception, and is always updated
+ * inside a region lock (although it is sometimes read outside of the
+ * region lock).
+ */
+ DB_MPOOL_STAT stat; /* Per-cache mpool statistics. */
+};
+
+struct __db_mpool_hash {
+ DB_MUTEX hash_mutex; /* Per-bucket mutex. */
+
+ DB_HASHTAB hash_bucket; /* Head of bucket. */
+
+ u_int32_t hash_page_dirty;/* Count of dirty pages. */
+ u_int32_t hash_priority; /* Minimum priority of bucket buffer. */
+};
+
+/*
+ * The base mpool priority is 1/4th of the name space, or just under 2^30.
+ * When the LRU counter wraps, we shift everybody down to a base-relative
+ * value.
+ */
+#define MPOOL_BASE_DECREMENT (UINT32_T_MAX - (UINT32_T_MAX / 4))
+
+/*
+ * Mpool priorities from low to high. Defined in terms of fractions of the
+ * buffers in the pool.
+ */
+#define MPOOL_PRI_VERY_LOW -1 /* Dead duck. Check and set to 0. */
+#define MPOOL_PRI_LOW -2 /* Low. */
+#define MPOOL_PRI_DEFAULT 0 /* No adjustment -- special case.*/
+#define MPOOL_PRI_HIGH 10 /* With the dirty buffers. */
+#define MPOOL_PRI_DIRTY 10 /* Dirty gets a 10% boost. */
+#define MPOOL_PRI_VERY_HIGH 1 /* Add number of buffers in pool. */
+
+/*
+ * MPOOLFILE_IGNORE --
+ * Discard an MPOOLFILE and any buffers it references: update the flags
+ * so we never try to write buffers associated with the file, nor can we
+ * find it when looking for files to join. In addition, clear the ftype
+ * field, there's no reason to post-process pages, they can be discarded
+ * by any thread.
+ *
+ * Expects the MPOOLFILE mutex to be held.
+ */
+#define MPOOLFILE_IGNORE(mfp) { \
+ (mfp)->ftype = 0; \
+ F_SET(mfp, MP_DEADFILE); \
+}
+
+/*
+ * MPOOLFILE --
+ * Shared DB_MPOOLFILE information.
+ */
+struct __mpoolfile {
+ DB_MUTEX mutex;
+
+ /* Protected by MPOOLFILE mutex. */
+ u_int32_t mpf_cnt; /* Ref count: DB_MPOOLFILEs. */
+ u_int32_t block_cnt; /* Ref count: blocks in cache. */
+
+ roff_t path_off; /* File name location. */
+
+ /* Protected by mpool cache 0 region lock. */
+ SH_TAILQ_ENTRY q; /* List of MPOOLFILEs */
+ db_pgno_t last_pgno; /* Last page in the file. */
+ db_pgno_t orig_last_pgno; /* Original last page in the file. */
+
+ /*
+ * None of the following fields are thread protected.
+ *
+ * There are potential races with the ftype field because it's read
+ * without holding a lock. However, it has to be set before adding
+ * any buffers to the cache that depend on it being set, so there
+ * would need to be incorrect operation ordering to have a problem.
+ *
+ * There are potential races with the priority field because it's read
+ * without holding a lock. However, a collision is unlikely and if it
+ * happens is of little consequence.
+ *
+ * We do not protect the statistics in "stat" because of the cost of
+ * the mutex in the get/put routines. There is a chance that a count
+ * will get lost.
+ *
+ * The remaining fields are initialized at open and never subsequently
+ * modified, except for the MP_DEADFILE, which is only set and never
+ * unset. (If there was more than one flag that was subsequently set,
+ * there might be a race, but with a single flag there can't be.)
+ */
+ int32_t ftype; /* File type. */
+
+ int32_t priority; /* Priority when unpinning buffer. */
+
+ DB_MPOOL_FSTAT stat; /* Per-file mpool statistics. */
+
+ int32_t lsn_off; /* Page's LSN offset. */
+ u_int32_t clear_len; /* Bytes to clear on page create. */
+
+ roff_t fileid_off; /* File ID string location. */
+
+ roff_t pgcookie_len; /* Pgin/pgout cookie length. */
+ roff_t pgcookie_off; /* Pgin/pgout cookie location. */
+
+#define MP_CAN_MMAP 0x01 /* If the file can be mmap'd. */
+#define MP_DEADFILE 0x02 /* Dirty pages can simply be trashed. */
+#define MP_DIRECT 0x04 /* No OS buffering. */
+#define MP_EXTENT 0x08 /* Extent file. */
+#define MP_TEMP 0x10 /* Backing file is a temporary. */
+#define MP_UNLINK 0x20 /* Unlink file on last close. */
+ u_int32_t flags;
+};
+
+/*
+ * BH --
+ * Buffer header.
+ */
+struct __bh {
+ DB_MUTEX mutex; /* Buffer thread/process lock. */
+
+ u_int16_t ref; /* Reference count. */
+ u_int16_t ref_sync; /* Sync wait-for reference count. */
+
+#define BH_CALLPGIN 0x001 /* Convert the page before use. */
+#define BH_DIRTY 0x002 /* Page was modified. */
+#define BH_DIRTY_CREATE 0x004 /* Page created, must be written. */
+#define BH_DISCARD 0x008 /* Page is useless. */
+#define BH_LOCKED 0x010 /* Page is locked (I/O in progress). */
+#define BH_TRASH 0x020 /* Page is garbage. */
+ u_int16_t flags;
+
+ u_int32_t priority; /* LRU priority. */
+ SH_TAILQ_ENTRY hq; /* MPOOL hash bucket queue. */
+
+ db_pgno_t pgno; /* Underlying MPOOLFILE page number. */
+ roff_t mf_offset; /* Associated MPOOLFILE offset. */
+
+ /*
+ * !!!
+ * This array must be at least size_t aligned -- the DB access methods
+ * put PAGE and other structures into it, and then access them directly.
+ * (We guarantee size_t alignment to applications in the documentation,
+ * too.)
+ */
+ u_int8_t buf[1]; /* Variable length data. */
+};
+
+#include "dbinc_auto/mp_ext.h"
+#endif /* !_DB_MP_H_ */
diff --git a/storage/bdb/dbinc/mutex.h b/storage/bdb/dbinc/mutex.h
new file mode 100644
index 00000000000..41bb1b4bb59
--- /dev/null
+++ b/storage/bdb/dbinc/mutex.h
@@ -0,0 +1,879 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: mutex.h,v 11.71 2002/09/10 01:36:48 bostic Exp $
+ */
+
+#ifndef _DB_MUTEX_H_
+#define _DB_MUTEX_H_
+
+/*
+ * Some of the Berkeley DB ports require single-threading at various
+ * places in the code. In those cases, these #defines will be set.
+ */
+#define DB_BEGIN_SINGLE_THREAD
+#define DB_END_SINGLE_THREAD
+
+/*********************************************************************
+ * POSIX.1 pthreads interface.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_PTHREADS
+#include <pthread.h>
+
+#define MUTEX_FIELDS \
+ pthread_mutex_t mutex; /* Mutex. */ \
+ pthread_cond_t cond; /* Condition variable. */
+#endif
+
+/*********************************************************************
+ * Solaris lwp threads interface.
+ *
+ * !!!
+ * We use LWP mutexes on Solaris instead of UI or POSIX mutexes (both of
+ * which are available), for two reasons. First, the Solaris C library
+ * includes versions of both the UI and POSIX thread mutex interfaces, but
+ * they are broken in that they don't support inter-process locking, and
+ * there's no way to detect it, e.g., calls to configure the mutexes for
+ * inter-process locking succeed without error. So, we use LWP mutexes so
+ * that we don't fail in fairly undetectable ways because the application
+ * wasn't linked with the appropriate threads library. Second, there were
+ * bugs in SunOS 5.7 (Solaris 7) where if an application loaded the C library
+ * before loading the libthread/libpthread threads libraries (e.g., by using
+ * dlopen to load the DB library), the pwrite64 interface would be translated
+ * into a call to pwrite and DB would drop core.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SOLARIS_LWP
+/*
+ * XXX
+ * Don't change <synch.h> to <sys/lwp.h> -- although lwp.h is listed in the
+ * Solaris manual page as the correct include to use, it causes the Solaris
+ * compiler on SunOS 2.6 to fail.
+ */
+#include <synch.h>
+
+#define MUTEX_FIELDS \
+ lwp_mutex_t mutex; /* Mutex. */ \
+ lwp_cond_t cond; /* Condition variable. */
+#endif
+
+/*********************************************************************
+ * Solaris/Unixware threads interface.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_UI_THREADS
+#include <thread.h>
+#include <synch.h>
+
+#define MUTEX_FIELDS \
+ mutex_t mutex; /* Mutex. */ \
+ cond_t cond; /* Condition variable. */
+#endif
+
+/*********************************************************************
+ * AIX C library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_AIX_CHECK_LOCK
+#include <sys/atomic_op.h>
+typedef int tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(x) (!_check_lock(x, 0, 1))
+#define MUTEX_UNSET(x) _clear_lock(x, 0)
+#endif
+#endif
+
+/*********************************************************************
+ * General C library functions (msemaphore).
+ *
+ * !!!
+ * Check for HPPA as a special case, because it requires unusual alignment,
+ * and doesn't support semaphores in malloc(3) or shmget(2) memory.
+ *
+ * !!!
+ * Do not remove the MSEM_IF_NOWAIT flag. The problem is that if a single
+ * process makes two msem_lock() calls in a row, the second one returns an
+ * error. We depend on the fact that we can lock against ourselves in the
+ * locking subsystem, where we set up a mutex so that we can block ourselves.
+ * Tested on OSF1 v4.0.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
+#define MUTEX_NO_MALLOC_LOCKS
+#define MUTEX_NO_SHMGET_LOCKS
+
+#define MUTEX_ALIGN 16
+#endif
+
+#if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
+#include <sys/mman.h>
+typedef msemaphore tsl_t;
+
+#ifndef MUTEX_ALIGN
+#define MUTEX_ALIGN sizeof(int)
+#endif
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) (msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0)
+#define MUTEX_SET(x) (!msem_lock(x, MSEM_IF_NOWAIT))
+#define MUTEX_UNSET(x) msem_unlock(x, 0)
+#endif
+#endif
+
+/*********************************************************************
+ * Plan 9 library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_PLAN9
+typedef Lock tsl_t;
+
+#define MUTEX_ALIGN sizeof(int)
+
+#define MUTEX_INIT(x) (memset(x, 0, sizeof(Lock)), 0)
+#define MUTEX_SET(x) canlock(x)
+#define MUTEX_UNSET(x) unlock(x)
+#endif
+
+/*********************************************************************
+ * Reliant UNIX C library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN
+#include <ulocks.h>
+typedef spinlock_t tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) (initspin(x, 1), 0)
+#define MUTEX_SET(x) (cspinlock(x) == 0)
+#define MUTEX_UNSET(x) spinunlock(x)
+#endif
+#endif
+
+/*********************************************************************
+ * General C library functions (POSIX 1003.1 sema_XXX).
+ *
+ * !!!
+ * Never selected by autoconfig in this release (semaphore calls are known
+ * to not work in Solaris 5.5).
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SEMA_INIT
+#include <synch.h>
+typedef sema_t tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_DESTROY(x) sema_destroy(x)
+#define MUTEX_INIT(x) (sema_init(x, 1, USYNC_PROCESS, NULL) != 0)
+#define MUTEX_SET(x) (sema_wait(x) == 0)
+#define MUTEX_UNSET(x) sema_post(x)
+#endif
+#endif
+
+/*********************************************************************
+ * SGI C library functions.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SGI_INIT_LOCK
+#include <abi_mutex.h>
+typedef abilock_t tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) (init_lock(x) != 0)
+#define MUTEX_SET(x) (!acquire_lock(x))
+#define MUTEX_UNSET(x) release_lock(x)
+#endif
+#endif
+
+/*********************************************************************
+ * Solaris C library functions.
+ *
+ * !!!
+ * These are undocumented functions, but they're the only ones that work
+ * correctly as far as we know.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY
+#include <sys/machlock.h>
+typedef lock_t tsl_t;
+#define MUTEX_ALIGN sizeof(int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(x) _lock_try(x)
+#define MUTEX_UNSET(x) _lock_clear(x)
+#endif
+#endif
+
+/*********************************************************************
+ * VMS.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_VMS
+#include <sys/mman.h>
+#include <builtins.h>
+typedef unsigned char tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#ifdef __ALPHA
+#define MUTEX_SET(tsl) (!__TESTBITSSI(tsl, 0))
+#else /* __VAX */
+#define MUTEX_SET(tsl) (!(int)_BBSSI(0, tsl))
+#endif
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * VxWorks
+ * Use basic binary semaphores in VxWorks, as we currently do not need
+ * any special features. We do need the ability to single-thread the
+ * entire system, however, because VxWorks doesn't support the open(2)
+ * flag O_EXCL, the mechanism we normally use to single thread access
+ * when we're first looking for a DB environment.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_VXWORKS
+#include "taskLib.h"
+typedef SEM_ID tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_SET(tsl) (semTake((*tsl), WAIT_FOREVER) == OK)
+#define MUTEX_UNSET(tsl) (semGive((*tsl)))
+#define MUTEX_INIT(tsl) \
+ ((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
+#define MUTEX_DESTROY(tsl) semDelete(*tsl)
+#endif
+
+/*
+ * Use the taskLock() mutex to eliminate a race where two tasks are
+ * trying to initialize the global lock at the same time.
+ */
+#undef DB_BEGIN_SINGLE_THREAD
+#define DB_BEGIN_SINGLE_THREAD \
+do { \
+ if (DB_GLOBAL(db_global_init)) \
+ (void)semTake(DB_GLOBAL(db_global_lock), WAIT_FOREVER); \
+ else { \
+ taskLock(); \
+ if (DB_GLOBAL(db_global_init)) { \
+ taskUnlock(); \
+ (void)semTake(DB_GLOBAL(db_global_lock), \
+ WAIT_FOREVER); \
+ continue; \
+ } \
+ DB_GLOBAL(db_global_lock) = \
+ semBCreate(SEM_Q_FIFO, SEM_EMPTY); \
+ if (DB_GLOBAL(db_global_lock) != NULL) \
+ DB_GLOBAL(db_global_init) = 1; \
+ taskUnlock(); \
+ } \
+} while (DB_GLOBAL(db_global_init) == 0)
+#undef DB_END_SINGLE_THREAD
+#define DB_END_SINGLE_THREAD (void)semGive(DB_GLOBAL(db_global_lock))
+#endif
+
+/*********************************************************************
+ * Win16
+ *
+ * Win16 spinlocks are simple because we cannot possibly be preempted.
+ *
+ * !!!
+ * We should simplify this by always returning a no-need-to-lock lock
+ * when we initialize the mutex.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_WIN16
+typedef unsigned int tsl_t;
+#define MUTEX_ALIGN sizeof(unsigned int)
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(tsl) (*(tsl) = 1)
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#endif
+#endif
+
+/*********************************************************************
+ * Win32
+ *********************************************************************/
+#ifdef HAVE_MUTEX_WIN32
+#define MUTEX_FIELDS \
+ LONG tas; \
+ LONG nwaiters; \
+ union { \
+ HANDLE event; /* Windows event HANDLE for wakeups */ \
+ u_int32_t id; /* ID used for shared mutexes */ \
+ } /* anonymous */;
+
+#if defined(LOAD_ACTUAL_MUTEX_CODE)
+#define MUTEX_SET(tsl) (!InterlockedExchange((PLONG)tsl, 1))
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * 68K/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/68K, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("tas %1; \n \
+ seq %0" \
+ : "=dm" (__r), "=m" (*__l) \
+ : "1" (*__l) \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * ALPHA/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
+typedef u_int32_t tsl_t;
+#define MUTEX_ALIGN 4
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/alpha. Should return 0 if could not acquire the lock, 1 if
+ * lock was acquired properly.
+ */
+#ifdef __GNUC__
+static inline int
+MUTEX_SET(tsl_t *tsl) {
+ register tsl_t *__l = tsl;
+ register tsl_t __r;
+ asm volatile(
+ "1: ldl_l %0,%2\n"
+ " blbs %0,2f\n"
+ " or $31,1,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,3f\n"
+ " mb\n"
+ " br 3f\n"
+ "2: xor %0,%0\n"
+ "3:"
+ : "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
+ return __r;
+}
+
+/*
+ * Unset mutex. Judging by Alpha Architecture Handbook, the mb instruction
+ * might be necessary before unlocking.
+ */
+static inline int
+MUTEX_UNSET(tsl_t *tsl) {
+ asm volatile(" mb\n");
+ return *tsl = 0;
+}
+#endif
+
+#ifdef __DECC
+#include <alpha/builtins.h>
+#define MUTEX_SET(tsl) (__LOCK_LONG_RETRY((tsl), 1) != 0)
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#endif
+
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * ARM/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For arm/gcc, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ int __r; \
+ asm volatile("swpb %0, %1, [%2]" \
+ : "=r" (__r) \
+ : "0" (1), "r" (tsl) \
+ : "memory" \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * HPPA/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
+typedef u_int32_t tsl_t;
+#define MUTEX_ALIGN 16
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
+ * The 32-bit word used by that instruction must be 16-byte aligned. We could
+ * use the "aligned" attribute in GCC but that doesn't work for stack variables.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l)); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = -1)
+#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
+#endif
+#endif
+
+/*********************************************************************
+ * IA64/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/ia64, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ long __r; \
+ asm volatile("xchg1 %0=%1,%3" : "=r"(__r), "=m"(*__l) : "1"(*__l), "r"(1));\
+ __r ^ 1; \
+})
+
+/*
+ * Store through a "volatile" pointer so we get a store with "release"
+ * semantics.
+ */
+#define MUTEX_UNSET(tsl) (*(volatile unsigned char *)(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * PowerPC/gcc assembly.
+ *********************************************************************/
+#if defined(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY) || \
+ (HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY)
+typedef u_int32_t tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * The PowerPC does a sort of pseudo-atomic locking. You set up a
+ * 'reservation' on a chunk of memory containing a mutex by loading the
+ * mutex value with LWARX. If the mutex has an 'unlocked' (arbitrary)
+ * value, you then try storing into it with STWCX. If no other process or
+ * thread broke your 'reservation' by modifying the memory containing the
+ * mutex, then the STWCX succeeds; otherwise it fails and you try to get
+ * a reservation again.
+ *
+ * While mutexes are explicitly 4 bytes, a 'reservation' applies to an
+ * entire cache line, normally 32 bytes, aligned naturally. If the mutex
+ * lives near data that gets changed a lot, there's a chance that you'll
+ * see more broken reservations than you might otherwise. The only
+ * situation in which this might be a problem is if one processor is
+ * beating on a variable in the same cache block as the mutex while another
+ * processor tries to acquire the mutex. That's bad news regardless
+ * because of the way it bashes caches, but if you can't guarantee that a
+ * mutex will reside in a relatively quiescent cache line, you might
+ * consider padding the mutex to force it to live in a cache line by
+ * itself. No, you aren't guaranteed that cache lines are 32 bytes. Some
+ * embedded processors use 16-byte cache lines, while some 64-bit
+ * processors use 128-byte cache lines. But assuming a 32-byte cache line
+ * won't get you into trouble for now.
+ *
+ * If mutex locking is a bottleneck, then you can speed it up by adding a
+ * regular LWZ load before the LWARX load, so that you can test for the
+ * common case of a locked mutex without wasting cycles making a reservation.
+ *
+ * 'set' mutexes have the value 1, like on Intel; the returned value from
+ * MUTEX_SET() is 1 if the mutex previously had its low bit clear, 0 otherwise.
+ *
+ * Mutexes on Mac OS X work the same way as the standard PowerPC version, but
+ * the assembler syntax is subtly different -- the standard PowerPC version
+ * assembles but doesn't work correctly. This version makes (unnecessary?)
+ * use of a stupid linker trick: __db_mutex_tas_dummy is never called, but the
+ * ___db_mutex_set label is used as a function name.
+ */
+#ifdef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY
+extern int __db_mutex_set __P((volatile tsl_t *));
+void
+__db_mutex_tas_dummy()
+{
+ __asm__ __volatile__(" \n\
+ .globl ___db_mutex_set \n\
+___db_mutex_set: \n\
+ lwarx r5,0,r3 \n\
+ cmpwi r5,0 \n\
+ bne fail \n\
+ addi r5,r5,1 \n\
+ stwcx. r5,0,r3 \n\
+ beq success \n\
+fail: \n\
+ li r3,0 \n\
+ blr \n\
+success: \n\
+ li r3,1 \n\
+ blr");
+}
+#define MUTEX_SET(tsl) __db_mutex_set(tsl)
+#endif
+#ifdef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY
+#define MUTEX_SET(tsl) ({ \
+ int __one = 1; \
+ int __r; \
+ tsl_t *__l = (tsl); \
+ asm volatile (" \
+0: \
+ lwarx %0,0,%1; \
+ cmpwi %0,0; \
+ bne 1f; \
+ stwcx. %2,0,%1; \
+ bne- 0b; \
+1:" \
+ : "=&r" (__r) \
+ : "r" (__l), "r" (__one)); \
+ !(__r & 1); \
+})
+#endif
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * S/390 32-bit assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
+typedef int tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/S390, 0 is clear, 1 is set.
+ */
+static inline int
+MUTEX_SET(tsl_t *tsl) { \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile( \
+ " la 1,%1\n" \
+ " lhi 0,1\n" \
+ " l %0,%1\n" \
+ "0: cs %0,0,0(1)\n" \
+ " jl 0b" \
+ : "=&d" (__r), "+m" (*__l) \
+ : : "0", "1", "cc"); \
+ return !__r; \
+}
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * SCO/cc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * UnixWare has threads in libthread, but OpenServer doesn't (yet).
+ *
+ * For cc/x86, 0 is clear, 1 is set.
+ */
+
+#if defined(__USLC__)
+asm int
+_tsl_set(void *tsl)
+{
+%mem tsl
+ movl tsl, %ecx
+ movl $1, %eax
+ lock
+ xchgb (%ecx),%al
+ xorl $1,%eax
+}
+#endif
+
+#define MUTEX_SET(tsl) _tsl_set(tsl)
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * Sparc/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ *
+ * The ldstub instruction takes the location specified by its first argument
+ * (a register containing a memory address) and loads its contents into its
+ * second argument (a register) and atomically sets the contents of the location
+ * specified by its first argument to a byte of 1s. (The value in the second
+ * argument is never read, but only overwritten.)
+ *
+ * The stbar is needed for v8, and is implemented as membar #sync on v9,
+ * so is functional there as well. For v7, stbar may generate an illegal
+ * instruction and we have no way to tell what we're running on. Some
+ * operating systems notice and skip this instruction in the fault handler.
+ *
+ * For gcc/sparc, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ register tsl_t __r; \
+ __asm__ volatile \
+ ("ldstub [%1],%0; stbar" \
+ : "=r"( __r) : "r" (__l)); \
+ !__r; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * UTS/cc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
+typedef int tsl_t;
+
+#define MUTEX_ALIGN sizeof(int)
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#define MUTEX_INIT(x) 0
+#define MUTEX_SET(x) (!uts_lock(x, 1))
+#define MUTEX_UNSET(x) (*(x) = 0)
+#endif
+#endif
+
+/*********************************************************************
+ * x86/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/x86, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile("movl $1,%%eax; lock; xchgb %1,%%al; xorl $1,%%eax"\
+ : "=&a" (__r), "=m" (*__l) \
+ : "1" (*__l) \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*
+ * Mutex alignment defaults to one byte.
+ *
+ * !!!
+ * Various systems require different alignments for mutexes (the worst we've
+ * seen so far is 16-bytes on some HP architectures). Malloc(3) is assumed
+ * to return reasonable alignment, all other mutex users must ensure proper
+ * alignment locally.
+ */
+#ifndef MUTEX_ALIGN
+#define MUTEX_ALIGN 1
+#endif
+
+/*
+ * Mutex destruction defaults to a no-op.
+ */
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+#ifndef MUTEX_DESTROY
+#define MUTEX_DESTROY(x)
+#endif
+#endif
+
+/*
+ * !!!
+ * These defines are separated into the u_int8_t flags stored in the
+ * mutex below, and the 32 bit flags passed to __db_mutex_setup.
+ * But they must co-exist and not overlap. Flags to __db_mutex_setup are:
+ *
+ * MUTEX_ALLOC - Use when the mutex to initialize needs to be allocated.
+ * The 'ptr' arg to __db_mutex_setup should be a DB_MUTEX ** whenever
+ * you use this flag. If this flag is not set, the 'ptr' arg is
+ * a DB_MUTEX *.
+ * MUTEX_NO_RECORD - Explicitly do not record the mutex in the region.
+ * Otherwise the mutex will be recorded by default. If you set
+ * this you need to understand why you don't need it recorded. The
+ * *only* ones not recorded are those that are part of region structures
+ * that only get destroyed when the regions are destroyed.
+ * MUTEX_NO_RLOCK - Explicitly do not lock the given region otherwise
+ * the region will be locked by default.
+ * MUTEX_SELF_BLOCK - Set if self blocking mutex.
+ * MUTEX_THREAD - Set if mutex is a thread-only mutex.
+ */
+#define MUTEX_IGNORE 0x001 /* Ignore, no lock required. */
+#define MUTEX_INITED 0x002 /* Mutex is successfully initialized */
+#define MUTEX_MPOOL 0x004 /* Allocated from mpool. */
+#define MUTEX_SELF_BLOCK 0x008 /* Must block self. */
+/* Flags only, may be larger than 0xff. */
+#define MUTEX_ALLOC 0x00000100 /* Allocate and init a mutex */
+#define MUTEX_NO_RECORD 0x00000200 /* Do not record lock */
+#define MUTEX_NO_RLOCK 0x00000400 /* Do not acquire region lock */
+#define MUTEX_THREAD 0x00000800 /* Thread-only mutex. */
+
+/* Mutex. */
+struct __mutex_t {
+#ifdef HAVE_MUTEX_THREADS
+#ifdef MUTEX_FIELDS
+ MUTEX_FIELDS
+#else
+ tsl_t tas; /* Test and set. */
+#endif
+ u_int32_t spins; /* Spins before block. */
+ u_int32_t locked; /* !0 if locked. */
+#else
+ u_int32_t off; /* Byte offset to lock. */
+ u_int32_t pid; /* Lock holder: 0 or process pid. */
+#endif
+ u_int32_t mutex_set_wait; /* Granted after wait. */
+ u_int32_t mutex_set_nowait; /* Granted without waiting. */
+ u_int32_t mutex_set_spin; /* Granted without spinning. */
+ u_int32_t mutex_set_spins; /* Total number of spins. */
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ roff_t reg_off; /* Shared lock info offset. */
+#endif
+
+ u_int8_t flags; /* MUTEX_XXX */
+};
+
+/* Redirect calls to the correct functions. */
+#ifdef HAVE_MUTEX_THREADS
+#if defined(HAVE_MUTEX_PTHREADS) || \
+ defined(HAVE_MUTEX_SOLARIS_LWP) || \
+ defined(HAVE_MUTEX_UI_THREADS)
+#define __db_mutex_init_int(a, b, c, d) __db_pthread_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b) __db_pthread_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_pthread_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_pthread_mutex_destroy(a)
+#elif defined(HAVE_MUTEX_WIN32)
+#define __db_mutex_init_int(a, b, c, d) __db_win32_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b) __db_win32_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_win32_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_win32_mutex_destroy(a)
+#else
+#define __db_mutex_init_int(a, b, c, d) __db_tas_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b) __db_tas_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_tas_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_tas_mutex_destroy(a)
+#endif
+#else
+#define __db_mutex_init_int(a, b, c, d) __db_fcntl_mutex_init(a, b, c)
+#define __db_mutex_lock(a, b) __db_fcntl_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_fcntl_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_fcntl_mutex_destroy(a)
+#endif
+
+/* Redirect system resource calls to correct functions */
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+#define __db_maintinit(a, b, c) __db_shreg_maintinit(a, b, c)
+#define __db_shlocks_clear(a, b, c) __db_shreg_locks_clear(a, b, c)
+#define __db_shlocks_destroy(a, b) __db_shreg_locks_destroy(a, b)
+#define __db_mutex_init(a, b, c, d, e, f) \
+ __db_shreg_mutex_init(a, b, c, d, e, f)
+#else
+#define __db_maintinit(a, b, c)
+#define __db_shlocks_clear(a, b, c)
+#define __db_shlocks_destroy(a, b)
+#define __db_mutex_init(a, b, c, d, e, f) __db_mutex_init_int(a, b, c, d)
+#endif
+
+/*
+ * Lock/unlock a mutex. If the mutex was marked as uninteresting, the thread
+ * of control can proceed without it.
+ *
+ * If the lock is for threads-only, then it was optionally not allocated and
+ * file handles aren't necessary, as threaded applications aren't supported by
+ * fcntl(2) locking.
+ */
+#ifdef DIAGNOSTIC
+ /*
+ * XXX
+ * We want to switch threads as often as possible. Yield every time
+ * we get a mutex to ensure contention.
+ */
+#define MUTEX_LOCK(dbenv, mp) \
+ if (!F_ISSET((mp), MUTEX_IGNORE)) \
+ DB_ASSERT(__db_mutex_lock(dbenv, mp) == 0); \
+ if (F_ISSET(dbenv, DB_ENV_YIELDCPU)) \
+ __os_yield(NULL, 1);
+#else
+#define MUTEX_LOCK(dbenv, mp) \
+ if (!F_ISSET((mp), MUTEX_IGNORE)) \
+ (void)__db_mutex_lock(dbenv, mp);
+#endif
+#define MUTEX_UNLOCK(dbenv, mp) \
+ if (!F_ISSET((mp), MUTEX_IGNORE)) \
+ (void)__db_mutex_unlock(dbenv, mp);
+#define MUTEX_THREAD_LOCK(dbenv, mp) \
+ if (mp != NULL) \
+ MUTEX_LOCK(dbenv, mp)
+#define MUTEX_THREAD_UNLOCK(dbenv, mp) \
+ if (mp != NULL) \
+ MUTEX_UNLOCK(dbenv, mp)
+
+/*
+ * We use a single file descriptor for fcntl(2) locking, and (generally) the
+ * object's offset in a shared region as the byte that we're locking. So,
+ * there's a (remote) possibility that two objects might have the same offsets
+ * such that the locks could conflict, resulting in deadlock. To avoid this
+ * possibility, we offset the region offset by a small integer value, using a
+ * different offset for each subsystem's locks. Since all region objects are
+ * suitably aligned, the offset guarantees that we don't collide with another
+ * region's objects.
+ */
+#define DB_FCNTL_OFF_GEN 0 /* Everything else. */
+#define DB_FCNTL_OFF_LOCK 1 /* Lock subsystem offset. */
+#define DB_FCNTL_OFF_MPOOL 2 /* Mpool subsystem offset. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+/*
+ * When the underlying mutexes require library (most likely heap) or system
+ * resources, we have to clean up when we discard mutexes (for the library
+ * resources) and both when discarding mutexes and after application failure
+ * (for the mutexes requiring system resources). This violates the rule that
+ * we never look at a shared region after application failure, but we've no
+ * other choice. In those cases, the #define HAVE_MUTEX_SYSTEM_RESOURCES is
+ * set.
+ *
+ * To support mutex release after application failure, allocate thread-handle
+ * mutexes in shared memory instead of in the heap. The number of slots we
+ * allocate for this purpose isn't configurable, but this tends to be an issue
+ * only on embedded systems where we don't expect large server applications.
+ */
+#define DB_MAX_HANDLES 100 /* Mutex slots for handles. */
+#endif
+#endif /* !_DB_MUTEX_H_ */
diff --git a/storage/bdb/dbinc/os.h b/storage/bdb/dbinc/os.h
new file mode 100644
index 00000000000..01ca0ac470d
--- /dev/null
+++ b/storage/bdb/dbinc/os.h
@@ -0,0 +1,54 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: os.h,v 11.14 2002/03/27 04:34:55 bostic Exp $
+ */
+
+#ifndef _DB_OS_H_
+#define _DB_OS_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* DB filehandle. */
+struct __fh_t {
+#if defined(DB_WIN32)
+ HANDLE handle; /* Windows/32 file handle. */
+#endif
+ int fd; /* POSIX file descriptor. */
+ char *name; /* File name. */
+
+ u_int32_t log_size; /* XXX: Log file size. */
+ u_int32_t pagesize; /* XXX: Page size. */
+
+#define DB_FH_NOSYNC 0x01 /* Handle doesn't need to be sync'd. */
+#define DB_FH_UNLINK 0x02 /* Unlink on close */
+#define DB_FH_VALID 0x04 /* Handle is valid. */
+ u_int8_t flags;
+};
+
+/*
+ * We group certain seek/write calls into a single function so that we
+ * can use pread(2)/pwrite(2) where they're available.
+ */
+#define DB_IO_READ 1
+#define DB_IO_WRITE 2
+typedef struct __io_t {
+ DB_FH *fhp; /* I/O file handle. */
+ DB_MUTEX *mutexp; /* Mutex to lock. */
+ size_t pagesize; /* Page size. */
+ db_pgno_t pgno; /* Page number. */
+ u_int8_t *buf; /* Buffer. */
+ size_t bytes; /* Bytes read/written. */
+} DB_IO;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#include "dbinc_auto/os_ext.h"
+#endif /* !_DB_OS_H_ */
diff --git a/storage/bdb/dbinc/qam.h b/storage/bdb/dbinc/qam.h
new file mode 100644
index 00000000000..0306ed07d2a
--- /dev/null
+++ b/storage/bdb/dbinc/qam.h
@@ -0,0 +1,156 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: qam.h,v 11.38 2002/08/06 06:11:21 bostic Exp $
+ */
+
+#ifndef _DB_QAM_H_
+#define _DB_QAM_H_
+
+/*
+ * QAM data elements: a status field and the data.
+ */
+typedef struct _qamdata {
+ u_int8_t flags; /* 00: delete bit. */
+#define QAM_VALID 0x01
+#define QAM_SET 0x02
+ u_int8_t data[1]; /* Record. */
+} QAMDATA;
+
+struct __queue; typedef struct __queue QUEUE;
+struct __qcursor; typedef struct __qcursor QUEUE_CURSOR;
+
+struct __qcursor {
+ /* struct __dbc_internal */
+ __DBC_INTERNAL
+
+ /* Queue private part */
+
+ /* Per-thread information: queue private. */
+ db_recno_t recno; /* Current record number. */
+
+ u_int32_t flags;
+};
+
+typedef struct __mpfarray {
+ u_int32_t n_extent; /* Number of extents in table. */
+ u_int32_t low_extent; /* First extent open. */
+ u_int32_t hi_extent; /* Last extent open. */
+ struct __qmpf {
+ int pinref;
+ DB_MPOOLFILE *mpf;
+ } *mpfarray; /* Array of open extents. */
+} MPFARRAY;
+
+/*
+ * The in-memory, per-tree queue data structure.
+ */
+struct __queue {
+ db_pgno_t q_meta; /* Database meta-data page. */
+ db_pgno_t q_root; /* Database root page. */
+
+ int re_pad; /* Fixed-length padding byte. */
+ u_int32_t re_len; /* Length for fixed-length records. */
+ u_int32_t rec_page; /* records per page */
+ u_int32_t page_ext; /* Pages per extent */
+ MPFARRAY array1, array2; /* File arrays. */
+
+ /* Extent file configuration: */
+ DBT pgcookie; /* Initialized pgcookie. */
+ DB_PGINFO pginfo; /* Initialized pginfo struct. */
+
+ char *path; /* Space allocated to file pathname. */
+ char *name; /* The name of the file. */
+ char *dir; /* The dir of the file. */
+ int mode; /* Mode to open extents. */
+};
+
+/* Format for queue extent names. */
+#define QUEUE_EXTENT "%s%c__dbq.%s.%d"
+
+typedef struct __qam_filelist {
+ DB_MPOOLFILE *mpf;
+ u_int32_t id;
+} QUEUE_FILELIST;
+
+/*
+ * Calculate the page number of a recno
+ *
+ * Number of records per page =
+ * Divide the available space on the page by the record len + header.
+ *
+ * Page number for record =
+ * divide the physical record number by the records per page
+ * add the root page number
+ * For now the root page will always be 1, but we might want to change
+ * in the future (e.g. multiple fixed len queues per file).
+ *
+ * Index of record on page =
+ * physical record number, less the logical pno times records/page
+ */
+#define CALC_QAM_RECNO_PER_PAGE(dbp) \
+ (((dbp)->pgsize - QPAGE_SZ(dbp)) / \
+ ALIGN(((QUEUE *)(dbp)->q_internal)->re_len + \
+ sizeof(QAMDATA) - SSZA(QAMDATA, data), sizeof(u_int32_t)))
+
+#define QAM_RECNO_PER_PAGE(dbp) (((QUEUE*)(dbp)->q_internal)->rec_page)
+
+#define QAM_RECNO_PAGE(dbp, recno) \
+ (((QUEUE *)(dbp)->q_internal)->q_root \
+ + (((recno) - 1) / QAM_RECNO_PER_PAGE(dbp)))
+
+#define QAM_RECNO_INDEX(dbp, pgno, recno) \
+ (((recno) - 1) - (QAM_RECNO_PER_PAGE(dbp) \
+ * (pgno - ((QUEUE *)(dbp)->q_internal)->q_root)))
+
+#define QAM_GET_RECORD(dbp, page, index) \
+ ((QAMDATA *)((u_int8_t *)(page) + \
+ QPAGE_SZ(dbp) + (ALIGN(sizeof(QAMDATA) - SSZA(QAMDATA, data) + \
+ ((QUEUE *)(dbp)->q_internal)->re_len, sizeof(u_int32_t)) * index)))
+
+#define QAM_AFTER_CURRENT(meta, recno) \
+ ((recno) > (meta)->cur_recno && \
+ ((meta)->first_recno <= (meta)->cur_recno || (recno) < (meta)->first_recno))
+
+#define QAM_BEFORE_FIRST(meta, recno) \
+ ((recno) < (meta)->first_recno && \
+ ((meta->first_recno <= (meta)->cur_recno || (recno) > (meta)->cur_recno)))
+
+#define QAM_NOT_VALID(meta, recno) \
+ (recno == RECNO_OOB || \
+ QAM_BEFORE_FIRST(meta, recno) || QAM_AFTER_CURRENT(meta, recno))
+
+/*
+ * Log opcodes for the mvptr routine.
+ */
+#define QAM_SETFIRST 0x01
+#define QAM_SETCUR 0x02
+#define QAM_TRUNCATE 0x04
+
+/*
+ * Parameter to __qam_position.
+ */
+typedef enum {
+ QAM_READ,
+ QAM_WRITE,
+ QAM_CONSUME
+} qam_position_mode;
+
+typedef enum {
+ QAM_PROBE_GET,
+ QAM_PROBE_PUT,
+ QAM_PROBE_MPF
+} qam_probe_mode;
+
+#define __qam_fget(dbp, pgnoaddr, flags, addrp) \
+ __qam_fprobe(dbp, *pgnoaddr, addrp, QAM_PROBE_GET, flags)
+
+#define __qam_fput(dbp, pageno, addrp, flags) \
+ __qam_fprobe(dbp, pageno, addrp, QAM_PROBE_PUT, flags)
+
+#include "dbinc_auto/qam_auto.h"
+#include "dbinc_auto/qam_ext.h"
+#endif /* !_DB_QAM_H_ */
diff --git a/storage/bdb/dbinc/queue.h b/storage/bdb/dbinc/queue.h
new file mode 100644
index 00000000000..8d4a771add6
--- /dev/null
+++ b/storage/bdb/dbinc/queue.h
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+/*
+ * XXX
+ * We #undef the queue macros because there are incompatible versions of this
+ * file and these macros on various systems. What makes the problem worse is
+ * they are included and/or defined by system include files which we may have
+ * already loaded into Berkeley DB before getting here. For example, FreeBSD's
+ * <rpc/rpc.h> includes its system <sys/queue.h>, and VxWorks UnixLib.h defines
+ * several of the LIST_XXX macros. Make sure we use ours.
+ */
+#undef LIST_HEAD
+#undef LIST_ENTRY
+#undef LIST_FIRST
+#undef LIST_NEXT
+#undef LIST_INIT
+#undef LIST_INSERT_AFTER
+#undef LIST_INSERT_BEFORE
+#undef LIST_INSERT_HEAD
+#undef LIST_REMOVE
+#undef TAILQ_HEAD
+#undef TAILQ_ENTRY
+#undef TAILQ_FIRST
+#undef TAILQ_NEXT
+#undef TAILQ_INIT
+#undef TAILQ_INSERT_HEAD
+#undef TAILQ_INSERT_TAIL
+#undef TAILQ_INSERT_AFTER
+#undef TAILQ_INSERT_BEFORE
+#undef TAILQ_REMOVE
+#undef CIRCLEQ_HEAD
+#undef CIRCLEQ_ENTRY
+#undef CIRCLEQ_FIRST
+#undef CIRCLEQ_LAST
+#undef CIRCLEQ_NEXT
+#undef CIRCLEQ_PREV
+#undef CIRCLEQ_INIT
+#undef CIRCLEQ_INSERT_AFTER
+#undef CIRCLEQ_INSERT_BEFORE
+#undef CIRCLEQ_INSERT_HEAD
+#undef CIRCLEQ_INSERT_TAIL
+#undef CIRCLEQ_REMOVE
+
+/*
+ * This file defines three types of data structures: lists, tail queues,
+ * and circular queues.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may only be traversed in the forward direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+/*
+ * List functions.
+ */
+#define LIST_INIT(head) { \
+ (head)->lh_first = NULL; \
+}
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (0)
+
+#define LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+} while (0)
+
+/*
+ * This macro is used to fixup the queue after moving the head.
+ */
+#define TAILQ_REINSERT_HEAD(head, elm, field) do { \
+ DB_ASSERT((head)->tqh_first == (elm)); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = (void *)(head); \
+ (head)->cqh_last = (void *)(head); \
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == (void *)(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == (void *)(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = (void *)(head); \
+ if ((head)->cqh_last == (void *)(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = (void *)(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == (void *)(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (0)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == (void *)(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == (void *)(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+} while (0)
+
+#if defined(__cplusplus)
+}
+#endif
diff --git a/storage/bdb/dbinc/region.h b/storage/bdb/dbinc/region.h
new file mode 100644
index 00000000000..9ee6c81062f
--- /dev/null
+++ b/storage/bdb/dbinc/region.h
@@ -0,0 +1,304 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: region.h,v 11.33 2002/08/06 06:11:22 bostic Exp $
+ */
+
+#ifndef _DB_REGION_H_
+#define _DB_REGION_H_
+
+/*
+ * The DB environment consists of some number of "regions", which are described
+ * by the following four structures:
+ *
+ * REGENV -- shared information about the environment
+ * REGENV_REF -- file describing system memory version of REGENV
+ * REGION -- shared information about a single region
+ * REGINFO -- per-process information about a REGION
+ *
+ * There are three types of memory that hold regions:
+ * per-process heap (malloc)
+ * file mapped into memory (mmap, MapViewOfFile)
+ * system memory (shmget, CreateFileMapping)
+ *
+ * If the regions are private to a process, they're in malloc. If they're
+ * public, they're in file mapped memory, or, optionally, in system memory.
+ * Regions in the filesystem are named "__db.001", "__db.002" and so on. If
+ * we're not using a private environment allocated using malloc(3), the file
+ * "__db.001" will always exist, as we use it to synchronize on the regions,
+ * whether they exist in file mapped memory or system memory.
+ *
+ * The file "__db.001" contains a REGENV structure and a linked list of some
+ * number of REGION structures. Each of the REGION structures describes and
+ * locks one of the underlying shared regions used by DB.
+ *
+ * __db.001
+ * +---------+
+ * |REGENV |
+ * +---------+ +----------+
+ * |REGION |-> | __db.002 |
+ * | | +----------+
+ * +---------+ +----------+
+ * |REGION |-> | __db.003 |
+ * | | +----------+
+ * +---------+ +----------+
+ * |REGION |-> | __db.004 |
+ * | | +----------+
+ * +---------+
+ *
+ * The only tricky part about manipulating the regions is correctly creating
+ * or joining the REGENV file, i.e., __db.001. We have to be absolutely sure
+ * that only one process creates it, and that everyone else joins it without
+ * seeing inconsistent data. Once that region is created, we can use normal
+ * shared locking procedures to do mutual exclusion for all other regions.
+ *
+ * One of the REGION structures in the main environment region describes the
+ * environment region itself.
+ *
+ * To lock a region, locate the REGION structure that describes it and acquire
+ * the region's mutex. There is one exception to this rule -- the lock for the
+ * environment region itself is in the REGENV structure, and not in the REGION
+ * that describes the environment region. That's so that we can acquire a lock
+ * without walking linked lists that could potentially change underneath us.
+ * The REGION will not be moved or removed during the life of the region, and
+ * so long-lived references to it can be held by the process.
+ *
+ * All requests to create or join a region return a REGINFO structure, which
+ * is held by the caller and used to open and subsequently close the reference
+ * to the region. The REGINFO structure contains the per-process information
+ * that we need to access the region.
+ *
+ * The one remaining complication. If the regions (including the environment
+ * region) live in system memory, and the system memory isn't "named" somehow
+ * in the filesystem name space, we need some way of finding it. Do this by
+ * writing the REGENV_REF structure into the "__db.001" file. When we find
+ * a __db.001 file that is too small to be a real, on-disk environment, we use
+ * the information it contains to redirect to the real "__db.001" file/memory.
+ * This currently only happens when the REGENV file is in shared system memory.
+ *
+ * Although DB does not currently grow regions when they run out of memory, it
+ * would be possible to do so. To grow a region, allocate a new region of the
+ * appropriate size, then copy the old region over it and insert the additional
+ * space into the already existing shalloc arena. Callers may have to fix up
+ * local references, but that should be easy to do. This failed in historic
+ * versions of DB because the region lock lived in the mapped memory, and when
+ * it was unmapped and remapped (or copied), threads could lose track of it.
+ * Once we moved that lock into a region that is never unmapped, growing should
+ * work. That all said, current versions of DB don't implement region grow
+ * because some systems don't support mutex copying, e.g., from OSF1 V4.0:
+ *
+ * The address of an msemaphore structure may be significant. If the
+ * msemaphore structure contains any value copied from an msemaphore
+ * structure at a different address, the result is undefined.
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DB_REGION_FMT "__db.%03d" /* Region file name format. */
+#define DB_REGION_NAME_NUM 5 /* First digit offset in file names. */
+#define DB_REGION_NAME_LENGTH 8 /* Length of file names. */
+
+#define DB_REGION_ENV "__db.001" /* Primary environment name. */
+
+#define INVALID_REGION_ID 0 /* Out-of-band region ID. */
+#define REGION_ID_ENV 1 /* Primary environment ID. */
+
+typedef enum {
+ INVALID_REGION_TYPE=0, /* Region type. */
+ REGION_TYPE_ENV,
+ REGION_TYPE_LOCK,
+ REGION_TYPE_LOG,
+ REGION_TYPE_MPOOL,
+ REGION_TYPE_MUTEX,
+ REGION_TYPE_TXN } reg_type;
+
+#define INVALID_REGION_SEGID -1 /* Segment IDs are either shmget(2) or
+ * Win16 segment identifiers. They are
+ * both stored in a "long", and we need
+ * an out-of-band value.
+ */
+/*
+ * Nothing can live at region offset 0, because, in all cases, that's where
+ * we store *something*. Lots of code needs an out-of-band value for region
+ * offsets, so we use 0.
+ */
+#define INVALID_ROFF 0
+
+/* Reference describing system memory version of REGENV. */
+typedef struct __db_reg_env_ref {
+ roff_t size; /* Region size. */
+ long segid; /* UNIX shmget ID, VxWorks ID. */
+} REGENV_REF;
+
+/* Per-environment region information. */
+typedef struct __db_reg_env {
+ /*
+ * !!!
+ * The mutex must be the first entry in the structure to guarantee
+ * correct alignment.
+ */
+ DB_MUTEX mutex; /* Environment mutex. */
+
+ /*
+ * !!!
+ * Note, the magic and panic fields are NOT protected by any mutex,
+ * and for this reason cannot be anything more complicated than a
+ * zero/non-zero value.
+ *
+ * !!!
+ * The valid region magic number must appear at the same byte offset
+ * in both the environment and each shared region, as Windows/95 uses
+ * it to determine if the memory has been zeroed since it was last used.
+ */
+ u_int32_t magic; /* Valid region magic number. */
+
+ int envpanic; /* Environment is dead. */
+
+ int majver; /* Major DB version number. */
+ int minver; /* Minor DB version number. */
+ int patch; /* Patch DB version number. */
+
+ u_int32_t init_flags; /* Flags the env was initialized with.*/
+ roff_t cipher_off; /* Offset of cipher area */
+
+ /* List of regions. */
+ SH_LIST_HEAD(__db_regionh) regionq;
+
+ u_int32_t refcnt; /* References to the environment. */
+
+ roff_t rep_off; /* Offset of the replication area. */
+
+ size_t pad; /* Guarantee that following memory is
+ * size_t aligned. This is necessary
+ * because we're going to store the
+ * allocation region information there.
+ */
+} REGENV;
+
+/* Per-region shared region information. */
+typedef struct __db_region {
+ /*
+ * !!!
+ * The mutex must be the first entry in the structure to guarantee
+ * correct alignment.
+ */
+ DB_MUTEX mutex; /* Region mutex. */
+
+ /*
+ * !!!
+ * The valid region magic number must appear at the same byte offset
+ * in both the environment and each shared region, as Windows/95 uses
+ * it to determine if the memory has been zeroed since it was last used.
+ */
+ u_int32_t magic;
+
+ SH_LIST_ENTRY q; /* Linked list of REGIONs. */
+
+ reg_type type; /* Region type. */
+ u_int32_t id; /* Region id. */
+
+ roff_t size; /* Region size in bytes. */
+
+ roff_t primary; /* Primary data structure offset. */
+
+ long segid; /* UNIX shmget(2), Win16 segment ID. */
+} REGION;
+
+/*
+ * Per-process/per-attachment information about a single region.
+ */
+struct __db_reginfo_t { /* __db_r_attach IN parameters. */
+ reg_type type; /* Region type. */
+ u_int32_t id; /* Region id. */
+ int mode; /* File creation mode. */
+
+ /* __db_r_attach OUT parameters. */
+ REGION *rp; /* Shared region. */
+
+ char *name; /* Region file name. */
+
+ void *addr; /* Region allocation address. */
+ void *primary; /* Primary data structure address. */
+
+ void *wnt_handle; /* Win/NT HANDLE. */
+
+#define REGION_CREATE 0x01 /* Caller created region. */
+#define REGION_CREATE_OK 0x02 /* Caller willing to create region. */
+#define REGION_JOIN_OK 0x04 /* Caller is looking for a match. */
+ u_int32_t flags;
+};
+
+/*
+ * Mutex maintenance information each subsystem region must keep track
+ * of to manage resources adequately.
+ */
+typedef struct __db_regmaint_stat_t {
+ u_int32_t st_hint_hit;
+ u_int32_t st_hint_miss;
+ u_int32_t st_records;
+ u_int32_t st_clears;
+ u_int32_t st_destroys;
+ u_int32_t st_max_locks;
+} REGMAINT_STAT;
+
+typedef struct __db_regmaint_t {
+ u_int32_t reglocks; /* Maximum # of mutexes we track. */
+ u_int32_t regmutex_hint; /* Hint for next slot */
+ REGMAINT_STAT stat; /* Stats */
+ roff_t regmutexes[1]; /* Region mutexes in use. */
+} REGMAINT;
+
+/*
+ * R_ADDR Return a per-process address for a shared region offset.
+ * R_OFFSET Return a shared region offset for a per-process address.
+ *
+ * !!!
+ * R_OFFSET should really be returning a ptrdiff_t, but that's not yet
+ * portable. We use u_int32_t, which restricts regions to 4Gb in size.
+ */
+#define R_ADDR(base, offset) \
+ ((void *)((u_int8_t *)((base)->addr) + offset))
+#define R_OFFSET(base, p) \
+ ((u_int32_t)((u_int8_t *)(p) - (u_int8_t *)(base)->addr))
+
+/*
+ * R_LOCK Lock/unlock a region.
+ * R_UNLOCK
+ */
+#define R_LOCK(dbenv, reginfo) \
+ MUTEX_LOCK(dbenv, &(reginfo)->rp->mutex)
+#define R_UNLOCK(dbenv, reginfo) \
+ MUTEX_UNLOCK(dbenv, &(reginfo)->rp->mutex)
+
+/* PANIC_CHECK: Check to see if the DB environment is dead. */
+#define PANIC_CHECK(dbenv) \
+ if (!F_ISSET((dbenv), DB_ENV_NOPANIC) && \
+ (dbenv)->reginfo != NULL && ((REGENV *) \
+ ((REGINFO *)(dbenv)->reginfo)->primary)->envpanic != 0) \
+ return (__db_panic_msg(dbenv));
+
+#define PANIC_SET(dbenv, onoff) \
+ ((REGENV *)((REGINFO *)(dbenv)->reginfo)->primary)->envpanic = (onoff);
+
+/*
+ * All regions are created on 8K boundaries out of sheer paranoia, so we
+ * don't make some underlying VM unhappy. Make sure we don't overflow or
+ * underflow.
+ */
+#define OS_VMPAGESIZE (8 * 1024)
+#define OS_VMROUNDOFF(i) { \
+ if ((i) < \
+ (UINT32_T_MAX - OS_VMPAGESIZE) + 1 || (i) < OS_VMPAGESIZE) \
+ (i) += OS_VMPAGESIZE - 1; \
+ (i) -= (i) % OS_VMPAGESIZE; \
+}
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_REGION_H_ */
diff --git a/storage/bdb/dbinc/rep.h b/storage/bdb/dbinc/rep.h
new file mode 100644
index 00000000000..1e315494c87
--- /dev/null
+++ b/storage/bdb/dbinc/rep.h
@@ -0,0 +1,184 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#ifndef _REP_H_
+#define _REP_H_
+
+#define REP_ALIVE 1 /* I am alive message. */
+#define REP_ALIVE_REQ 2 /* Request for alive messages. */
+#define REP_ALL_REQ 3 /* Request all log records greater than LSN. */
+#define REP_ELECT 4 /* Indicates that all listeners should */
+ /* begin master election */
+#define REP_FILE 6 /* Page of a database file. */
+#define REP_FILE_REQ 7 /* Request for a database file. */
+#define REP_LOG 8 /* Log record. */
+#define REP_LOG_MORE 9 /* There are more log records to request. */
+#define REP_LOG_REQ 10 /* Request for a log record. */
+#define REP_MASTER_REQ 11 /* Who is the master */
+#define REP_NEWCLIENT 12 /* Announces the presence of a new client. */
+#define REP_NEWFILE 13 /* Announce a log file change. */
+#define REP_NEWMASTER 14 /* Announces who the master is. */
+#define REP_NEWSITE 15 /* Announces that a site has heard from a new
+ * site; like NEWCLIENT, but indirect. A
+ * NEWCLIENT message comes directly from the new
+ * client while a NEWSITE comes indirectly from
+ * someone who heard about a NEWSITE.
+ */
+#define REP_PAGE 16 /* Database page. */
+#define REP_PAGE_REQ 17 /* Request for a database page. */
+#define REP_PLIST 18 /* Database page list. */
+#define REP_PLIST_REQ 19 /* Request for a page list. */
+#define REP_VERIFY 20 /* A log record for verification. */
+#define REP_VERIFY_FAIL 21 /* The client is outdated. */
+#define REP_VERIFY_REQ 22 /* Request for a log record to verify. */
+#define REP_VOTE1 23 /* Send out your information for an election. */
+#define REP_VOTE2 24 /* Send a "you are master" vote. */
+
+/* Used to consistently designate which messages ought to be received where. */
+#define MASTER_ONLY(dbenv) \
+ if (!F_ISSET(dbenv, DB_ENV_REP_MASTER)) return (EINVAL)
+
+#define CLIENT_ONLY(dbenv) \
+ if (!F_ISSET(dbenv, DB_ENV_REP_CLIENT)) return (EINVAL)
+
+#define ANYSITE(dbenv)
+
+/* Shared replication structure. */
+
+typedef struct __rep {
+ /*
+ * Due to alignment constraints on some architectures (e.g. HP-UX),
+ * DB_MUTEXes must be the first element of shalloced structures,
+ * and as a corollary there can be only one per structure. Thus,
+ * db_mutex_off points to a mutex in a separately-allocated chunk.
+ */
+ DB_MUTEX mutex; /* Region lock. */
+ roff_t db_mutex_off; /* Client database mutex. */
+ u_int32_t tally_off; /* Offset of the tally region. */
+ int eid; /* Environment id. */
+ int master_id; /* ID of the master site. */
+ u_int32_t gen; /* Replication generation number */
+ int asites; /* Space allocated for sites. */
+ int nsites; /* Number of sites in group. */
+ int priority; /* My priority in an election. */
+ u_int32_t gbytes; /* Limit on data sent in single... */
+ u_int32_t bytes; /* __rep_process_message call. */
+#define DB_REP_REQUEST_GAP 4
+#define DB_REP_MAX_GAP 128
+ u_int32_t request_gap; /* # of records to receive before we
+ * request a missing log record. */
+ u_int32_t max_gap; /* Maximum number of records before
+ * requesting a missing log record. */
+
+ /* Vote tallying information. */
+ int sites; /* Sites heard from. */
+ int winner; /* Current winner. */
+ int w_priority; /* Winner priority. */
+ u_int32_t w_gen; /* Winner generation. */
+ DB_LSN w_lsn; /* Winner LSN. */
+ int w_tiebreaker; /* Winner tiebreaking value. */
+ int votes; /* Number of votes for this site. */
+
+ /* Statistics. */
+ DB_REP_STAT stat;
+
+#define REP_F_EPHASE1 0x01 /* In phase 1 of election. */
+#define REP_F_EPHASE2 0x02 /* In phase 2 of election. */
+#define REP_F_LOGSONLY 0x04 /* Log-site only; cannot be upgraded. */
+#define REP_F_MASTER 0x08 /* Master replica. */
+#define REP_F_RECOVER 0x10
+#define REP_F_UPGRADE 0x20 /* Upgradeable replica. */
+#define REP_ISCLIENT (REP_F_UPGRADE | REP_F_LOGSONLY)
+ u_int32_t flags;
+} REP;
+
+#define IN_ELECTION(R) F_ISSET((R), REP_F_EPHASE1 | REP_F_EPHASE2)
+#define ELECTION_DONE(R) F_CLR((R), REP_F_EPHASE1 | REP_F_EPHASE2)
+
+/*
+ * Per-process replication structure.
+ */
+struct __db_rep {
+ DB_MUTEX *mutexp;
+
+ DB_MUTEX *db_mutexp; /* Mutex for bookkeeping database. */
+ DB *rep_db; /* Bookkeeping database. */
+
+ REP *region; /* In memory structure. */
+ int (*rep_send) /* Send function. */
+ __P((DB_ENV *,
+ const DBT *, const DBT *, int, u_int32_t));
+};
+
+/*
+ * Control structure for replication communication infrastructure.
+ *
+ * Note that the version information should be at the beginning of the
+ * structure, so that we can rearrange the rest of it while letting the
+ * version checks continue to work. DB_REPVERSION should be revved any time
+ * the rest of the structure changes.
+ */
+typedef struct __rep_control {
+#define DB_REPVERSION 1
+ u_int32_t rep_version; /* Replication version number. */
+ u_int32_t log_version; /* Log version number. */
+
+ DB_LSN lsn; /* Log sequence number. */
+ u_int32_t rectype; /* Message type. */
+ u_int32_t gen; /* Generation number. */
+ u_int32_t flags; /* log_put flag value. */
+} REP_CONTROL;
+
+/* Election vote information. */
+typedef struct __rep_vote {
+ int priority; /* My site's priority. */
+ int nsites; /* Number of sites I've been in
+ * communication with. */
+ int tiebreaker; /* Tie-breaking quasi-random int. */
+} REP_VOTE_INFO;
+
+/*
+ * This structure takes care of representing a transaction.
+ * It holds all the records, sorted by page number so that
+ * we can obtain locks and apply updates in a deadlock free
+ * order.
+ */
+typedef struct __lsn_page {
+ DB_LSN lsn;
+ u_int32_t fid;
+ DB_LOCK_ILOCK pgdesc;
+#define LSN_PAGE_NOLOCK 0x0001 /* No lock necessary for log rec. */
+ u_int32_t flags;
+} LSN_PAGE;
+
+typedef struct __txn_recs {
+ int npages;
+ int nalloc;
+ LSN_PAGE *array;
+ u_int32_t txnid;
+ u_int32_t lockid;
+} TXN_RECS;
+
+typedef struct __lsn_collection {
+ int nlsns;
+ int nalloc;
+ DB_LSN *array;
+} LSN_COLLECTION;
+
+/*
+ * This is used by the page-prep routines to do the lock_vec call to
+ * apply the updates for a single transaction or a collection of
+ * transactions.
+ */
+typedef struct _linfo {
+ int n;
+ DB_LOCKREQ *reqs;
+ DBT *objs;
+} linfo_t;
+
+#include "dbinc_auto/rep_ext.h"
+#endif /* !_REP_H_ */
diff --git a/storage/bdb/dbinc/shqueue.h b/storage/bdb/dbinc/shqueue.h
new file mode 100644
index 00000000000..47fdf12ac92
--- /dev/null
+++ b/storage/bdb/dbinc/shqueue.h
@@ -0,0 +1,337 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: shqueue.h,v 11.9 2002/01/11 15:52:30 bostic Exp $
+ */
+
+#ifndef _SYS_SHQUEUE_H_
+#define _SYS_SHQUEUE_H_
+
+/*
+ * This file defines three types of data structures: lists, tail queues, and
+ * circular queues, similarly to the include file <sys/queue.h>.
+ *
+ * The difference is that this set of macros can be used for structures that
+ * reside in shared memory that may be mapped at different addresses in each
+ * process. In most cases, the macros for shared structures exactly mirror
+ * the normal macros, although the macro calls require an additional type
+ * parameter, only used by the HEAD and ENTRY macros of the standard macros.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Shared list definitions.
+ */
+#define SH_LIST_HEAD(name) \
+struct name { \
+ ssize_t slh_first; /* first element */ \
+}
+
+#define SH_LIST_ENTRY \
+struct { \
+ ssize_t sle_next; /* relative offset next element */ \
+ ssize_t sle_prev; /* relative offset of prev element */ \
+}
+
+/*
+ * Shared list functions. Since we use relative offsets for pointers,
+ * 0 is a valid offset. Therefore, we use -1 to indicate end of list.
+ * The macros ending in "P" return pointers without checking for end
+ * of list, the others check for end of list and evaluate to either a
+ * pointer or NULL.
+ */
+
+#define SH_LIST_FIRSTP(head, type) \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->slh_first))
+
+#define SH_LIST_FIRST(head, type) \
+ ((head)->slh_first == -1 ? NULL : \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->slh_first)))
+
+#define SH_LIST_NEXTP(elm, field, type) \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.sle_next))
+
+#define SH_LIST_NEXT(elm, field, type) \
+ ((elm)->field.sle_next == -1 ? NULL : \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.sle_next)))
+
+#define SH_LIST_PREV(elm, field) \
+ ((ssize_t *)(((u_int8_t *)(elm)) + (elm)->field.sle_prev))
+
+#define SH_PTR_TO_OFF(src, dest) \
+ ((ssize_t)(((u_int8_t *)(dest)) - ((u_int8_t *)(src))))
+
+/*
+ * Take the element's next pointer and calculate what the corresponding
+ * Prev pointer should be -- basically it is the negation plus the offset
+ * of the next field in the structure.
+ */
+#define SH_LIST_NEXT_TO_PREV(elm, field) \
+ (-(elm)->field.sle_next + SH_PTR_TO_OFF(elm, &(elm)->field.sle_next))
+
+#define SH_LIST_INIT(head) (head)->slh_first = -1
+
+#define SH_LIST_INSERT_AFTER(listelm, elm, field, type) do { \
+ if ((listelm)->field.sle_next != -1) { \
+ (elm)->field.sle_next = SH_PTR_TO_OFF(elm, \
+ SH_LIST_NEXTP(listelm, field, type)); \
+ SH_LIST_NEXTP(listelm, field, type)->field.sle_prev = \
+ SH_LIST_NEXT_TO_PREV(elm, field); \
+ } else \
+ (elm)->field.sle_next = -1; \
+ (listelm)->field.sle_next = SH_PTR_TO_OFF(listelm, elm); \
+ (elm)->field.sle_prev = SH_LIST_NEXT_TO_PREV(listelm, field); \
+} while (0)
+
+#define SH_LIST_INSERT_HEAD(head, elm, field, type) do { \
+ if ((head)->slh_first != -1) { \
+ (elm)->field.sle_next = \
+ (head)->slh_first - SH_PTR_TO_OFF(head, elm); \
+ SH_LIST_FIRSTP(head, type)->field.sle_prev = \
+ SH_LIST_NEXT_TO_PREV(elm, field); \
+ } else \
+ (elm)->field.sle_next = -1; \
+ (head)->slh_first = SH_PTR_TO_OFF(head, elm); \
+ (elm)->field.sle_prev = SH_PTR_TO_OFF(elm, &(head)->slh_first); \
+} while (0)
+
+#define SH_LIST_REMOVE(elm, field, type) do { \
+ if ((elm)->field.sle_next != -1) { \
+ SH_LIST_NEXTP(elm, field, type)->field.sle_prev = \
+ (elm)->field.sle_prev - (elm)->field.sle_next; \
+ *SH_LIST_PREV(elm, field) += (elm)->field.sle_next; \
+ } else \
+ *SH_LIST_PREV(elm, field) = -1; \
+} while (0)
+
+/*
+ * Shared tail queue definitions.
+ */
+#define SH_TAILQ_HEAD(name) \
+struct name { \
+ ssize_t stqh_first; /* relative offset of first element */ \
+ ssize_t stqh_last; /* relative offset of last's next */ \
+}
+
+#define SH_TAILQ_ENTRY \
+struct { \
+ ssize_t stqe_next; /* relative offset of next element */ \
+ ssize_t stqe_prev; /* relative offset of prev's next */ \
+}
+
+/*
+ * Shared tail queue functions.
+ */
+#define SH_TAILQ_FIRSTP(head, type) \
+ ((struct type *)((u_int8_t *)(head) + (head)->stqh_first))
+
+#define SH_TAILQ_FIRST(head, type) \
+ ((head)->stqh_first == -1 ? NULL : SH_TAILQ_FIRSTP(head, type))
+
+#define SH_TAILQ_NEXTP(elm, field, type) \
+ ((struct type *)((u_int8_t *)(elm) + (elm)->field.stqe_next))
+
+#define SH_TAILQ_NEXT(elm, field, type) \
+ ((elm)->field.stqe_next == -1 ? NULL : SH_TAILQ_NEXTP(elm, field, type))
+
+#define SH_TAILQ_PREVP(elm, field) \
+ ((ssize_t *)((u_int8_t *)(elm) + (elm)->field.stqe_prev))
+
+#define SH_TAILQ_LAST(head) \
+ ((ssize_t *)(((u_int8_t *)(head)) + (head)->stqh_last))
+
+#define SH_TAILQ_NEXT_TO_PREV(elm, field) \
+ (-(elm)->field.stqe_next + SH_PTR_TO_OFF(elm, &(elm)->field.stqe_next))
+
+#define SH_TAILQ_INIT(head) { \
+ (head)->stqh_first = -1; \
+ (head)->stqh_last = SH_PTR_TO_OFF(head, &(head)->stqh_first); \
+}
+
+#define SH_TAILQ_INSERT_HEAD(head, elm, field, type) do { \
+ if ((head)->stqh_first != -1) { \
+ (elm)->field.stqe_next = \
+ (head)->stqh_first - SH_PTR_TO_OFF(head, elm); \
+ SH_TAILQ_FIRSTP(head, type)->field.stqe_prev = \
+ SH_TAILQ_NEXT_TO_PREV(elm, field); \
+ } else { \
+ (elm)->field.stqe_next = -1; \
+ (head)->stqh_last = \
+ SH_PTR_TO_OFF(head, &(elm)->field.stqe_next); \
+ } \
+ (head)->stqh_first = SH_PTR_TO_OFF(head, elm); \
+ (elm)->field.stqe_prev = \
+ SH_PTR_TO_OFF(elm, &(head)->stqh_first); \
+} while (0)
+
+#define SH_TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.stqe_next = -1; \
+ (elm)->field.stqe_prev = \
+ -SH_PTR_TO_OFF(head, elm) + (head)->stqh_last; \
+ if ((head)->stqh_last == \
+ SH_PTR_TO_OFF((head), &(head)->stqh_first)) \
+ (head)->stqh_first = SH_PTR_TO_OFF(head, elm); \
+ else \
+ *SH_TAILQ_LAST(head) = -(head)->stqh_last + \
+ SH_PTR_TO_OFF((elm), &(elm)->field.stqe_next) + \
+ SH_PTR_TO_OFF(head, elm); \
+ (head)->stqh_last = \
+ SH_PTR_TO_OFF(head, &((elm)->field.stqe_next)); \
+} while (0)
+
+#define SH_TAILQ_INSERT_AFTER(head, listelm, elm, field, type) do { \
+ if ((listelm)->field.stqe_next != -1) { \
+ (elm)->field.stqe_next = (listelm)->field.stqe_next - \
+ SH_PTR_TO_OFF(listelm, elm); \
+ SH_TAILQ_NEXTP(listelm, field, type)->field.stqe_prev = \
+ SH_TAILQ_NEXT_TO_PREV(elm, field); \
+ } else { \
+ (elm)->field.stqe_next = -1; \
+ (head)->stqh_last = \
+ SH_PTR_TO_OFF(head, &elm->field.stqe_next); \
+ } \
+ (listelm)->field.stqe_next = SH_PTR_TO_OFF(listelm, elm); \
+ (elm)->field.stqe_prev = SH_TAILQ_NEXT_TO_PREV(listelm, field); \
+} while (0)
+
+#define SH_TAILQ_REMOVE(head, elm, field, type) do { \
+ if ((elm)->field.stqe_next != -1) { \
+ SH_TAILQ_NEXTP(elm, field, type)->field.stqe_prev = \
+ (elm)->field.stqe_prev + \
+ SH_PTR_TO_OFF(SH_TAILQ_NEXTP(elm, \
+ field, type), elm); \
+ *SH_TAILQ_PREVP(elm, field) += elm->field.stqe_next; \
+ } else { \
+ (head)->stqh_last = (elm)->field.stqe_prev + \
+ SH_PTR_TO_OFF(head, elm); \
+ *SH_TAILQ_PREVP(elm, field) = -1; \
+ } \
+} while (0)
+
+/*
+ * Shared circular queue definitions.
+ */
+#define SH_CIRCLEQ_HEAD(name) \
+struct name { \
+ ssize_t scqh_first; /* first element */ \
+ ssize_t scqh_last; /* last element */ \
+}
+
+#define SH_CIRCLEQ_ENTRY \
+struct { \
+ ssize_t scqe_next; /* next element */ \
+ ssize_t scqe_prev; /* previous element */ \
+}
+
+/*
+ * Shared circular queue functions.
+ */
+#define SH_CIRCLEQ_FIRSTP(head, type) \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->scqh_first))
+
+#define SH_CIRCLEQ_FIRST(head, type) \
+ ((head)->scqh_first == -1 ? \
+ (void *)head : SH_CIRCLEQ_FIRSTP(head, type))
+
+#define SH_CIRCLEQ_LASTP(head, type) \
+ ((struct type *)(((u_int8_t *)(head)) + (head)->scqh_last))
+
+#define SH_CIRCLEQ_LAST(head, type) \
+ ((head)->scqh_last == -1 ? (void *)head : SH_CIRCLEQ_LASTP(head, type))
+
+#define SH_CIRCLEQ_NEXTP(elm, field, type) \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.scqe_next))
+
+#define SH_CIRCLEQ_NEXT(head, elm, field, type) \
+ ((elm)->field.scqe_next == SH_PTR_TO_OFF(elm, head) ? \
+ (void *)head : SH_CIRCLEQ_NEXTP(elm, field, type))
+
+#define SH_CIRCLEQ_PREVP(elm, field, type) \
+ ((struct type *)(((u_int8_t *)(elm)) + (elm)->field.scqe_prev))
+
+#define SH_CIRCLEQ_PREV(head, elm, field, type) \
+ ((elm)->field.scqe_prev == SH_PTR_TO_OFF(elm, head) ? \
+ (void *)head : SH_CIRCLEQ_PREVP(elm, field, type))
+
+#define SH_CIRCLEQ_INIT(head) { \
+ (head)->scqh_first = 0; \
+ (head)->scqh_last = 0; \
+}
+
+#define SH_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field, type) do { \
+ (elm)->field.scqe_prev = SH_PTR_TO_OFF(elm, listelm); \
+ (elm)->field.scqe_next = (listelm)->field.scqe_next + \
+ (elm)->field.scqe_prev; \
+ if (SH_CIRCLEQ_NEXTP(listelm, field, type) == (void *)head) \
+ (head)->scqh_last = SH_PTR_TO_OFF(head, elm); \
+ else \
+ SH_CIRCLEQ_NEXTP(listelm, \
+ field, type)->field.scqe_prev = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_NEXTP(listelm, \
+ field, type), elm); \
+ (listelm)->field.scqe_next = -(elm)->field.scqe_prev; \
+} while (0)
+
+#define SH_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field, type) do { \
+ (elm)->field.scqe_next = SH_PTR_TO_OFF(elm, listelm); \
+ (elm)->field.scqe_prev = (elm)->field.scqe_next - \
+ SH_CIRCLEQ_PREVP(listelm, field, type)->field.scqe_next;\
+ if (SH_CIRCLEQ_PREVP(listelm, field, type) == (void *)(head)) \
+ (head)->scqh_first = SH_PTR_TO_OFF(head, elm); \
+ else \
+ SH_CIRCLEQ_PREVP(listelm, \
+ field, type)->field.scqe_next = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_PREVP(listelm, \
+ field, type), elm); \
+ (listelm)->field.scqe_prev = -(elm)->field.scqe_next; \
+} while (0)
+
+#define SH_CIRCLEQ_INSERT_HEAD(head, elm, field, type) do { \
+ (elm)->field.scqe_prev = SH_PTR_TO_OFF(elm, head); \
+ (elm)->field.scqe_next = (head)->scqh_first + \
+ (elm)->field.scqe_prev; \
+ if ((head)->scqh_last == 0) \
+ (head)->scqh_last = -(elm)->field.scqe_prev; \
+ else \
+ SH_CIRCLEQ_FIRSTP(head, type)->field.scqe_prev = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_FIRSTP(head, type), elm); \
+ (head)->scqh_first = -(elm)->field.scqe_prev; \
+} while (0)
+
+#define SH_CIRCLEQ_INSERT_TAIL(head, elm, field, type) do { \
+ (elm)->field.scqe_next = SH_PTR_TO_OFF(elm, head); \
+ (elm)->field.scqe_prev = (head)->scqh_last + \
+ (elm)->field.scqe_next; \
+ if ((head)->scqh_first == 0) \
+ (head)->scqh_first = -(elm)->field.scqe_next; \
+ else \
+ SH_CIRCLEQ_LASTP(head, type)->field.scqe_next = \
+ SH_PTR_TO_OFF(SH_CIRCLEQ_LASTP(head, type), elm); \
+ (head)->scqh_last = -(elm)->field.scqe_next; \
+} while (0)
+
+#define SH_CIRCLEQ_REMOVE(head, elm, field, type) do { \
+ if (SH_CIRCLEQ_NEXTP(elm, field, type) == (void *)(head)) \
+ (head)->scqh_last += (elm)->field.scqe_prev; \
+ else \
+ SH_CIRCLEQ_NEXTP(elm, field, type)->field.scqe_prev += \
+ (elm)->field.scqe_prev; \
+ if (SH_CIRCLEQ_PREVP(elm, field, type) == (void *)(head)) \
+ (head)->scqh_first += (elm)->field.scqe_next; \
+ else \
+ SH_CIRCLEQ_PREVP(elm, field, type)->field.scqe_next += \
+ (elm)->field.scqe_next; \
+} while (0)
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_SYS_SHQUEUE_H_ */
diff --git a/storage/bdb/dbinc/tcl_db.h b/storage/bdb/dbinc/tcl_db.h
new file mode 100644
index 00000000000..8c04d545295
--- /dev/null
+++ b/storage/bdb/dbinc/tcl_db.h
@@ -0,0 +1,261 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: tcl_db.h,v 11.30 2002/08/06 06:11:22 bostic Exp $
+ */
+
+#ifndef _DB_TCL_DB_H_
+#define _DB_TCL_DB_H_
+
+#define MSG_SIZE 100 /* Message size */
+
+enum INFOTYPE {
+ I_ENV, I_DB, I_DBC, I_TXN, I_MP, I_PG, I_LOCK, I_LOGC, I_NDBM, I_MUTEX };
+
+#define MAX_ID 8 /* Maximum number of sub-id's we need */
+#define DBTCL_PREP 64 /* Size of txn_recover preplist */
+
+#define DBTCL_DBM 1
+#define DBTCL_NDBM 2
+
+typedef struct _mutex_entry {
+ union {
+ struct {
+ DB_MUTEX real_m;
+ u_int32_t real_val;
+ } r;
+ /*
+ * This is here to make sure that each of the mutex structures
+ * are 16-byte aligned, which is required on HP architectures.
+ * The db_mutex_t structure might be >32 bytes itself, or the
+ * real_val might push it over the 32 byte boundary. The best
+ * we can do is use a 48 byte boundary.
+ */
+ char c[48];
+ } u;
+} _MUTEX_ENTRY;
+
+#define m u.r.real_m
+#define val u.r.real_val
+
+typedef struct _mutex_data {
+ DB_ENV *env;
+ REGINFO reginfo;
+ _MUTEX_ENTRY *marray;
+ size_t size;
+ u_int32_t n_mutex;
+} _MUTEX_DATA;
+
+/*
+ * Why use a home grown package over the Tcl_Hash functions?
+ *
+ * We could have implemented the stuff below without maintaining our
+ * own list manipulation, efficiently hashing it with the available
+ * Tcl functions (Tcl_CreateHashEntry, Tcl_GetHashValue, etc). I chose
+ * not to do so for these reasons:
+ *
+ * We still need the information below. Using the hashing only removes
+ * us from needing the next/prev pointers. We still need the structure
+ * itself because we need more than one value associated with a widget.
+ * We need to keep track of parent pointers for sub-widgets (like cursors)
+ * so we can correctly close. We need to keep track of individual widget's
+ * id counters for any sub-widgets they may have. We need to be able to
+ * associate the name/client data outside the scope of the widget.
+ *
+ * So, is it better to use the hashing rather than
+ * the linear list we have now? I decided against it for the simple reason
+ * that to access the structure would require two calls. The first is
+ * Tcl_FindHashEntry(table, key) and then, once we have the entry, we'd
+ * have to do Tcl_GetHashValue(entry) to get the pointer of the structure.
+ *
+ * I believe the number of simultaneous DB widgets in existence at one time
+ * is not going to be that large (more than several dozen) such that
+ * linearly searching the list is not going to impact performance in a
+ * noticeable way. Should performance be impacted due to the size of the
+ * info list, then perhaps it is time to revisit this decision.
+ */
+typedef struct dbtcl_info {
+ LIST_ENTRY(dbtcl_info) entries;
+ Tcl_Interp *i_interp;
+ char *i_name;
+ enum INFOTYPE i_type;
+ union infop {
+ DB_ENV *envp;
+ void *anyp;
+ DB *dbp;
+ DBC *dbcp;
+ DB_TXN *txnp;
+ DB_MPOOLFILE *mp;
+ DB_LOCK *lock;
+ _MUTEX_DATA *mutex;
+ DB_LOGC *logc;
+ } un;
+ union data {
+ int anydata;
+ db_pgno_t pgno;
+ u_int32_t lockid;
+ } und;
+ union data2 {
+ int anydata;
+ size_t pagesz;
+ } und2;
+ DBT i_lockobj;
+ FILE *i_err;
+ char *i_errpfx;
+
+ /* Callbacks--Tcl_Objs containing proc names */
+ Tcl_Obj *i_btcompare;
+ Tcl_Obj *i_dupcompare;
+ Tcl_Obj *i_hashproc;
+ Tcl_Obj *i_rep_send;
+ Tcl_Obj *i_second_call;
+
+ /* Environment ID for the i_rep_send callback. */
+ Tcl_Obj *i_rep_eid;
+
+ struct dbtcl_info *i_parent;
+ int i_otherid[MAX_ID];
+} DBTCL_INFO;
+
+#define i_anyp un.anyp
+#define i_pagep un.anyp
+#define i_envp un.envp
+#define i_dbp un.dbp
+#define i_dbcp un.dbcp
+#define i_txnp un.txnp
+#define i_mp un.mp
+#define i_lock un.lock
+#define i_mutex un.mutex
+#define i_logc un.logc
+
+#define i_data und.anydata
+#define i_pgno und.pgno
+#define i_locker und.lockid
+#define i_data2 und2.anydata
+#define i_pgsz und2.pagesz
+
+#define i_envtxnid i_otherid[0]
+#define i_envmpid i_otherid[1]
+#define i_envlockid i_otherid[2]
+#define i_envmutexid i_otherid[3]
+#define i_envlogcid i_otherid[4]
+
+#define i_mppgid i_otherid[0]
+
+#define i_dbdbcid i_otherid[0]
+
+extern int __debug_on, __debug_print, __debug_stop, __debug_test;
+
+typedef struct dbtcl_global {
+ LIST_HEAD(infohead, dbtcl_info) g_infohead;
+} DBTCL_GLOBAL;
+#define __db_infohead __dbtcl_global.g_infohead
+
+extern DBTCL_GLOBAL __dbtcl_global;
+
+#define NAME_TO_ENV(name) (DB_ENV *)_NameToPtr((name))
+#define NAME_TO_DB(name) (DB *)_NameToPtr((name))
+#define NAME_TO_DBC(name) (DBC *)_NameToPtr((name))
+#define NAME_TO_TXN(name) (DB_TXN *)_NameToPtr((name))
+#define NAME_TO_MP(name) (DB_MPOOLFILE *)_NameToPtr((name))
+#define NAME_TO_LOCK(name) (DB_LOCK *)_NameToPtr((name))
+
+/*
+ * MAKE_STAT_LIST appends a {name value} pair to a result list
+ * that MUST be called 'res' that is a Tcl_Obj * in the local
+ * function. This macro also assumes a label "error" to go to
+ * in the event of a Tcl error. For stat functions this will
+ * typically go before the "free" function to free the stat structure
+ * returned by DB.
+ */
+#define MAKE_STAT_LIST(s,v) \
+do { \
+ result = _SetListElemInt(interp, res, (s), (v)); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0)
+
+/*
+ * MAKE_STAT_LSN appends a {name {LSNfile LSNoffset}} pair to a result list
+ * that MUST be called 'res' that is a Tcl_Obj * in the local
+ * function. This macro also assumes a label "error" to go to
+ * in the event of a Tcl error. For stat functions this will
+ * typically go before the "free" function to free the stat structure
+ * returned by DB.
+ */
+#define MAKE_STAT_LSN(s, lsn) \
+do { \
+ myobjc = 2; \
+ myobjv[0] = Tcl_NewLongObj((long)(lsn)->file); \
+ myobjv[1] = Tcl_NewLongObj((long)(lsn)->offset); \
+ lsnlist = Tcl_NewListObj(myobjc, myobjv); \
+ myobjc = 2; \
+ myobjv[0] = Tcl_NewStringObj((s), strlen(s)); \
+ myobjv[1] = lsnlist; \
+ thislist = Tcl_NewListObj(myobjc, myobjv); \
+ result = Tcl_ListObjAppendElement(interp, res, thislist); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0)
+
+/*
+ * MAKE_STAT_STRLIST appends a {name string} pair to a result list
+ * that MUST be called 'res' that is a Tcl_Obj * in the local
+ * function. This macro also assumes a label "error" to go to
+ * in the event of a Tcl error. For stat functions this will
+ * typically go before the "free" function to free the stat structure
+ * returned by DB.
+ */
+#define MAKE_STAT_STRLIST(s,s1) \
+do { \
+ result = _SetListElem(interp, res, (s), strlen(s), \
+ (s1), strlen(s1)); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0)
+
+/*
+ * FLAG_CHECK checks that the given flag is not set yet.
+ * If it is, it sets up an error message.
+ */
+#define FLAG_CHECK(flag) \
+do { \
+ if ((flag) != 0) { \
+ Tcl_SetResult(interp, \
+ " Only 1 policy can be specified.\n", \
+ TCL_STATIC); \
+ result = TCL_ERROR; \
+ break; \
+ } \
+} while (0)
+
+/*
+ * FLAG_CHECK2 checks that the given flag is not set yet or is
+ * only set to the given allowed value.
+ * If it is, it sets up an error message.
+ */
+#define FLAG_CHECK2(flag,val) \
+do { \
+ if (((flag) & ~(val)) != 0) { \
+ Tcl_SetResult(interp, \
+ " Only 1 policy can be specified.\n", \
+ TCL_STATIC); \
+ result = TCL_ERROR; \
+ break; \
+ } \
+} while (0)
+
+/*
+ * IS_HELP checks whether the arg we bombed on is -?, which is a help option.
+ * If it is, we return TCL_OK (but leave the result set to whatever
+ * Tcl_GetIndexFromObj says, which lists all the valid options. Otherwise
+ * return TCL_ERROR.
+ */
+#define IS_HELP(s) \
+ (strcmp(Tcl_GetStringFromObj(s,NULL), "-?") == 0) ? TCL_OK : TCL_ERROR
+
+#include "dbinc_auto/tcl_ext.h"
+#endif /* !_DB_TCL_DB_H_ */
diff --git a/storage/bdb/dbinc/txn.h b/storage/bdb/dbinc/txn.h
new file mode 100644
index 00000000000..31b00a6ba74
--- /dev/null
+++ b/storage/bdb/dbinc/txn.h
@@ -0,0 +1,143 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: txn.h,v 11.43 2002/08/29 14:22:19 margo Exp $
+ */
+
+#ifndef _TXN_H_
+#define _TXN_H_
+
+#include "dbinc/xa.h"
+
+/* Operation parameters to the delayed commit processing code. */
+typedef enum {
+ TXN_REMOVE, /* Remove a file. */
+ TXN_TRADE, /* Trade lockers. */
+ TXN_TRADED /* Already traded; downgrade lock. */
+} TXN_EVENT_T;
+
+struct __db_txnregion; typedef struct __db_txnregion DB_TXNREGION;
+
+/*
+ * !!!
+ * TXN_MINIMUM = (DB_LOCK_MAXID + 1) but this makes compilers complain.
+ */
+#define TXN_MINIMUM 0x80000000
+#define TXN_MAXIMUM 0xffffffff /* Maximum number of txn ids. */
+#define TXN_INVALID 0 /* Invalid transaction ID. */
+
+#define DEF_MAX_TXNS 20 /* Default max transactions. */
+
+/*
+ * Internal data maintained in shared memory for each transaction.
+ */
+typedef struct __txn_detail {
+ u_int32_t txnid; /* current transaction id
+ used to link free list also */
+ DB_LSN last_lsn; /* last lsn written for this txn */
+ DB_LSN begin_lsn; /* lsn of begin record */
+ roff_t parent; /* Offset of transaction's parent. */
+
+#define TXN_RUNNING 1
+#define TXN_ABORTED 2
+#define TXN_PREPARED 3
+#define TXN_COMMITTED 4
+ u_int32_t status; /* status of the transaction */
+#define TXN_COLLECTED 0x1
+#define TXN_RESTORED 0x2
+ u_int32_t flags; /* collected during txn_recover */
+
+ SH_TAILQ_ENTRY links; /* free/active list */
+
+#define TXN_XA_ABORTED 1
+#define TXN_XA_DEADLOCKED 2
+#define TXN_XA_ENDED 3
+#define TXN_XA_PREPARED 4
+#define TXN_XA_STARTED 5
+#define TXN_XA_SUSPENDED 6
+ u_int32_t xa_status; /* XA status */
+
+ /*
+ * XID (xid_t) structure: because these fields are logged, the
+ * sizes have to be explicit.
+ */
+ u_int8_t xid[XIDDATASIZE]; /* XA global transaction id */
+ u_int32_t bqual; /* bqual_length from XID */
+ u_int32_t gtrid; /* gtrid_length from XID */
+ int32_t format; /* XA format */
+} TXN_DETAIL;
+
+/*
+ * DB_TXNMGR --
+ * The transaction manager encapsulates the transaction system.
+ */
+struct __db_txnmgr {
+/*
+ * These fields need to be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the mutex may need
+ * to be stored elsewhere on architectures unable to support mutexes in heap
+ * memory, e.g., HP/UX 9.
+ */
+ DB_MUTEX *mutexp; /* Lock list of active transactions
+ * (including the content of each
+ * TXN_DETAIL structure on the list).
+ */
+ /* List of active transactions. */
+ TAILQ_HEAD(_chain, __db_txn) txn_chain;
+ u_int32_t n_discards; /* Number of txns discarded. */
+
+/* These fields are never updated after creation, and so not protected. */
+ DB_ENV *dbenv; /* Environment. */
+ REGINFO reginfo; /* Region information. */
+};
+
+/*
+ * DB_TXNREGION --
+ * The primary transaction data structure in the shared memory region.
+ */
+struct __db_txnregion {
+ u_int32_t maxtxns; /* maximum number of active TXNs */
+ u_int32_t last_txnid; /* last transaction id given out */
+ u_int32_t cur_maxid; /* current max unused id. */
+ DB_LSN last_ckp; /* lsn of the last checkpoint */
+ time_t time_ckp; /* time of last checkpoint */
+ u_int32_t logtype; /* type of logging */
+ u_int32_t locktype; /* lock type */
+ DB_TXN_STAT stat; /* Statistics for txns. */
+
+#define TXN_IN_RECOVERY 0x01 /* environment is being recovered */
+ u_int32_t flags;
+ /* active TXN list */
+ SH_TAILQ_HEAD(__active) active_txn;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+#define TXN_MAINT_SIZE (sizeof(roff_t) * DB_MAX_HANDLES)
+
+ roff_t maint_off; /* offset of region maintenance info */
+#endif
+};
+
+/*
+ * Log record types. Note that these are *not* alphabetical. This is
+ * intentional so that we don't change the meaning of values between
+ * software upgrades. EXPECTED, UNEXPECTED, IGNORE, NOTFOUND and OK
+ * are used in the
+ * txnlist functions.
+ */
+#define TXN_OK 0
+#define TXN_COMMIT 1
+#define TXN_PREPARE 2
+#define TXN_ABORT 3
+#define TXN_NOTFOUND 4
+#define TXN_IGNORE 5
+#define TXN_EXPECTED 6
+#define TXN_UNEXPECTED 7
+
+#include "dbinc_auto/txn_auto.h"
+#include "dbinc_auto/txn_ext.h"
+#include "dbinc_auto/xa_ext.h"
+#endif /* !_TXN_H_ */
diff --git a/storage/bdb/dbinc/xa.h b/storage/bdb/dbinc/xa.h
new file mode 100644
index 00000000000..64bdac8c914
--- /dev/null
+++ b/storage/bdb/dbinc/xa.h
@@ -0,0 +1,179 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: xa.h,v 11.5 2002/01/11 15:52:30 bostic Exp $
+ */
+/*
+ * Start of xa.h header
+ *
+ * Define a symbol to prevent multiple inclusions of this header file
+ */
+#ifndef XA_H
+#define XA_H
+
+/*
+ * Transaction branch identification: XID and NULLXID:
+ */
+#define XIDDATASIZE 128 /* size in bytes */
+#define MAXGTRIDSIZE 64 /* maximum size in bytes of gtrid */
+#define MAXBQUALSIZE 64 /* maximum size in bytes of bqual */
+
+struct xid_t {
+ long formatID; /* format identifier */
+ long gtrid_length; /* value from 1 through 64 */
+ long bqual_length; /* value from 1 through 64 */
+ char data[XIDDATASIZE];
+};
+typedef struct xid_t XID;
+/*
+ * A value of -1 in formatID means that the XID is null.
+ */
+
+/*
+ * Declarations of routines by which RMs call TMs:
+ */
+extern int ax_reg __P((int, XID *, long));
+extern int ax_unreg __P((int, long));
+
+/*
+ * XA Switch Data Structure
+ */
+#define RMNAMESZ 32 /* length of resource manager name, */
+ /* including the null terminator */
+#define MAXINFOSIZE 256 /* maximum size in bytes of xa_info */
+ /* strings, including the null
+ terminator */
+struct xa_switch_t {
+ char name[RMNAMESZ]; /* name of resource manager */
+ long flags; /* resource manager specific options */
+ long version; /* must be 0 */
+ int (*xa_open_entry) /* xa_open function pointer */
+ __P((char *, int, long));
+ int (*xa_close_entry) /* xa_close function pointer */
+ __P((char *, int, long));
+ int (*xa_start_entry) /* xa_start function pointer */
+ __P((XID *, int, long));
+ int (*xa_end_entry) /* xa_end function pointer */
+ __P((XID *, int, long));
+ int (*xa_rollback_entry) /* xa_rollback function pointer */
+ __P((XID *, int, long));
+ int (*xa_prepare_entry) /* xa_prepare function pointer */
+ __P((XID *, int, long));
+ int (*xa_commit_entry) /* xa_commit function pointer */
+ __P((XID *, int, long));
+ int (*xa_recover_entry) /* xa_recover function pointer */
+ __P((XID *, long, int, long));
+ int (*xa_forget_entry) /* xa_forget function pointer */
+ __P((XID *, int, long));
+ int (*xa_complete_entry) /* xa_complete function pointer */
+ __P((int *, int *, int, long));
+};
+
+/*
+ * Flag definitions for the RM switch
+ */
+#define TMNOFLAGS 0x00000000L /* no resource manager features
+ selected */
+#define TMREGISTER 0x00000001L /* resource manager dynamically
+ registers */
+#define TMNOMIGRATE 0x00000002L /* resource manager does not support
+ association migration */
+#define TMUSEASYNC 0x00000004L /* resource manager supports
+ asynchronous operations */
+/*
+ * Flag definitions for xa_ and ax_ routines
+ */
+/* use TMNOFLAGS, defined above, when not specifying other flags */
+#define TMASYNC 0x80000000L /* perform routine asynchronously */
+#define TMONEPHASE 0x40000000L /* caller is using one-phase commit
+ optimisation */
+#define TMFAIL 0x20000000L /* dissociates caller and marks
+ transaction branch rollback-only */
+#define TMNOWAIT 0x10000000L /* return if blocking condition
+ exists */
+#define TMRESUME 0x08000000L /* caller is resuming association with
+ suspended transaction branch */
+#define TMSUCCESS 0x04000000L /* dissociate caller from transaction
+ branch */
+#define TMSUSPEND 0x02000000L /* caller is suspending, not ending,
+ association */
+#define TMSTARTRSCAN 0x01000000L /* start a recovery scan */
+#define TMENDRSCAN 0x00800000L /* end a recovery scan */
+#define TMMULTIPLE 0x00400000L /* wait for any asynchronous
+ operation */
+#define TMJOIN 0x00200000L /* caller is joining existing
+ transaction branch */
+#define TMMIGRATE 0x00100000L /* caller intends to perform
+ migration */
+
+/*
+ * ax_() return codes (transaction manager reports to resource manager)
+ */
+#define TM_JOIN 2 /* caller is joining existing
+ transaction branch */
+#define TM_RESUME 1 /* caller is resuming association with
+ suspended transaction branch */
+#define TM_OK 0 /* normal execution */
+#define TMER_TMERR -1 /* an error occurred in the transaction
+ manager */
+#define TMER_INVAL -2 /* invalid arguments were given */
+#define TMER_PROTO -3 /* routine invoked in an improper
+ context */
+
+/*
+ * xa_() return codes (resource manager reports to transaction manager)
+ */
+#define XA_RBBASE 100 /* The inclusive lower bound of the
+ rollback codes */
+#define XA_RBROLLBACK XA_RBBASE /* The rollback was caused by an
+ unspecified reason */
+#define XA_RBCOMMFAIL XA_RBBASE+1 /* The rollback was caused by a
+ communication failure */
+#define XA_RBDEADLOCK XA_RBBASE+2 /* A deadlock was detected */
+#define XA_RBINTEGRITY XA_RBBASE+3 /* A condition that violates the
+ integrity of the resources was
+ detected */
+#define XA_RBOTHER XA_RBBASE+4 /* The resource manager rolled back the
+ transaction branch for a reason not
+ on this list */
+#define XA_RBPROTO XA_RBBASE+5 /* A protocol error occurred in the
+ resource manager */
+#define XA_RBTIMEOUT XA_RBBASE+6 /* A transaction branch took too long */
+#define XA_RBTRANSIENT XA_RBBASE+7 /* May retry the transaction branch */
+#define XA_RBEND XA_RBTRANSIENT /* The inclusive upper bound of the
+ rollback codes */
+#define XA_NOMIGRATE 9 /* resumption must occur where
+ suspension occurred */
+#define XA_HEURHAZ 8 /* the transaction branch may have
+ been heuristically completed */
+#define XA_HEURCOM 7 /* the transaction branch has been
+ heuristically committed */
+#define XA_HEURRB 6 /* the transaction branch has been
+ heuristically rolled back */
+#define XA_HEURMIX 5 /* the transaction branch has been
+ heuristically committed and rolled
+ back */
+#define XA_RETRY 4 /* routine returned with no effect and
+ may be re-issued */
+#define XA_RDONLY 3 /* the transaction branch was read-only
+ and has been committed */
+#define XA_OK 0 /* normal execution */
+#define XAER_ASYNC -2 /* asynchronous operation already
+ outstanding */
+#define XAER_RMERR -3 /* a resource manager error occurred in
+ the transaction branch */
+#define XAER_NOTA -4 /* the XID is not valid */
+#define XAER_INVAL -5 /* invalid arguments were given */
+#define XAER_PROTO -6 /* routine invoked in an improper
+ context */
+#define XAER_RMFAIL -7 /* resource manager unavailable */
+#define XAER_DUPID -8 /* the XID already exists */
+#define XAER_OUTSIDE -9 /* resource manager doing work outside
+ transaction */
+#endif /* ifndef XA_H */
+/*
+ * End of xa.h header
+ */
diff --git a/storage/bdb/dbm/dbm.c b/storage/bdb/dbm/dbm.c
new file mode 100644
index 00000000000..3aa6fff6982
--- /dev/null
+++ b/storage/bdb/dbm/dbm.c
@@ -0,0 +1,519 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: dbm.c,v 11.14 2002/02/22 16:11:10 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+#include "db_int.h"
+
+/*
+ *
+ * This package provides dbm and ndbm compatible interfaces to DB.
+ *
+ * EXTERN: #if DB_DBM_HSEARCH != 0
+ *
+ * EXTERN: int __db_ndbm_clearerr __P((DBM *));
+ * EXTERN: void __db_ndbm_close __P((DBM *));
+ * EXTERN: int __db_ndbm_delete __P((DBM *, datum));
+ * EXTERN: int __db_ndbm_dirfno __P((DBM *));
+ * EXTERN: int __db_ndbm_error __P((DBM *));
+ * EXTERN: datum __db_ndbm_fetch __P((DBM *, datum));
+ * EXTERN: datum __db_ndbm_firstkey __P((DBM *));
+ * EXTERN: datum __db_ndbm_nextkey __P((DBM *));
+ * EXTERN: DBM *__db_ndbm_open __P((const char *, int, int));
+ * EXTERN: int __db_ndbm_pagfno __P((DBM *));
+ * EXTERN: int __db_ndbm_rdonly __P((DBM *));
+ * EXTERN: int __db_ndbm_store __P((DBM *, datum, datum, int));
+ *
+ * EXTERN: int __db_dbm_close __P((void));
+ * EXTERN: int __db_dbm_dbrdonly __P((void));
+ * EXTERN: int __db_dbm_delete __P((datum));
+ * EXTERN: int __db_dbm_dirf __P((void));
+ * EXTERN: datum __db_dbm_fetch __P((datum));
+ * EXTERN: datum __db_dbm_firstkey __P((void));
+ * EXTERN: int __db_dbm_init __P((char *));
+ * EXTERN: datum __db_dbm_nextkey __P((datum));
+ * EXTERN: int __db_dbm_pagf __P((void));
+ * EXTERN: int __db_dbm_store __P((datum, datum));
+ *
+ * EXTERN: #endif
+ */
+
+/*
+ * The DBM routines, which call the NDBM routines.
+ */
+static DBM *__cur_db;
+
+static void __db_no_open __P((void));
+
+int
+__db_dbm_init(file)
+ char *file;
+{
+ if (__cur_db != NULL)
+ (void)dbm_close(__cur_db);
+ if ((__cur_db =
+ dbm_open(file, O_CREAT | O_RDWR, __db_omode("rw----"))) != NULL)
+ return (0);
+ if ((__cur_db = dbm_open(file, O_RDONLY, 0)) != NULL)
+ return (0);
+ return (-1);
+}
+
+int
+__db_dbm_close()
+{
+ if (__cur_db != NULL) {
+ dbm_close(__cur_db);
+ __cur_db = NULL;
+ }
+ return (0);
+}
+
+datum
+__db_dbm_fetch(key)
+ datum key;
+{
+ datum item;
+
+ if (__cur_db == NULL) {
+ __db_no_open();
+ item.dptr = 0;
+ return (item);
+ }
+ return (dbm_fetch(__cur_db, key));
+}
+
+datum
+__db_dbm_firstkey()
+{
+ datum item;
+
+ if (__cur_db == NULL) {
+ __db_no_open();
+ item.dptr = 0;
+ return (item);
+ }
+ return (dbm_firstkey(__cur_db));
+}
+
+datum
+__db_dbm_nextkey(key)
+ datum key;
+{
+ datum item;
+
+ COMPQUIET(key.dsize, 0);
+
+ if (__cur_db == NULL) {
+ __db_no_open();
+ item.dptr = 0;
+ return (item);
+ }
+ return (dbm_nextkey(__cur_db));
+}
+
+int
+__db_dbm_delete(key)
+ datum key;
+{
+ if (__cur_db == NULL) {
+ __db_no_open();
+ return (-1);
+ }
+ return (dbm_delete(__cur_db, key));
+}
+
+int
+__db_dbm_store(key, dat)
+ datum key, dat;
+{
+ if (__cur_db == NULL) {
+ __db_no_open();
+ return (-1);
+ }
+ return (dbm_store(__cur_db, key, dat, DBM_REPLACE));
+}
+
+static void
+__db_no_open()
+{
+ (void)fprintf(stderr, "dbm: no open database.\n");
+}
+
+/*
+ * This package provides dbm and ndbm compatible interfaces to DB.
+ *
+ * The NDBM routines, which call the DB routines.
+ */
+/*
+ * Returns:
+ * *DBM on success
+ * NULL on failure
+ */
+DBM *
+__db_ndbm_open(file, oflags, mode)
+ const char *file;
+ int oflags, mode;
+{
+ DB *dbp;
+ DBC *dbc;
+ int ret;
+ char path[MAXPATHLEN];
+
+ /*
+ * !!!
+ * Don't use sprintf(3)/snprintf(3) -- the former is dangerous, and
+ * the latter isn't standard, and we're manipulating strings handed
+ * us by the application.
+ */
+ if (strlen(file) + strlen(DBM_SUFFIX) + 1 > sizeof(path)) {
+ __os_set_errno(ENAMETOOLONG);
+ return (NULL);
+ }
+ (void)strcpy(path, file);
+ (void)strcat(path, DBM_SUFFIX);
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ __os_set_errno(ret);
+ return (NULL);
+ }
+
+ /*
+ * !!!
+ * The historic ndbm library corrected for opening O_WRONLY.
+ */
+ if (oflags & O_WRONLY) {
+ oflags &= ~O_WRONLY;
+ oflags |= O_RDWR;
+ }
+
+ if ((ret = dbp->set_pagesize(dbp, 4096)) != 0 ||
+ (ret = dbp->set_h_ffactor(dbp, 40)) != 0 ||
+ (ret = dbp->set_h_nelem(dbp, 1)) != 0 ||
+ (ret = dbp->open(dbp, NULL,
+ path, NULL, DB_HASH, __db_oflags(oflags), mode)) != 0) {
+ __os_set_errno(ret);
+ return (NULL);
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0) {
+ (void)dbp->close(dbp, 0);
+ __os_set_errno(ret);
+ return (NULL);
+ }
+
+ return ((DBM *)dbc);
+}
+
+/*
+ * Returns:
+ * Nothing.
+ */
+void
+__db_ndbm_close(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ (void)dbc->dbp->close(dbc->dbp, 0);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+datum
+__db_ndbm_fetch(dbm, key)
+ DBM *dbm;
+ datum key;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ datum data;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ memset(&_data, 0, sizeof(DBT));
+ _key.size = key.dsize;
+ _key.data = key.dptr;
+
+ /*
+ * Note that we can't simply use the dbc we have to do a c_get/SET,
+ * because that cursor is the one used for sequential iteration and
+ * it has to remain stable in the face of intervening gets and puts.
+ */
+ if ((ret = dbc->dbp->get(dbc->dbp, NULL, &_key, &_data, 0)) == 0) {
+ data.dptr = _data.data;
+ data.dsize = _data.size;
+ } else {
+ data.dptr = NULL;
+ data.dsize = 0;
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ }
+ }
+ return (data);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+datum
+__db_ndbm_firstkey(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ datum key;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ memset(&_data, 0, sizeof(DBT));
+
+ if ((ret = dbc->c_get(dbc, &_key, &_data, DB_FIRST)) == 0) {
+ key.dptr = _key.data;
+ key.dsize = _key.size;
+ } else {
+ key.dptr = NULL;
+ key.dsize = 0;
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ }
+ }
+ return (key);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+datum
+__db_ndbm_nextkey(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ datum key;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ memset(&_data, 0, sizeof(DBT));
+
+ if ((ret = dbc->c_get(dbc, &_key, &_data, DB_NEXT)) == 0) {
+ key.dptr = _key.data;
+ key.dsize = _key.size;
+ } else {
+ key.dptr = NULL;
+ key.dsize = 0;
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ }
+ }
+ return (key);
+}
+
+/*
+ * Returns:
+ * 0 on success
+ * <0 failure
+ */
+int
+__db_ndbm_delete(dbm, key)
+ DBM *dbm;
+ datum key;
+{
+ DBC *dbc;
+ DBT _key;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ _key.data = key.dptr;
+ _key.size = key.dsize;
+
+ if ((ret = dbc->dbp->del(dbc->dbp, NULL, &_key, 0)) == 0)
+ return (0);
+
+ if (ret == DB_NOTFOUND)
+ __os_set_errno(ENOENT);
+ else {
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ }
+ return (-1);
+}
+
+/*
+ * Returns:
+ * 0 on success
+ * <0 failure
+ * 1 if DBM_INSERT and entry exists
+ */
+int
+__db_ndbm_store(dbm, key, data, flags)
+ DBM *dbm;
+ datum key, data;
+ int flags;
+{
+ DBC *dbc;
+ DBT _key, _data;
+ int ret;
+
+ dbc = (DBC *)dbm;
+
+ memset(&_key, 0, sizeof(DBT));
+ _key.data = key.dptr;
+ _key.size = key.dsize;
+
+ memset(&_data, 0, sizeof(DBT));
+ _data.data = data.dptr;
+ _data.size = data.dsize;
+
+ if ((ret = dbc->dbp->put(dbc->dbp, NULL,
+ &_key, &_data, flags == DBM_INSERT ? DB_NOOVERWRITE : 0)) == 0)
+ return (0);
+
+ if (ret == DB_KEYEXIST)
+ return (1);
+
+ __os_set_errno(ret);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
+ return (-1);
+}
+
+int
+__db_ndbm_error(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ return (F_ISSET(dbc->dbp, DB_AM_DBM_ERROR));
+}
+
+int
+__db_ndbm_clearerr(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ F_CLR(dbc->dbp, DB_AM_DBM_ERROR);
+ return (0);
+}
+
+/*
+ * Returns:
+ * 1 if read-only
+ * 0 if not read-only
+ */
+int
+__db_ndbm_rdonly(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+
+ dbc = (DBC *)dbm;
+
+ return (F_ISSET(dbc->dbp, DB_AM_RDONLY) ? 1 : 0);
+}
+
+/*
+ * XXX
+ * We only have a single file descriptor that we can return, not two. Return
+ * the same one for both files. Hopefully, the user is using it for locking
+ * and picked one to use at random.
+ */
+int
+__db_ndbm_dirfno(dbm)
+ DBM *dbm;
+{
+ return (dbm_pagfno(dbm));
+}
+
+int
+__db_ndbm_pagfno(dbm)
+ DBM *dbm;
+{
+ DBC *dbc;
+ int fd;
+
+ dbc = (DBC *)dbm;
+
+ (void)dbc->dbp->fd(dbc->dbp, &fd);
+ return (fd);
+}
diff --git a/storage/bdb/dbreg/dbreg.c b/storage/bdb/dbreg/dbreg.c
new file mode 100644
index 00000000000..289fe67ed50
--- /dev/null
+++ b/storage/bdb/dbreg/dbreg.c
@@ -0,0 +1,450 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: dbreg.c,v 11.68 2002/08/28 19:05:27 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+/*
+ * The dbreg subsystem, as its name implies, registers database handles so
+ * that we can associate log messages with them without logging a filename
+ * or a full, unique DB ID. Instead, we assign each dbp an int32_t which is
+ * easy and cheap to log, and use this subsystem to map back and forth.
+ *
+ * Overview of how dbreg ids are managed:
+ *
+ * OPEN
+ * dbreg_setup (Creates FNAME struct.)
+ * dbreg_new_id (Assigns new ID to dbp and logs it. May be postponed
+ * until we attempt to log something else using that dbp, if the dbp
+ * was opened on a replication client.)
+ *
+ * CLOSE
+ * dbreg_close_id (Logs closure of dbp/revocation of ID.)
+ * dbreg_revoke_id (As name implies, revokes ID.)
+ * dbreg_teardown (Destroys FNAME.)
+ *
+ * RECOVERY
+ * dbreg_setup
+ * dbreg_assign_id (Assigns a particular ID we have in the log to a dbp.)
+ *
+ * sometimes: dbreg_revoke_id; dbreg_teardown
+ * other times: normal close path
+ *
+ * A note about locking:
+ *
+ * FNAME structures are referenced only by their corresponding dbp's
+ * until they have a valid id.
+ *
+ * Once they have a valid id, they must get linked into the log
+ * region list so they can get logged on checkpoints.
+ *
+ * An FNAME that may/does have a valid id must be accessed under
+ * protection of the fq_mutex, with the following exception:
+ *
+ * We don't want to have to grab the fq_mutex on every log
+ * record, and it should be safe not to do so when we're just
+ * looking at the id, because once allocated, the id should
+ * not change under a handle until the handle is closed.
+ *
+ * If a handle is closed during an attempt by another thread to
+ * log with it, well, the application doing the close deserves to
+ * go down in flames and a lot else is about to fail anyway.
+ *
+ * When in the course of logging we encounter an invalid id
+ * and go to allocate it lazily, we *do* need to check again
+ * after grabbing the mutex, because it's possible to race with
+ * another thread that has also decided that it needs to allocate
+ * an id lazily.
+ *
+ * See SR #5623 for further discussion of the new dbreg design.
+ */
+
+/*
+ * __dbreg_setup --
+ * Allocate and initialize an FNAME structure. The FNAME structures
+ * live in the log shared region and map one-to-one with open database handles.
+ * When the handle needs to be logged, the FNAME should have a valid fid
+ * allocated. If the handle currently isn't logged, it still has an FNAME
+ * entry. If we later discover that the handle needs to be logged, we can
+ * allocate an id for it later. (This happens when the handle is on a
+ * replication client that later becomes a master.)
+ *
+ * PUBLIC: int __dbreg_setup __P((DB *, const char *, u_int32_t));
+ */
+int
+__dbreg_setup(dbp, name, create_txnid)
+ DB *dbp;
+ const char *name;
+ u_int32_t create_txnid;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ int ret;
+ size_t len;
+ void *namep;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+
+ fnp = NULL;
+ namep = NULL;
+
+ /* Allocate an FNAME and, if necessary, a buffer for the name itself. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ if ((ret =
+ __db_shalloc(dblp->reginfo.addr, sizeof(FNAME), 0, &fnp)) != 0) {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+ }
+ memset(fnp, 0, sizeof(FNAME));
+ if (name != NULL) {
+ len = strlen(name) + 1;
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ len, 0, &namep)) != 0) {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+ }
+ fnp->name_off = R_OFFSET(&dblp->reginfo, namep);
+ memcpy(namep, name, len);
+ } else
+ fnp->name_off = INVALID_ROFF;
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * Fill in all the remaining info that we'll need later to register
+ * the file, if we use it for logging.
+ */
+ fnp->id = DB_LOGFILEID_INVALID;
+ fnp->s_type = dbp->type;
+ memcpy(fnp->ufid, dbp->fileid, DB_FILE_ID_LEN);
+ fnp->meta_pgno = dbp->meta_pgno;
+ fnp->create_txnid = create_txnid;
+
+ dbp->log_filename = fnp;
+
+ return (0);
+}
+
+/*
+ * __dbreg_teardown --
+ * Destroy a DB handle's FNAME struct.
+ *
+ * PUBLIC: int __dbreg_teardown __P((DB *));
+ */
+int
+__dbreg_teardown(dbp)
+ DB *dbp;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ fnp = dbp->log_filename;
+
+ /*
+ * We may not have an FNAME if we were never opened. This is not an
+ * error.
+ */
+ if (fnp == NULL)
+ return (0);
+
+ DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID);
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (fnp->name_off != INVALID_ROFF)
+ __db_shalloc_free(dblp->reginfo.addr,
+ R_ADDR(&dblp->reginfo, fnp->name_off));
+ __db_shalloc_free(dblp->reginfo.addr, fnp);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ dbp->log_filename = NULL;
+
+ return (0);
+}
+
+/*
+ * __dbreg_new_id --
+ * Assign an unused dbreg id to this database handle.
+ *
+ * PUBLIC: int __dbreg_new_id __P((DB *, DB_TXN *));
+ */
+int
+__dbreg_new_id(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBT fid_dbt, r_name;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB_LSN unused;
+ FNAME *fnp;
+ LOG *lp;
+ int32_t id;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ /* The fq_mutex protects the FNAME list and id management. */
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ /*
+ * It's possible that after deciding we needed to call this function,
+ * someone else allocated an ID before we grabbed the lock. Check
+ * to make sure there was no race and we have something useful to do.
+ */
+ if (fnp->id != DB_LOGFILEID_INVALID) {
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (0);
+ }
+
+ /* Get an unused ID from the free list. */
+ if ((ret = __dbreg_pop_id(dbenv, &id)) != 0)
+ goto err;
+
+ /* If no ID was found, allocate a new one. */
+ if (id == DB_LOGFILEID_INVALID)
+ id = lp->fid_max++;
+
+ fnp->id = id;
+
+ /* Hook the FNAME into the list of open files. */
+ SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
+
+ /*
+ * Log the registry. We should only request a new ID in situations
+ * where logging is reasonable.
+ */
+ DB_ASSERT(!F_ISSET(dbp, DB_AM_RECOVER));
+
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ memset(&r_name, 0, sizeof(r_name));
+ if (fnp->name_off != INVALID_ROFF) {
+ r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ r_name.size = (u_int32_t)strlen((char *)r_name.data) + 1;
+ }
+ fid_dbt.data = dbp->fileid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ if ((ret = __dbreg_register_log(dbenv, txn, &unused, 0, LOG_OPEN,
+ r_name.size == 0 ? NULL : &r_name, &fid_dbt, id, fnp->s_type,
+ fnp->meta_pgno, fnp->create_txnid)) != 0)
+ goto err;
+
+ DB_ASSERT(dbp->type == fnp->s_type);
+ DB_ASSERT(dbp->meta_pgno == fnp->meta_pgno);
+
+ if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0)
+ goto err;
+
+err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (ret);
+}
+
+/*
+ * __dbreg_assign_id --
+ * Assign a particular dbreg id to this database handle.
+ *
+ * PUBLIC: int __dbreg_assign_id __P((DB *, int32_t));
+ */
+int
+__dbreg_assign_id(dbp, id)
+ DB *dbp;
+ int32_t id;
+{
+ DB *close_dbp;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *close_fnp, *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ close_dbp = NULL;
+ close_fnp = NULL;
+
+ /* The fq_mutex protects the FNAME list and id management. */
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ /* We should only call this on DB handles that have no ID. */
+ DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID);
+
+ /*
+ * Make sure there isn't already a file open with this ID. There can
+ * be in recovery, if we're recovering across a point where an ID got
+ * reused.
+ */
+ if (__dbreg_id_to_fname(dblp, id, 1, &close_fnp) == 0) {
+ /*
+ * We want to save off any dbp we have open with this id.
+ * We can't safely close it now, because we hold the fq_mutex,
+ * but we should be able to rely on it being open in this
+ * process, and we're running recovery, so no other thread
+ * should muck with it if we just put off closing it until
+ * we're ready to return.
+ *
+ * Once we have the dbp, revoke its id; we're about to
+ * reuse it.
+ */
+ ret = __dbreg_id_to_db_int(dbenv, NULL, &close_dbp, id, 0, 0);
+ if (ret == ENOENT) {
+ ret = 0;
+ goto cont;
+ } else if (ret != 0)
+ goto err;
+
+ if ((ret = __dbreg_revoke_id(close_dbp, 1)) != 0)
+ goto err;
+ }
+
+ /*
+ * Remove this ID from the free list, if it's there, and make sure
+ * we don't allocate it anew.
+ */
+cont: if ((ret = __dbreg_pluck_id(dbenv, id)) != 0)
+ goto err;
+ if (id >= lp->fid_max)
+ lp->fid_max = id + 1;
+
+ /* Now go ahead and assign the id to our dbp. */
+ fnp->id = id;
+ SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
+
+ if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0)
+ goto err;
+
+err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ /* There's nothing useful that our caller can do if this close fails. */
+ if (close_dbp != NULL)
+ (void)close_dbp->close(close_dbp, DB_NOSYNC);
+
+ return (ret);
+}
+
+/*
+ * __dbreg_revoke_id --
+ * Take a log id away from a dbp, in preparation for closing it,
+ * but without logging the close.
+ *
+ * PUBLIC: int __dbreg_revoke_id __P((DB *, int));
+ */
+int
+__dbreg_revoke_id(dbp, have_lock)
+ DB *dbp;
+ int have_lock;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ LOG *lp;
+ int32_t id;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ /* If we lack an ID, this is a null-op. */
+ if (fnp == NULL || fnp->id == DB_LOGFILEID_INVALID)
+ return (0);
+
+ if (!have_lock)
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ id = fnp->id;
+ fnp->id = DB_LOGFILEID_INVALID;
+
+ /* Remove the FNAME from the list of open files. */
+ SH_TAILQ_REMOVE(&lp->fq, fnp, q, __fname);
+
+ /* Remove this id from the dbentry table. */
+ __dbreg_rem_dbentry(dblp, id);
+
+ /* Push this id onto the free list. */
+ ret = __dbreg_push_id(dbenv, id);
+
+ if (!have_lock)
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (ret);
+}
+
+/*
+ * __dbreg_close_id --
+ * Take a dbreg id away from a dbp that we're closing, and log
+ * the unregistry.
+ *
+ * PUBLIC: int __dbreg_close_id __P((DB *, DB_TXN *));
+ */
+int
+__dbreg_close_id(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBT fid_dbt, r_name, *dbtp;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB_LSN r_unused;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ /* If we lack an ID, this is a null-op. */
+ if (fnp == NULL || fnp->id == DB_LOGFILEID_INVALID)
+ return (0);
+
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ if (fnp->name_off == INVALID_ROFF)
+ dbtp = NULL;
+ else {
+ memset(&r_name, 0, sizeof(r_name));
+ r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ r_name.size =
+ (u_int32_t)strlen((char *)r_name.data) + 1;
+ dbtp = &r_name;
+ }
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ fid_dbt.data = fnp->ufid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ if ((ret = __dbreg_register_log(dbenv, txn,
+ &r_unused, 0, LOG_CLOSE, dbtp, &fid_dbt, fnp->id,
+ fnp->s_type, fnp->meta_pgno, TXN_INVALID)) != 0)
+ goto err;
+
+ ret = __dbreg_revoke_id(dbp, 1);
+
+err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (ret);
+}
diff --git a/storage/bdb/dbreg/dbreg.src b/storage/bdb/dbreg/dbreg.src
new file mode 100644
index 00000000000..18429471e82
--- /dev/null
+++ b/storage/bdb/dbreg/dbreg.src
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: dbreg.src,v 10.22 2002/03/27 04:31:44 bostic Exp $
+ */
+
+PREFIX __dbreg
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * Used for registering name/id translations at open or close.
+ * opcode: register or unregister
+ * name: file name
+ * fileid: unique file id
+ * ftype: file type
+ * meta_pgno: meta page number of the file
+ * id: transaction id of the subtransaction that created the fs object
+ */
+BEGIN register 2
+ARG opcode u_int32_t lu
+DBT name DBT s
+DBT uid DBT s
+ARG fileid int32_t ld
+ARG ftype DBTYPE lx
+ARG meta_pgno db_pgno_t lu
+ARG id u_int32_t lx
+END
diff --git a/storage/bdb/dbreg/dbreg_rec.c b/storage/bdb/dbreg/dbreg_rec.c
new file mode 100644
index 00000000000..ba3ba0e06d9
--- /dev/null
+++ b/storage/bdb/dbreg/dbreg_rec.c
@@ -0,0 +1,362 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: dbreg_rec.c,v 11.108 2002/08/14 20:04:25 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __dbreg_open_file __P((DB_ENV *,
+ DB_TXN *, __dbreg_register_args *, void *));
+
+/*
+ * PUBLIC: int __dbreg_register_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB_ENTRY *dbe;
+ DB_LOG *dblp;
+ DB *dbp;
+ __dbreg_register_args *argp;
+ int do_close, do_open, do_rem, ret, t_ret;
+
+ dblp = dbenv->lg_handle;
+ dbp = NULL;
+
+#ifdef DEBUG_RECOVER
+ REC_PRINT(__dbreg_register_print);
+#endif
+ do_open = do_close = 0;
+ if ((ret = __dbreg_register_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ switch (argp->opcode) {
+ case LOG_OPEN:
+ if ((DB_REDO(op) ||
+ op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES))
+ do_open = 1;
+ else
+ do_close = 1;
+ break;
+
+ case LOG_CLOSE:
+ if (DB_UNDO(op))
+ do_open = 1;
+ else
+ do_close = 1;
+ break;
+ case LOG_RCLOSE:
+ /*
+ * LOG_RCLOSE was generated by recover because a file
+ * was left open. The POPENFILES pass, which is run
+ * to open files to abort prepared transactions,
+ * may not include the open for this file so we
+ * open it here. Note that a normal CLOSE is
+ * not legal before the prepared transaction is
+ * committed or aborted.
+ */
+ if (DB_UNDO(op) || op == DB_TXN_POPENFILES)
+ do_open = 1;
+ else
+ do_close = 1;
+ break;
+
+ case LOG_CHECKPOINT:
+ if (DB_UNDO(op) ||
+ op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES)
+ do_open = 1;
+ break;
+ }
+
+ if (do_open) {
+ /*
+ * We must open the db even if the meta page is not
+ * yet written as we may be creating a subdatabase.
+ */
+ if (op == DB_TXN_OPENFILES && argp->opcode != LOG_CHECKPOINT)
+ F_SET(dblp, DBLOG_FORCE_OPEN);
+
+ /*
+ * During an abort or an open pass to recover prepared txns,
+ * we need to make sure that we use the same locker id on the
+ * open. We pass the txnid along to ensure this.
+ */
+ ret = __dbreg_open_file(dbenv,
+ op == DB_TXN_ABORT || op == DB_TXN_POPENFILES ?
+ argp->txnid : NULL, argp, info);
+ if (ret == ENOENT || ret == EINVAL) {
+ /*
+ * If this is an OPEN while rolling forward, it's
+ * possible that the file was recreated since last
+ * time we got here. In that case, we've got deleted
+ * set and probably shouldn't, so we need to check
+ * for that case and possibly retry.
+ */
+ if (op == DB_TXN_FORWARD_ROLL &&
+ argp->txnid != 0 &&
+ dblp->dbentry[argp->fileid].deleted) {
+ dblp->dbentry[argp->fileid].deleted = 0;
+ ret =
+ __dbreg_open_file(dbenv, NULL, argp, info);
+ }
+ ret = 0;
+ }
+ F_CLR(dblp, DBLOG_FORCE_OPEN);
+ }
+
+ if (do_close) {
+ /*
+ * If we are undoing an open, or redoing a close,
+ * then we need to close the file.
+ *
+ * If the file is deleted, then we can just ignore this close.
+ * Otherwise, we should usually have a valid dbp we should
+ * close or whose reference count should be decremented.
+ * However, if we shut down without closing a file, we may, in
+ * fact, not have the file open, and that's OK.
+ */
+ do_rem = 0;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ if (argp->fileid < dblp->dbentry_cnt) {
+ /*
+ * Typically, closes should match an open which means
+ * that if this is a close, there should be a valid
+ * entry in the dbentry table when we get here,
+ * however there is an exception. If this is an
+ * OPENFILES pass, then we may have started from
+ * a log file other than the first, and the
+ * corresponding open appears in an earlier file.
+ * We can ignore that case, but all others are errors.
+ */
+ dbe = &dblp->dbentry[argp->fileid];
+ if (dbe->dbp == NULL && !dbe->deleted) {
+ /* No valid entry here. */
+ if ((argp->opcode != LOG_CLOSE &&
+ argp->opcode != LOG_RCLOSE) ||
+ (op != DB_TXN_OPENFILES &&
+ op !=DB_TXN_POPENFILES)) {
+ __db_err(dbenv,
+ "Improper file close at %lu/%lu",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset);
+ ret = EINVAL;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ goto done;
+ }
+
+ /* We have either an open entry or a deleted entry. */
+ if ((dbp = dbe->dbp) != NULL) {
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ (void)__dbreg_revoke_id(dbp, 0);
+
+ /*
+ * If we're a replication client, it's
+ * possible to get here with a dbp that
+ * the user opened, but which we later
+ * assigned a fileid to. Be sure that
+ * we only close dbps that we opened in
+ * the recovery code; they should have
+ * DB_AM_RECOVER set.
+ *
+ * The only exception is if we're aborting
+ * in a normal environment; then we might
+ * get here with a non-AM_RECOVER database.
+ */
+ if (F_ISSET(dbp, DB_AM_RECOVER) ||
+ op == DB_TXN_ABORT)
+ do_rem = 1;
+ } else if (dbe->deleted) {
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ __dbreg_rem_dbentry(dblp, argp->fileid);
+ }
+ } else
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ if (do_rem) {
+ /*
+ * If we are undoing a create we'd better discard
+ * any buffers from the memory pool.
+ */
+ if (dbp != NULL && dbp->mpf != NULL && argp->id != 0) {
+ if ((ret = dbp->mpf->close(dbp->mpf,
+ DB_MPOOL_DISCARD)) != 0)
+ goto out;
+ dbp->mpf = NULL;
+ }
+
+ /*
+ * During recovery, all files are closed. On an abort,
+ * we only close the file if we opened it during the
+ * abort (DB_AM_RECOVER set), otherwise we simply do
+ * a __db_refresh. For the close case, if remove or
+ * rename has closed the file, don't request a sync,
+ * because the NULL mpf would be a problem.
+ */
+ if (dbp != NULL) {
+ if (op == DB_TXN_ABORT &&
+ !F_ISSET(dbp, DB_AM_RECOVER))
+ t_ret =
+ __db_refresh(dbp, NULL, DB_NOSYNC);
+ else
+ t_ret = dbp->close(dbp, DB_NOSYNC);
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+ }
+ }
+ }
+done: if (ret == 0)
+ *lsnp = argp->prev_lsn;
+out: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
+
+/*
+ * __dbreg_open_file --
+ * Called during log_register recovery. Make sure that we have an
+ * entry in the dbentry table for this ndx. Returns 0 on success,
+ * non-zero on error.
+ */
+static int
+__dbreg_open_file(dbenv, txn, argp, info)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ __dbreg_register_args *argp;
+ void *info;
+{
+ DB_ENTRY *dbe;
+ DB_LOG *lp;
+ DB *dbp;
+ u_int32_t id;
+
+ lp = (DB_LOG *)dbenv->lg_handle;
+ /*
+ * We never re-open temporary files. Temp files are only
+ * useful during aborts in which case the dbp was entered
+ * when the file was registered. During recovery, we treat
+ * temp files as properly deleted files, allowing the open to
+ * fail and not reporting any errors when recovery fails to
+ * get a valid dbp from __dbreg_id_to_db.
+ */
+ if (argp->name.size == 0) {
+ (void)__dbreg_add_dbentry(dbenv, lp, NULL, argp->fileid);
+ return (ENOENT);
+ }
+
+ /*
+ * When we're opening, we have to check that the name we are opening
+ * is what we expect. If it's not, then we close the old file and
+ * open the new one.
+ */
+ MUTEX_THREAD_LOCK(dbenv, lp->mutexp);
+ if (argp->fileid < lp->dbentry_cnt)
+ dbe = &lp->dbentry[argp->fileid];
+ else
+ dbe = NULL;
+
+ if (dbe != NULL) {
+ if (dbe->deleted) {
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+ return (ENOENT);
+ }
+ if ((dbp = dbe->dbp) != NULL) {
+ if (dbp->meta_pgno != argp->meta_pgno ||
+ memcmp(dbp->fileid,
+ argp->uid.data, DB_FILE_ID_LEN) != 0) {
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+ (void)__dbreg_revoke_id(dbp, 0);
+ if (F_ISSET(dbp, DB_AM_RECOVER))
+ dbp->close(dbp, DB_NOSYNC);
+ goto reopen;
+ }
+
+ /*
+ * We should only get here if we already have the
+ * dbp from an openfiles pass, in which case, what's
+ * here had better be the same dbp.
+ */
+ DB_ASSERT(dbe->dbp == dbp);
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+
+ /*
+ * This is a successful open. We need to record that
+ * in the txnlist so that we know how to handle the
+ * subtransaction that created the file system object.
+ */
+ if (argp->id != TXN_INVALID &&
+ __db_txnlist_update(dbenv, info,
+ argp->id, TXN_EXPECTED, NULL) == TXN_NOTFOUND)
+ (void)__db_txnlist_add(dbenv,
+ info, argp->id, TXN_EXPECTED, NULL);
+ return (0);
+ }
+ }
+
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+
+ /*
+ * We are about to pass a recovery txn pointer into the main library.
+ * We need to make sure that any accessed fields are set appropriately.
+ */
+reopen: if (txn != NULL) {
+ id = txn->txnid;
+ memset(txn, 0, sizeof(DB_TXN));
+ txn->txnid = id;
+ txn->mgrp = dbenv->tx_handle;
+ }
+
+ return (__dbreg_do_open(dbenv, txn, lp, argp->uid.data, argp->name.data,
+ argp->ftype, argp->fileid, argp->meta_pgno, info, argp->id));
+}
diff --git a/storage/bdb/dbreg/dbreg_util.c b/storage/bdb/dbreg/dbreg_util.c
new file mode 100644
index 00000000000..0db5c640adb
--- /dev/null
+++ b/storage/bdb/dbreg/dbreg_util.c
@@ -0,0 +1,797 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: dbreg_util.c,v 11.22 2002/09/10 02:43:10 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __dbreg_check_master __P((DB_ENV *, u_int8_t *, char *));
+
+/*
+ * __dbreg_add_dbentry --
+ * Adds a DB entry to the dbreg DB entry table.
+ *
+ * PUBLIC: int __dbreg_add_dbentry __P((DB_ENV *, DB_LOG *, DB *, int32_t));
+ */
+int
+__dbreg_add_dbentry(dbenv, dblp, dbp, ndx)
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB *dbp;
+ int32_t ndx;
+{
+ int32_t i;
+ int ret;
+
+ ret = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+
+ /*
+ * Check if we need to grow the table. Note, ndx is 0-based (the
+ * index into the DB entry table) and dbentry_cnt is 1-based, the
+ * number of available slots.
+ */
+ if (dblp->dbentry_cnt <= ndx) {
+ if ((ret = __os_realloc(dbenv,
+ (ndx + DB_GROW_SIZE) * sizeof(DB_ENTRY),
+ &dblp->dbentry)) != 0)
+ goto err;
+
+ /* Initialize the new entries. */
+ for (i = dblp->dbentry_cnt; i < ndx + DB_GROW_SIZE; i++) {
+ dblp->dbentry[i].dbp = NULL;
+ dblp->dbentry[i].deleted = 0;
+ }
+ dblp->dbentry_cnt = i;
+ }
+
+ /* A NULL dbp marks the slot as referring to a deleted file. */
+ DB_ASSERT(dblp->dbentry[ndx].dbp == NULL);
+ dblp->dbentry[ndx].deleted = dbp == NULL;
+ dblp->dbentry[ndx].dbp = dbp;
+
+err: MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
+
+/*
+ * __dbreg_rem_dbentry
+ * Remove an entry from the DB entry table.
+ *
+ * PUBLIC: void __dbreg_rem_dbentry __P((DB_LOG *, int32_t));
+ */
+void
+__dbreg_rem_dbentry(dblp, ndx)
+ DB_LOG *dblp;
+ int32_t ndx;
+{
+ /* Clear the slot under the thread lock so concurrent lookups are safe. */
+ MUTEX_THREAD_LOCK(dblp->dbenv, dblp->mutexp);
+ dblp->dbentry[ndx].dbp = NULL;
+ dblp->dbentry[ndx].deleted = 0;
+ MUTEX_THREAD_UNLOCK(dblp->dbenv, dblp->mutexp);
+}
+
+/*
+ * __dbreg_open_files --
+ * Put a LOG_CHECKPOINT log record for each open database.
+ *
+ * PUBLIC: int __dbreg_open_files __P((DB_ENV *));
+ */
+int
+__dbreg_open_files(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ DB_LSN r_unused;
+ DBT *dbtp, fid_dbt, t;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ ret = 0;
+
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ /* An INVALID_ROFF name offset means there is no file name to log. */
+ if (fnp->name_off == INVALID_ROFF)
+ dbtp = NULL;
+ else {
+ memset(&t, 0, sizeof(t));
+ t.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ t.size = (u_int32_t)strlen(t.data) + 1;
+ dbtp = &t;
+ }
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ fid_dbt.data = fnp->ufid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ /*
+ * Output LOG_CHECKPOINT records which will be
+ * processed during the OPENFILES pass of recovery.
+ * At the end of recovery we want to output the
+ * files that were open so that a future recovery
+ * run will have the correct files open during
+ * a backward pass. For this we output LOG_RCLOSE
+ * records so that the files will be closed on
+ * the forward pass.
+ */
+ if ((ret = __dbreg_register_log(dbenv,
+ NULL, &r_unused, 0,
+ F_ISSET(dblp, DBLOG_RECOVER) ? LOG_RCLOSE : LOG_CHECKPOINT,
+ dbtp, &fid_dbt, fnp->id, fnp->s_type, fnp->meta_pgno,
+ TXN_INVALID)) != 0)
+ break;
+ }
+
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ return (ret);
+}
+
+/*
+ * __dbreg_close_files --
+ * Close files that were opened by the recovery daemon. We sync the
+ * file, unless its mpf pointer has been NULLed by a db_remove or
+ * db_rename. We may not have flushed the log_register record that
+ * closes the file.
+ *
+ * PUBLIC: int __dbreg_close_files __P((DB_ENV *));
+ */
+int
+__dbreg_close_files(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ DB *dbp;
+ int ret, t_ret;
+ int32_t i;
+
+ /* If we haven't initialized logging, we have nothing to do. */
+ if (!LOGGING_ON(dbenv))
+ return (0);
+
+ dblp = dbenv->lg_handle;
+ ret = 0;
+ /* "ret" keeps the first close failure; later failures are dropped. */
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ for (i = 0; i < dblp->dbentry_cnt; i++) {
+ /* We only want to close dbps that recovery opened. */
+ if ((dbp = dblp->dbentry[i].dbp) != NULL &&
+ F_ISSET(dbp, DB_AM_RECOVER)) {
+ /*
+ * It's unsafe to call DB->close while holding the
+ * thread lock, because we'll call __dbreg_rem_dbentry
+ * and grab it again.
+ *
+ * Just drop it. Since dbreg ids go monotonically
+ * upward, concurrent opens should be safe, and the
+ * user should have no business closing files while
+ * we're in this loop anyway--we're in the process of
+ * making all outstanding dbps invalid.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ if ((t_ret = dbp->close(dbp,
+ dbp->mpf == NULL ? DB_NOSYNC : 0)) != 0 && ret == 0)
+ ret = t_ret;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ }
+ dblp->dbentry[i].deleted = 0;
+ dblp->dbentry[i].dbp = NULL;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
+
+/*
+ * __dbreg_nofiles --
+ * Check that there are no open files in the process local table.
+ * Returns 0 if there are no files and EINVAL if there are any.
+ *
+ * PUBLIC: int __dbreg_nofiles __P((DB_ENV *));
+ */
+int
+__dbreg_nofiles(dbenv)
+ DB_ENV *dbenv;
+{
+ DB *dbp;
+ DB_LOG *dblp;
+ int ret;
+ int32_t i;
+
+ /* If we haven't initialized logging, we have nothing to do. */
+ if (!LOGGING_ON(dbenv))
+ return (0);
+
+ dblp = dbenv->lg_handle;
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ for (i = 0; i < dblp->dbentry_cnt; i++) {
+ /* Handles opened by recovery itself do not count as "open". */
+ if ((dbp = dblp->dbentry[i].dbp) != NULL &&
+ !F_ISSET(dbp, DB_AM_RECOVER)) {
+ ret = EINVAL;
+ break;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
+
+/*
+ * __dbreg_id_to_db --
+ * Return the DB corresponding to the specified dbreg id.
+ *
+ * PUBLIC: int __dbreg_id_to_db __P((DB_ENV *, DB_TXN *, DB **, int32_t, int));
+ */
+int
+__dbreg_id_to_db(dbenv, txn, dbpp, ndx, inc)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB **dbpp;
+ int32_t ndx;
+ int inc;
+{
+ /* Convenience wrapper: always attempt to open a file with no mapping. */
+ return (__dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, 1));
+}
+
+/*
+ * __dbreg_id_to_db_int --
+ * Return the DB corresponding to the specified dbreg id. The internal
+ * version takes a final parameter that indicates whether we should attempt
+ * to open the file if no mapping is found. During recovery, the recovery
+ * routines all want to try to open the file (and this is called from
+ * __dbreg_id_to_db), however, if we have a multi-process environment where
+ * some processes may not have the files open (e.g., XA), then we also get
+ * called from __dbreg_assign_id and it's OK if there is no mapping.
+ *
+ * PUBLIC: int __dbreg_id_to_db_int __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB **, int32_t, int, int));
+ */
+int
+__dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB **dbpp;
+ int32_t ndx;
+ int inc, tryopen;
+{
+ DB_LOG *dblp;
+ FNAME *fname;
+ int ret;
+ char *name;
+
+ ret = 0;
+ dblp = dbenv->lg_handle;
+ /* "inc" is currently unused; silence the compiler. */
+ COMPQUIET(inc, 0);
+
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+
+ /*
+ * Under XA, a process different than the one issuing DB operations
+ * may abort a transaction. In this case, the "recovery" routines
+ * are run by a process that does not necessarily have the file open,
+ * so we must open the file explicitly.
+ */
+ if (ndx >= dblp->dbentry_cnt ||
+ (!dblp->dbentry[ndx].deleted && dblp->dbentry[ndx].dbp == NULL)) {
+ if (!tryopen || F_ISSET(dblp, DBLOG_RECOVER)) {
+ ret = ENOENT;
+ goto err;
+ }
+
+ /*
+ * __dbreg_id_to_fname acquires the region's fq_mutex,
+ * which we can't safely acquire while we hold the thread lock.
+ * We no longer need it anyway--the dbentry table didn't
+ * have what we needed.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+
+ if (__dbreg_id_to_fname(dblp, ndx, 0, &fname) != 0)
+ /*
+ * With transactional opens, we may actually have
+ * closed this file in the transaction in which
+ * case this will fail too. Then it's up to the
+ * caller to reopen the file.
+ */
+ return (ENOENT);
+
+ /*
+ * Note that we're relying on fname not to change, even
+ * though we released the mutex that protects it (fq_mutex)
+ * inside __dbreg_id_to_fname. This should be a safe
+ * assumption, because the other process that has the file
+ * open shouldn't be closing it while we're trying to abort.
+ */
+ name = R_ADDR(&dblp->reginfo, fname->name_off);
+
+ /*
+ * At this point, we are not holding the thread lock, so exit
+ * directly instead of going through the exit code at the
+ * bottom. If the __dbreg_do_open succeeded, then we don't need
+ * to do any of the remaining error checking at the end of this
+ * routine.
+ * XXX I am sending a NULL txnlist and 0 txnid which may be
+ * completely broken ;(
+ */
+ if ((ret = __dbreg_do_open(dbenv, txn, dblp,
+ fname->ufid, name, fname->s_type,
+ ndx, fname->meta_pgno, NULL, 0)) != 0)
+ return (ret);
+
+ /* NOTE(review): dbentry[ndx] is read here without the thread
+ * lock; presumed safe because __dbreg_do_open just filled it. */
+ *dbpp = dblp->dbentry[ndx].dbp;
+ return (0);
+ }
+
+ /*
+ * Return DB_DELETED if the file has been deleted (it's not an error).
+ */
+ if (dblp->dbentry[ndx].deleted) {
+ ret = DB_DELETED;
+ goto err;
+ }
+
+ /* It's an error if we don't have a corresponding writeable DB. */
+ if ((*dbpp = dblp->dbentry[ndx].dbp) == NULL)
+ ret = ENOENT;
+
+err: MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
+
+/*
+ * __dbreg_id_to_fname --
+ * Traverse the shared-memory region looking for the entry that
+ * matches the passed dbreg id. Returns 0 on success; -1 on error.
+ *
+ * PUBLIC: int __dbreg_id_to_fname __P((DB_LOG *, int32_t, int, FNAME **));
+ */
+int
+__dbreg_id_to_fname(dblp, lid, have_lock, fnamep)
+ DB_LOG *dblp;
+ int32_t lid;
+ int have_lock;
+ FNAME **fnamep;
+{
+ DB_ENV *dbenv;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ ret = -1;
+
+ /* Linear scan of the shared file-name queue, under fq_mutex
+ * unless the caller already holds it. */
+ if (!have_lock)
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (fnp->id == lid) {
+ *fnamep = fnp;
+ ret = 0;
+ break;
+ }
+ }
+ if (!have_lock)
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ return (ret);
+}
+/*
+ * __dbreg_fid_to_fname --
+ * Traverse the shared-memory region looking for the entry that
+ * matches the passed file unique id. Returns 0 on success; -1 on error.
+ *
+ * PUBLIC: int __dbreg_fid_to_fname __P((DB_LOG *, u_int8_t *, int, FNAME **));
+ */
+int
+__dbreg_fid_to_fname(dblp, fid, have_lock, fnamep)
+ DB_LOG *dblp;
+ u_int8_t *fid;
+ int have_lock;
+ FNAME **fnamep;
+{
+ DB_ENV *dbenv;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ ret = -1;
+
+ /* Same scan as __dbreg_id_to_fname, but keyed on the unique file id. */
+ if (!have_lock)
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (memcmp(fnp->ufid, fid, DB_FILE_ID_LEN) == 0) {
+ *fnamep = fnp;
+ ret = 0;
+ break;
+ }
+ }
+ if (!have_lock)
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ return (ret);
+}
+
+/*
+ * __dbreg_get_name
+ *
+ * Interface to get name of registered files. This is mainly diagnostic
+ * and the name passed could be transient unless there is something
+ * ensuring that the file cannot be closed.
+ *
+ * PUBLIC: int __dbreg_get_name __P((DB_ENV *, u_int8_t *, char **));
+ */
+int
+__dbreg_get_name(dbenv, fid, namep)
+ DB_ENV *dbenv;
+ u_int8_t *fid;
+ char **namep;
+{
+ DB_LOG *dblp;
+ FNAME *fname;
+
+ dblp = dbenv->lg_handle;
+
+ /* The returned name points into the shared region: it is only
+ * valid while the file remains registered (see header comment). */
+ if (dblp != NULL && __dbreg_fid_to_fname(dblp, fid, 0, &fname) == 0) {
+ *namep = R_ADDR(&dblp->reginfo, fname->name_off);
+ return (0);
+ }
+
+ return (-1);
+}
+
+/*
+ * __dbreg_do_open --
+ * Open files referenced in the log. This is the part of the open that
+ * is not protected by the thread mutex.
+ * PUBLIC: int __dbreg_do_open __P((DB_ENV *, DB_TXN *, DB_LOG *, u_int8_t *,
+ * PUBLIC: char *, DBTYPE, int32_t, db_pgno_t, void *, u_int32_t));
+ */
+int
+__dbreg_do_open(dbenv,
+ txn, lp, uid, name, ftype, ndx, meta_pgno, info, id)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_LOG *lp;
+ u_int8_t *uid;
+ char *name;
+ DBTYPE ftype;
+ int32_t ndx;
+ db_pgno_t meta_pgno;
+ void *info;
+ u_int32_t id;
+{
+ DB *dbp;
+ int ret;
+ u_int32_t cstat;
+
+ if ((ret = db_create(&dbp, lp->dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * We can open files under a number of different scenarios.
+ * First, we can open a file during a normal txn_abort, if that file
+ * was opened and closed during the transaction (as is the master
+ * database of a sub-database).
+ * Second, we might be aborting a transaction in XA and not have
+ * it open in the process that is actually doing the abort.
+ * Third, we might be in recovery.
+ * In case 3, there is no locking, so there is no issue.
+ * In cases 1 and 2, we are guaranteed to already hold any locks
+ * that we need, since we're still in the same transaction, so by
+ * setting DB_AM_RECOVER, we guarantee that we don't log and that
+ * we don't try to acquire locks on behalf of a different locker id.
+ */
+ F_SET(dbp, DB_AM_RECOVER);
+ /* A non-base meta page number means this is a subdatabase. */
+ if (meta_pgno != PGNO_BASE_MD) {
+ memcpy(dbp->fileid, uid, DB_FILE_ID_LEN);
+ dbp->meta_pgno = meta_pgno;
+ }
+ dbp->type = ftype;
+ if ((ret = __db_dbopen(dbp, txn, name, NULL,
+ DB_ODDFILESIZE, __db_omode("rw----"), meta_pgno)) == 0) {
+
+ /*
+ * Verify that we are opening the same file that we were
+ * referring to when we wrote this log record.
+ */
+ if ((meta_pgno != PGNO_BASE_MD &&
+ __dbreg_check_master(dbenv, uid, name) != 0) ||
+ memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
+ cstat = TXN_IGNORE;
+ else
+ cstat = TXN_EXPECTED;
+
+ /* Assign the specific dbreg id to this dbp. */
+ if ((ret = __dbreg_assign_id(dbp, ndx)) != 0)
+ goto err;
+
+ /*
+ * If we successfully opened this file, then we need to
+ * convey that information to the txnlist so that we
+ * know how to handle the subtransaction that created
+ * the file system object.
+ */
+ if (id != TXN_INVALID) {
+ if ((ret = __db_txnlist_update(dbenv,
+ info, id, cstat, NULL)) == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, id, cstat, NULL);
+ else if (ret > 0)
+ ret = 0;
+ }
+ /* "err" is only reachable from the successful-open branch,
+ * where cstat has always been assigned above. */
+err: if (cstat == TXN_IGNORE)
+ goto not_right;
+ return (ret);
+ } else {
+ /* Record that the open failed in the txnlist. */
+ if (id != TXN_INVALID && (ret = __db_txnlist_update(dbenv,
+ info, id, TXN_UNEXPECTED, NULL)) == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, id, TXN_UNEXPECTED, NULL);
+ }
+not_right:
+ /* The wrong (or no) file was opened: discard the handle, mark the
+ * slot deleted, and report ENOENT regardless of the underlying error. */
+ (void)dbp->close(dbp, 0);
+ /* Add this file as deleted. */
+ (void)__dbreg_add_dbentry(dbenv, lp, NULL, ndx);
+ return (ENOENT);
+}
+
+/*
+ * __dbreg_check_master --
+ * Open the named file as a master (base) database and verify that its
+ * file id matches the expected unique id. Returns 0 on match, EINVAL
+ * on mismatch, or the error from db_create/__db_dbopen.
+ */
+static int
+__dbreg_check_master(dbenv, uid, name)
+ DB_ENV *dbenv;
+ u_int8_t *uid;
+ char *name;
+{
+ DB *dbp;
+ int ret;
+
+ ret = 0;
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ dbp->type = DB_BTREE;
+ F_SET(dbp, DB_AM_RECOVER);
+ ret = __db_dbopen(dbp,
+ NULL, name, NULL, 0, __db_omode("rw----"), PGNO_BASE_MD);
+
+ if (ret == 0 && memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
+ ret = EINVAL;
+
+ (void)dbp->close(dbp, 0);
+ return (ret);
+}
+
+/*
+ * __dbreg_lazy_id --
+ * When a replication client gets upgraded to being a replication master,
+ * it may have database handles open that have not been assigned an ID, but
+ * which have become legal to use for logging.
+ *
+ * This function lazily allocates a new ID for such a function, in a
+ * new transaction created for the purpose. We need to do this in a new
+ * transaction because we definitely wish to commit the dbreg_register, but
+ * at this point we have no way of knowing whether the log record that incited
+ * us to call this will be part of a committed transaction.
+ *
+ * PUBLIC: int __dbreg_lazy_id __P((DB *));
+ */
+int
+__dbreg_lazy_id(dbp)
+ DB *dbp;
+{
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /* Only a replication master may lazily assign ids. */
+ DB_ASSERT(F_ISSET(dbenv, DB_ENV_REP_MASTER));
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_new_id(dbp, txn)) != 0) {
+ (void)txn->abort(txn);
+ return (ret);
+ }
+
+ return (txn->commit(txn, DB_TXN_NOSYNC));
+}
+
+/*
+ * __dbreg_push_id and __dbreg_pop_id --
+ * Dbreg ids from closed files are kept on a stack in shared memory
+ * for recycling. (We want to reuse them as much as possible because each
+ * process keeps open files in an array by ID.) Push them to the stack and
+ * pop them from it, managing memory as appropriate.
+ *
+ * The stack is protected by the fq_mutex, and in both functions we assume
+ * that this is already locked.
+ *
+ * PUBLIC: int __dbreg_push_id __P((DB_ENV *, int32_t));
+ * PUBLIC: int __dbreg_pop_id __P((DB_ENV *, int32_t *));
+ */
+int
+__dbreg_push_id(dbenv, id)
+ DB_ENV *dbenv;
+ int32_t id;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int32_t *stack, *newstack;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ if (lp->free_fid_stack != INVALID_ROFF)
+ stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack);
+ else
+ stack = NULL;
+
+ /* Check if we have room on the stack. */
+ if (lp->free_fids_alloced <= lp->free_fids + 1) {
+ /* Grow the shared-memory stack by 20 slots under the region lock. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ (lp->free_fids_alloced + 20) * sizeof(u_int32_t), 0,
+ &newstack)) != 0) {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+ }
+
+ memcpy(newstack, stack,
+ lp->free_fids_alloced * sizeof(u_int32_t));
+ lp->free_fid_stack = R_OFFSET(&dblp->reginfo, newstack);
+ lp->free_fids_alloced += 20;
+
+ if (stack != NULL)
+ __db_shalloc_free(dblp->reginfo.addr, stack);
+
+ stack = newstack;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+ DB_ASSERT(stack != NULL);
+ stack[lp->free_fids++] = id;
+ return (0);
+}
+
+/* __dbreg_pop_id -- see the comment above __dbreg_push_id. */
+int
+__dbreg_pop_id(dbenv, id)
+ DB_ENV *dbenv;
+ int32_t *id;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int32_t *stack;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /* Do we have anything to pop? */
+ if (lp->free_fid_stack != INVALID_ROFF && lp->free_fids > 0) {
+ stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack);
+ *id = stack[--lp->free_fids];
+ } else
+ *id = DB_LOGFILEID_INVALID;
+
+ return (0);
+}
+
+/*
+ * __dbreg_pluck_id --
+ * Remove a particular dbreg id from the stack of free ids. This is
+ * used when we open a file, as in recovery, with a specific ID that might
+ * be on the stack.
+ *
+ * Returns success whether or not the particular id was found, and like
+ * push and pop, assumes that the fq_mutex is locked.
+ *
+ * PUBLIC: int __dbreg_pluck_id __P((DB_ENV *, int32_t));
+ */
+int
+__dbreg_pluck_id(dbenv, id)
+ DB_ENV *dbenv;
+ int32_t id;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int32_t *stack;
+ int i;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /* Do we have anything to look at? */
+ if (lp->free_fid_stack != INVALID_ROFF) {
+ stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack);
+ for (i = 0; i < lp->free_fids; i++)
+ if (id == stack[i]) {
+ /*
+ * Found it. Overwrite it with the top
+ * id (which may harmlessly be itself),
+ * and shorten the stack by one.
+ */
+ stack[i] = stack[lp->free_fids - 1];
+ lp->free_fids--;
+ return (0);
+ }
+ }
+
+ /* Not found is also success, per the header comment. */
+ return (0);
+}
+
+#ifdef DEBUG
+/*
+ * __dbreg_print_dblist --
+ * Display the list of files.
+ *
+ * PUBLIC: void __dbreg_print_dblist __P((DB_ENV *));
+ */
+void
+__dbreg_print_dblist(dbenv)
+ DB_ENV *dbenv;
+{
+ DB *dbp;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ LOG *lp;
+ int del, first;
+ char *name;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ for (first = 1, fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ /* Print the column header once, only if the list is non-empty. */
+ if (first) {
+ first = 0;
+ __db_err(dbenv,
+ "ID\t\t\tName\tType\tPgno\tTxnid\tDBP-info");
+ }
+ if (fnp->name_off == INVALID_ROFF)
+ name = "";
+ else
+ name = R_ADDR(&dblp->reginfo, fnp->name_off);
+
+ /* Ids beyond the local table have no process-local dbp. */
+ dbp = fnp->id >= dblp->dbentry_cnt ? NULL :
+ dblp->dbentry[fnp->id].dbp;
+ del = fnp->id >= dblp->dbentry_cnt ? 0 :
+ dblp->dbentry[fnp->id].deleted;
+ __db_err(dbenv, "%ld\t%s\t\t\t%s\t%lu\t%lx\t%s %d %lx %lx",
+ (long)fnp->id, name,
+ __db_dbtype_to_string(fnp->s_type),
+ (u_long)fnp->meta_pgno, (u_long)fnp->create_txnid,
+ dbp == NULL ? "No DBP" : "DBP", del, P_TO_ULONG(dbp),
+ dbp == NULL ? 0 : dbp->flags);
+ }
+
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+}
diff --git a/storage/bdb/dist/Makefile.in b/storage/bdb/dist/Makefile.in
new file mode 100644
index 00000000000..a7cc0e11f34
--- /dev/null
+++ b/storage/bdb/dist/Makefile.in
@@ -0,0 +1,1397 @@
+# $Id: Makefile.in,v 11.175 2002/08/29 14:22:20 margo Exp $
+
+srcdir= @srcdir@/..
+builddir=.
+
+##################################################
+# Installation directories and permissions.
+##################################################
+prefix= @prefix@
+exec_prefix=@exec_prefix@
+bindir= @bindir@
+includedir=@includedir@
+libdir= @libdir@
+docdir= $(prefix)/docs
+
+dmode= 755
+emode= 555
+fmode= 444
+
+transform=@program_transform_name@
+
+##################################################
+# Paths for standard user-level commands.
+##################################################
+SHELL= @db_cv_path_sh@
+ar= @db_cv_path_ar@
+chmod= @db_cv_path_chmod@
+cp= @db_cv_path_cp@
+ln= @db_cv_path_ln@
+mkdir= @db_cv_path_mkdir@
+ranlib= @db_cv_path_ranlib@
+rm= @db_cv_path_rm@
+rpm= @db_cv_path_rpm@
+strip= @db_cv_path_strip@
+
+##################################################
+# General library information.
+##################################################
+DEF_LIB= @DEFAULT_LIB@
+DEF_LIB_CXX= @DEFAULT_LIB_CXX@
+INSTALLER= @INSTALLER@
+LIBTOOL= @LIBTOOL@
+
+POSTLINK= @POSTLINK@
+SOLINK= @MAKEFILE_SOLINK@
+SOFLAGS= @SOFLAGS@
+SOMAJOR= @DB_VERSION_MAJOR@
+SOVERSION= @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@
+
+##################################################
+# C API.
+##################################################
+CPPFLAGS= -I$(builddir) -I$(srcdir) -I$(srcdir)/dbinc @CPPFLAGS@
+CFLAGS= -c $(CPPFLAGS) @CFLAGS@
+CC= @MAKEFILE_CC@
+CCLINK= @MAKEFILE_CCLINK@
+
+LDFLAGS= @LDFLAGS@
+LIBS= @LIBS@
+LIBSO_LIBS= @LIBSO_LIBS@
+
+libdb= libdb.a
+libso_base= libdb
+libso= $(libso_base)-$(SOVERSION).@SOSUFFIX@
+libso_static= $(libso_base)-$(SOVERSION).a
+libso_target= $(libso_base)-$(SOVERSION).la
+libso_default= $(libso_base).@SOSUFFIX@
+libso_major= $(libso_base)-$(SOMAJOR).@SOSUFFIX@
+
+##################################################
+# C++ API.
+#
+# C++ support is optional, and can be built with static or shared libraries.
+##################################################
+CXXFLAGS= -c $(CPPFLAGS) @CXXFLAGS@
+CXX= @MAKEFILE_CXX@
+CXXLINK= @MAKEFILE_CXXLINK@
+XSOLINK= @MAKEFILE_XSOLINK@
+LIBXSO_LIBS= @LIBXSO_LIBS@
+
+libcxx= libdb_cxx.a
+libxso_base= libdb_cxx
+libxso= $(libxso_base)-$(SOVERSION).@SOSUFFIX@
+libxso_static= $(libxso_base)-$(SOVERSION).a
+libxso_target= $(libxso_base)-$(SOVERSION).la
+libxso_default= $(libxso_base).@SOSUFFIX@
+libxso_major= $(libxso_base)-$(SOMAJOR).@SOSUFFIX@
+
+##################################################
+# Java API.
+#
+# Java support is optional and requires shared libraries.
+##################################################
+CLASSPATH= $(JAVA_CLASSTOP)
+LIBJSO_LIBS= @LIBJSO_LIBS@
+
+JAR= @JAR@
+JAVAC= env CLASSPATH="$(CLASSPATH)" @JAVAC@
+JAVACFLAGS= @JAVACFLAGS@
+JAVA_CLASSTOP= ./classes
+JAVA_RPCCLASSES=./classes.rpc
+JAVA_SRCDIR= $(srcdir)/java/src
+JAVA_DBREL= com/sleepycat/db
+JAVA_EXREL= com/sleepycat/examples
+JAVA_RPCREL= com/sleepycat/db/rpcserver
+JAVA_DBDIR= $(JAVA_SRCDIR)/$(JAVA_DBREL)
+JAVA_EXDIR= $(JAVA_SRCDIR)/$(JAVA_EXREL)
+JAVA_RPCDIR= $(srcdir)/rpc_server/java
+
+libj_jarfile= db.jar
+libj_exjarfile= dbexamples.jar
+rpc_jarfile= dbsvc.jar
+libjso_base= libdb_java
+libjso= $(libjso_base)-$(SOVERSION).@JMODSUFFIX@
+libjso_static= $(libjso_base)-$(SOVERSION).a
+libjso_target= $(libjso_base)-$(SOVERSION).la
+libjso_default= $(libjso_base).@JMODSUFFIX@
+libjso_major= $(libjso_base)-$(SOMAJOR).@JMODSUFFIX@
+libjso_g= $(libjso_base)-$(SOVERSION)_g.@JMODSUFFIX@
+
+##################################################
+# TCL API.
+#
+# Tcl support is optional and requires shared libraries.
+##################################################
+TCFLAGS= @TCFLAGS@
+LIBTSO_LIBS= @LIBTSO_LIBS@
+libtso_base= libdb_tcl
+libtso= $(libtso_base)-$(SOVERSION).@MODSUFFIX@
+libtso_static= $(libtso_base)-$(SOVERSION).a
+libtso_target= $(libtso_base)-$(SOVERSION).la
+libtso_default= $(libtso_base).@MODSUFFIX@
+libtso_major= $(libtso_base)-$(SOMAJOR).@MODSUFFIX@
+
+##################################################
+# db_dump185 UTILITY
+#
+# The db_dump185 application should be compiled using the system's db.h file
+# (which should be a DB 1.85/1.86 include file), and the system's 1.85/1.86
+# object library. To include the right db.h, don't include -I$(builddir) on
+# the compile line. You may also need to add a local include directory and
+# local libraries, for example. Do that by adding -I options to the DB185INC
+# line, and -l options to the DB185LIB line.
+##################################################
+DB185INC= -c @CFLAGS@ -I$(srcdir) @CPPFLAGS@
+DB185LIB=
+
+##################################################
+# NOTHING BELOW THIS LINE SHOULD EVER NEED TO BE MODIFIED.
+##################################################
+
+##################################################
+# Object and utility lists.
+##################################################
+C_OBJS= @ADDITIONAL_OBJS@ @LTLIBOBJS@ @RPC_CLIENT_OBJS@ \
+ bt_compare@o@ bt_conv@o@ bt_curadj@o@ bt_cursor@o@ bt_delete@o@ \
+ bt_method@o@ bt_open@o@ bt_put@o@ bt_rec@o@ bt_reclaim@o@ \
+ bt_recno@o@ bt_rsearch@o@ bt_search@o@ bt_split@o@ bt_stat@o@ \
+ bt_upgrade@o@ bt_verify@o@ btree_auto@o@ crdel_auto@o@ \
+ crdel_rec@o@ db@o@ db_am@o@ db_auto@o@ db_byteorder@o@ db_cam@o@ \
+ db_conv@o@ db_dispatch@o@ db_dup@o@ db_err@o@ db_getlong@o@ \
+ db_idspace@o@ db_iface@o@ db_join@o@ db_log2@o@ db_meta@o@ \
+ db_method@o@ db_open@o@ db_overflow@o@ db_pr@o@ db_rec@o@ \
+ db_reclaim@o@ db_rename@o@ db_remove@o@ db_ret@o@ db_salloc@o@ \
+ db_shash@o@ db_truncate@o@ db_upg@o@ db_upg_opd@o@ db_vrfy@o@ \
+ db_vrfyutil@o@ dbm@o@ dbreg@o@ dbreg_auto@o@ dbreg_rec@o@ \
+ dbreg_util@o@ env_file@o@ env_method@o@ env_open@o@ env_recover@o@ \
+ env_region@o@ fileops_auto@o@ fop_basic@o@ fop_rec@o@ \
+ fop_util@o@ hash@o@ hash_auto@o@ hash_conv@o@ hash_dup@o@ \
+ hash_func@o@ hash_meta@o@ hash_method@o@ hash_open@o@ \
+ hash_page@o@ hash_rec@o@ hash_reclaim@o@ hash_stat@o@ \
+ hash_upgrade@o@ hash_verify@o@ hmac@o@ hsearch@o@ lock@o@ \
+ lock_deadlock@o@ lock_method@o@ lock_region@o@ lock_stat@o@ \
+ lock_util@o@ log@o@ log_archive@o@ log_compare@o@ log_get@o@ \
+ log_method@o@ log_put@o@ mp_alloc@o@ mp_bh@o@ mp_fget@o@ \
+ mp_fopen@o@ mp_fput@o@ mp_fset@o@ mp_method@o@ mp_region@o@ \
+ mp_register@o@ mp_stat@o@ mp_sync@o@ mp_trickle@o@ mutex@o@ \
+ os_abs@o@ os_alloc@o@ os_clock@o@ os_config@o@ os_dir@o@ \
+ os_errno@o@ os_fid@o@ os_fsync@o@ os_handle@o@ os_id@o@ \
+ os_map@o@ os_method@o@ os_oflags@o@ os_open@o@ os_region@o@ \
+ os_rename@o@ os_root@o@ os_rpath@o@ os_rw@o@ os_seek@o@ \
+ os_sleep@o@ os_spin@o@ os_stat@o@ os_tmpdir@o@ os_unlink@o@ \
+ qam@o@ qam_auto@o@ qam_conv@o@ qam_files@o@ qam_method@o@ \
+ qam_open@o@ qam_rec@o@ qam_stat@o@ qam_upgrade@o@ qam_verify@o@ \
+ rep_method@o@ rep_record@o@ rep_region@o@ rep_util@o@ sha1@o@ \
+ txn@o@ txn_auto@o@ txn_method@o@ txn_rec@o@ txn_recover@o@ \
+ txn_region@o@ txn_stat@o@ txn_util@o@ xa@o@ xa_db@o@ xa_map@o@
+
+CXX_OBJS=\
+ cxx_db@o@ cxx_dbc@o@ cxx_dbt@o@ cxx_env@o@ cxx_except@o@ \
+ cxx_lock@o@ cxx_logc@o@ cxx_mpool@o@ cxx_txn@o@
+
+JAVA_OBJS=\
+ java_Db@o@ java_DbEnv@o@ java_DbLock@o@ java_DbLogc@o@ \
+ java_DbLsn@o@ java_DbTxn@o@ java_DbUtil@o@ java_Dbc@o@ \
+ java_Dbt@o@ \
+ java_info@o@ java_locked@o@ java_util@o@ java_stat_auto@o@
+
+JAVA_DBSRCS=\
+ $(JAVA_DBDIR)/Db.java $(JAVA_DBDIR)/DbAppendRecno.java \
+ $(JAVA_DBDIR)/DbAppDispatch.java \
+ $(JAVA_DBDIR)/DbBtreeCompare.java $(JAVA_DBDIR)/DbBtreePrefix.java \
+ $(JAVA_DBDIR)/DbBtreeStat.java $(JAVA_DBDIR)/DbClient.java \
+ $(JAVA_DBDIR)/DbConstants.java $(JAVA_DBDIR)/DbDeadlockException.java \
+ $(JAVA_DBDIR)/DbDupCompare.java $(JAVA_DBDIR)/DbEnv.java \
+ $(JAVA_DBDIR)/DbEnvFeedback.java $(JAVA_DBDIR)/DbErrcall.java \
+ $(JAVA_DBDIR)/DbException.java $(JAVA_DBDIR)/DbFeedback.java \
+ $(JAVA_DBDIR)/DbHash.java $(JAVA_DBDIR)/DbHashStat.java \
+ $(JAVA_DBDIR)/DbKeyRange.java $(JAVA_DBDIR)/DbLock.java \
+ $(JAVA_DBDIR)/DbLockNotGrantedException.java \
+ $(JAVA_DBDIR)/DbLockRequest.java $(JAVA_DBDIR)/DbLockStat.java \
+ $(JAVA_DBDIR)/DbLogc.java $(JAVA_DBDIR)/DbLogStat.java \
+ $(JAVA_DBDIR)/DbLsn.java $(JAVA_DBDIR)/DbMemoryException.java \
+ $(JAVA_DBDIR)/DbMpoolFStat.java $(JAVA_DBDIR)/DbMpoolStat.java \
+ $(JAVA_DBDIR)/DbMultipleDataIterator.java \
+ $(JAVA_DBDIR)/DbMultipleIterator.java \
+ $(JAVA_DBDIR)/DbMultipleKeyDataIterator.java \
+ $(JAVA_DBDIR)/DbMultipleRecnoDataIterator.java \
+ $(JAVA_DBDIR)/DbOutputStreamErrcall.java \
+ $(JAVA_DBDIR)/DbPreplist.java $(JAVA_DBDIR)/DbQueueStat.java \
+ $(JAVA_DBDIR)/DbRepStat.java $(JAVA_DBDIR)/DbRepTransport.java \
+ $(JAVA_DBDIR)/DbRunRecoveryException.java \
+ $(JAVA_DBDIR)/DbSecondaryKeyCreate.java $(JAVA_DBDIR)/DbTxn.java \
+ $(JAVA_DBDIR)/DbTxnStat.java \
+ $(JAVA_DBDIR)/DbUtil.java $(JAVA_DBDIR)/Dbc.java $(JAVA_DBDIR)/Dbt.java
+
+JAVA_EXSRCS=\
+ $(JAVA_EXDIR)/AccessExample.java \
+ $(JAVA_EXDIR)/BtRecExample.java \
+ $(JAVA_EXDIR)/BulkAccessExample.java \
+ $(JAVA_EXDIR)/EnvExample.java \
+ $(JAVA_EXDIR)/LockExample.java \
+ $(JAVA_EXDIR)/TpcbExample.java
+
+TCL_OBJS=\
+ tcl_compat@o@ tcl_db@o@ tcl_db_pkg@o@ tcl_dbcursor@o@ tcl_env@o@ \
+ tcl_internal@o@ tcl_lock@o@ tcl_log@o@ tcl_mp@o@ tcl_rep@o@ \
+ tcl_txn@o@ tcl_util@o@
+
+RPC_CLIENT_OBJS=\
+ client@o@ db_server_clnt@o@ db_server_xdr@o@ gen_client@o@ \
+ gen_client_ret@o@
+
+RPC_SRV_OBJS=\
+ db_server_proc@o@ db_server_svc@o@ db_server_util@o@ \
+ gen_db_server@o@
+
+RPC_CXXSRV_OBJS=\
+ db_server_cxxproc@o@ db_server_cxxutil@o@ db_server_svc@o@ \
+ gen_db_server@o@
+
+RPC_JAVASRV_SRCS=\
+ $(JAVA_RPCDIR)/DbDispatcher.java \
+ $(JAVA_RPCDIR)/DbServer.java \
+ $(JAVA_RPCDIR)/FreeList.java \
+ $(JAVA_RPCDIR)/LocalIterator.java \
+ $(JAVA_RPCDIR)/RpcDb.java \
+ $(JAVA_RPCDIR)/RpcDbEnv.java \
+ $(JAVA_RPCDIR)/RpcDbTxn.java \
+ $(JAVA_RPCDIR)/RpcDbc.java \
+ $(JAVA_RPCDIR)/Timer.java \
+ $(JAVA_RPCDIR)/gen/DbServerStub.java \
+ $(JAVA_RPCDIR)/gen/__db_associate_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_associate_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_maxkey_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_maxkey_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_minkey_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_minkey_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_close_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_close_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_create_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_create_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_cursor_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_cursor_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_del_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_del_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_encrypt_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_encrypt_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_extentsize_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_extentsize_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_flags_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_flags_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_get_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_get_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_h_ffactor_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_h_ffactor_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_h_nelem_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_h_nelem_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_join_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_join_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_key_range_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_key_range_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_lorder_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_lorder_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_open_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_open_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_pagesize_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_pagesize_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_pget_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_pget_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_put_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_put_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_re_delim_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_re_delim_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_re_len_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_re_len_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_re_pad_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_re_pad_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_remove_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_remove_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_rename_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_rename_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_stat_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_stat_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_sync_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_sync_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_truncate_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_truncate_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_close_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_close_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_count_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_count_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_del_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_del_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_dup_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_dup_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_get_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_get_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_pget_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_pget_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_put_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_put_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_cachesize_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_cachesize_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_close_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_close_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_create_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_create_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_dbremove_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_dbremove_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_dbrename_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_dbrename_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_encrypt_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_encrypt_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_flags_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_flags_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_open_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_open_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_remove_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_remove_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_abort_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_abort_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_begin_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_begin_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_commit_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_commit_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_discard_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_discard_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_prepare_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_prepare_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_recover_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_recover_reply.java \
+ $(JAVA_RPCDIR)/gen/db_server.java
+
+UTIL_PROGS=\
+ @ADDITIONAL_PROGS@ \
+ db_archive db_checkpoint db_deadlock \
+ db_dump db_load db_printlog db_recover db_stat db_upgrade db_verify
+
+##################################################
+# List of files installed into the library directory.
+##################################################
+LIB_INSTALL_FILE_LIST=\
+ $(libdb) \
+ $(libso) \
+ $(libso_default) \
+ $(libso_major) \
+ $(libso_static) \
+ $(libso_target) \
+ $(libcxx) \
+ $(libxso) \
+ $(libxso_default) \
+ $(libxso_major) \
+ $(libxso_static) \
+ $(libxso_target) \
+ $(libtso) \
+ $(libtso_default) \
+ $(libtso_major) \
+ $(libtso_static) \
+ $(libtso_target) \
+ $(libjso) \
+ $(libjso_default) \
+ $(libjso_g) \
+ $(libjso_major) \
+ $(libjso_static) \
+ $(libjso_target) \
+ $(libj_exjarfile) \
+ $(libj_jarfile)
+
+##################################################
+# We're building a standard library or a RPM file hierarchy, potentially
+# for Embedix. Note: "all" must be the first target in the Makefile.
+##################################################
+all: @BUILD_TARGET@ libdb.a
+
+install-strip install: all @INSTALL_TARGET@
+
+##################################################
+# Library and standard utilities build.
+##################################################
+library_build: @INSTALL_LIBS@ @ADDITIONAL_LANG@ $(UTIL_PROGS)
+
+$(libdb): $(C_OBJS)
+ $(ar) cr $@ $(C_OBJS)
+ test ! -f $(ranlib) || $(ranlib) $@
+
+$(libcxx): $(CXX_OBJS) $(C_OBJS)
+ $(ar) cr $@ $(CXX_OBJS) $(C_OBJS)
+ test ! -f $(ranlib) || $(ranlib) $@
+
+$(libso_target): $(C_OBJS)
+ $(SOLINK) $(SOFLAGS) $(LDFLAGS) -o $@ $(C_OBJS) $(LIBSO_LIBS)
+
+$(libjso_target): $(JAVA_OBJS) $(C_OBJS)
+ $(SOLINK) -jnimodule $(SOFLAGS) $(LDFLAGS) \
+ -o $@ $(JAVA_OBJS) $(C_OBJS) $(LIBJSO_LIBS)
+
+$(libtso_target): $(TCL_OBJS) $(C_OBJS)
+ $(SOLINK) -module $(SOFLAGS) $(LDFLAGS) \
+ -o $@ $(TCL_OBJS) $(C_OBJS) $(LIBTSO_LIBS)
+
+$(libxso_target): $(CXX_OBJS) $(C_OBJS)
+ $(XSOLINK) $(SOFLAGS) $(LDFLAGS) \
+ -o $@ $(CXX_OBJS) $(C_OBJS) $(LIBXSO_LIBS)
+
+##################################################
+# Creating individual dependencies and actions for building class
+# files is possible, but it is very messy and error prone.
+##################################################
+java: $(libj_jarfile) $(libj_exjarfile)
+
+$(libj_jarfile): $(JAVA_DBSRCS)
+ @test -d $(JAVA_CLASSTOP) || \
+ ($(mkdir) -p $(JAVA_CLASSTOP) && $(chmod) $(dmode) $(JAVA_CLASSTOP))
+ $(JAVAC) -d $(JAVA_CLASSTOP) $(JAVACFLAGS) $(JAVA_DBSRCS)
+ cd $(JAVA_CLASSTOP) && $(JAR) cf ../$(libj_jarfile) $(JAVA_DBREL)
+
+$(libj_exjarfile): $(libj_jarfile) $(JAVA_EXSRCS)
+ @test -d $(JAVA_CLASSTOP) || \
+ ($(mkdir) -p $(JAVA_CLASSTOP) && $(chmod) $(dmode) $(JAVA_CLASSTOP))
+ $(JAVAC) -d $(JAVA_CLASSTOP) $(JAVACFLAGS) $(JAVA_EXSRCS)
+ cd $(JAVA_CLASSTOP) && $(JAR) cf ../$(libj_exjarfile) $(JAVA_EXREL)
+
+$(rpc_jarfile): $(libj_jarfile) $(RPC_JAVASRV_SRCS)
+ @test -d $(JAVA_RPCCLASSES) || \
+ ($(mkdir) -p $(JAVA_RPCCLASSES) && \
+ $(chmod) $(dmode) $(JAVA_RPCCLASSES))
+ env CLASSPATH=$(CLASSPATH):$(JAVA_RPCDIR)/oncrpc.jar \
+ @JAVAC@ -d $(JAVA_RPCCLASSES) $(JAVACFLAGS) $(RPC_JAVASRV_SRCS)
+ cd $(JAVA_RPCCLASSES) && $(JAR) cf ../$(rpc_jarfile) $(JAVA_RPCREL)
+
+
+##################################################
+# Utilities
+##################################################
+berkeley_db_svc: $(RPC_SRV_OBJS) util_log@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ $(RPC_SRV_OBJS) util_log@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+berkeley_db_cxxsvc: $(RPC_CXXSRV_OBJS) util_log@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) \
+ $(RPC_CXXSRV_OBJS) util_log@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+berkeley_db_javasvc: $(rpc_jarfile)
+ echo > $@ "#!/bin/sh"
+ echo >> $@ CLASSPATH="$(CLASSPATH):$(rpc_jarfile):$(JAVA_RPCDIR)/oncrpc.jar"
+ echo >> $@ LD_LIBRARY_PATH=.libs
+ echo >> $@ export CLASSPATH LD_LIBRARY_PATH
+ echo >> $@ exec java com.sleepycat.db.rpcserver.DbServer \$$@
+ chmod +x $@
+
+db_archive: db_archive@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_archive@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_checkpoint: db_checkpoint@o@ util_log@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_checkpoint@o@ util_log@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_deadlock: db_deadlock@o@ util_log@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_deadlock@o@ util_log@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_dump: db_dump@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) db_dump@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_dump185: db_dump185@o@ @LTLIBOBJS@
+ $(CCLINK) -o $@ $(LDFLAGS) db_dump185@o@ @LTLIBOBJS@ $(DB185LIB)
+ $(POSTLINK) $@
+
+db_load: db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_printlog: db_printlog@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_printlog@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_recover: db_recover@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_recover@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_stat: db_stat@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) db_stat@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_upgrade: db_upgrade@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_upgrade@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+db_verify: db_verify@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_verify@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Library and standard utilities install.
+##################################################
+library_install: install_setup
+library_install: install_include install_lib install_utilities install_docs
+
+uninstall: uninstall_include uninstall_lib uninstall_utilities uninstall_docs
+
+install_setup:
+ @test -d $(prefix) || \
+ ($(mkdir) -p $(prefix) && $(chmod) $(dmode) $(prefix))
+
+INCDOT= db.h db_cxx.h @ADDITIONAL_INCS@
+INCINC= cxx_common.h cxx_except.h
+install_include:
+ @echo "Installing DB include files: $(includedir) ..."
+ @test -d $(includedir) || \
+ ($(mkdir) -p $(includedir) && $(chmod) $(dmode) $(includedir))
+ @cd $(includedir) && $(rm) -f $(INCDOT) $(INCINC)
+ @$(cp) -p $(INCDOT) $(includedir)
+ @cd $(srcdir)/dbinc/ && $(cp) -p $(INCINC) $(includedir)
+ @cd $(includedir) && $(chmod) $(fmode) $(INCDOT) $(INCINC)
+
+uninstall_include:
+ @cd $(includedir) && $(rm) -f $(INCDOT) $(INCINC)
+
+install_lib:
+ @echo "Installing DB library: $(libdir) ..."
+ @test -d $(libdir) || \
+ ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
+ @cd $(libdir) && $(rm) -f $(LIB_INSTALL_FILE_LIST)
+ @$(INSTALLER) @INSTALL_LIBS@ $(libdir)
+ @(cd $(libdir) && \
+ test -f $(libso) && $(ln) -s $(libso) $(libso_default); \
+ test -f $(libso) && $(ln) -s $(libso) $(libso_major); \
+ test -f $(libso_static) && $(ln) -s $(libso_static) $(libdb); \
+ test -f $(libxso) && $(ln) -s $(libxso) $(libxso_default); \
+ test -f $(libxso) && $(ln) -s $(libxso) $(libxso_major); \
+ test -f $(libxso_static) && $(ln) -s $(libxso_static) $(libcxx); \
+ test -f $(libtso) && $(ln) -s $(libtso) $(libtso_default); \
+ test -f $(libtso) && $(ln) -s $(libtso) $(libtso_major); \
+ test -f $(libjso) && $(ln) -s $(libjso) $(libjso_default); \
+ test -f $(libjso) && $(ln) -s $(libjso) $(libjso_major); \
+ test -f $(libjso) && $(ln) -s $(libjso) $(libjso_g)) || true
+ @(test -f $(libj_jarfile) && \
+ $(cp) $(libj_jarfile) $(libdir) && \
+ $(chmod) $(fmode) $(libdir)/$(libj_jarfile)) || true
+
+uninstall_lib:
+ @cd $(libdir) && $(rm) -f $(LIB_INSTALL_FILE_LIST)
+
+install_utilities:
+ echo "Installing DB utilities: $(bindir) ..."
+ @test -d $(bindir) || \
+ ($(mkdir) -p $(bindir) && $(chmod) $(dmode) $(bindir))
+ @for i in $(UTIL_PROGS); do \
+ $(rm) -f $(bindir)/$$i $(bindir)/$$i.exe; \
+ test -f $$i.exe && i=$$i.exe || true; \
+ $(INSTALLER) $$i $(bindir)/$$i; \
+ test -f $(strip) && $(strip) $(bindir)/$$i || true; \
+ $(chmod) $(emode) $(bindir)/$$i; \
+ done
+
+uninstall_utilities:
+ @(cd $(bindir); for i in $(UTIL_PROGS); do \
+ $(rm) -f $$i $$i.exe; \
+ done)
+
+DOCLIST=\
+ api_c api_cxx api_java api_tcl images index.html ref reftoc.html \
+ sleepycat utility
+install_docs:
+ @echo "Installing documentation: $(docdir) ..."
+ @test -d $(docdir) || \
+ ($(mkdir) -p $(docdir) && $(chmod) $(dmode) $(docdir))
+ @cd $(docdir) && $(rm) -rf $(DOCLIST)
+ @cd $(srcdir)/docs && $(cp) -pr $(DOCLIST) $(docdir)/
+
+uninstall_docs:
+ @cd $(docdir) && $(rm) -rf $(DOCLIST)
+
+##################################################
+# RPM, Embedix build and install.
+##################################################
+RPM_ARCHIVE=db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@.tar.gz
+rpm_build:
+ @$(rm) -rf BUILD RPMS SOURCES SPECS SRPMS RPM_INSTALL
+ @$(mkdir) -p BUILD && $(chmod) $(dmode) BUILD
+ @$(mkdir) -p RPMS/i386 && $(chmod) $(dmode) RPMS RPMS/i386
+ @$(mkdir) -p SOURCES && $(chmod) $(dmode) SOURCES
+ @$(mkdir) -p SPECS && $(chmod) $(dmode) SPECS
+ @$(mkdir) -p SRPMS && $(chmod) $(dmode) SRPMS
+ $(cp) @db_cv_path_rpm_archive@/$(RPM_ARCHIVE) SOURCES/
+ $(cp) db.spec SPECS/db.spec
+ $(rpm) --rcfile @CONFIGURATION_PATH@/rpmrc -ba SPECS/db.spec
+
+rpm_install:
+
+RPM_SRPMS=db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@-1.src.rpm
+embedix_install:
+ $(cp) db.ecd @db_cv_path_embedix_install@/config-data/ecds/db.ecd
+ $(chmod) $(fmode) @db_cv_path_embedix_install@/config-data/ecds/db.ecd
+ $(cp) SRPMS/$(RPM_SRPMS) \
+ @db_cv_path_embedix_install@/Packages/SRPMS/$(RPM_SRPMS)
+ $(chmod) $(fmode) \
+ @db_cv_path_embedix_install@/Packages/SRPMS/$(RPM_SRPMS)
+
+##################################################
+# Remaining standard Makefile targets.
+##################################################
+CLEAN_LIST=\
+ berkeley_db_svc berkeley_db_cxxsvc berkeley_db_javasvc \
+ db_dump185 db_perf dbs bench_001 \
+ ex_access ex_apprec ex_btrec ex_dbclient ex_env ex_lock ex_mpool \
+ ex_repquote ex_thread ex_tpcb excxx_access excxx_btrec excxx_env \
+ excxx_lock excxx_mpool excxx_tpcb rpmrc
+
+mostly-clean clean:
+ $(rm) -rf $(C_OBJS)
+ $(rm) -rf $(CXX_OBJS) $(JAVA_OBJS) $(TCL_OBJS)
+ $(rm) -rf $(RPC_CLIENT_OBJS) $(RPC_SRV_OBJS) $(RPC_CXXSRV_OBJS)
+ $(rm) -rf $(UTIL_PROGS) *.exe $(CLEAN_LIST)
+ $(rm) -rf $(JAVA_CLASSTOP) $(JAVA_RPCCLASSES) $(rpc_jarfile)
+ $(rm) -rf tags *@o@ *.o *.o.lock *.lo core *.core
+ $(rm) -rf ALL.OUT.* BUILD PARALLEL_TESTDIR.* RPMS RPM_INSTALL
+ $(rm) -rf RUN_LOG RUNQUEUE SOURCES SPECS SRPMS TESTDIR TESTDIR.A
+ $(rm) -rf logtrack_seen.db tm .libs $(LIB_INSTALL_FILE_LIST)
+
+REALCLEAN_LIST=\
+ Makefile confdefs.h config.cache config.log config.status db.h \
+ db.spec db185_int.h db_185.h db_config.h db_cxx.h db_int.h \
+ db_int_def.h include.tcl
+
+distclean maintainer-clean realclean: clean
+ $(rm) -rf $(REALCLEAN_LIST)
+ $(rm) -rf libtool
+
+check depend dvi info obj TAGS:
+ @echo "$@: make target not supported" && true
+
+dist:
+ @echo "$@: make target not supported" && false
+
+##################################################
+# Multi-threaded testers, benchmarks.
+##################################################
+dbs@o@: $(srcdir)/test_server/dbs.c
+ $(CC) $(CFLAGS) $?
+dbs_am@o@: $(srcdir)/test_server/dbs_am.c
+ $(CC) $(CFLAGS) $?
+dbs_checkpoint@o@: $(srcdir)/test_server/dbs_checkpoint.c
+ $(CC) $(CFLAGS) $?
+dbs_debug@o@: $(srcdir)/test_server/dbs_debug.c
+ $(CC) $(CFLAGS) $?
+dbs_handles@o@: $(srcdir)/test_server/dbs_handles.c
+ $(CC) $(CFLAGS) $?
+dbs_log@o@: $(srcdir)/test_server/dbs_log.c
+ $(CC) $(CFLAGS) $?
+dbs_qam@o@: $(srcdir)/test_server/dbs_qam.c
+ $(CC) $(CFLAGS) $?
+dbs_spawn@o@: $(srcdir)/test_server/dbs_spawn.c
+ $(CC) $(CFLAGS) $?
+dbs_trickle@o@: $(srcdir)/test_server/dbs_trickle.c
+ $(CC) $(CFLAGS) $?
+dbs_util@o@: $(srcdir)/test_server/dbs_util.c
+ $(CC) $(CFLAGS) $?
+dbs_yield@o@: $(srcdir)/test_server/dbs_yield.c
+ $(CC) $(CFLAGS) $?
+DBS_OBJS=\
+ dbs@o@ dbs_am@o@ dbs_checkpoint@o@ dbs_debug@o@ dbs_handles@o@ \
+ dbs_log@o@ dbs_qam@o@ dbs_spawn@o@ dbs_trickle@o@ dbs_util@o@ \
+ dbs_yield@o@
+dbs: $(DBS_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(DBS_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+db_perf@o@: $(srcdir)/test_perf/db_perf.c
+ $(CC) $(CFLAGS) $?
+perf_cache_check@o@: $(srcdir)/test_perf/perf_cache_check.c
+ $(CC) $(CFLAGS) $?
+perf_checkpoint@o@: $(srcdir)/test_perf/perf_checkpoint.c
+ $(CC) $(CFLAGS) $?
+perf_config@o@: $(srcdir)/test_perf/perf_config.c
+ $(CC) $(CFLAGS) $?
+perf_dbs@o@: $(srcdir)/test_perf/perf_dbs.c
+ $(CC) $(CFLAGS) $?
+perf_debug@o@: $(srcdir)/test_perf/perf_debug.c
+ $(CC) $(CFLAGS) $?
+perf_file@o@: $(srcdir)/test_perf/perf_file.c
+ $(CC) $(CFLAGS) $?
+perf_key@o@: $(srcdir)/test_perf/perf_key.c
+ $(CC) $(CFLAGS) $?
+perf_log@o@: $(srcdir)/test_perf/perf_log.c
+ $(CC) $(CFLAGS) $?
+perf_misc@o@: $(srcdir)/test_perf/perf_misc.c
+ $(CC) $(CFLAGS) $?
+perf_op@o@: $(srcdir)/test_perf/perf_op.c
+ $(CC) $(CFLAGS) $?
+perf_parse@o@: $(srcdir)/test_perf/perf_parse.c
+ $(CC) $(CFLAGS) $?
+perf_rand@o@: $(srcdir)/test_perf/perf_rand.c
+ $(CC) $(CFLAGS) $?
+perf_spawn@o@: $(srcdir)/test_perf/perf_spawn.c
+ $(CC) $(CFLAGS) $?
+perf_thread@o@: $(srcdir)/test_perf/perf_thread.c
+ $(CC) $(CFLAGS) $?
+perf_trickle@o@: $(srcdir)/test_perf/perf_trickle.c
+ $(CC) $(CFLAGS) $?
+perf_txn@o@: $(srcdir)/test_perf/perf_txn.c
+ $(CC) $(CFLAGS) $?
+perf_util@o@: $(srcdir)/test_perf/perf_util.c
+ $(CC) $(CFLAGS) $?
+perf_vx@o@: $(srcdir)/test_perf/perf_vx.c
+ $(CC) $(CFLAGS) $?
+DBPERF_OBJS=\
+ db_perf@o@ perf_cache_check@o@ perf_checkpoint@o@ perf_config@o@ \
+ perf_dbs@o@ perf_debug@o@ perf_file@o@ perf_key@o@ perf_log@o@ \
+ perf_misc@o@ perf_op@o@ perf_parse@o@ perf_rand@o@ \
+ perf_spawn@o@ perf_thread@o@ perf_trickle@o@ perf_txn@o@ \
+ perf_util@o@ perf_vx@o@
+db_perf: $(DBPERF_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(DBPERF_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+tm@o@: $(srcdir)/mutex/tm.c
+ $(CC) $(CFLAGS) $?
+tm: tm@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) tm@o@ $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Example programs for C.
+##################################################
+bench_001@o@: $(srcdir)/examples_c/bench_001.c
+ $(CC) $(CFLAGS) $?
+bench_001: bench_001@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) bench_001@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_access@o@: $(srcdir)/examples_c/ex_access.c
+ $(CC) $(CFLAGS) $?
+ex_access: ex_access@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_access@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_apprec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec.c
+ $(CC) $(CFLAGS) $?
+ex_apprec_auto@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_auto.c
+ $(CC) $(CFLAGS) $?
+ex_apprec_rec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_rec.c
+ $(CC) $(CFLAGS) $?
+EX_APPREC_OBJS=ex_apprec@o@ ex_apprec_auto@o@ ex_apprec_rec@o@
+ex_apprec: $(EX_APPREC_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(EX_APPREC_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+
+ex_btrec@o@: $(srcdir)/examples_c/ex_btrec.c
+ $(CC) $(CFLAGS) $?
+ex_btrec: ex_btrec@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_btrec@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_dbclient@o@: $(srcdir)/examples_c/ex_dbclient.c
+ $(CC) $(CFLAGS) $?
+ex_dbclient: ex_dbclient@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_dbclient@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_env@o@: $(srcdir)/examples_c/ex_env.c
+ $(CC) $(CFLAGS) $?
+ex_env: ex_env@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_env@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_lock@o@: $(srcdir)/examples_c/ex_lock.c
+ $(CC) $(CFLAGS) $?
+ex_lock: ex_lock@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_lock@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_mpool@o@: $(srcdir)/examples_c/ex_mpool.c
+ $(CC) $(CFLAGS) $?
+ex_mpool: ex_mpool@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_mpool@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_rq_client@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_client.c
+ $(CC) $(CFLAGS) $?
+ex_rq_main@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_main.c
+ $(CC) $(CFLAGS) $?
+ex_rq_master@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_master.c
+ $(CC) $(CFLAGS) $?
+ex_rq_net@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_net.c
+ $(CC) $(CFLAGS) $?
+ex_rq_util@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_util.c
+ $(CC) $(CFLAGS) $?
+EX_RQ_OBJS=\
+ ex_rq_client@o@ ex_rq_main@o@ ex_rq_master@o@ ex_rq_net@o@ ex_rq_util@o@
+ex_repquote: $(EX_RQ_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(EX_RQ_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+ex_thread@o@: $(srcdir)/examples_c/ex_thread.c
+ $(CC) $(CFLAGS) $?
+ex_thread: ex_thread@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) ex_thread@o@ $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+ex_tpcb@o@: $(srcdir)/examples_c/ex_tpcb.c
+ $(CC) $(CFLAGS) $?
+ex_tpcb: ex_tpcb@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_tpcb@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Example programs for C++.
+##################################################
+AccessExample@o@: $(srcdir)/examples_cxx/AccessExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_access: AccessExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) AccessExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+BtRecExample@o@: $(srcdir)/examples_cxx/BtRecExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_btrec: BtRecExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) BtRecExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+EnvExample@o@: $(srcdir)/examples_cxx/EnvExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_env: EnvExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) EnvExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+LockExample@o@: $(srcdir)/examples_cxx/LockExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_lock: LockExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) LockExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+MpoolExample@o@: $(srcdir)/examples_cxx/MpoolExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_mpool: MpoolExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) MpoolExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+TpcbExample@o@: $(srcdir)/examples_cxx/TpcbExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_tpcb: TpcbExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) TpcbExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# C API build rules.
+##################################################
+aes_method@o@: $(srcdir)/crypto/aes_method.c
+ $(CC) $(CFLAGS) $?
+bt_compare@o@: $(srcdir)/btree/bt_compare.c
+ $(CC) $(CFLAGS) $?
+bt_conv@o@: $(srcdir)/btree/bt_conv.c
+ $(CC) $(CFLAGS) $?
+bt_curadj@o@: $(srcdir)/btree/bt_curadj.c
+ $(CC) $(CFLAGS) $?
+bt_cursor@o@: $(srcdir)/btree/bt_cursor.c
+ $(CC) $(CFLAGS) $?
+bt_delete@o@: $(srcdir)/btree/bt_delete.c
+ $(CC) $(CFLAGS) $?
+bt_method@o@: $(srcdir)/btree/bt_method.c
+ $(CC) $(CFLAGS) $?
+bt_open@o@: $(srcdir)/btree/bt_open.c
+ $(CC) $(CFLAGS) $?
+bt_put@o@: $(srcdir)/btree/bt_put.c
+ $(CC) $(CFLAGS) $?
+bt_rec@o@: $(srcdir)/btree/bt_rec.c
+ $(CC) $(CFLAGS) $?
+bt_reclaim@o@: $(srcdir)/btree/bt_reclaim.c
+ $(CC) $(CFLAGS) $?
+bt_recno@o@: $(srcdir)/btree/bt_recno.c
+ $(CC) $(CFLAGS) $?
+bt_rsearch@o@: $(srcdir)/btree/bt_rsearch.c
+ $(CC) $(CFLAGS) $?
+bt_search@o@: $(srcdir)/btree/bt_search.c
+ $(CC) $(CFLAGS) $?
+bt_split@o@: $(srcdir)/btree/bt_split.c
+ $(CC) $(CFLAGS) $?
+bt_stack@o@: $(srcdir)/btree/bt_stack.c
+ $(CC) $(CFLAGS) $?
+bt_stat@o@: $(srcdir)/btree/bt_stat.c
+ $(CC) $(CFLAGS) $?
+bt_upgrade@o@: $(srcdir)/btree/bt_upgrade.c
+ $(CC) $(CFLAGS) $?
+bt_verify@o@: $(srcdir)/btree/bt_verify.c
+ $(CC) $(CFLAGS) $?
+btree_auto@o@: $(srcdir)/btree/btree_auto.c
+ $(CC) $(CFLAGS) $?
+crdel_auto@o@: $(srcdir)/db/crdel_auto.c
+ $(CC) $(CFLAGS) $?
+crdel_rec@o@: $(srcdir)/db/crdel_rec.c
+ $(CC) $(CFLAGS) $?
+crypto@o@: $(srcdir)/crypto/crypto.c
+ $(CC) $(CFLAGS) $?
+db185@o@: $(srcdir)/db185/db185.c
+ $(CC) $(CFLAGS) $?
+db@o@: $(srcdir)/db/db.c
+ $(CC) $(CFLAGS) $?
+db_am@o@: $(srcdir)/db/db_am.c
+ $(CC) $(CFLAGS) $?
+db_auto@o@: $(srcdir)/db/db_auto.c
+ $(CC) $(CFLAGS) $?
+db_byteorder@o@: $(srcdir)/common/db_byteorder.c
+ $(CC) $(CFLAGS) $?
+db_cam@o@: $(srcdir)/db/db_cam.c
+ $(CC) $(CFLAGS) $?
+db_conv@o@: $(srcdir)/db/db_conv.c
+ $(CC) $(CFLAGS) $?
+db_dispatch@o@: $(srcdir)/db/db_dispatch.c
+ $(CC) $(CFLAGS) $?
+db_dup@o@: $(srcdir)/db/db_dup.c
+ $(CC) $(CFLAGS) $?
+db_err@o@: $(srcdir)/common/db_err.c
+ $(CC) $(CFLAGS) $?
+db_getlong@o@: $(srcdir)/common/db_getlong.c
+ $(CC) $(CFLAGS) $?
+db_idspace@o@: $(srcdir)/common/db_idspace.c
+ $(CC) $(CFLAGS) $?
+db_iface@o@: $(srcdir)/db/db_iface.c
+ $(CC) $(CFLAGS) $?
+db_join@o@: $(srcdir)/db/db_join.c
+ $(CC) $(CFLAGS) $?
+db_log2@o@: $(srcdir)/common/db_log2.c
+ $(CC) $(CFLAGS) $?
+db_meta@o@: $(srcdir)/db/db_meta.c
+ $(CC) $(CFLAGS) $?
+db_method@o@: $(srcdir)/db/db_method.c
+ $(CC) $(CFLAGS) $?
+db_open@o@: $(srcdir)/db/db_open.c
+ $(CC) $(CFLAGS) $?
+db_overflow@o@: $(srcdir)/db/db_overflow.c
+ $(CC) $(CFLAGS) $?
+db_pr@o@: $(srcdir)/db/db_pr.c
+ $(CC) $(CFLAGS) $?
+db_rec@o@: $(srcdir)/db/db_rec.c
+ $(CC) $(CFLAGS) $?
+db_reclaim@o@: $(srcdir)/db/db_reclaim.c
+ $(CC) $(CFLAGS) $?
+db_rename@o@: $(srcdir)/db/db_rename.c
+ $(CC) $(CFLAGS) $?
+db_remove@o@: $(srcdir)/db/db_remove.c
+ $(CC) $(CFLAGS) $?
+db_ret@o@: $(srcdir)/db/db_ret.c
+ $(CC) $(CFLAGS) $?
+db_salloc@o@: $(srcdir)/env/db_salloc.c
+ $(CC) $(CFLAGS) $?
+db_shash@o@: $(srcdir)/env/db_shash.c
+ $(CC) $(CFLAGS) $?
+db_truncate@o@: $(srcdir)/db/db_truncate.c
+ $(CC) $(CFLAGS) $?
+db_upg@o@: $(srcdir)/db/db_upg.c
+ $(CC) $(CFLAGS) $?
+db_upg_opd@o@: $(srcdir)/db/db_upg_opd.c
+ $(CC) $(CFLAGS) $?
+db_vrfy@o@: $(srcdir)/db/db_vrfy.c
+ $(CC) $(CFLAGS) $?
+db_vrfyutil@o@: $(srcdir)/db/db_vrfyutil.c
+ $(CC) $(CFLAGS) $?
+dbm@o@: $(srcdir)/dbm/dbm.c
+ $(CC) $(CFLAGS) $?
+dbreg@o@: $(srcdir)/dbreg/dbreg.c
+ $(CC) $(CFLAGS) $?
+dbreg_auto@o@: $(srcdir)/dbreg/dbreg_auto.c
+ $(CC) $(CFLAGS) $?
+dbreg_rec@o@: $(srcdir)/dbreg/dbreg_rec.c
+ $(CC) $(CFLAGS) $?
+dbreg_util@o@: $(srcdir)/dbreg/dbreg_util.c
+ $(CC) $(CFLAGS) $?
+env_file@o@: $(srcdir)/env/env_file.c
+ $(CC) $(CFLAGS) $?
+env_method@o@: $(srcdir)/env/env_method.c
+ $(CC) $(CFLAGS) $?
+env_open@o@: $(srcdir)/env/env_open.c
+ $(CC) $(CFLAGS) $?
+env_recover@o@: $(srcdir)/env/env_recover.c
+ $(CC) $(CFLAGS) $?
+env_region@o@: $(srcdir)/env/env_region.c
+ $(CC) $(CFLAGS) $?
+fileops_auto@o@: $(srcdir)/fileops/fileops_auto.c
+ $(CC) $(CFLAGS) $?
+fop_basic@o@: $(srcdir)/fileops/fop_basic.c
+ $(CC) $(CFLAGS) $?
+fop_rec@o@: $(srcdir)/fileops/fop_rec.c
+ $(CC) $(CFLAGS) $?
+fop_util@o@: $(srcdir)/fileops/fop_util.c
+ $(CC) $(CFLAGS) $?
+hash@o@: $(srcdir)/hash/hash.c
+ $(CC) $(CFLAGS) $?
+hash_auto@o@: $(srcdir)/hash/hash_auto.c
+ $(CC) $(CFLAGS) $?
+hash_conv@o@: $(srcdir)/hash/hash_conv.c
+ $(CC) $(CFLAGS) $?
+hash_dup@o@: $(srcdir)/hash/hash_dup.c
+ $(CC) $(CFLAGS) $?
+hash_func@o@: $(srcdir)/hash/hash_func.c
+ $(CC) $(CFLAGS) $?
+hash_meta@o@: $(srcdir)/hash/hash_meta.c
+ $(CC) $(CFLAGS) $?
+hash_method@o@: $(srcdir)/hash/hash_method.c
+ $(CC) $(CFLAGS) $?
+hash_open@o@: $(srcdir)/hash/hash_open.c
+ $(CC) $(CFLAGS) $?
+hash_page@o@: $(srcdir)/hash/hash_page.c
+ $(CC) $(CFLAGS) $?
+hash_rec@o@: $(srcdir)/hash/hash_rec.c
+ $(CC) $(CFLAGS) $?
+hash_reclaim@o@: $(srcdir)/hash/hash_reclaim.c
+ $(CC) $(CFLAGS) $?
+hash_stat@o@: $(srcdir)/hash/hash_stat.c
+ $(CC) $(CFLAGS) $?
+hash_upgrade@o@: $(srcdir)/hash/hash_upgrade.c
+ $(CC) $(CFLAGS) $?
+hash_verify@o@: $(srcdir)/hash/hash_verify.c
+ $(CC) $(CFLAGS) $?
+hmac@o@: $(srcdir)/hmac/hmac.c
+ $(CC) $(CFLAGS) $?
+hsearch@o@: $(srcdir)/hsearch/hsearch.c
+ $(CC) $(CFLAGS) $?
+lock@o@: $(srcdir)/lock/lock.c
+ $(CC) $(CFLAGS) $?
+lock_deadlock@o@:$(srcdir)/lock/lock_deadlock.c
+ $(CC) $(CFLAGS) $?
+lock_method@o@:$(srcdir)/lock/lock_method.c
+ $(CC) $(CFLAGS) $?
+lock_region@o@:$(srcdir)/lock/lock_region.c
+ $(CC) $(CFLAGS) $?
+lock_stat@o@:$(srcdir)/lock/lock_stat.c
+ $(CC) $(CFLAGS) $?
+lock_util@o@:$(srcdir)/lock/lock_util.c
+ $(CC) $(CFLAGS) $?
+log@o@: $(srcdir)/log/log.c
+ $(CC) $(CFLAGS) $?
+log_archive@o@: $(srcdir)/log/log_archive.c
+ $(CC) $(CFLAGS) $?
+log_compare@o@: $(srcdir)/log/log_compare.c
+ $(CC) $(CFLAGS) $?
+log_get@o@: $(srcdir)/log/log_get.c
+ $(CC) $(CFLAGS) $?
+log_method@o@: $(srcdir)/log/log_method.c
+ $(CC) $(CFLAGS) $?
+log_put@o@: $(srcdir)/log/log_put.c
+ $(CC) $(CFLAGS) $?
+mp_alloc@o@: $(srcdir)/mp/mp_alloc.c
+ $(CC) $(CFLAGS) $?
+mp_bh@o@: $(srcdir)/mp/mp_bh.c
+ $(CC) $(CFLAGS) $?
+mp_fget@o@: $(srcdir)/mp/mp_fget.c
+ $(CC) $(CFLAGS) $?
+mp_fopen@o@: $(srcdir)/mp/mp_fopen.c
+ $(CC) $(CFLAGS) $?
+mp_fput@o@: $(srcdir)/mp/mp_fput.c
+ $(CC) $(CFLAGS) $?
+mp_fset@o@: $(srcdir)/mp/mp_fset.c
+ $(CC) $(CFLAGS) $?
+mp_method@o@: $(srcdir)/mp/mp_method.c
+ $(CC) $(CFLAGS) $?
+mp_region@o@: $(srcdir)/mp/mp_region.c
+ $(CC) $(CFLAGS) $?
+mp_register@o@: $(srcdir)/mp/mp_register.c
+ $(CC) $(CFLAGS) $?
+mp_stat@o@: $(srcdir)/mp/mp_stat.c
+ $(CC) $(CFLAGS) $?
+mp_sync@o@: $(srcdir)/mp/mp_sync.c
+ $(CC) $(CFLAGS) $?
+mp_trickle@o@: $(srcdir)/mp/mp_trickle.c
+ $(CC) $(CFLAGS) $?
+mt19937db@o@: $(srcdir)/crypto/mersenne/mt19937db.c
+ $(CC) $(CFLAGS) $?
+mut_fcntl@o@: $(srcdir)/mutex/mut_fcntl.c
+ $(CC) $(CFLAGS) $?
+mut_pthread@o@: $(srcdir)/mutex/mut_pthread.c
+ $(CC) $(CFLAGS) $?
+mut_tas@o@: $(srcdir)/mutex/mut_tas.c
+ $(CC) $(CFLAGS) $?
+mutex@o@: $(srcdir)/mutex/mutex.c
+ $(CC) $(CFLAGS) $?
+os_abs@o@: $(srcdir)/os/os_abs.c
+ $(CC) $(CFLAGS) $?
+os_alloc@o@: $(srcdir)/os/os_alloc.c
+ $(CC) $(CFLAGS) $?
+os_clock@o@: $(srcdir)/os/os_clock.c
+ $(CC) $(CFLAGS) $?
+os_config@o@: $(srcdir)/os/os_config.c
+ $(CC) $(CFLAGS) $?
+os_dir@o@: $(srcdir)/os/os_dir.c
+ $(CC) $(CFLAGS) $?
+os_errno@o@: $(srcdir)/os/os_errno.c
+ $(CC) $(CFLAGS) $?
+os_fid@o@: $(srcdir)/os/os_fid.c
+ $(CC) $(CFLAGS) $?
+os_fsync@o@: $(srcdir)/os/os_fsync.c
+ $(CC) $(CFLAGS) $?
+os_id@o@: $(srcdir)/os/os_id.c
+ $(CC) $(CFLAGS) $?
+os_handle@o@: $(srcdir)/os/os_handle.c
+ $(CC) $(CFLAGS) $?
+os_map@o@: $(srcdir)/os/os_map.c
+ $(CC) $(CFLAGS) $?
+os_method@o@: $(srcdir)/os/os_method.c
+ $(CC) $(CFLAGS) $?
+os_oflags@o@: $(srcdir)/os/os_oflags.c
+ $(CC) $(CFLAGS) $?
+os_open@o@: $(srcdir)/os/os_open.c
+ $(CC) $(CFLAGS) $?
+os_region@o@: $(srcdir)/os/os_region.c
+ $(CC) $(CFLAGS) $?
+os_rename@o@: $(srcdir)/os/os_rename.c
+ $(CC) $(CFLAGS) $?
+os_root@o@: $(srcdir)/os/os_root.c
+ $(CC) $(CFLAGS) $?
+os_rpath@o@: $(srcdir)/os/os_rpath.c
+ $(CC) $(CFLAGS) $?
+os_rw@o@: $(srcdir)/os/os_rw.c
+ $(CC) $(CFLAGS) $?
+os_seek@o@: $(srcdir)/os/os_seek.c
+ $(CC) $(CFLAGS) $?
+os_sleep@o@: $(srcdir)/os/os_sleep.c
+ $(CC) $(CFLAGS) $?
+os_spin@o@: $(srcdir)/os/os_spin.c
+ $(CC) $(CFLAGS) $?
+os_stat@o@: $(srcdir)/os/os_stat.c
+ $(CC) $(CFLAGS) $?
+os_tmpdir@o@: $(srcdir)/os/os_tmpdir.c
+ $(CC) $(CFLAGS) $?
+os_unlink@o@: $(srcdir)/os/os_unlink.c
+ $(CC) $(CFLAGS) $?
+qam@o@: $(srcdir)/qam/qam.c
+ $(CC) $(CFLAGS) $?
+qam_auto@o@: $(srcdir)/qam/qam_auto.c
+ $(CC) $(CFLAGS) $?
+qam_conv@o@: $(srcdir)/qam/qam_conv.c
+ $(CC) $(CFLAGS) $?
+qam_files@o@: $(srcdir)/qam/qam_files.c
+ $(CC) $(CFLAGS) $?
+qam_method@o@: $(srcdir)/qam/qam_method.c
+ $(CC) $(CFLAGS) $?
+qam_open@o@: $(srcdir)/qam/qam_open.c
+ $(CC) $(CFLAGS) $?
+qam_rec@o@: $(srcdir)/qam/qam_rec.c
+ $(CC) $(CFLAGS) $?
+qam_stat@o@: $(srcdir)/qam/qam_stat.c
+ $(CC) $(CFLAGS) $?
+qam_upgrade@o@: $(srcdir)/qam/qam_upgrade.c
+ $(CC) $(CFLAGS) $?
+qam_verify@o@: $(srcdir)/qam/qam_verify.c
+ $(CC) $(CFLAGS) $?
+rep_method@o@: $(srcdir)/rep/rep_method.c
+ $(CC) $(CFLAGS) $?
+rep_record@o@: $(srcdir)/rep/rep_record.c
+ $(CC) $(CFLAGS) $?
+rep_region@o@: $(srcdir)/rep/rep_region.c
+ $(CC) $(CFLAGS) $?
+rep_util@o@: $(srcdir)/rep/rep_util.c
+ $(CC) $(CFLAGS) $?
+rijndael-alg-fst@o@: $(srcdir)/crypto/rijndael/rijndael-alg-fst.c
+ $(CC) $(CFLAGS) $?
+rijndael-api-fst@o@: $(srcdir)/crypto/rijndael/rijndael-api-fst.c
+ $(CC) $(CFLAGS) $?
+sha1@o@: $(srcdir)/hmac/sha1.c
+ $(CC) $(CFLAGS) $?
+txn@o@: $(srcdir)/txn/txn.c
+ $(CC) $(CFLAGS) $?
+txn_auto@o@: $(srcdir)/txn/txn_auto.c
+ $(CC) $(CFLAGS) $?
+txn_method@o@: $(srcdir)/txn/txn_method.c
+ $(CC) $(CFLAGS) $?
+txn_rec@o@: $(srcdir)/txn/txn_rec.c
+ $(CC) $(CFLAGS) $?
+txn_recover@o@: $(srcdir)/txn/txn_recover.c
+ $(CC) $(CFLAGS) $?
+txn_region@o@: $(srcdir)/txn/txn_region.c
+ $(CC) $(CFLAGS) $?
+txn_stat@o@: $(srcdir)/txn/txn_stat.c
+ $(CC) $(CFLAGS) $?
+txn_util@o@: $(srcdir)/txn/txn_util.c
+ $(CC) $(CFLAGS) $?
+util_cache@o@: $(srcdir)/common/util_cache.c
+ $(CC) $(CFLAGS) $?
+util_log@o@: $(srcdir)/common/util_log.c
+ $(CC) $(CFLAGS) $?
+util_sig@o@: $(srcdir)/common/util_sig.c
+ $(CC) $(CFLAGS) $?
+uts4_cc@o@: $(srcdir)/mutex/uts4_cc.s
+ $(AS) $(ASFLAGS) -o $@ $?
+xa@o@: $(srcdir)/xa/xa.c
+ $(CC) $(CFLAGS) $?
+xa_db@o@: $(srcdir)/xa/xa_db.c
+ $(CC) $(CFLAGS) $?
+xa_map@o@: $(srcdir)/xa/xa_map.c
+ $(CC) $(CFLAGS) $?
+
+##################################################
+# C++ API build rules.
+##################################################
+cxx_db@o@: $(srcdir)/cxx/cxx_db.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_dbc@o@: $(srcdir)/cxx/cxx_dbc.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_dbt@o@: $(srcdir)/cxx/cxx_dbt.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_env@o@: $(srcdir)/cxx/cxx_env.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_except@o@: $(srcdir)/cxx/cxx_except.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_lock@o@: $(srcdir)/cxx/cxx_lock.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_logc@o@: $(srcdir)/cxx/cxx_logc.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_mpool@o@: $(srcdir)/cxx/cxx_mpool.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_txn@o@: $(srcdir)/cxx/cxx_txn.cpp
+ $(CXX) $(CXXFLAGS) $?
+
+##################################################
+# Java API build rules.
+##################################################
+java_Db@o@: $(srcdir)/libdb_java/java_Db.c
+ $(CC) $(CFLAGS) $?
+java_DbEnv@o@: $(srcdir)/libdb_java/java_DbEnv.c
+ $(CC) $(CFLAGS) $?
+java_DbLock@o@: $(srcdir)/libdb_java/java_DbLock.c
+ $(CC) $(CFLAGS) $?
+java_DbLogc@o@: $(srcdir)/libdb_java/java_DbLogc.c
+ $(CC) $(CFLAGS) $?
+java_DbLsn@o@: $(srcdir)/libdb_java/java_DbLsn.c
+ $(CC) $(CFLAGS) $?
+java_DbTxn@o@: $(srcdir)/libdb_java/java_DbTxn.c
+ $(CC) $(CFLAGS) $?
+java_DbUtil@o@: $(srcdir)/libdb_java/java_DbUtil.c
+ $(CC) $(CFLAGS) $?
+java_Dbc@o@: $(srcdir)/libdb_java/java_Dbc.c
+ $(CC) $(CFLAGS) $?
+java_Dbt@o@: $(srcdir)/libdb_java/java_Dbt.c
+ $(CC) $(CFLAGS) $?
+java_info@o@: $(srcdir)/libdb_java/java_info.c
+ $(CC) $(CFLAGS) $?
+java_locked@o@: $(srcdir)/libdb_java/java_locked.c
+ $(CC) $(CFLAGS) $?
+java_util@o@: $(srcdir)/libdb_java/java_util.c
+ $(CC) $(CFLAGS) $?
+java_stat_auto@o@: $(srcdir)/libdb_java/java_stat_auto.c
+ $(CC) $(CFLAGS) $?
+
+##################################################
+# Tcl API build rules.
+##################################################
+tcl_compat@o@: $(srcdir)/tcl/tcl_compat.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_db@o@: $(srcdir)/tcl/tcl_db.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_db_pkg@o@: $(srcdir)/tcl/tcl_db_pkg.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_dbcursor@o@: $(srcdir)/tcl/tcl_dbcursor.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_env@o@: $(srcdir)/tcl/tcl_env.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_internal@o@: $(srcdir)/tcl/tcl_internal.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_lock@o@: $(srcdir)/tcl/tcl_lock.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_log@o@: $(srcdir)/tcl/tcl_log.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_mp@o@: $(srcdir)/tcl/tcl_mp.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_rep@o@: $(srcdir)/tcl/tcl_rep.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_txn@o@: $(srcdir)/tcl/tcl_txn.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_util@o@: $(srcdir)/tcl/tcl_util.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
+
+##################################################
+# RPC build rules.
+##################################################
+# RPC client files
+client@o@: $(srcdir)/rpc_client/client.c
+ $(CC) $(CFLAGS) $?
+db_server_clnt@o@: $(srcdir)/rpc_client/db_server_clnt.c
+ $(CC) $(CFLAGS) $?
+gen_client@o@: $(srcdir)/rpc_client/gen_client.c
+ $(CC) $(CFLAGS) $?
+gen_client_ret@o@: $(srcdir)/rpc_client/gen_client_ret.c
+ $(CC) $(CFLAGS) $?
+
+# RPC server files
+db_server_proc@o@: $(srcdir)/rpc_server/c/db_server_proc.c
+ $(CC) $(CFLAGS) $?
+db_server_svc@o@: $(srcdir)/rpc_server/c/db_server_svc.c
+ $(CC) $(CFLAGS) $?
+db_server_util@o@: $(srcdir)/rpc_server/c/db_server_util.c
+ $(CC) $(CFLAGS) $?
+db_server_xdr@o@: $(srcdir)/rpc_server/c/db_server_xdr.c
+ $(CC) $(CFLAGS) $?
+gen_db_server@o@: $(srcdir)/rpc_server/c/gen_db_server.c
+ $(CC) $(CFLAGS) $?
+db_server_cxxproc@o@: $(srcdir)/rpc_server/cxx/db_server_cxxproc.cpp
+ $(CXX) $(CXXFLAGS) $?
+db_server_cxxutil@o@: $(srcdir)/rpc_server/cxx/db_server_cxxutil.cpp
+ $(CXX) $(CXXFLAGS) $?
+
+##################################################
+# Utility build rules.
+##################################################
+db_archive@o@: $(srcdir)/db_archive/db_archive.c
+ $(CC) $(CFLAGS) $?
+db_checkpoint@o@: $(srcdir)/db_checkpoint/db_checkpoint.c
+ $(CC) $(CFLAGS) $?
+db_deadlock@o@: $(srcdir)/db_deadlock/db_deadlock.c
+ $(CC) $(CFLAGS) $?
+db_dump@o@: $(srcdir)/db_dump/db_dump.c
+ $(CC) $(CFLAGS) $?
+db_dump185@o@: $(srcdir)/db_dump185/db_dump185.c
+ $(CC) $(DB185INC) $?
+db_load@o@: $(srcdir)/db_load/db_load.c
+ $(CC) $(CFLAGS) $?
+db_printlog@o@: $(srcdir)/db_printlog/db_printlog.c
+ $(CC) $(CFLAGS) $?
+db_recover@o@: $(srcdir)/db_recover/db_recover.c
+ $(CC) $(CFLAGS) $?
+db_stat@o@: $(srcdir)/db_stat/db_stat.c
+ $(CC) $(CFLAGS) $?
+db_upgrade@o@: $(srcdir)/db_upgrade/db_upgrade.c
+ $(CC) $(CFLAGS) $?
+db_verify@o@: $(srcdir)/db_verify/db_verify.c
+ $(CC) $(CFLAGS) $?
+
+##################################################
+# C library replacement files.
+##################################################
+getcwd@o@: $(srcdir)/clib/getcwd.c
+ $(CC) $(CFLAGS) $?
+getopt@o@: $(srcdir)/clib/getopt.c
+ $(CC) $(CFLAGS) $?
+memcmp@o@: $(srcdir)/clib/memcmp.c
+ $(CC) $(CFLAGS) $?
+memcpy@o@: $(srcdir)/clib/memmove.c
+ $(CC) -DMEMCOPY $(CFLAGS) $? -o $@
+memmove@o@: $(srcdir)/clib/memmove.c
+ $(CC) -DMEMMOVE $(CFLAGS) $?
+raise@o@: $(srcdir)/clib/raise.c
+ $(CC) $(CFLAGS) $?
+strcasecmp@o@: $(srcdir)/clib/strcasecmp.c
+ $(CC) $(CFLAGS) $?
+strdup@o@: $(srcdir)/clib/strdup.c
+ $(CC) $(CFLAGS) $?
+snprintf@o@: $(srcdir)/clib/snprintf.c
+ $(CC) $(CFLAGS) $?
+strerror@o@: $(srcdir)/clib/strerror.c
+ $(CC) $(CFLAGS) $?
+vsnprintf@o@: $(srcdir)/clib/vsnprintf.c
+ $(CC) $(CFLAGS) $?
diff --git a/storage/bdb/dist/RELEASE b/storage/bdb/dist/RELEASE
new file mode 100644
index 00000000000..61151b8589c
--- /dev/null
+++ b/storage/bdb/dist/RELEASE
@@ -0,0 +1,28 @@
+# $Id: RELEASE,v 11.123 2002/09/13 22:16:02 bostic Exp $
+
+DB_VERSION_MAJOR=4
+DB_VERSION_MINOR=1
+DB_VERSION_PATCH=24
+DB_VERSION="$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH"
+
+DB_VERSION_UNIQUE_NAME=`printf "_%d%03d" $DB_VERSION_MAJOR $DB_VERSION_MINOR`
+
+DB_RELEASE_DATE=`date "+%B %e, %Y"`
+DB_VERSION_STRING="Sleepycat Software: Berkeley DB $DB_VERSION: ($DB_RELEASE_DATE)"
+
+# this file is included by all s_* scripts, so it's the way to apply
+# hacks :)
+
+# bitkeeper doesn't like somebody to mess with permissions!
+chmod()
+{
+ echo "chmod $1 $2" >/dev/null
+}
+
+# useful trick to find auto-generated files
+#cmp()
+#{
+# echo "==>> CMP $1 $2" >/dev/tty
+# /usr/bin/cmp "$1" "$2"
+#}
+
diff --git a/storage/bdb/dist/aclocal/config.ac b/storage/bdb/dist/aclocal/config.ac
new file mode 100644
index 00000000000..cd288425946
--- /dev/null
+++ b/storage/bdb/dist/aclocal/config.ac
@@ -0,0 +1,51 @@
+# Features we don't test for, but want the #defines to exist for
+# other ports.
+AH_TEMPLATE(HAVE_VXWORKS, [Define to 1 if building VxWorks.])
+
+AH_TEMPLATE(HAVE_FILESYSTEM_NOTZERO,
+ [Define to 1 if allocated filesystem blocks are not zeroed.])
+
+AH_TEMPLATE(HAVE_UNLINK_WITH_OPEN_FAILURE,
+ [Define to 1 if unlink of file with open file descriptors will fail.])
+
+AH_BOTTOM([/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions, we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif])
diff --git a/storage/bdb/dist/aclocal/cxx.ac b/storage/bdb/dist/aclocal/cxx.ac
new file mode 100644
index 00000000000..49103cc661a
--- /dev/null
+++ b/storage/bdb/dist/aclocal/cxx.ac
@@ -0,0 +1,17 @@
+# C++ checks to determine what style of headers to use and
+# whether to use "using" clauses.
+
+AC_DEFUN(AC_CXX_HAVE_STDHEADERS, [
+AC_SUBST(cxx_have_stdheaders)
+AC_CACHE_CHECK([whether C++ supports the ISO C++ standard includes],
+db_cv_cxx_have_stdheaders,
+[AC_LANG_SAVE
+ AC_LANG_CPLUSPLUS
+ AC_TRY_COMPILE([#include <iostream>
+],[std::ostream *o; return 0;],
+ db_cv_cxx_have_stdheaders=yes, db_cv_cxx_have_stdheaders=no)
+ AC_LANG_RESTORE
+])
+if test "$db_cv_cxx_have_stdheaders" = yes; then
+ cxx_have_stdheaders="#define HAVE_CXX_STDHEADERS 1"
+fi])
diff --git a/storage/bdb/dist/aclocal/gcc.ac b/storage/bdb/dist/aclocal/gcc.ac
new file mode 100644
index 00000000000..0949d982f17
--- /dev/null
+++ b/storage/bdb/dist/aclocal/gcc.ac
@@ -0,0 +1,36 @@
+# Version 2.96 of gcc (shipped with RedHat Linux 7.[01] and Mandrake) had
+# serious problems.
+AC_DEFUN(AC_GCC_CONFIG1, [
+AC_CACHE_CHECK([whether we are using gcc version 2.96],
+db_cv_gcc_2_96, [
+db_cv_gcc_2_96=no
+if test "$GCC" = "yes"; then
+ GCC_VERSION=`${MAKEFILE_CC} --version`
+ case ${GCC_VERSION} in
+ 2.96*)
+ db_cv_gcc_2_96=yes;;
+ esac
+fi])
+if test "$db_cv_gcc_2_96" = "yes"; then
+ CFLAGS=`echo "$CFLAGS" | sed 's/-O2/-O/'`
+ CXXFLAGS=`echo "$CXXFLAGS" | sed 's/-O2/-O/'`
+ AC_MSG_WARN([INSTALLED GCC COMPILER HAS SERIOUS BUGS; PLEASE UPGRADE.])
+ AC_MSG_WARN([GCC OPTIMIZATION LEVEL SET TO -O.])
+fi])
+
+# Versions of g++ up to 2.8.0 required -fhandle-exceptions, but it is
+# renamed as -fexceptions and is the default in versions 2.8.0 and after.
+AC_DEFUN(AC_GCC_CONFIG2, [
+AC_CACHE_CHECK([whether g++ requires -fhandle-exceptions],
+db_cv_gxx_except, [
+db_cv_gxx_except=no;
+if test "$GXX" = "yes"; then
+ GXX_VERSION=`${MAKEFILE_CXX} --version`
+ case ${GXX_VERSION} in
+ 1.*|2.[[01234567]].*|*-1.*|*-2.[[01234567]].*)
+ db_cv_gxx_except=yes;;
+ esac
+fi])
+if test "$db_cv_gxx_except" = "yes"; then
+ CXXFLAGS="$CXXFLAGS -fhandle-exceptions"
+fi])
diff --git a/storage/bdb/dist/aclocal/libtool.ac b/storage/bdb/dist/aclocal/libtool.ac
new file mode 100644
index 00000000000..e99faf15e4e
--- /dev/null
+++ b/storage/bdb/dist/aclocal/libtool.ac
@@ -0,0 +1,3633 @@
+# libtool.m4 - Configure libtool for the host system. -*-Shell-script-*-
+## Copyright 1996, 1997, 1998, 1999, 2000, 2001
+## Free Software Foundation, Inc.
+## Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful, but
+## WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+## General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+##
+## As a special exception to the GNU General Public License, if you
+## distribute this file as part of a program that contains a
+## configuration script generated by Autoconf, you may include it under
+## the same distribution terms that you use for the rest of that program.
+
+# serial 46 AC_PROG_LIBTOOL
+
+AC_DEFUN([AC_PROG_LIBTOOL],
+[AC_REQUIRE([AC_LIBTOOL_SETUP])dnl
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+AC_SUBST(LIBTOOL)dnl
+
+# Prevent multiple expansion
+define([AC_PROG_LIBTOOL], [])
+])
+
+AC_DEFUN([AC_LIBTOOL_SETUP],
+[AC_PREREQ(2.13)dnl
+AC_REQUIRE([AC_ENABLE_SHARED])dnl
+AC_REQUIRE([AC_ENABLE_STATIC])dnl
+AC_REQUIRE([AC_ENABLE_FAST_INSTALL])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_LD])dnl
+AC_REQUIRE([AC_PROG_LD_RELOAD_FLAG])dnl
+AC_REQUIRE([AC_PROG_NM])dnl
+AC_REQUIRE([AC_PROG_LN_S])dnl
+AC_REQUIRE([AC_DEPLIBS_CHECK_METHOD])dnl
+AC_REQUIRE([AC_OBJEXT])dnl
+AC_REQUIRE([AC_EXEEXT])dnl
+dnl
+
+_LT_AC_PROG_ECHO_BACKSLASH
+# Only perform the check for file, if the check method requires it
+case $deplibs_check_method in
+file_magic*)
+ if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+ AC_PATH_MAGIC
+ fi
+ ;;
+esac
+
+AC_CHECK_TOOL(RANLIB, ranlib, :)
+AC_CHECK_TOOL(STRIP, strip, :)
+
+ifdef([AC_PROVIDE_AC_LIBTOOL_DLOPEN], enable_dlopen=yes, enable_dlopen=no)
+ifdef([AC_PROVIDE_AC_LIBTOOL_WIN32_DLL],
+enable_win32_dll=yes, enable_win32_dll=no)
+
+AC_ARG_ENABLE(libtool-lock,
+ [ --disable-libtool-lock avoid locking (might break parallel builds)])
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+*-*-irix6*)
+ # Find out which ABI we are using.
+ echo '[#]line __oline__ "configure"' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -32"
+ ;;
+ *N32*)
+ LD="${LD-ld} -n32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -64"
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+
+*-*-sco3.2v5*)
+ # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -belf"
+ AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
+ [AC_LANG_SAVE
+ AC_LANG_C
+ AC_TRY_LINK([],[],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])
+ AC_LANG_RESTORE])
+ if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+ CFLAGS="$SAVE_CFLAGS"
+ fi
+ ;;
+
+ifdef([AC_PROVIDE_AC_LIBTOOL_WIN32_DLL],
+[*-*-cygwin* | *-*-mingw* | *-*-pw32*)
+ AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+ AC_CHECK_TOOL(AS, as, false)
+ AC_CHECK_TOOL(OBJDUMP, objdump, false)
+
+ # recent cygwin and mingw systems supply a stub DllMain which the user
+ # can override, but on older systems we have to supply one
+ AC_CACHE_CHECK([if libtool should supply DllMain function], lt_cv_need_dllmain,
+ [AC_TRY_LINK([],
+ [extern int __attribute__((__stdcall__)) DllMain(void*, int, void*);
+ DllMain (0, 0, 0);],
+ [lt_cv_need_dllmain=no],[lt_cv_need_dllmain=yes])])
+
+ case $host/$CC in
+ *-*-cygwin*/gcc*-mno-cygwin*|*-*-mingw*)
+ # old mingw systems require "-dll" to link a DLL, while more recent ones
+ # require "-mdll"
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -mdll"
+ AC_CACHE_CHECK([how to link DLLs], lt_cv_cc_dll_switch,
+ [AC_TRY_LINK([], [], [lt_cv_cc_dll_switch=-mdll],[lt_cv_cc_dll_switch=-dll])])
+ CFLAGS="$SAVE_CFLAGS" ;;
+ *-*-cygwin* | *-*-pw32*)
+ # cygwin systems need to pass --dll to the linker, and not link
+ # crt.o which will require a WinMain@16 definition.
+ lt_cv_cc_dll_switch="-Wl,--dll -nostartfiles" ;;
+ esac
+ ;;
+ ])
+esac
+
+_LT_AC_LTCONFIG_HACK
+
+])
+
+# AC_LIBTOOL_HEADER_ASSERT
+# ------------------------
+AC_DEFUN([AC_LIBTOOL_HEADER_ASSERT],
+[AC_CACHE_CHECK([whether $CC supports assert without backlinking],
+ [lt_cv_func_assert_works],
+ [case $host in
+ *-*-solaris*)
+ if test "$GCC" = yes && test "$with_gnu_ld" != yes; then
+ case `$CC --version 2>/dev/null` in
+ [[12]].*) lt_cv_func_assert_works=no ;;
+ *) lt_cv_func_assert_works=yes ;;
+ esac
+ fi
+ ;;
+ esac])
+
+if test "x$lt_cv_func_assert_works" = xyes; then
+ AC_CHECK_HEADERS(assert.h)
+fi
+])# AC_LIBTOOL_HEADER_ASSERT
+
+# _LT_AC_CHECK_DLFCN
+# --------------------
+AC_DEFUN([_LT_AC_CHECK_DLFCN],
+[AC_CHECK_HEADERS(dlfcn.h)
+])# _LT_AC_CHECK_DLFCN
+
+# AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE
+# ---------------------------------
+AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE],
+[AC_REQUIRE([AC_CANONICAL_HOST])
+AC_REQUIRE([AC_PROG_NM])
+AC_REQUIRE([AC_OBJEXT])
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+AC_MSG_CHECKING([command to parse $NM output])
+AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], [dnl
+
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix. What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[[BCDEGRST]]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
+
+# Transform the above into a raw symbol and a C symbol.
+symxfrm='\1 \2\3 \3'
+
+# Transform an extracted symbol line into a proper C declaration
+lt_cv_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+ symcode='[[BCDT]]'
+ ;;
+cygwin* | mingw* | pw32*)
+ symcode='[[ABCDGISTW]]'
+ ;;
+hpux*) # Its linker distinguishes data from code symbols
+ lt_cv_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern char \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+ lt_cv_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+ ;;
+irix*)
+ symcode='[[BCDEGRST]]'
+ ;;
+solaris* | sysv5*)
+ symcode='[[BDT]]'
+ ;;
+sysv4)
+ symcode='[[DFNSTU]]'
+ ;;
+esac
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $host_os in
+mingw*)
+ opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+if $NM -V 2>&1 | egrep '(GNU|with BFD)' > /dev/null; then
+ symcode='[[ABCDGISTW]]'
+fi
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Write the raw and C identifiers.
+lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*\($ac_symprfx\)$sympat$opt_cr$/$symxfrm/p'"
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+ rm -f conftest*
+ cat > conftest.$ac_ext <<EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+EOF
+
+ if AC_TRY_EVAL(ac_compile); then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if egrep ' nm_test_var$' "$nlist" >/dev/null; then
+ if egrep ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<EOF > conftest.$ac_ext
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_global_symbol_to_cdecl"' < "$nlist" >> conftest.$ac_ext'
+
+ cat <<EOF >> conftest.$ac_ext
+#if defined (__STDC__) && __STDC__
+# define lt_ptr void *
+#else
+# define lt_ptr char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ lt_ptr address;
+}
+lt_preloaded_symbols[[]] =
+{
+EOF
+ sed "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr) \&\2},/" < "$nlist" >> conftest.$ac_ext
+ cat <<\EOF >> conftest.$ac_ext
+ {0, (lt_ptr) 0}
+};
+
+#ifdef __cplusplus
+}
+#endif
+EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="conftstm.$ac_objext"
+ CFLAGS="$CFLAGS$no_builtin_flag"
+ if AC_TRY_EVAL(ac_link) && test -s conftest; then
+ pipe_works=yes
+ fi
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+ else
+ echo "cannot find nm_test_func in $nlist" >&AC_FD_CC
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&AC_FD_CC
+ fi
+ else
+ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AC_FD_CC
+ fi
+ else
+ echo "$progname: failed program was:" >&AC_FD_CC
+ cat conftest.$ac_ext >&5
+ fi
+ rm -f conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
+ if test "$pipe_works" = yes; then
+ break
+ else
+ lt_cv_sys_global_symbol_pipe=
+ fi
+done
+])
+global_symbol_pipe="$lt_cv_sys_global_symbol_pipe"
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+ global_symbol_to_cdecl=
+ global_symbol_to_c_name_address=
+else
+ global_symbol_to_cdecl="$lt_cv_global_symbol_to_cdecl"
+ global_symbol_to_c_name_address="$lt_cv_global_symbol_to_c_name_address"
+fi
+if test -z "$global_symbol_pipe$global_symbol_to_cdecl$global_symbol_to_c_name_address";
+then
+ AC_MSG_RESULT(failed)
+else
+ AC_MSG_RESULT(ok)
+fi
+]) # AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE
+
+# _LT_AC_LIBTOOL_SYS_PATH_SEPARATOR
+# ---------------------------------
+AC_DEFUN([_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR],
+[# Find the correct PATH separator. Usually this is `:', but
+# DJGPP uses `;' like DOS.
+if test "X${PATH_SEPARATOR+set}" != Xset; then
+ UNAME=${UNAME-`uname 2>/dev/null`}
+ case X$UNAME in
+ *-DOS) lt_cv_sys_path_separator=';' ;;
+ *) lt_cv_sys_path_separator=':' ;;
+ esac
+ PATH_SEPARATOR=$lt_cv_sys_path_separator
+fi
+])# _LT_AC_LIBTOOL_SYS_PATH_SEPARATOR
+
+# _LT_AC_PROG_ECHO_BACKSLASH
+# --------------------------
+# Add some code to the start of the generated configure script which
+# will find an echo command which doesn't interpret backslashes.
+AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH],
+[ifdef([AC_DIVERSION_NOTICE], [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)],
+ [AC_DIVERT_PUSH(NOTICE)])
+_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR
+
+# Check that we are running under the correct shell.
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+case X$ECHO in
+X*--fallback-echo)
+ # Remove one level of quotation (which was required for Make).
+ ECHO=`echo "$ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','`
+ ;;
+esac
+
+echo=${ECHO-echo}
+if test "X[$]1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X[$]1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
+ # Yippee, $echo works!
+ :
+else
+ # Restart under the correct shell.
+ exec $SHELL "[$]0" --no-reexec ${1+"[$]@"}
+fi
+
+if test "X[$]1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+$*
+EOF
+ exit 0
+fi
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+if test -z "$ECHO"; then
+if test "X${echo_test_string+set}" != Xset; then
+# find a string as large as possible, as long as the shell can cope with it
+ for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do
+ # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
+ if (echo_test_string="`eval $cmd`") 2>/dev/null &&
+ echo_test_string="`eval $cmd`" &&
+ (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null
+ then
+ break
+ fi
+ done
+fi
+
+if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ :
+else
+ # The Solaris, AIX, and Digital Unix default echo programs unquote
+ # backslashes. This makes it impossible to quote backslashes using
+ # echo "$something" | sed 's/\\/\\\\/g'
+ #
+ # So, first we look for a working echo in the user's PATH.
+
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for dir in $PATH /usr/ucb; do
+ if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
+ test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$dir/echo"
+ break
+ fi
+ done
+ IFS="$save_ifs"
+
+ if test "X$echo" = Xecho; then
+ # We didn't find a better echo, so look for alternatives.
+ if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # This shell has a builtin print -r that does the trick.
+ echo='print -r'
+ elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) &&
+ test "X$CONFIG_SHELL" != X/bin/ksh; then
+ # If we have ksh, try running configure again with it.
+ ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
+ export ORIGINAL_CONFIG_SHELL
+ CONFIG_SHELL=/bin/ksh
+ export CONFIG_SHELL
+ exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"}
+ else
+ # Try using printf.
+ echo='printf %s\n'
+ if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # Cool, printf works
+ :
+ elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL
+ export CONFIG_SHELL
+ SHELL="$CONFIG_SHELL"
+ export SHELL
+ echo="$CONFIG_SHELL [$]0 --fallback-echo"
+ elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$CONFIG_SHELL [$]0 --fallback-echo"
+ else
+ # maybe with a smaller string...
+ prev=:
+
+ for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do
+ if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null
+ then
+ break
+ fi
+ prev="$cmd"
+ done
+
+ if test "$prev" != 'sed 50q "[$]0"'; then
+ echo_test_string=`eval $prev`
+ export echo_test_string
+ exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"}
+ else
+ # Oops. We lost completely, so just stick with echo.
+ echo=echo
+ fi
+ fi
+ fi
+ fi
+fi
+fi
+
+# Copy echo and quote the copy suitably for passing to libtool from
+# the Makefile, instead of quoting the original, which is used later.
+ECHO=$echo
+if test "X$ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then
+ ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo"
+fi
+
+AC_SUBST(ECHO)
+AC_DIVERT_POP
+])# _LT_AC_PROG_ECHO_BACKSLASH
+
+# _LT_AC_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ------------------------------------------------------------------
+AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF],
+[if test "$cross_compiling" = yes; then :
+ [$4]
+else
+ AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<EOF
+[#line __oline__ "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work in some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" void exit (int);
+#endif
+
+void fnord() { int i=42;}
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ /* dlclose (self); */
+ }
+
+ exit (status);
+}]
+EOF
+ if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) $1 ;;
+ x$lt_dlneed_uscore) $2 ;;
+    x$lt_dlunknown|x*) $3 ;;
+ esac
+ else :
+ # compilation failed
+ $3
+ fi
+fi
+rm -fr conftest*
+])# _LT_AC_TRY_DLOPEN_SELF
+
+# AC_LIBTOOL_DLOPEN_SELF
+# -------------------
+AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF],
+[if test "x$enable_dlopen" != xyes; then
+ enable_dlopen=unknown
+ enable_dlopen_self=unknown
+ enable_dlopen_self_static=unknown
+else
+ lt_cv_dlopen=no
+ lt_cv_dlopen_libs=
+
+ case $host_os in
+ beos*)
+ lt_cv_dlopen="load_add_on"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ lt_cv_dlopen="LoadLibrary"
+ lt_cv_dlopen_libs=
+ ;;
+
+ *)
+ AC_CHECK_FUNC([shl_load],
+ [lt_cv_dlopen="shl_load"],
+ [AC_CHECK_LIB([dld], [shl_load],
+ [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"],
+ [AC_CHECK_FUNC([dlopen],
+ [lt_cv_dlopen="dlopen"],
+ [AC_CHECK_LIB([dl], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],
+ [AC_CHECK_LIB([svld], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"],
+ [AC_CHECK_LIB([dld], [dld_link],
+ [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"])
+ ])
+ ])
+ ])
+ ])
+ ])
+ ;;
+ esac
+
+ if test "x$lt_cv_dlopen" != xno; then
+ enable_dlopen=yes
+ else
+ enable_dlopen=no
+ fi
+
+ case $lt_cv_dlopen in
+ dlopen)
+ save_CPPFLAGS="$CPPFLAGS"
+ AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl
+ test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+ save_LDFLAGS="$LDFLAGS"
+ eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+ save_LIBS="$LIBS"
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ AC_CACHE_CHECK([whether a program can dlopen itself],
+ lt_cv_dlopen_self, [dnl
+ _LT_AC_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
+ lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
+ ])
+
+ if test "x$lt_cv_dlopen_self" = xyes; then
+ LDFLAGS="$LDFLAGS $link_static_flag"
+ AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
+ lt_cv_dlopen_self_static, [dnl
+ _LT_AC_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
+ lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross)
+ ])
+ fi
+
+ CPPFLAGS="$save_CPPFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+ ;;
+ esac
+
+ case $lt_cv_dlopen_self in
+ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+ *) enable_dlopen_self=unknown ;;
+ esac
+
+ case $lt_cv_dlopen_self_static in
+ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+ *) enable_dlopen_self_static=unknown ;;
+ esac
+fi
+])# AC_LIBTOOL_DLOPEN_SELF
+
+AC_DEFUN([_LT_AC_LTCONFIG_HACK],
+[AC_REQUIRE([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])dnl
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e s/^X//'
+sed_quote_subst='s/\([[\\"\\`$\\\\]]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([[\\"\\`\\\\]]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Constants:
+rm="rm -f"
+
+# Global variables:
+default_ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except M$VC,
+# which needs '.lib').
+libext=a
+ltmain="$ac_aux_dir/ltmain.sh"
+ofile="$default_ofile"
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+need_locks="$enable_libtool_lock"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$AR" && AR=ar
+test -z "$AR_FLAGS" && AR_FLAGS=cru
+test -z "$AS" && AS=as
+test -z "$CC" && CC=cc
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+test -z "$LD" && LD=ld
+test -z "$LN_S" && LN_S="ln -s"
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+test -z "$NM" && NM=nm
+test -z "$OBJDUMP" && OBJDUMP=objdump
+test -z "$RANLIB" && RANLIB=:
+test -z "$STRIP" && STRIP=:
+test -z "$ac_objext" && ac_objext=o
+
+if test x"$host" != x"$build"; then
+ ac_tool_prefix=${host_alias}-
+else
+ ac_tool_prefix=
+fi
+
+# Transform linux* to *-*-linux-gnu*, to support old configure scripts.
+case $host_os in
+linux-gnu*) ;;
+linux*) host=`echo $host | sed 's/^\(.*-.*-linux\)\(.*\)$/\1-gnu\2/'`
+esac
+
+case $host_os in
+aix3*)
+ # AIX sometimes has problems with the GCC collect2 program. For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+ if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+ fi
+ ;;
+esac
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+ case $host_os in
+ openbsd*)
+ old_postinstall_cmds="\$RANLIB -t \$oldlib~$old_postinstall_cmds"
+ ;;
+ *)
+ old_postinstall_cmds="\$RANLIB \$oldlib~$old_postinstall_cmds"
+ ;;
+ esac
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+fi
+
+# Allow CC to be a program name with arguments.
+set dummy $CC
+compiler="[$]2"
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([for objdir])
+rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+ objdir=.libs
+else
+ # MS-DOS does not allow filenames that begin with a dot.
+ objdir=_libs
+fi
+rmdir .libs 2>/dev/null
+AC_MSG_RESULT($objdir)
+##
+## END FIXME
+
+
+## FIXME: this should be a separate macro
+##
+AC_ARG_WITH(pic,
+[ --with-pic try to use only PIC/non-PIC objects [default=use both]],
+pic_mode="$withval", pic_mode=default)
+test -z "$pic_mode" && pic_mode=default
+
+# We assume here that the value for lt_cv_prog_cc_pic will not be cached
+# in isolation, and that seeing it set (from the cache) indicates that
+# the associated values are set (in the cache) correctly too.
+AC_MSG_CHECKING([for $compiler option to produce PIC])
+AC_CACHE_VAL(lt_cv_prog_cc_pic,
+[ lt_cv_prog_cc_pic=
+ lt_cv_prog_cc_shlib=
+ lt_cv_prog_cc_wl=
+ lt_cv_prog_cc_static=
+ lt_cv_prog_cc_no_builtin=
+ lt_cv_prog_cc_can_build_shared=$can_build_shared
+
+ if test "$GCC" = yes; then
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-static'
+
+ case $host_os in
+ aix*)
+ # Below there is a dirty hack to force normal static linking with -ldl
+ # The problem is that libdl is dynamically linked with both libc and
+ # libC (the AIX C++ library), which is obviously not included in the
+ # library list by gcc. This causes undefined symbols with -static flags.
+ # This hack allows C programs to be linked with "-static -ldl", but
+ # not sure about C++ programs.
+ lt_cv_prog_cc_static="$lt_cv_prog_cc_static ${lt_cv_prog_cc_wl}-lC"
+ ;;
+ amigaos*)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ lt_cv_prog_cc_pic='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ beos* | irix5* | irix6* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ lt_cv_prog_cc_pic='-fno-common'
+ ;;
+ cygwin* | mingw* | pw32* | os2*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_cv_prog_cc_pic='-DDLL_EXPORT'
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ lt_cv_prog_cc_pic=-Kconform_pic
+ fi
+ ;;
+ *)
+ lt_cv_prog_cc_pic='-fPIC'
+ ;;
+ esac
+ else
+ # PORTME Check for PIC flags for the system compiler.
+ case $host_os in
+ aix3* | aix4* | aix5*)
+ lt_cv_prog_cc_wl='-Wl,'
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_cv_prog_cc_static='-Bstatic'
+ else
+ lt_cv_prog_cc_static='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ # Is there a better lt_cv_prog_cc_static that works with the bundled CC?
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static="${lt_cv_prog_cc_wl}-a ${lt_cv_prog_cc_wl}archive"
+ lt_cv_prog_cc_pic='+Z'
+ ;;
+
+ irix5* | irix6*)
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-non_shared'
+ # PIC (with -KPIC) is the default.
+ ;;
+
+ cygwin* | mingw* | pw32* | os2*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_cv_prog_cc_pic='-DDLL_EXPORT'
+ ;;
+
+ newsos6)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ ;;
+
+ osf3* | osf4* | osf5*)
+ # All OSF/1 code is PIC.
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-non_shared'
+ ;;
+
+ sco3.2v5*)
+ lt_cv_prog_cc_pic='-Kpic'
+ lt_cv_prog_cc_static='-dn'
+ lt_cv_prog_cc_shlib='-belf'
+ ;;
+
+ solaris*)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ lt_cv_prog_cc_wl='-Wl,'
+ ;;
+
+ sunos4*)
+ lt_cv_prog_cc_pic='-PIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ lt_cv_prog_cc_wl='-Qoption ld '
+ ;;
+
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ if test "x$host_vendor" = xsni; then
+ lt_cv_prog_cc_wl='-LD'
+ else
+ lt_cv_prog_cc_wl='-Wl,'
+ fi
+ ;;
+
+ uts4*)
+ lt_cv_prog_cc_pic='-pic'
+ lt_cv_prog_cc_static='-Bstatic'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec ;then
+ lt_cv_prog_cc_pic='-Kconform_pic'
+ lt_cv_prog_cc_static='-Bstatic'
+ fi
+ ;;
+
+ *)
+ lt_cv_prog_cc_can_build_shared=no
+ ;;
+ esac
+ fi
+])
+if test -z "$lt_cv_prog_cc_pic"; then
+ AC_MSG_RESULT([none])
+else
+ AC_MSG_RESULT([$lt_cv_prog_cc_pic])
+
+ # Check to make sure the pic_flag actually works.
+ AC_MSG_CHECKING([if $compiler PIC flag $lt_cv_prog_cc_pic works])
+ AC_CACHE_VAL(lt_cv_prog_cc_pic_works, [dnl
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $lt_cv_prog_cc_pic -DPIC"
+ AC_TRY_COMPILE([], [], [dnl
+ case $host_os in
+ hpux9* | hpux10* | hpux11*)
+ # On HP-UX, both CC and GCC only warn that PIC is supported... then
+ # they create non-PIC objects. So, if there were any warnings, we
+ # assume that PIC is not supported.
+ if test -s conftest.err; then
+ lt_cv_prog_cc_pic_works=no
+ else
+ lt_cv_prog_cc_pic_works=yes
+ fi
+ ;;
+ *)
+ lt_cv_prog_cc_pic_works=yes
+ ;;
+ esac
+ ], [dnl
+ lt_cv_prog_cc_pic_works=no
+ ])
+ CFLAGS="$save_CFLAGS"
+ ])
+
+ if test "X$lt_cv_prog_cc_pic_works" = Xno; then
+ lt_cv_prog_cc_pic=
+ lt_cv_prog_cc_can_build_shared=no
+ else
+ lt_cv_prog_cc_pic=" $lt_cv_prog_cc_pic"
+ fi
+
+ AC_MSG_RESULT([$lt_cv_prog_cc_pic_works])
+fi
+##
+## END FIXME
+
+# Check for any special shared library compilation flags.
+if test -n "$lt_cv_prog_cc_shlib"; then
+ AC_MSG_WARN([\`$CC' requires \`$lt_cv_prog_cc_shlib' to build shared libraries])
+ if echo "$old_CC $old_CFLAGS " | egrep -e "[[ ]]$lt_cv_prog_cc_shlib[[ ]]" >/dev/null; then :
+ else
+ AC_MSG_WARN([add \`$lt_cv_prog_cc_shlib' to the CC or CFLAGS env variable and reconfigure])
+ lt_cv_prog_cc_can_build_shared=no
+ fi
+fi
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([if $compiler static flag $lt_cv_prog_cc_static works])
+AC_CACHE_VAL([lt_cv_prog_cc_static_works], [dnl
+ lt_cv_prog_cc_static_works=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $lt_cv_prog_cc_static"
+ AC_TRY_LINK([], [], [lt_cv_prog_cc_static_works=yes])
+ LDFLAGS="$save_LDFLAGS"
+])
+
+# Belt *and* braces to stop my trousers falling down:
+test "X$lt_cv_prog_cc_static_works" = Xno && lt_cv_prog_cc_static=
+AC_MSG_RESULT([$lt_cv_prog_cc_static_works])
+
+pic_flag="$lt_cv_prog_cc_pic"
+special_shlib_compile_flags="$lt_cv_prog_cc_shlib"
+wl="$lt_cv_prog_cc_wl"
+link_static_flag="$lt_cv_prog_cc_static"
+no_builtin_flag="$lt_cv_prog_cc_no_builtin"
+can_build_shared="$lt_cv_prog_cc_can_build_shared"
+##
+## END FIXME
+
+
+## FIXME: this should be a separate macro
+##
+# Check to see if options -o and -c are simultaneously supported by compiler
+AC_MSG_CHECKING([if $compiler supports -c -o file.$ac_objext])
+AC_CACHE_VAL([lt_cv_compiler_c_o], [
+$rm -r conftest 2>/dev/null
+mkdir conftest
+cd conftest
+echo "int some_variable = 0;" > conftest.$ac_ext
+mkdir out
+# According to Tom Tromey, Ian Lance Taylor reported there are C compilers
+# that will create temporary files in the current directory regardless of
+# the output directory. Thus, making CWD read-only will cause this test
+# to fail, enabling locking or at least warning the user not to do parallel
+# builds.
+chmod -w .
+save_CFLAGS="$CFLAGS"
+CFLAGS="$CFLAGS -o out/conftest2.$ac_objext"
+compiler_c_o=no
+if { (eval echo configure:__oline__: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>out/conftest.err; } && test -s out/conftest2.$ac_objext; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s out/conftest.err; then
+ lt_cv_compiler_c_o=no
+ else
+ lt_cv_compiler_c_o=yes
+ fi
+else
+ # Append any errors to the config.log.
+ cat out/conftest.err 1>&AC_FD_CC
+ lt_cv_compiler_c_o=no
+fi
+CFLAGS="$save_CFLAGS"
+chmod u+w .
+$rm conftest* out/*
+rmdir out
+cd ..
+rmdir conftest
+$rm -r conftest 2>/dev/null
+])
+compiler_c_o=$lt_cv_compiler_c_o
+AC_MSG_RESULT([$compiler_c_o])
+
+if test x"$compiler_c_o" = x"yes"; then
+ # Check to see if we can write to a .lo
+ AC_MSG_CHECKING([if $compiler supports -c -o file.lo])
+ AC_CACHE_VAL([lt_cv_compiler_o_lo], [
+ lt_cv_compiler_o_lo=no
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -c -o conftest.lo"
+ save_objext="$ac_objext"
+ ac_objext=lo
+ AC_TRY_COMPILE([], [int some_variable = 0;], [dnl
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ lt_cv_compiler_o_lo=no
+ else
+ lt_cv_compiler_o_lo=yes
+ fi
+ ])
+ ac_objext="$save_objext"
+ CFLAGS="$save_CFLAGS"
+ ])
+ compiler_o_lo=$lt_cv_compiler_o_lo
+ AC_MSG_RESULT([$compiler_o_lo])
+else
+ compiler_o_lo=no
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Check to see if we can do hard links to lock some files if needed
+hard_links="nottested"
+if test "$compiler_c_o" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ AC_MSG_CHECKING([if we can lock with hard links])
+ hard_links=yes
+ $rm conftest*
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ AC_MSG_RESULT([$hard_links])
+ if test "$hard_links" = no; then
+ AC_MSG_WARN([\`$CC' does not support \`-c -o', so \`make -j' may be unsafe])
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+if test "$GCC" = yes; then
+ # Check to see if options -fno-rtti -fno-exceptions are supported by compiler
+ AC_MSG_CHECKING([if $compiler supports -fno-rtti -fno-exceptions])
+ echo "int some_variable = 0;" > conftest.$ac_ext
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -fno-rtti -fno-exceptions -c conftest.$ac_ext"
+ compiler_rtti_exceptions=no
+ AC_TRY_COMPILE([], [int some_variable = 0;], [dnl
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ compiler_rtti_exceptions=no
+ else
+ compiler_rtti_exceptions=yes
+ fi
+ ])
+ CFLAGS="$save_CFLAGS"
+ AC_MSG_RESULT([$compiler_rtti_exceptions])
+
+ if test "$compiler_rtti_exceptions" = "yes"; then
+ no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions'
+ else
+ no_builtin_flag=' -fno-builtin'
+ fi
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# See if the linker supports building shared libraries.
+AC_MSG_CHECKING([whether the linker ($LD) supports shared libraries])
+
+allow_undefined_flag=
+no_undefined_flag=
+need_lib_prefix=unknown
+need_version=unknown
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+archive_cmds=
+archive_expsym_cmds=
+old_archive_from_new_cmds=
+old_archive_from_expsyms_cmds=
+export_dynamic_flag_spec=
+whole_archive_flag_spec=
+thread_safe_flag_spec=
+hardcode_into_libs=no
+hardcode_libdir_flag_spec=
+hardcode_libdir_separator=
+hardcode_direct=no
+hardcode_minus_L=no
+hardcode_shlibpath_var=unsupported
+runpath_var=
+link_all_deplibs=unknown
+always_export_symbols=no
+export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | sed '\''s/.* //'\'' | sort | uniq > $export_symbols'
+# include_expsyms should be a list of space-separated symbols to be *always*
+# included in the symbol list
+include_expsyms=
+# exclude_expsyms can be an egrep regular expression of symbols to exclude
+# it will be wrapped by ` (' and `)$', so one must not match beginning or
+# end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+# as well as any symbol that contains `d'.
+exclude_expsyms="_GLOBAL_OFFSET_TABLE_"
+# Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out
+# platforms (ab)use it in PIC code, but their linkers get confused if
+# the symbol is explicitly referenced. Since portable code cannot
+# rely on this symbol name, it's probably fine to never include it in
+# preloaded symbol tables.
+extract_expsyms_cmds=
+
+case $host_os in
+cygwin* | mingw* | pw32*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+openbsd*)
+ with_gnu_ld=no
+ ;;
+esac
+
+ld_shlibs=yes
+if test "$with_gnu_ld" = yes; then
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ wlarc='${wl}'
+
+ # See if GNU ld supports shared libraries.
+ case $host_os in
+ aix3* | aix4* | aix5*)
+ # On AIX, the GNU linker is very broken
+ # Note: Check GNU linker on AIX 5-IA64 when/if it becomes available.
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.9.1, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support. If you
+*** really care for shared libraries, you may want to modify your PATH
+*** so that a non-GNU linker is found, and then restart.
+
+EOF
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+
+ # Samuel A. Falvo II <kc5tja@dolphin.openprojects.net> reports
+ # that the semantics of dynamic libraries on AmigaOS, at least up
+ # to version 4, is to share data among multiple programs linked
+ # with the same dynamic library. Since this doesn't match the
+ # behavior of shared libraries on other platforms, we cannot use
+ # them.
+ ld_shlibs=no
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ allow_undefined_flag=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec='-L$libdir'
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+
+ extract_expsyms_cmds='test -f $output_objdir/impgen.c || \
+ sed -e "/^# \/\* impgen\.c starts here \*\//,/^# \/\* impgen.c ends here \*\// { s/^# //;s/^# *$//; p; }" -e d < $''0 > $output_objdir/impgen.c~
+ test -f $output_objdir/impgen.exe || (cd $output_objdir && \
+ if test "x$HOST_CC" != "x" ; then $HOST_CC -o impgen impgen.c ; \
+ else $CC -o impgen impgen.c ; fi)~
+ $output_objdir/impgen $dir/$soroot > $output_objdir/$soname-def'
+
+ old_archive_from_expsyms_cmds='$DLLTOOL --as=$AS --dllname $soname --def $output_objdir/$soname-def --output-lib $output_objdir/$newlib'
+
+ # cygwin and mingw dlls have different entry points and sets of symbols
+ # to exclude.
+ # FIXME: what about values for MSVC?
+ dll_entry=__cygwin_dll_entry@12
+ dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12~
+ case $host_os in
+ mingw*)
+ # mingw values
+ dll_entry=_DllMainCRTStartup@12
+ dll_exclude_symbols=DllMain@12,DllMainCRTStartup@12,DllEntryPoint@12~
+ ;;
+ esac
+
+ # mingw and cygwin differ, and it's simplest to just exclude the union
+ # of the two symbol sets.
+ dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12,DllMainCRTStartup@12,DllEntryPoint@12
+
+ # recent cygwin and mingw systems supply a stub DllMain which the user
+ # can override, but on older systems we have to supply one (in ltdll.c)
+ if test "x$lt_cv_need_dllmain" = "xyes"; then
+ ltdll_obj='$output_objdir/$soname-ltdll.'"$ac_objext "
+ ltdll_cmds='test -f $output_objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $''0 > $output_objdir/$soname-ltdll.c~
+ test -f $output_objdir/$soname-ltdll.$ac_objext || (cd $output_objdir && $CC -c $soname-ltdll.c)~'
+ else
+ ltdll_obj=
+ ltdll_cmds=
+ fi
+
+ # Extract the symbol export list from an `--export-all' def file,
+ # then regenerate the def file from the symbol export list, so that
+ # the compiled dll only exports the symbol export list.
+ # Be careful not to strip the DATA tag left by newer dlltools.
+ export_symbols_cmds="$ltdll_cmds"'
+ $DLLTOOL --export-all --exclude-symbols '$dll_exclude_symbols' --output-def $output_objdir/$soname-def '$ltdll_obj'$libobjs $convenience~
+ sed -e "1,/EXPORTS/d" -e "s/ @ [[0-9]]*//" -e "s/ *;.*$//" < $output_objdir/$soname-def > $export_symbols'
+
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is.
+ # If DATA tags from a recent dlltool are present, honour them!
+ archive_expsym_cmds='if test "x`head -1 $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname-def;
+ else
+ echo EXPORTS > $output_objdir/$soname-def;
+ _lt_hint=1;
+ cat $export_symbols | while read symbol; do
+ set dummy \$symbol;
+ case \[$]# in
+ 2) echo " \[$]2 @ \$_lt_hint ; " >> $output_objdir/$soname-def;;
+ *) echo " \[$]2 @ \$_lt_hint \[$]3 ; " >> $output_objdir/$soname-def;;
+ esac;
+ _lt_hint=`expr 1 + \$_lt_hint`;
+ done;
+ fi~
+ '"$ltdll_cmds"'
+ $CC -Wl,--base-file,$output_objdir/$soname-base '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp~
+ $CC -Wl,--base-file,$output_objdir/$soname-base $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp --output-lib $output_objdir/$libname.dll.a~
+ $CC $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags'
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+ wlarc=
+ else
+ archive_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ fi
+ ;;
+
+ solaris* | sysv5*)
+ if $LD -v 2>&1 | egrep 'BFD 2\.8' > /dev/null; then
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+EOF
+ elif $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ sunos4*)
+ archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ wlarc=
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+
+ if test "$ld_shlibs" = yes; then
+ runpath_var=LD_RUN_PATH
+ hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir'
+ export_dynamic_flag_spec='${wl}--export-dynamic'
+ case $host_os in
+ cygwin* | mingw* | pw32*)
+ # dlltool doesn't understand --whole-archive et. al.
+ whole_archive_flag_spec=
+ ;;
+ *)
+ # ancient GNU ld didn't support --whole-archive et. al.
+ if $LD --help 2>&1 | egrep 'no-whole-archive' > /dev/null; then
+ whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ whole_archive_flag_spec=
+ fi
+ ;;
+ esac
+ fi
+else
+ # PORTME fill in a description of your system's linker (not GNU ld)
+ case $host_os in
+ aix3*)
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+ archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ hardcode_minus_L=yes
+ if test "$GCC" = yes && test -z "$link_static_flag"; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ hardcode_direct=unsupported
+ fi
+ ;;
+
+ aix4* | aix5*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ hardcode_direct=yes
+ archive_cmds=''
+ hardcode_libdir_separator=':'
+
+ #### Local change for Sleepycat's Berkeley DB [#5779]:
+ # Added $aix_export variable to control use of exports file.
+ # For non-gcc, we don't use exports files, and rather trust
+ # the binder's -qmkshrobj option to export all the mangled
+ # symbols we need for C++ and java.
+
+ aix_export="\${wl}$exp_sym_flag:\$export_symbols"
+
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[[012]]|aix4.[[012]].*)
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" && \
+ strings "$collect2name" | grep resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ hardcode_direct=yes
+ else
+ # We have old collect2
+ hardcode_direct=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ hardcode_minus_L=yes
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_libdir_separator=
+ fi
+ esac
+
+ shared_flag='-shared'
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ shared_flag='${wl}-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+
+ # Test for -qmkshrobj and use it if it's available.
+ # It's superior for determining exportable symbols,
+ # especially for C++ or JNI libraries, which have
+ # mangled names.
+ #
+ AC_LANG_CONFTEST(void f(){})
+ if AC_TRY_EVAL(CC -c conftest.c) && AC_TRY_EVAL(CC -o conftest conftest.$ac_objext -qmkshrobj -lC_r); then
+ lt_cv_aix_mkshrobj=yes
+ else
+ lt_cv_aix_mkshrobj=no
+ fi
+
+ if test "$lt_cv_aix_mkshrobj" = yes; then
+ aix_export="-qmkshrobj"
+ fi
+ fi
+
+ # It seems that -bexpall can do strange things, so it is better to
+ # generate a list of symbols to export.
+ always_export_symbols=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='-berok'
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib'
+ archive_expsym_cmds="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag $aix_export $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+ allow_undefined_flag="-z nodefs"
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname ${wl}-h$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag $aix_export"
+ else
+ hardcode_libdir_flag_spec='${wl}-bnolibpath ${wl}-blibpath:$libdir:/usr/lib:/lib'
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='${wl}-berok'
+ # This is a bit strange, but is similar to how AIX traditionally builds
+ # its shared libraries.
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${allow_undefined_flag} '"\${wl}$no_entry_flag $aix_export"' ~$AR -crlo $objdir/$libname$release.a $objdir/$soname'
+ fi
+ fi
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ # see comment about different semantics on the GNU ld section
+ ld_shlibs=no
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | sed -e '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ old_archive_from_new_cmds='true'
+ # FIXME: Should let the user specify the lib program.
+ old_archive_cmds='lib /OUT:$oldlib$oldobjs$old_deplibs'
+ fix_srcfile_path='`cygpath -w "$srcfile"`'
+ ;;
+
+ darwin* | rhapsody*)
+ case "$host_os" in
+ rhapsody* | darwin1.[[012]])
+ allow_undefined_flag='-undefined suppress'
+ ;;
+ *) # Darwin 1.3 on
+ allow_undefined_flag='-flat_namespace -undefined suppress'
+ ;;
+ esac
+ # FIXME: Relying on posixy $() will cause problems for
+ # cross-compilation, but unfortunately the echo tests do not
+ # yet detect zsh echo's removal of \ escapes.
+
+ #### Local change for Sleepycat's Berkeley DB [#5664] [#6511]
+ case "$host_os" in
+ darwin[[12345]].*)
+ # removed double quotes in the following line:
+ archive_cmds='$nonopt $(test x$module = xyes && echo -bundle || echo -dynamiclib) $allow_undefined_flag -o $lib $libobjs $deplibs$linker_flags -install_name $rpath/$soname $verstring'
+ ;;
+ *) # Darwin6.0 on (Mac OS/X Jaguar)
+ archive_cmds='$nonopt $allow_undefined_flag -o $lib $libobjs $deplibs$linker_flags -dynamiclib -install_name $rpath/$soname $verstring'
+ ;;
+ esac
+ #### End of changes for Sleepycat's Berkeley DB [#5664] [#6511]
+
+ # We need to add '_' to the symbols in $export_symbols first
+ #archive_expsym_cmds="$archive_cmds"' && strip -s $export_symbols'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ whole_archive_flag_spec='-all_load $convenience'
+ ;;
+
+ freebsd1*)
+ ld_shlibs=no
+ ;;
+
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+ # support. Future versions do this automatically, but an explicit c++rt0.o
+ # does not break anything, and helps significantly (at the cost of a little
+ # extra space).
+ freebsd2.2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+ freebsd2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+ freebsd*)
+ archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ case $host_os in
+ hpux9*) archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ;;
+ *) archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' ;;
+ esac
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ hardcode_minus_L=yes # Not in the search PATH, but as the default
+ # location of the library.
+ export_dynamic_flag_spec='${wl}-E'
+ ;;
+
+ irix5* | irix6*)
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ link_all_deplibs=yes
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out
+ else
+ archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF
+ fi
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ newsos6)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_shlibpath_var=no
+ ;;
+
+ openbsd*)
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec='${wl}-E'
+ else
+ case "$host_os" in
+ openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ *)
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ ;;
+
+ os2*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ allow_undefined_flag=unsupported
+ archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+ ;;
+
+ osf3*)
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+
+ osf4* | osf5*) # as osf3* with the addition of -msym flag
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds='for i in `cat $export_symbols`; do printf "-exported_symbol " >> $lib.exp; echo "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~
+ $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib~$rm $lib.exp'
+
+ #Both c and cxx compiler support -rpath directly
+ hardcode_libdir_flag_spec='-rpath $libdir'
+ fi
+ hardcode_libdir_separator=:
+ ;;
+
+ sco3.2v5*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ export_dynamic_flag_spec='${wl}-Bexport'
+ ;;
+
+ solaris*)
+ # gcc --version < 3.0 without binutils cannot create self contained
+ # shared libraries reliably, requiring libgcc.a to resolve some of
+ # the object symbols generated in some cases. Libraries that use
+ # assert need libgcc.a to resolve __eprintf, for example. Linking
+ # a copy of libgcc.a into every shared library to guarantee resolving
+ # such symbols causes other problems: According to Tim Van Holder
+ # <tim.van.holder@pandora.be>, C++ libraries end up with a separate
+ # (to the application) exception stack for one thing.
+ no_undefined_flag=' -z defs'
+ if test "$GCC" = yes; then
+ case `$CC --version 2>/dev/null` in
+ [[12]].*)
+ cat <<EOF 1>&2
+
+*** Warning: Releases of GCC earlier than version 3.0 cannot reliably
+*** create self contained shared libraries on Solaris systems, without
+*** introducing a dependency on libgcc.a. Therefore, libtool is disabling
+*** -no-undefined support, which will at least allow you to build shared
+*** libraries. However, you may find that when you link such libraries
+*** into an application without using GCC, you have to manually add
+*** \`gcc --print-libgcc-file-name\` to the link command. We urge you to
+*** upgrade to a newer version of GCC. Another option is to rebuild your
+*** current GCC to use the GNU linker from GNU binutils 2.9.1 or newer.
+
+EOF
+ no_undefined_flag=
+ ;;
+ esac
+ fi
+ # $CC -shared without GNU ld will not create a library from C++
+ # object files and a static libstdc++, better avoid it by now
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_shlibpath_var=no
+ case $host_os in
+ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+ *) # Supported since Solaris 2.6 (maybe 2.5.1?)
+ whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;;
+ esac
+ link_all_deplibs=yes
+ ;;
+
+ sunos4*)
+ if test "x$host_vendor" = xsequent; then
+ # Use $CC to link under sequent, because it throws in some extra .o
+ # files that make .init and .fini sections work.
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4)
+ if test "x$host_vendor" = xsno; then
+ archive_cmds='$LD -G -Bsymbolic -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes # is this really true???
+ else
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=no #Motorola manual says yes, but my tests say they lie
+ fi
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4.3*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ export_dynamic_flag_spec='-Bexport'
+ ;;
+
+ sysv5*)
+ no_undefined_flag=' -z text'
+ # $CC -shared without GNU ld will not create a library from C++
+ # object files and a static libstdc++, better avoid it by now
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
+ hardcode_libdir_flag_spec=
+ hardcode_shlibpath_var=no
+ runpath_var='LD_RUN_PATH'
+ ;;
+
+ uts4*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ dgux*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ ld_shlibs=yes
+ fi
+ ;;
+
+ sysv4.2uw2*)
+ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=no
+ hardcode_shlibpath_var=no
+ hardcode_runpath_var=yes
+ runpath_var=LD_RUN_PATH
+ ;;
+
+ sysv5uw7* | unixware7*)
+ no_undefined_flag='${wl}-z ${wl}text'
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+fi
+AC_MSG_RESULT([$ld_shlibs])
+test "$ld_shlibs" = no && can_build_shared=no
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Check hardcoding attributes.
+AC_MSG_CHECKING([how to hardcode library paths into programs])
+hardcode_action=
+if test -n "$hardcode_libdir_flag_spec" || \
+ test -n "$runpath_var"; then
+
+ # We can hardcode non-existant directories.
+ if test "$hardcode_direct" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$hardcode_shlibpath_var" != no &&
+ test "$hardcode_minus_L" != no; then
+ # Linking always hardcodes the temporary library directory.
+ hardcode_action=relink
+ else
+ # We can link without hardcoding, and we can hardcode nonexisting dirs.
+ hardcode_action=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ hardcode_action=unsupported
+fi
+AC_MSG_RESULT([$hardcode_action])
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+striplib=
+old_striplib=
+AC_MSG_CHECKING([whether stripping libraries is possible])
+if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then
+ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+ test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+ AC_MSG_RESULT([yes])
+else
+ AC_MSG_RESULT([no])
+fi
+##
+## END FIXME
+
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+## FIXME: this should be a separate macro
+##
+# PORTME Fill in your ld.so characteristics
+AC_MSG_CHECKING([dynamic linker characteristics])
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+
+case $host_os in
+aix3*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}.so$major'
+ ;;
+
+aix4* | aix5*)
+ version_type=linux
+
+ #### Local change for Sleepycat's Berkeley DB [#5779]:
+ # If we don't set need_version, we'll get x.so.0.0.0,
+ # even if -avoid-version is set.
+ need_version=no
+
+ if test "$host_cpu" = ia64; then
+ # AIX 5 supports IA64
+ library_names_spec='${libname}${release}.so$major ${libname}${release}.so$versuffix $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ else
+ # With GCC up to 2.95.x, collect2 would create an import file
+ # for dependence libraries. The import file would start with
+ # the line `#! .'. This would cause the generated library to
+ # depend on `.', always an invalid library. This was fixed in
+ # development snapshots of GCC prior to 3.0.
+ case $host_os in
+ aix4 | aix4.[[01]] | aix4.[[01]].*)
+ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ echo ' yes '
+ echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then
+ :
+ else
+ can_build_shared=no
+ fi
+ ;;
+ esac
+ # AIX (on Power*) has no versioning support, so currently we can
+ # not hardcode correct soname into executable. Probably we can
+ # add versioning support to collect2, so additional links can
+ # be useful in future.
+ if test "$aix_use_runtimelinking" = yes; then
+ # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+ # instead of lib<name>.a to let people know that these are not
+ # typical AIX shared libraries.
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ else
+ # We preserve .a as extension for shared libraries through AIX4.2
+ # and later when we are not doing run time linking.
+ library_names_spec='${libname}${release}.a $libname.a'
+ soname_spec='${libname}${release}.so$major'
+ fi
+ shlibpath_var=LIBPATH
+ fi
+ ;;
+
+amigaos*)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done'
+ ;;
+
+beos*)
+ library_names_spec='${libname}.so'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ ;;
+
+bsdi4*)
+ version_type=linux
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ export_dynamic_flag_spec=-rdynamic
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw* | pw32*)
+ version_type=windows
+ need_version=no
+ need_lib_prefix=no
+ case $GCC,$host_os in
+ yes,cygwin*)
+ library_names_spec='$libname.dll.a'
+ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | sed -e 's/[[.]]/-/g'`${versuffix}.dll'
+ postinstall_cmds='dlpath=`bash 2>&1 -c '\''. $dir/${file}i;echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog .libs/$dlname \$dldir/$dlname'
+ postuninstall_cmds='dldll=`bash 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $rm \$dlpath'
+ ;;
+ yes,mingw*)
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[[.]]/-/g'`${versuffix}.dll'
+ sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | sed -e "s/^libraries://" -e "s/;/ /g"`
+ ;;
+ yes,pw32*)
+ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll'
+ ;;
+ *)
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[[.]]/-/g'`${versuffix}.dll $libname.lib'
+ ;;
+ esac
+ dynamic_linker='Win32 ld.exe'
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ ;;
+
+darwin* | rhapsody*)
+ dynamic_linker="$host_os dyld"
+ version_type=darwin
+ need_lib_prefix=no
+ need_version=no
+ # FIXME: Relying on posixy $() will cause problems for
+ # cross-compilation, but unfortunately the echo tests do not
+ # yet detect zsh echo's removal of \ escapes.
+ #### Local change for Sleepycat's Berkeley DB [#6117]:
+ # added support for -jnimodule, encapsulated below in ${darwin_suffix}
+ darwin_suffix='$(test .$jnimodule = .yes && echo jnilib || (test .$module = .yes && echo so || echo dylib))'
+ library_names_spec='${libname}${release}${versuffix}.'"${darwin_suffix}"' ${libname}${release}${major}.'"${darwin_suffix}"' ${libname}.'"${darwin_suffix}"
+ soname_spec='${libname}${release}${major}.'"${darwin_suffix}"
+ shlibpath_overrides_runpath=yes
+ shlibpath_var=DYLD_LIBRARY_PATH
+ ;;
+
+freebsd1*)
+ dynamic_linker=no
+ ;;
+
+freebsd*)
+ objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout`
+ version_type=freebsd-$objformat
+ case $version_type in
+ freebsd-elf*)
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ library_names_spec='${libname}${release}.so$versuffix $libname.so$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_os in
+ freebsd2*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ *)
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so${major} ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ hardcode_into_libs=yes
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ dynamic_linker="$host_os dld.sl"
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}.sl$versuffix ${libname}${release}.sl$major $libname.sl'
+ soname_spec='${libname}${release}.sl$major'
+ # HP-UX runs *really* slowly unless shared libraries are mode 555.
+ postinstall_cmds='chmod 555 $lib'
+ ;;
+
+irix5* | irix6*)
+ version_type=irix
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}.so$major'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so $libname.so'
+ case $host_os in
+ irix5*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 ") libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 ") libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 ") libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux-gnuoldld* | linux-gnuaout* | linux-gnucoff*)
+ dynamic_linker=no
+ ;;
+
+# This must be Linux ELF.
+linux-gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ # This implies no fast_install, which is unacceptable.
+ # Some rework will be needed to allow for fast_install
+ # before this can be enabled.
+ hardcode_into_libs=yes
+
+ # We used to test for /lib/ld.so.1 and disable shared libraries on
+ # powerpc, because MkLinux only supported shared libraries with the
+ # GNU dynamic linker. Since this was broken with cross compilers,
+ # most powerpc-linux boxes support dynamic linking these days and
+ # people can always --disable-shared, the test was removed, and we
+ # assume the GNU/Linux dynamic linker is in use.
+ dynamic_linker='GNU/Linux ld.so'
+ ;;
+
+netbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+
+newsos6)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+nto-qnx)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+openbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ case "$host_os" in
+ openbsd2.[[89]] | openbsd2.[[89]].*)
+ shlibpath_overrides_runpath=no
+ ;;
+ *)
+ shlibpath_overrides_runpath=yes
+ ;;
+ esac
+ else
+ shlibpath_overrides_runpath=yes
+ fi
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+os2*)
+ libname_spec='$name'
+ need_lib_prefix=no
+ library_names_spec='$libname.dll $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_version=no
+ soname_spec='${libname}${release}.so'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+sco3.2v5*)
+ version_type=osf
+ soname_spec='${libname}${release}.so$major'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+solaris*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_vendor in
+ sni)
+ shlibpath_overrides_runpath=no
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ ;;
+ esac
+ ;;
+
+uts4*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+dgux*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+sysv4*MP*)
+ if test -d /usr/nec ;then
+ version_type=linux
+ library_names_spec='$libname.so.$versuffix $libname.so.$major $libname.so'
+ soname_spec='$libname.so.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+AC_MSG_RESULT([$dynamic_linker])
+test "$dynamic_linker" = no && can_build_shared=no
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Report the final consequences.
+AC_MSG_CHECKING([if libtool supports shared libraries])
+AC_MSG_RESULT([$can_build_shared])
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([whether to build shared libraries])
+test "$can_build_shared" = "no" && enable_shared=no
+
+# On AIX, shared libraries and static libraries use the same namespace, and
+# are all built from PIC.
+case "$host_os" in
+aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+
+aix4*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+esac
+AC_MSG_RESULT([$enable_shared])
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([whether to build static libraries])
+# Make sure either enable_shared or enable_static is yes.
+test "$enable_shared" = yes || enable_static=yes
+AC_MSG_RESULT([$enable_static])
+##
+## END FIXME
+
+if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+AC_LIBTOOL_DLOPEN_SELF
+
+## FIXME: this should be a separate macro
+##
+if test "$enable_shared" = yes && test "$GCC" = yes; then
+ case $archive_cmds in
+ *'~'*)
+ # FIXME: we may have to deal with multi-command sequences.
+ ;;
+ '$CC '*)
+ # Test whether the compiler implicitly links with -lc since on some
+ # systems, -lgcc has to come before -lc. If gcc already passes -lc
+ # to ld, don't add -lc before -lgcc.
+ AC_MSG_CHECKING([whether -lc should be explicitly linked in])
+ AC_CACHE_VAL([lt_cv_archive_cmds_need_lc],
+ [$rm conftest*
+ echo 'static int dummy;' > conftest.$ac_ext
+
+ if AC_TRY_EVAL(ac_compile); then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$lt_cv_prog_cc_wl
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ save_allow_undefined_flag=$allow_undefined_flag
+ allow_undefined_flag=
+ if AC_TRY_EVAL(archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1)
+ then
+ lt_cv_archive_cmds_need_lc=no
+ else
+ lt_cv_archive_cmds_need_lc=yes
+ fi
+ allow_undefined_flag=$save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi])
+ AC_MSG_RESULT([$lt_cv_archive_cmds_need_lc])
+ ;;
+ esac
+fi
+need_lc=${lt_cv_archive_cmds_need_lc-yes}
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# The second clause should only fire when bootstrapping the
+# libtool distribution, otherwise you forgot to ship ltmain.sh
+# with your package, and you will get complaints that there are
+# no rules to generate ltmain.sh.
+if test -f "$ltmain"; then
+ :
+else
+ # If there is no Makefile yet, we rely on a make rule to execute
+ # `config.status --recheck' to rerun these tests and create the
+ # libtool script then.
+ test -f Makefile && make "$ltmain"
+fi
+
+if test -f "$ltmain"; then
+ trap "$rm \"${ofile}T\"; exit 1" 1 2 15
+ $rm -f "${ofile}T"
+
+ echo creating $ofile
+
+ # Now quote all the things that may contain metacharacters while being
+ # careful not to overquote the AC_SUBSTed values. We take copies of the
+ # variables and quote the copies for generation of the libtool script.
+ for var in echo old_CC old_CFLAGS \
+ AR AR_FLAGS CC LD LN_S NM SHELL \
+ reload_flag reload_cmds wl \
+ pic_flag link_static_flag no_builtin_flag export_dynamic_flag_spec \
+ thread_safe_flag_spec whole_archive_flag_spec libname_spec \
+ library_names_spec soname_spec \
+ RANLIB old_archive_cmds old_archive_from_new_cmds old_postinstall_cmds \
+ old_postuninstall_cmds archive_cmds archive_expsym_cmds postinstall_cmds \
+ postuninstall_cmds extract_expsyms_cmds old_archive_from_expsyms_cmds \
+ old_striplib striplib file_magic_cmd export_symbols_cmds \
+ deplibs_check_method allow_undefined_flag no_undefined_flag \
+ finish_cmds finish_eval global_symbol_pipe global_symbol_to_cdecl \
+ global_symbol_to_c_name_address \
+ hardcode_libdir_flag_spec hardcode_libdir_separator \
+ sys_lib_search_path_spec sys_lib_dlsearch_path_spec \
+ compiler_c_o compiler_o_lo need_locks exclude_expsyms include_expsyms; do
+
+ case $var in
+ reload_cmds | old_archive_cmds | old_archive_from_new_cmds | \
+ old_postinstall_cmds | old_postuninstall_cmds | \
+ export_symbols_cmds | archive_cmds | archive_expsym_cmds | \
+ extract_expsyms_cmds | old_archive_from_expsyms_cmds | \
+ postinstall_cmds | postuninstall_cmds | \
+ finish_cmds | sys_lib_search_path_spec | sys_lib_dlsearch_path_spec)
+ # Double-quote double-evaled strings.
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\""
+ ;;
+ *)
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\""
+ ;;
+ esac
+ done
+
+ cat <<__EOF__ > "${ofile}T"
+#! $SHELL
+
+# `$echo "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP)
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+# Copyright (C) 1996-2000 Free Software Foundation, Inc.
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="sed -e s/^X//"
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X\${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$need_lc
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# The host system.
+host_alias=$host_alias
+host=$host
+
+# An echo program that does not interpret backslashes.
+echo=$lt_echo
+
+# The archiver.
+AR=$lt_AR
+AR_FLAGS=$lt_AR_FLAGS
+
+# The default C compiler.
+CC=$lt_CC
+
+# Is the compiler the GNU C compiler?
+with_gcc=$GCC
+
+# The linker used to build libraries.
+LD=$lt_LD
+
+# Whether we need hard or soft links.
+LN_S=$lt_LN_S
+
+# A BSD-compatible nm program.
+NM=$lt_NM
+
+# A symbol stripping program
+STRIP=$STRIP
+
+# Used to examine libraries when file_magic_cmd begins "file"
+MAGIC_CMD=$MAGIC_CMD
+
+# Used on cygwin: DLL creation program.
+DLLTOOL="$DLLTOOL"
+
+# Used on cygwin: object dumper.
+OBJDUMP="$OBJDUMP"
+
+# Used on cygwin: assembler.
+AS="$AS"
+
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag
+reload_cmds=$lt_reload_cmds
+
+# How to pass a linker flag through the compiler.
+wl=$lt_wl
+
+# Object file suffix (normally "o").
+objext="$ac_objext"
+
+# Old archive suffix (normally "a").
+libext="$libext"
+
+# Executable file suffix (normally "").
+exeext="$exeext"
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_pic_flag
+pic_mode=$pic_mode
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_compiler_c_o
+
+# Can we write directly to a .lo ?
+compiler_o_lo=$lt_compiler_o_lo
+
+# Must we lock files when doing compilation ?
+need_locks=$lt_need_locks
+
+# Do we need the lib prefix for modules?
+need_lib_prefix=$need_lib_prefix
+
+# Do we need a version for libraries?
+need_version=$need_version
+
+# Whether dlopen is supported.
+dlopen_support=$enable_dlopen
+
+# Whether dlopen of programs is supported.
+dlopen_self=$enable_dlopen_self
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=$enable_dlopen_self_static
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_link_static_flag
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_no_builtin_flag
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec
+
+# Compiler flag to generate thread-safe objects.
+thread_safe_flag_spec=$lt_thread_safe_flag_spec
+
+# Library versioning type.
+version_type=$version_type
+
+# Format of library name prefix.
+libname_spec=$lt_libname_spec
+
+# List of archive names. First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME.
+library_names_spec=$lt_library_names_spec
+
+# The coded name of the library, if different from the real name.
+soname_spec=$lt_soname_spec
+
+# Commands used to build and install an old-style archive.
+RANLIB=$lt_RANLIB
+old_archive_cmds=$lt_old_archive_cmds
+old_postinstall_cmds=$lt_old_postinstall_cmds
+old_postuninstall_cmds=$lt_old_postuninstall_cmds
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds
+
+# Commands used to build and install a shared archive.
+archive_cmds=$lt_archive_cmds
+archive_expsym_cmds=$lt_archive_expsym_cmds
+postinstall_cmds=$lt_postinstall_cmds
+postuninstall_cmds=$lt_postuninstall_cmds
+
+# Commands to strip libraries.
+old_striplib=$lt_old_striplib
+striplib=$lt_striplib
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method=$lt_deplibs_check_method
+
+# Command to use when deplibs_check_method == file_magic.
+file_magic_cmd=$lt_file_magic_cmd
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag
+
+# Flag that forces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=$lt_finish_cmds
+
+# Same as above, but a single script fragment to be evaled but not shown.
+finish_eval=$lt_finish_eval
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe=$lt_global_symbol_pipe
+
+# Transform the output of nm in a proper C declaration
+global_symbol_to_cdecl=$lt_global_symbol_to_cdecl
+
+# Transform the output of nm in a C name address pair
+global_symbol_to_c_name_address=$lt_global_symbol_to_c_name_address
+
+# This is the shared library runtime path variable.
+runpath_var=$runpath_var
+
+# This is the shared library path variable.
+shlibpath_var=$shlibpath_var
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action
+
+# Whether we should hardcode library paths into libraries.
+hardcode_into_libs=$hardcode_into_libs
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
+
+# Whether we need a single -rpath flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator
+
+# Set to yes if using DIR/libNAME.so during linking hardcodes DIR into the
+# resulting binary.
+hardcode_direct=$hardcode_direct
+
+# Set to yes if using the -LDIR flag during linking hardcodes DIR into the
+# resulting binary.
+hardcode_minus_L=$hardcode_minus_L
+
+# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into
+# the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var
+
+# Variables whose values should be saved in libtool wrapper scripts and
+# restored at relink time.
+variables_saved_for_relink="$variables_saved_for_relink"
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs
+
+# Compile-time system search path for libraries
+sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
+
+# Run-time system search path for libraries
+sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec
+
+# Fix the shell variable \$srcfile for the compiler.
+fix_srcfile_path="$fix_srcfile_path"
+
+# Set to yes if exported symbols are required.
+always_export_symbols=$always_export_symbols
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds
+
+# The commands to extract the exported symbol list from a shared archive.
+extract_expsyms_cmds=$lt_extract_expsyms_cmds
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms
+
+# ### END LIBTOOL CONFIG
+
+__EOF__
+
+ case $host_os in
+ aix3*)
+ cat <<\EOF >> "${ofile}T"
+
+# AIX sometimes has problems with the GCC collect2 program. For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+fi
+EOF
+ ;;
+ esac
+
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2*)
+ cat <<'EOF' >> "${ofile}T"
+ # This is a source program that is used to create dlls on Windows
+    # Do not remove or modify the starting and closing comments
+# /* ltdll.c starts here */
+# #define WIN32_LEAN_AND_MEAN
+# #include <windows.h>
+# #undef WIN32_LEAN_AND_MEAN
+# #include <stdio.h>
+#
+# #ifndef __CYGWIN__
+# # ifdef __CYGWIN32__
+# # define __CYGWIN__ __CYGWIN32__
+# # endif
+# #endif
+#
+# #ifdef __cplusplus
+# extern "C" {
+# #endif
+# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved);
+# #ifdef __cplusplus
+# }
+# #endif
+#
+# #ifdef __CYGWIN__
+# #include <cygwin/cygwin_dll.h>
+# DECLARE_CYGWIN_DLL( DllMain );
+# #endif
+# HINSTANCE __hDllInstance_base;
+#
+# BOOL APIENTRY
+# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved)
+# {
+# __hDllInstance_base = hInst;
+# return TRUE;
+# }
+# /* ltdll.c ends here */
+ # This is a source program that is used to create import libraries
+    # on Windows for dlls which lack them. Do not remove or modify the
+ # starting and closing comments
+# /* impgen.c starts here */
+# /* Copyright (C) 1999-2000 Free Software Foundation, Inc.
+#
+# This file is part of GNU libtool.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# */
+#
+# #include <stdio.h> /* for printf() */
+# #include <unistd.h> /* for open(), lseek(), read() */
+# #include <fcntl.h> /* for O_RDONLY, O_BINARY */
+# #include <string.h> /* for strdup() */
+#
+# /* O_BINARY isn't required (or even defined sometimes) under Unix */
+# #ifndef O_BINARY
+# #define O_BINARY 0
+# #endif
+#
+# static unsigned int
+# pe_get16 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[2];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 2);
+# return b[0] + (b[1]<<8);
+# }
+#
+# static unsigned int
+# pe_get32 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[4];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 4);
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# static unsigned int
+# pe_as32 (ptr)
+# void *ptr;
+# {
+# unsigned char *b = ptr;
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# int
+# main (argc, argv)
+# int argc;
+# char *argv[];
+# {
+# int dll;
+# unsigned long pe_header_offset, opthdr_ofs, num_entries, i;
+# unsigned long export_rva, export_size, nsections, secptr, expptr;
+# unsigned long name_rvas, nexp;
+# unsigned char *expdata, *erva;
+# char *filename, *dll_name;
+#
+# filename = argv[1];
+#
+# dll = open(filename, O_RDONLY|O_BINARY);
+# if (dll < 1)
+# return 1;
+#
+# dll_name = filename;
+#
+# for (i=0; filename[i]; i++)
+# if (filename[i] == '/' || filename[i] == '\\' || filename[i] == ':')
+# dll_name = filename + i +1;
+#
+# pe_header_offset = pe_get32 (dll, 0x3c);
+# opthdr_ofs = pe_header_offset + 4 + 20;
+# num_entries = pe_get32 (dll, opthdr_ofs + 92);
+#
+# if (num_entries < 1) /* no exports */
+# return 1;
+#
+# export_rva = pe_get32 (dll, opthdr_ofs + 96);
+# export_size = pe_get32 (dll, opthdr_ofs + 100);
+# nsections = pe_get16 (dll, pe_header_offset + 4 +2);
+# secptr = (pe_header_offset + 4 + 20 +
+# pe_get16 (dll, pe_header_offset + 4 + 16));
+#
+# expptr = 0;
+# for (i = 0; i < nsections; i++)
+# {
+# char sname[8];
+# unsigned long secptr1 = secptr + 40 * i;
+# unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
+# unsigned long vsize = pe_get32 (dll, secptr1 + 16);
+# unsigned long fptr = pe_get32 (dll, secptr1 + 20);
+# lseek(dll, secptr1, SEEK_SET);
+# read(dll, sname, 8);
+# if (vaddr <= export_rva && vaddr+vsize > export_rva)
+# {
+# expptr = fptr + (export_rva - vaddr);
+# if (export_rva + export_size > vaddr + vsize)
+# export_size = vsize - (export_rva - vaddr);
+# break;
+# }
+# }
+#
+# expdata = (unsigned char*)malloc(export_size);
+# lseek (dll, expptr, SEEK_SET);
+# read (dll, expdata, export_size);
+# erva = expdata - export_rva;
+#
+# nexp = pe_as32 (expdata+24);
+# name_rvas = pe_as32 (expdata+32);
+#
+# printf ("EXPORTS\n");
+# for (i = 0; i<nexp; i++)
+# {
+# unsigned long name_rva = pe_as32 (erva+name_rvas+i*4);
+# printf ("\t%s @ %ld ;\n", erva+name_rva, 1+ i);
+# }
+#
+# return 0;
+# }
+# /* impgen.c ends here */
+
+EOF
+ ;;
+ esac
+
+ # We use sed instead of cat because bash on DJGPP gets confused if
+  # it finds mixed CR/LF and LF-only lines.  Since sed operates in
+ # text mode, it properly converts lines to CR/LF. This bash problem
+ # is reportedly fixed, but why not run on old versions too?
+ sed '$q' "$ltmain" >> "${ofile}T" || (rm -f "${ofile}T"; exit 1)
+
+ mv -f "${ofile}T" "$ofile" || \
+ (rm -f "$ofile" && cp "${ofile}T" "$ofile" && rm -f "${ofile}T")
+ chmod +x "$ofile"
+fi
+##
+## END FIXME
+
+])# _LT_AC_LTCONFIG_HACK
+
+# AC_LIBTOOL_DLOPEN - enable checks for dlopen support
+AC_DEFUN([AC_LIBTOOL_DLOPEN], [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])])
+
+# AC_LIBTOOL_WIN32_DLL - declare package support for building win32 dll's
+AC_DEFUN([AC_LIBTOOL_WIN32_DLL], [AC_BEFORE([$0], [AC_LIBTOOL_SETUP])])
+
+# AC_ENABLE_SHARED - implement the --enable-shared flag
+# Usage: AC_ENABLE_SHARED[(DEFAULT)]
+# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
+# `yes'.
+AC_DEFUN([AC_ENABLE_SHARED],
+[define([AC_ENABLE_SHARED_DEFAULT], ifelse($1, no, no, yes))dnl
+AC_ARG_ENABLE(shared,
+changequote(<<, >>)dnl
+<< --enable-shared[=PKGS] build shared libraries [default=>>AC_ENABLE_SHARED_DEFAULT],
+changequote([, ])dnl
+[p=${PACKAGE-default}
+case $enableval in
+yes) enable_shared=yes ;;
+no) enable_shared=no ;;
+*)
+ enable_shared=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_shared=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac],
+enable_shared=AC_ENABLE_SHARED_DEFAULT)dnl
+])
+
+# AC_DISABLE_SHARED - set the default shared flag to --disable-shared
+AC_DEFUN([AC_DISABLE_SHARED],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+AC_ENABLE_SHARED(no)])
+
+# AC_ENABLE_STATIC - implement the --enable-static flag
+# Usage: AC_ENABLE_STATIC[(DEFAULT)]
+# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
+# `yes'.
+AC_DEFUN([AC_ENABLE_STATIC],
+[define([AC_ENABLE_STATIC_DEFAULT], ifelse($1, no, no, yes))dnl
+AC_ARG_ENABLE(static,
+changequote(<<, >>)dnl
+<< --enable-static[=PKGS] build static libraries [default=>>AC_ENABLE_STATIC_DEFAULT],
+changequote([, ])dnl
+[p=${PACKAGE-default}
+case $enableval in
+yes) enable_static=yes ;;
+no) enable_static=no ;;
+*)
+ enable_static=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_static=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac],
+enable_static=AC_ENABLE_STATIC_DEFAULT)dnl
+])
+
+# AC_DISABLE_STATIC - set the default static flag to --disable-static
+AC_DEFUN([AC_DISABLE_STATIC],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+AC_ENABLE_STATIC(no)])
+
+
+# AC_ENABLE_FAST_INSTALL - implement the --enable-fast-install flag
+# Usage: AC_ENABLE_FAST_INSTALL[(DEFAULT)]
+# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
+# `yes'.
+AC_DEFUN([AC_ENABLE_FAST_INSTALL],
+[define([AC_ENABLE_FAST_INSTALL_DEFAULT], ifelse($1, no, no, yes))dnl
+AC_ARG_ENABLE(fast-install,
+changequote(<<, >>)dnl
+<< --enable-fast-install[=PKGS] optimize for fast installation [default=>>AC_ENABLE_FAST_INSTALL_DEFAULT],
+changequote([, ])dnl
+[p=${PACKAGE-default}
+case $enableval in
+yes) enable_fast_install=yes ;;
+no) enable_fast_install=no ;;
+*)
+ enable_fast_install=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_fast_install=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac],
+enable_fast_install=AC_ENABLE_FAST_INSTALL_DEFAULT)dnl
+])
+
+# AC_DISABLE_FAST_INSTALL - set the default to --disable-fast-install
+AC_DEFUN([AC_DISABLE_FAST_INSTALL],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+AC_ENABLE_FAST_INSTALL(no)])
+
+# AC_LIBTOOL_PICMODE - implement the --with-pic flag
+# Usage: AC_LIBTOOL_PICMODE[(MODE)]
+# Where MODE is either `yes' or `no'. If omitted, it defaults to
+# `both'.
+AC_DEFUN([AC_LIBTOOL_PICMODE],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+pic_mode=ifelse($#,1,$1,default)])
+
+
+# AC_PATH_TOOL_PREFIX - find a file program which can recognise shared library
+AC_DEFUN([AC_PATH_TOOL_PREFIX],
+[AC_MSG_CHECKING([for $1])
+AC_CACHE_VAL(lt_cv_path_MAGIC_CMD,
+[case $MAGIC_CMD in
+ /*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a dos path.
+ ;;
+ *)
+ ac_save_MAGIC_CMD="$MAGIC_CMD"
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+dnl $ac_dummy forces splitting on constant user-supplied paths.
+dnl POSIX.2 word splitting is done only on the output of word expansions,
+dnl not every word. This closes a longstanding sh security hole.
+ ac_dummy="ifelse([$2], , $PATH, [$2])"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$1; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/$1"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`"
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ egrep "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ MAGIC_CMD="$ac_save_MAGIC_CMD"
+ ;;
+esac])
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ AC_MSG_RESULT($MAGIC_CMD)
+else
+ AC_MSG_RESULT(no)
+fi
+])
+
+
+# AC_PATH_MAGIC - find a file program which can recognise a shared library
+AC_DEFUN([AC_PATH_MAGIC],
+[AC_REQUIRE([AC_CHECK_TOOL_PREFIX])dnl
+AC_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin:$PATH)
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+ if test -n "$ac_tool_prefix"; then
+ AC_PATH_TOOL_PREFIX(file, /usr/bin:$PATH)
+ else
+ MAGIC_CMD=:
+ fi
+fi
+])
+
+
+# AC_PROG_LD - find the path to the GNU or non-GNU linker
+AC_DEFUN([AC_PROG_LD],
+[AC_ARG_WITH(gnu-ld,
+[ --with-gnu-ld assume the C compiler uses GNU ld [default=no]],
+test "$withval" = no || with_gnu_ld=yes, with_gnu_ld=no)
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR])dnl
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ AC_MSG_CHECKING([for ld used by GCC])
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [[\\/]]* | [[A-Za-z]]:[[\\/]]*)
+ re_direlt='/[[^/]][[^/]]*/\.\./'
+ # Canonicalize the path of ld
+ ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
+ while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ AC_MSG_CHECKING([for GNU ld])
+else
+ AC_MSG_CHECKING([for non-GNU ld])
+fi
+AC_CACHE_VAL(lt_cv_path_LD,
+[if test -z "$LD"; then
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some GNU ld's only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ if "$lt_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
+ test "$with_gnu_ld" != no && break
+ else
+ test "$with_gnu_ld" != yes && break
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi])
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ AC_MSG_RESULT($LD)
+else
+ AC_MSG_RESULT(no)
+fi
+test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
+AC_PROG_LD_GNU
+])
+
+# AC_PROG_LD_GNU -
+AC_DEFUN([AC_PROG_LD_GNU],
+[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld,
+[# I'd rather use --version here, but apparently some GNU ld's only accept -v.
+if $LD -v 2>&1 </dev/null | egrep '(GNU|with BFD)' 1>&5; then
+ lt_cv_prog_gnu_ld=yes
+else
+ lt_cv_prog_gnu_ld=no
+fi])
+with_gnu_ld=$lt_cv_prog_gnu_ld
+])
+
+# AC_PROG_LD_RELOAD_FLAG - find reload flag for linker
+# -- PORTME Some linkers may need a different reload flag.
+AC_DEFUN([AC_PROG_LD_RELOAD_FLAG],
+[AC_CACHE_CHECK([for $LD option to reload object files], lt_cv_ld_reload_flag,
+[lt_cv_ld_reload_flag='-r'])
+reload_flag=$lt_cv_ld_reload_flag
+test -n "$reload_flag" && reload_flag=" $reload_flag"
+])
+
+# AC_DEPLIBS_CHECK_METHOD - how to check for library dependencies
+# -- PORTME fill in with the dynamic library characteristics
+AC_DEFUN([AC_DEPLIBS_CHECK_METHOD],
+[AC_CACHE_CHECK([how to recognise dependant libraries],
+lt_cv_deplibs_check_method,
+[lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# `unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# which responds to the $file_magic_cmd with a given egrep regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix4* | aix5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+beos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+bsdi4*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ ;;
+
+cygwin* | mingw* | pw32*)
+ lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ ;;
+
+darwin* | rhapsody*)
+ lt_cv_deplibs_check_method='file_magic Mach-O dynamically linked shared library'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ case "$host_os" in
+ rhapsody* | darwin1.[[012]])
+ lt_cv_file_magic_test_file=`echo /System/Library/Frameworks/System.framework/Versions/*/System | head -1`
+ ;;
+ *) # Darwin 1.3 on
+ lt_cv_file_magic_test_file='/usr/lib/libSystem.dylib'
+ ;;
+ esac
+ ;;
+
+freebsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
+ case $host_cpu in
+ i*86 )
+ # Not sure whether the presence of OpenBSD here was a mistake.
+ # Let's accept both of them until this is cleared up.
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD)/i[[3-9]]86 (compact )?demand paged shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ ;;
+ esac
+ else
+ lt_cv_deplibs_check_method=pass_all
+ fi
+ ;;
+
+gnu*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+hpux10.20*|hpux11*)
+ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libc.sl
+ ;;
+
+irix5* | irix6*)
+ case $host_os in
+ irix5*)
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method="file_magic ELF 32-bit MSB dynamic lib MIPS - version 1"
+ ;;
+ *)
+ case $LD in
+ *-32|*"-32 ") libmagic=32-bit;;
+ *-n32|*"-n32 ") libmagic=N32;;
+ *-64|*"-64 ") libmagic=64-bit;;
+ *) libmagic=never-match;;
+ esac
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method="file_magic ELF ${libmagic} MSB mips-[[1234]] dynamic lib MIPS - version 1"
+ ;;
+ esac
+ lt_cv_file_magic_test_file=`echo /lib${libsuff}/libc.so*`
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+# This must be Linux ELF.
+linux-gnu*)
+ case $host_cpu in
+ alpha* | hppa* | i*86 | powerpc* | sparc* | ia64* )
+ lt_cv_deplibs_check_method=pass_all ;;
+ *)
+ # glibc up to 2.1.1 does not perform some relocations on ARM
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;;
+ esac
+ lt_cv_file_magic_test_file=`echo /lib/libc.so* /lib/libc-*.so`
+ ;;
+
+netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/\.]]+\.so\.[[0-9]]+\.[[0-9]]+$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/\.]]+\.so$'
+ fi
+ ;;
+
+newos6*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libnls.so
+ ;;
+
+nto-qnx)
+ lt_cv_deplibs_check_method=unknown
+ ;;
+
+openbsd*)
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB shared object'
+ else
+ lt_cv_deplibs_check_method='file_magic OpenBSD.* shared library'
+ fi
+ ;;
+
+osf3* | osf4* | osf5*)
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method='file_magic COFF format alpha shared library'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sco3.2v5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+solaris*)
+ lt_cv_deplibs_check_method=pass_all
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+
+sysv5uw[[78]]* | sysv4*uw2*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ case $host_vendor in
+ motorola)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]'
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+ ;;
+ ncr)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ sequent)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )'
+ ;;
+ sni)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib"
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+ esac
+ ;;
+esac
+])
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+])
+
+
+# AC_PROG_NM - find the path to a BSD-compatible name lister
+AC_DEFUN([AC_PROG_NM],
+[AC_REQUIRE([_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR])dnl
+AC_MSG_CHECKING([for BSD-compatible nm])
+AC_CACHE_VAL(lt_cv_path_NM,
+[if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin /usr/ucb /bin; do
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm=$ac_dir/${ac_tool_prefix}nm
+ if test -f $tmp_nm || test -f $tmp_nm$ac_exeext ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ # Tru64's nm complains that /dev/null is an invalid object file
+ if ($tmp_nm -B /dev/null 2>&1 | sed '1q'; exit 0) | egrep '(/dev/null|Invalid file or object type)' >/dev/null; then
+ lt_cv_path_NM="$tmp_nm -B"
+ break
+ elif ($tmp_nm -p /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
+ lt_cv_path_NM="$tmp_nm -p"
+ break
+ else
+ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm
+fi])
+NM="$lt_cv_path_NM"
+AC_MSG_RESULT([$NM])
+])
+
+# AC_CHECK_LIBM - check for math library
+AC_DEFUN([AC_CHECK_LIBM],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+LIBM=
+case $host in
+*-*-beos* | *-*-cygwin* | *-*-pw32*)
+  # These systems don't have libm
+ ;;
+*-ncr-sysv4.3*)
+ AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw")
+ AC_CHECK_LIB(m, main, LIBM="$LIBM -lm")
+ ;;
+*)
+ AC_CHECK_LIB(m, main, LIBM="-lm")
+ ;;
+esac
+])
+
+# AC_LIBLTDL_CONVENIENCE[(dir)] - sets LIBLTDL to the link flags for
+# the libltdl convenience library and INCLTDL to the include flags for
+# the libltdl header and adds --enable-ltdl-convenience to the
+# configure arguments. Note that LIBLTDL and INCLTDL are not
+# AC_SUBSTed, nor is AC_CONFIG_SUBDIRS called. If DIR is not
+# provided, it is assumed to be `libltdl'. LIBLTDL will be prefixed
+# with '${top_builddir}/' and INCLTDL will be prefixed with
+# '${top_srcdir}/' (note the single quotes!). If your package is not
+# flat and you're not using automake, define top_builddir and
+# top_srcdir appropriately in the Makefiles.
+AC_DEFUN([AC_LIBLTDL_CONVENIENCE],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+ case $enable_ltdl_convenience in
+ no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;;
+ "") enable_ltdl_convenience=yes
+ ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;;
+ esac
+ LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdlc.la
+ INCLTDL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl'])
+])
+
+# AC_LIBLTDL_INSTALLABLE[(dir)] - sets LIBLTDL to the link flags for
+# the libltdl installable library and INCLTDL to the include flags for
+# the libltdl header and adds --enable-ltdl-install to the configure
+# arguments. Note that LIBLTDL and INCLTDL are not AC_SUBSTed, nor is
+# AC_CONFIG_SUBDIRS called. If DIR is not provided and an installed
+# libltdl is not found, it is assumed to be `libltdl'. LIBLTDL will
+# be prefixed with '${top_builddir}/' and INCLTDL will be prefixed
+# with '${top_srcdir}/' (note the single quotes!). If your package is
+# not flat and you're not using automake, define top_builddir and
+# top_srcdir appropriately in the Makefiles.
+# In the future, this macro may have to be called after AC_PROG_LIBTOOL.
+AC_DEFUN([AC_LIBLTDL_INSTALLABLE],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+ AC_CHECK_LIB(ltdl, main,
+ [test x"$enable_ltdl_install" != xyes && enable_ltdl_install=no],
+ [if test x"$enable_ltdl_install" = xno; then
+ AC_MSG_WARN([libltdl not installed, but installation disabled])
+ else
+ enable_ltdl_install=yes
+ fi
+ ])
+ if test x"$enable_ltdl_install" = x"yes"; then
+ ac_configure_args="$ac_configure_args --enable-ltdl-install"
+ LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdl.la
+ INCLTDL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl'])
+ else
+ ac_configure_args="$ac_configure_args --enable-ltdl-install=no"
+ LIBLTDL="-lltdl"
+ INCLTDL=
+ fi
+])
+
+# old names
+AC_DEFUN([AM_PROG_LIBTOOL], [AC_PROG_LIBTOOL])
+AC_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)])
+AC_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)])
+AC_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)])
+AC_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)])
+AC_DEFUN([AM_PROG_LD], [AC_PROG_LD])
+AC_DEFUN([AM_PROG_NM], [AC_PROG_NM])
+
+# This is just to silence aclocal about the macro not being used
+ifelse([AC_DISABLE_FAST_INSTALL])
diff --git a/storage/bdb/dist/aclocal/mutex.ac b/storage/bdb/dist/aclocal/mutex.ac
new file mode 100644
index 00000000000..f3f5529c74f
--- /dev/null
+++ b/storage/bdb/dist/aclocal/mutex.ac
@@ -0,0 +1,611 @@
+# $Id: mutex.ac,v 11.38 2002/07/25 20:07:52 sue Exp $
+
+# POSIX pthreads tests: inter-process safe and intra-process only.
+#
+# We need to run a test here, because the PTHREAD_PROCESS_SHARED flag compiles
+# fine on problematic systems, but won't actually work. This is a problem for
+# cross-compilation environments. I think inter-process mutexes are as likely
+# to fail in cross-compilation environments as real ones (especially since the
+# likely cross-compilation environment is Linux, where inter-process mutexes
+# don't currently work -- the latest estimate I've heard is Q1 2002, as part
+# of IBM's NGPT package). So:
+#
+# If checking for inter-process pthreads mutexes:
+# If it's local, run a test.
+# If it's a cross-compilation, fail.
+#
+# If the user specified pthreads mutexes and we're checking for intra-process
+# mutexes only:
+# If it's local, run a test.
+# If it's a cross-compilation, run a link-test.
+#
+# So, the thing you can't do here is configure for inter-process POSIX pthread
+# mutexes when cross-compiling. Since we're using the GNU/Cygnus toolchain for
+# cross-compilation, the target system is likely Linux or *BSD, so we're doing
+# the right thing.
+AC_DEFUN(AM_PTHREADS_SHARED, [
+AC_TRY_RUN([
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}], [db_cv_mutex="$1"],, [db_cv_mutex="no"])])
+AC_DEFUN(AM_PTHREADS_PRIVATE, [
+AC_TRY_RUN([
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}], [db_cv_mutex="$1"],,
+AC_TRY_LINK([
+#include <pthread.h>],[
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+], [db_cv_mutex="$1"]))])
+
+# Figure out mutexes for this compiler/architecture.
+AC_DEFUN(AM_DEFINE_MUTEXES, [
+
+# Mutexes we don't test for, but want the #defines to exist for
+# other ports.
+AH_TEMPLATE(HAVE_MUTEX_VMS, [Define to 1 to use VMS mutexes.])
+AH_TEMPLATE(HAVE_MUTEX_VXWORKS, [Define to 1 to use VxWorks mutexes.])
+AH_TEMPLATE(HAVE_MUTEX_WIN32, [Define to 1 to use Windows mutexes.])
+
+AC_CACHE_CHECK([for mutexes], db_cv_mutex, [
+db_cv_mutex=no
+
+orig_libs=$LIBS
+
+# User-specified POSIX or UI mutexes.
+#
+# There are two different reasons to specify mutexes: First, the application
+# is already using one type of mutex and doesn't want to mix-and-match (for
+# example, on Solaris, which has POSIX, UI and LWP mutexes). Second, the
+# application's POSIX pthreads mutexes don't support inter-process locking,
+# but the application wants to use them anyway (for example, current Linux
+# and *BSD systems).
+#
+# If we're on Solaris, we insist that -lthread or -lpthread be used. The
+# problem is the Solaris C library has UI/POSIX interface stubs, but they're
+# broken, configuring them for inter-process mutexes doesn't return an error,
+# but it doesn't work either. Otherwise, we try first without the library
+# and then with it: there's some information that SCO/UnixWare/OpenUNIX needs
+# this. [#4950]
+#
+# Test for LWP threads before testing for UI/POSIX threads, we prefer them
+# on Solaris. There's a bug in SunOS 5.7 where applications get pwrite, not
+# pwrite64, if they load the C library before the appropriate threads library,
+# e.g., tclsh using dlopen to load the DB library. By using LWP threads we
+# avoid answering lots of user questions, not to mention the bugs.
+if test "$db_cv_posixmutexes" = yes; then
+ case "$host_os" in
+ solaris*)
+ db_cv_mutex="posix_library_only";;
+ *)
+ db_cv_mutex="posix_only";;
+ esac
+fi
+
+if test "$db_cv_uimutexes" = yes; then
+ case "$host_os" in
+ solaris*)
+ db_cv_mutex="ui_library_only";;
+ *)
+ db_cv_mutex="ui_only";;
+ esac
+fi
+
+# LWP threads: _lwp_XXX
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <synch.h>],[
+ static lwp_mutex_t mi = SHAREDMUTEX;
+ static lwp_cond_t ci = SHAREDCV;
+ lwp_mutex_t mutex = mi;
+ lwp_cond_t cond = ci;
+ exit (
+ _lwp_mutex_lock(&mutex) ||
+ _lwp_mutex_unlock(&mutex));
+], [db_cv_mutex="Solaris/lwp"])
+fi
+
+# UI threads: thr_XXX
+#
+# Try with and without the -lthread library.
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then
+AC_TRY_LINK([
+#include <thread.h>
+#include <synch.h>],[
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+], [db_cv_mutex="UI/threads"])
+fi
+if test "$db_cv_mutex" = no -o \
+ "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then
+LIBS="$LIBS -lthread"
+AC_TRY_LINK([
+#include <thread.h>
+#include <synch.h>],[
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+], [db_cv_mutex="UI/threads/library"])
+LIBS="$orig_libs"
+fi
+if test "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then
+ AC_MSG_ERROR([unable to find UI mutex interfaces])
+fi
+
+# POSIX.1 pthreads: pthread_XXX
+#
+# Try with and without the -lpthread library. If the user specified we use
+# POSIX pthreads mutexes, and we fail to find the full interface, try and
+# configure for just intra-process support.
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then
+ AM_PTHREADS_SHARED("POSIX/pthreads")
+fi
+if test "$db_cv_mutex" = no -o \
+ "$db_cv_mutex" = "posix_only" -o "$db_cv_mutex" = "posix_library_only"; then
+ LIBS="$LIBS -lpthread"
+ AM_PTHREADS_SHARED("POSIX/pthreads/library")
+ LIBS="$orig_libs"
+fi
+if test "$db_cv_mutex" = "posix_only"; then
+ AM_PTHREADS_PRIVATE("POSIX/pthreads/private")
+fi
+if test "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ LIBS="$LIBS -lpthread"
+ AM_PTHREADS_PRIVATE("POSIX/pthreads/library/private")
+ LIBS="$orig_libs"
+fi
+
+if test "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ AC_MSG_ERROR([unable to find POSIX 1003.1 mutex interfaces])
+fi
+
+# msemaphore: HPPA only
+# Try HPPA before general msem test, it needs special alignment.
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/mman.h>],[
+#if defined(__hppa)
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="HP/msem_init"])
+fi
+
+# msemaphore: AIX, OSF/1
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/types.h>
+#include <sys/mman.h>],[
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+], [db_cv_mutex="UNIX/msem_init"])
+fi
+
+# ReliantUNIX
+if test "$db_cv_mutex" = no; then
+LIBS="$LIBS -lmproc"
+AC_TRY_LINK([
+#include <ulocks.h>],[
+ typedef spinlock_t tsl_t;
+ spinlock_t x;
+ initspin(&x, 1);
+ cspinlock(&x);
+ spinunlock(&x);
+], [db_cv_mutex="ReliantUNIX/initspin"])
+LIBS="$orig_libs"
+fi
+
+# SCO: UnixWare has threads in libthread, but OpenServer doesn't.
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__USLC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="SCO/x86/cc-assembly"])
+fi
+
+# abilock_t: SGI
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <abi_mutex.h>],[
+ typedef abilock_t tsl_t;
+ abilock_t x;
+ init_lock(&x);
+ acquire_lock(&x);
+ release_lock(&x);
+], [db_cv_mutex="SGI/init_lock"])
+fi
+
+# sema_t: Solaris
+# The sema_XXX calls do not work on Solaris 5.5. I see no reason to ever
+# turn this test on, unless we find some other platform that uses the old
+# POSIX.1 interfaces. (I plan to move directly to pthreads on Solaris.)
+if test "$db_cv_mutex" = DOESNT_WORK; then
+AC_TRY_LINK([
+#include <synch.h>],[
+ typedef sema_t tsl_t;
+ sema_t x;
+ sema_init(&x, 1, USYNC_PROCESS, NULL);
+ sema_wait(&x);
+ sema_post(&x);
+], [db_cv_mutex="UNIX/sema_init"])
+fi
+
+# _lock_try/_lock_clear: Solaris
+# On Solaris systems without Pthread or UI mutex interfaces, DB uses the
+# undocumented _lock_try _lock_clear function calls instead of either the
+# sema_trywait(3T) or sema_wait(3T) function calls. This is because of
+# problems in those interfaces in some releases of the Solaris C library.
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/machlock.h>],[
+ typedef lock_t tsl_t;
+ lock_t x;
+ _lock_try(&x);
+ _lock_clear(&x);
+], [db_cv_mutex="Solaris/_lock_try"])
+fi
+
+# _check_lock/_clear_lock: AIX
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/atomic_op.h>],[
+ int x;
+ _check_lock(&x,0,1);
+ _clear_lock(&x,0);
+], [db_cv_mutex="AIX/_check_lock"])
+fi
+
+# Alpha/gcc: OSF/1
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__alpha) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="ALPHA/gcc-assembly"])
+fi
+
+# ARM/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__arm__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="ARM/gcc-assembly"])
+fi
+
+# PaRisc/gcc: HP/UX
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__hppa) || defined(__hppa__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="HPPA/gcc-assembly"])
+fi
+
+# PPC/gcc:
+# Test for Apple first, it requires slightly different assembly.
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__) && defined(__APPLE__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="PPC_APPLE/gcc-assembly"])
+fi
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="PPC_GENERIC/gcc-assembly"])
+fi
+
+# Sparc/gcc: SunOS, Solaris
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__sparc__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="Sparc/gcc-assembly"])
+fi
+
+# 68K/gcc: SunOS
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(mc68020) || defined(sun3)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="68K/gcc-assembly"])
+fi
+
+# x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(i386) || defined(__i386__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="x86/gcc-assembly"])
+fi
+
+# S390/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__s390__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="S390/gcc-assembly"])
+fi
+
+# ia64/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__ia64) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="ia64/gcc-assembly"])
+fi
+
+# uts/cc: UTS
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(_UTS)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="UTS/cc-assembly"])
+fi
+
+# default to UNIX fcntl system call mutexes.
+if test "$db_cv_mutex" = no; then
+ db_cv_mutex="UNIX/fcntl"
+fi
+])
+
+case "$db_cv_mutex" in
+68K/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_68K_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_68K_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and 68K assembly language mutexes.]);;
+AIX/_check_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_AIX_CHECK_LOCK)
+ AH_TEMPLATE(HAVE_MUTEX_AIX_CHECK_LOCK,
+ [Define to 1 to use the AIX _check_lock mutexes.]);;
+ALPHA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Alpha assembly language mutexes.]);;
+ARM/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_ARM_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_ARM_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and ARM assembly language mutexes.]);;
+HP/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_HPPA_MSEM_INIT)
+ AH_TEMPLATE(HAVE_MUTEX_HPPA_MSEM_INIT,
+ [Define to 1 to use the msem_XXX mutexes on HP-UX.]);;
+HPPA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.]);;
+ia64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_IA64_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_IA64_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and IA64 assembly language mutexes.]);;
+POSIX/pthreads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);;
+POSIX/pthreads/private) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.])
+ AC_DEFINE(HAVE_MUTEX_THREAD_ONLY)
+ AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY,
+ [Define to 1 to configure mutexes intra-process only.]);;
+POSIX/pthreads/library) LIBS="$LIBS -lpthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);;
+POSIX/pthreads/library/private)
+ LIBS="$LIBS -lpthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.])
+ AC_DEFINE(HAVE_MUTEX_THREAD_ONLY)
+ AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY,
+ [Define to 1 to configure mutexes intra-process only.]);;
+PPC_GENERIC/gcc-assembly)
+ ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and generic PowerPC assembly language.]);;
+PPC_APPLE/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Apple PowerPC assembly language.]);;
+ReliantUNIX/initspin) LIBS="$LIBS -lmproc"
+ ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_RELIANTUNIX_INITSPIN)
+ AH_TEMPLATE(HAVE_MUTEX_RELIANTUNIX_INITSPIN,
+ [Define to 1 to use Reliant UNIX initspin mutexes.]);;
+S390/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_S390_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_S390_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and S/390 assembly language mutexes.]);;
+SCO/x86/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY,
+ [Define to 1 to use the SCO compiler and x86 assembly language mutexes.]);;
+SGI/init_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SGI_INIT_LOCK)
+ AH_TEMPLATE(HAVE_MUTEX_SGI_INIT_LOCK,
+ [Define to 1 to use the SGI XXX_lock mutexes.]);;
+Solaris/_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SOLARIS_LOCK_TRY)
+ AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LOCK_TRY,
+ [Define to 1 to use the Solaris _lock_XXX mutexes.]);;
+Solaris/lwp) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SOLARIS_LWP)
+ AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LWP,
+ [Define to 1 to use the Solaris lwp threads mutexes.]);;
+Sparc/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Sparc assembly language mutexes.]);;
+UI/threads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_UI_THREADS)
+ AH_TEMPLATE(HAVE_MUTEX_UI_THREADS,
+ [Define to 1 to use the UNIX International mutexes.]);;
+UI/threads/library) LIBS="$LIBS -lthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_UI_THREADS)
+ AH_TEMPLATE(HAVE_MUTEX_UI_THREADS,
+ [Define to 1 to use the UNIX International mutexes.]);;
+UNIX/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_MSEM_INIT)
+ AH_TEMPLATE(HAVE_MUTEX_MSEM_INIT,
+ [Define to 1 to use the msem_XXX mutexes on systems other than HP-UX.]);;
+UNIX/sema_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SEMA_INIT)
+ AH_TEMPLATE(HAVE_MUTEX_SEMA_INIT,
+ [Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes.]);;
+UTS/cc-assembly) ADDITIONAL_OBJS="$ADDITIONAL_OBJS uts4.cc${o}"
+ AC_DEFINE(HAVE_MUTEX_UTS_CC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_UTS_CC_ASSEMBLY,
+ [Define to 1 to use the UTS compiler and assembly language mutexes.]);;
+x86/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_X86_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_X86_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and x86 assembly language mutexes.]);;
+UNIX/fcntl) AC_MSG_WARN(
+ [NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE.])
+ ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_FCNTL)
+ AH_TEMPLATE(HAVE_MUTEX_FCNTL,
+ [Define to 1 to use the UNIX fcntl system call mutexes.]);;
+*) AC_MSG_ERROR([Unknown mutex interface: $db_cv_mutex]);;
+esac
+
+if test "$db_cv_mutex" != "UNIX/fcntl"; then
+ AC_DEFINE(HAVE_MUTEX_THREADS)
+ AH_TEMPLATE(HAVE_MUTEX_THREADS,
+ [Define to 1 if fast mutexes are available.])
+fi
+
+# There are 3 classes of mutexes:
+#
+# 1: Mutexes requiring no cleanup, for example, test-and-set mutexes.
+# 2: Mutexes that must be destroyed, but which don't hold permanent system
+# resources, for example, pthread mutexes on MVS aka OS/390 aka z/OS.
+# 3: Mutexes that must be destroyed, even after the process is gone, for
+# example, pthread mutexes on QNX and binary semaphores on VxWorks.
+#
+# DB cannot currently distinguish between #2 and #3 because DB does not know
+# if the application is running environment recovery as part of startup and
+# does not need to do cleanup, or if the environment is being removed and/or
+# recovered in a loop in the application, and so does need to clean up. If
+# we get it wrong, we're going to call the mutex destroy routine on a random
+# piece of memory, which usually works, but just might drop core. For now,
+# we group #2 and #3 into the HAVE_MUTEX_SYSTEM_RESOURCES define, until we
+# have a better solution or reason to solve this in a general way -- so far,
+# the places we've needed to handle this are few.
+AH_TEMPLATE(HAVE_MUTEX_SYSTEM_RESOURCES,
+ [Define to 1 if mutexes hold system resources.])
+
+case "$host_os$db_cv_mutex" in
+*qnx*POSIX/pthread*|openedition*POSIX/pthread*)
+ AC_DEFINE(HAVE_MUTEX_SYSTEM_RESOURCES);;
+esac])
diff --git a/storage/bdb/dist/aclocal/options.ac b/storage/bdb/dist/aclocal/options.ac
new file mode 100644
index 00000000000..ba45c34dfe9
--- /dev/null
+++ b/storage/bdb/dist/aclocal/options.ac
@@ -0,0 +1,197 @@
+# $Id: options.ac,v 11.19 2002/06/25 19:31:48 bostic Exp $
+
+# Process user-specified options.
+AC_DEFUN(AM_OPTIONS_SET, [
+
+# --enable-bigfile was the configuration option that Berkeley DB used before
+# autoconf 2.50 was released (which had --enable-largefile integrated in).
+AC_ARG_ENABLE(bigfile,
+ [AC_HELP_STRING([--disable-bigfile],
+ [Obsolete; use --disable-largefile instead.])],
+ [AC_MSG_ERROR(
+ [--enable-bigfile no longer supported, use --enable-largefile])])
+
+AC_MSG_CHECKING(if --enable-compat185 option specified)
+AC_ARG_ENABLE(compat185,
+ [AC_HELP_STRING([--enable-compat185],
+ [Build DB 1.85 compatibility API.])],
+ [db_cv_compat185="$enable_compat185"], [db_cv_compat185="no"])
+AC_MSG_RESULT($db_cv_compat185)
+
+AC_MSG_CHECKING(if --enable-cxx option specified)
+AC_ARG_ENABLE(cxx,
+ [AC_HELP_STRING([--enable-cxx],
+ [Build C++ API.])],
+ [db_cv_cxx="$enable_cxx"], [db_cv_cxx="no"])
+AC_MSG_RESULT($db_cv_cxx)
+
+AC_MSG_CHECKING(if --enable-debug option specified)
+AC_ARG_ENABLE(debug,
+ [AC_HELP_STRING([--enable-debug],
+ [Build a debugging version.])],
+ [db_cv_debug="$enable_debug"], [db_cv_debug="no"])
+AC_MSG_RESULT($db_cv_debug)
+
+AC_MSG_CHECKING(if --enable-debug_rop option specified)
+AC_ARG_ENABLE(debug_rop,
+ [AC_HELP_STRING([--enable-debug_rop],
+ [Build a version that logs read operations.])],
+ [db_cv_debug_rop="$enable_debug_rop"], [db_cv_debug_rop="no"])
+AC_MSG_RESULT($db_cv_debug_rop)
+
+AC_MSG_CHECKING(if --enable-debug_wop option specified)
+AC_ARG_ENABLE(debug_wop,
+ [AC_HELP_STRING([--enable-debug_wop],
+ [Build a version that logs write operations.])],
+ [db_cv_debug_wop="$enable_debug_wop"], [db_cv_debug_wop="no"])
+AC_MSG_RESULT($db_cv_debug_wop)
+
+AC_MSG_CHECKING(if --enable-diagnostic option specified)
+AC_ARG_ENABLE(diagnostic,
+ [AC_HELP_STRING([--enable-diagnostic],
+ [Build a version with run-time diagnostics.])],
+ [db_cv_diagnostic="$enable_diagnostic"], [db_cv_diagnostic="no"])
+AC_MSG_RESULT($db_cv_diagnostic)
+
+AC_MSG_CHECKING(if --enable-dump185 option specified)
+AC_ARG_ENABLE(dump185,
+ [AC_HELP_STRING([--enable-dump185],
+ [Build db_dump185(1) to dump 1.85 databases.])],
+ [db_cv_dump185="$enable_dump185"], [db_cv_dump185="no"])
+AC_MSG_RESULT($db_cv_dump185)
+
+AC_MSG_CHECKING(if --enable-java option specified)
+AC_ARG_ENABLE(java,
+ [AC_HELP_STRING([--enable-java],
+ [Build Java API.])],
+ [db_cv_java="$enable_java"], [db_cv_java="no"])
+AC_MSG_RESULT($db_cv_java)
+
+AC_MSG_CHECKING(if --enable-posixmutexes option specified)
+AC_ARG_ENABLE(posixmutexes,
+ [AC_HELP_STRING([--enable-posixmutexes],
+ [Force use of POSIX standard mutexes.])],
+ [db_cv_posixmutexes="$enable_posixmutexes"], [db_cv_posixmutexes="no"])
+AC_MSG_RESULT($db_cv_posixmutexes)
+
+AC_MSG_CHECKING(if --enable-rpc option specified)
+AC_ARG_ENABLE(rpc,
+ [AC_HELP_STRING([--enable-rpc],
+ [Build RPC client/server.])],
+ [db_cv_rpc="$enable_rpc"], [db_cv_rpc="no"])
+AC_MSG_RESULT($db_cv_rpc)
+
+AC_MSG_CHECKING(if --enable-tcl option specified)
+AC_ARG_ENABLE(tcl,
+ [AC_HELP_STRING([--enable-tcl],
+ [Build Tcl API.])],
+ [db_cv_tcl="$enable_tcl"], [db_cv_tcl="no"])
+AC_MSG_RESULT($db_cv_tcl)
+
+AC_MSG_CHECKING(if --enable-test option specified)
+AC_ARG_ENABLE(test,
+ [AC_HELP_STRING([--enable-test],
+ [Configure to run the test suite.])],
+ [db_cv_test="$enable_test"], [db_cv_test="no"])
+AC_MSG_RESULT($db_cv_test)
+
+AC_MSG_CHECKING(if --enable-uimutexes option specified)
+AC_ARG_ENABLE(uimutexes,
+ [AC_HELP_STRING([--enable-uimutexes],
+ [Force use of Unix International mutexes.])],
+ [db_cv_uimutexes="$enable_uimutexes"], [db_cv_uimutexes="no"])
+AC_MSG_RESULT($db_cv_uimutexes)
+
+AC_MSG_CHECKING(if --enable-umrw option specified)
+AC_ARG_ENABLE(umrw,
+ [AC_HELP_STRING([--enable-umrw],
+ [Mask harmless uninitialized memory read/writes.])],
+ [db_cv_umrw="$enable_umrw"], [db_cv_umrw="no"])
+AC_MSG_RESULT($db_cv_umrw)
+
+AC_MSG_CHECKING([if --with-embedix=DIR option specified])
+AC_ARG_WITH(embedix,
+ [AC_HELP_STRING([--with-embedix=DIR],
+ [Embedix install directory location.])],
+ [with_embedix="$withval"], [with_embedix="no"])
+if test "$with_embedix" = "no"; then
+ db_cv_embedix="no"
+ AC_MSG_RESULT($with_embedix)
+else
+ db_cv_embedix="yes"
+ if test "$with_embedix" = "yes"; then
+ db_cv_path_embedix_install="/opt/Embedix"
+ else
+ db_cv_path_embedix_install="$with_embedix"
+ fi
+ AC_MSG_RESULT($db_cv_path_embedix_install)
+fi
+
+AC_MSG_CHECKING(if --with-mutex=MUTEX option specified)
+AC_ARG_WITH(mutex,
+ [AC_HELP_STRING([--with-mutex=MUTEX],
+ [Selection of non-standard mutexes.])],
+ [with_mutex="$withval"], [with_mutex="no"])
+if test "$with_mutex" = "yes"; then
+ AC_MSG_ERROR([--with-mutex requires a mutex name argument])
+fi
+if test "$with_mutex" != "no"; then
+ db_cv_mutex="$with_mutex"
+fi
+AC_MSG_RESULT($with_mutex)
+
+AC_MSG_CHECKING(if --with-rpm=DIR option specified)
+AC_ARG_WITH(rpm,
+ [AC_HELP_STRING([--with-rpm=DIR],
+ [Directory location of RPM archive.])],
+ [with_rpm="$withval"], [with_rpm="no"])
+if test "$with_rpm" = "no"; then
+ db_cv_rpm="no"
+else
+ if test "$with_rpm" = "yes"; then
+ AC_MSG_ERROR([--with-rpm requires a directory argument])
+ fi
+ db_cv_rpm="yes"
+ db_cv_path_rpm_archive="$with_rpm"
+fi
+AC_MSG_RESULT($with_rpm)
+
+AC_MSG_CHECKING([if --with-tcl=DIR option specified])
+AC_ARG_WITH(tcl,
+ [AC_HELP_STRING([--with-tcl=DIR],
+ [Directory location of tclConfig.sh.])],
+ [with_tclconfig="$withval"], [with_tclconfig="no"])
+AC_MSG_RESULT($with_tclconfig)
+if test "$with_tclconfig" != "no"; then
+ db_cv_tcl="yes"
+fi
+
+AC_MSG_CHECKING([if --with-uniquename=NAME option specified])
+AC_ARG_WITH(uniquename,
+ [AC_HELP_STRING([--with-uniquename=NAME],
+ [Build a uniquely named library.])],
+ [with_uniquename="$withval"], [with_uniquename="no"])
+if test "$with_uniquename" = "no"; then
+ db_cv_uniquename="no"
+ AC_MSG_RESULT($with_uniquename)
+else
+ db_cv_uniquename="yes"
+ if test "$with_uniquename" != "yes"; then
+ DB_VERSION_UNIQUE_NAME="$with_uniquename"
+ fi
+ AC_MSG_RESULT($DB_VERSION_UNIQUE_NAME)
+fi
+
+# Embedix requires RPM.
+if test "$db_cv_embedix" = "yes"; then
+ if test "$db_cv_rpm" = "no"; then
+ AC_MSG_ERROR([--with-embedix requires --with-rpm])
+ fi
+fi
+
+# Test requires Tcl
+if test "$db_cv_test" = "yes"; then
+ if test "$db_cv_tcl" = "no"; then
+ AC_MSG_ERROR([--enable-test requires --enable-tcl])
+ fi
+fi])
diff --git a/storage/bdb/dist/aclocal/programs.ac b/storage/bdb/dist/aclocal/programs.ac
new file mode 100644
index 00000000000..7bfa1fa2646
--- /dev/null
+++ b/storage/bdb/dist/aclocal/programs.ac
@@ -0,0 +1,80 @@
+# $Id: programs.ac,v 11.20 2001/09/24 02:09:25 bostic Exp $
+
+# Check for programs used in building/installation.
+AC_DEFUN(AM_PROGRAMS_SET, [
+
+AC_CHECK_TOOL(db_cv_path_ar, ar, missing_ar)
+if test "$db_cv_path_ar" = missing_ar; then
+ AC_MSG_ERROR([No ar utility found.])
+fi
+
+AC_CHECK_TOOL(db_cv_path_chmod, chmod, missing_chmod)
+if test "$db_cv_path_chmod" = missing_chmod; then
+ AC_MSG_ERROR([No chmod utility found.])
+fi
+
+AC_CHECK_TOOL(db_cv_path_cp, cp, missing_cp)
+if test "$db_cv_path_cp" = missing_cp; then
+ AC_MSG_ERROR([No cp utility found.])
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ AC_CHECK_TOOL(path_ldconfig, ldconfig, missing_ldconfig)
+ AC_PATH_PROG(db_cv_path_ldconfig, $path_ldconfig, missing_ldconfig)
+ if test "$db_cv_path_ldconfig" != missing_ldconfig; then
+ RPM_POST_INSTALL="%post -p $db_cv_path_ldconfig"
+ RPM_POST_UNINSTALL="%postun -p $db_cv_path_ldconfig"
+ fi
+fi
+
+AC_CHECK_TOOL(db_cv_path_ln, ln, missing_ln)
+if test "$db_cv_path_ln" = missing_ln; then
+ AC_MSG_ERROR([No ln utility found.])
+fi
+
+AC_CHECK_TOOL(db_cv_path_mkdir, mkdir, missing_mkdir)
+if test "$db_cv_path_mkdir" = missing_mkdir; then
+ AC_MSG_ERROR([No mkdir utility found.])
+fi
+
+# We need a complete path for ranlib, because it doesn't exist on some
+# architectures because the ar utility packages the library itself.
+AC_CHECK_TOOL(path_ranlib, ranlib, missing_ranlib)
+AC_PATH_PROG(db_cv_path_ranlib, $path_ranlib, missing_ranlib)
+
+AC_CHECK_TOOL(db_cv_path_rm, rm, missing_rm)
+if test "$db_cv_path_rm" = missing_rm; then
+ AC_MSG_ERROR([No rm utility found.])
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ AC_CHECK_TOOL(db_cv_path_rpm, rpm, missing_rpm)
+ if test "$db_cv_path_rpm" = missing_rpm; then
+ AC_MSG_ERROR([No rpm utility found.])
+ fi
+fi
+
+# We need a complete path for sh, because some implementations of make
+# get upset if SHELL is set to just the command name.
+AC_CHECK_TOOL(path_sh, sh, missing_sh)
+AC_PATH_PROG(db_cv_path_sh, $path_sh, missing_sh)
+if test "$db_cv_path_sh" = missing_sh; then
+ AC_MSG_ERROR([No sh utility found.])
+fi
+
+# Don't strip the binaries if --enable-debug was specified.
+if test "$db_cv_debug" = yes; then
+ db_cv_path_strip=debug_build_no_strip
+else
+ AC_CHECK_TOOL(path_strip, strip, missing_strip)
+ AC_PATH_PROG(db_cv_path_strip, $path_strip, missing_strip)
+fi
+
+if test "$db_cv_test" = "yes"; then
+ AC_CHECK_TOOL(db_cv_path_kill, kill, missing_kill)
+ if test "$db_cv_path_kill" = missing_kill; then
+ AC_MSG_ERROR([No kill utility found.])
+ fi
+fi
+
+])
diff --git a/storage/bdb/dist/aclocal/sosuffix.ac b/storage/bdb/dist/aclocal/sosuffix.ac
new file mode 100644
index 00000000000..1197128293b
--- /dev/null
+++ b/storage/bdb/dist/aclocal/sosuffix.ac
@@ -0,0 +1,69 @@
+# $Id: sosuffix.ac,v 1.1 2002/07/08 13:15:05 dda Exp $
+# Determine shared object suffixes.
+#
+# Our method is to use the libtool variable $library_names_spec,
+# set by using AC_PROG_LIBTOOL. This variable is a snippet of shell
+# defined in terms of $versuffix, $release, $libname, $module and $jnimodule.
+# We want to eval it and grab the suffix used for shared objects.
+# By setting $module and $jnimodule to yes/no, we obtain the suffixes
+# used to create dlloadable, or java loadable modules.
+# On many (*nix) systems, these all evaluate to .so, but there
+# are some notable exceptions.
+
+# This macro is used internally to discover the suffix for the current
+# settings of $module and $jnimodule. The result is stored in $_SOSUFFIX.
+AC_DEFUN(_SOSUFFIX_INTERNAL, [
+ versuffix=""
+ release=""
+ libname=libfoo
+ eval library_names=\"$library_names_spec\"
+ _SOSUFFIX=`echo "$library_names" | sed -e 's/.*\.\([[a-zA-Z0-9_]]*\).*/\1/'`
+ if test "$_SOSUFFIX" = '' ; then
+ _SOSUFFIX=so
+ if test "$enable_shared" = "yes" && test "$_SOSUFFIX_MESSAGE" = ""; then
+ _SOSUFFIX_MESSAGE=yes
+ AC_MSG_WARN([libtool may not know about this architecture.])
+ AC_MSG_WARN([assuming .$_SOSUFFIX suffix for dynamic libraries.])
+ fi
+ fi
+])
+
+# SOSUFFIX_CONFIG will set the variable SOSUFFIX to be the
+# shared library extension used for general linking, not dlopen.
+AC_DEFUN(SOSUFFIX_CONFIG, [
+ AC_MSG_CHECKING([SOSUFFIX from libtool])
+ module=no
+ jnimodule=no
+ _SOSUFFIX_INTERNAL
+ SOSUFFIX=$_SOSUFFIX
+ AC_MSG_RESULT($SOSUFFIX)
+ AC_SUBST(SOSUFFIX)
+])
+
+# MODSUFFIX_CONFIG will set the variable MODSUFFIX to be the
+# shared library extension used for dlopen'ed modules.
+# To discover this, we set $module, simulating libtool's -module option.
+AC_DEFUN(MODSUFFIX_CONFIG, [
+ AC_MSG_CHECKING([MODSUFFIX from libtool])
+ module=yes
+ jnimodule=no
+ _SOSUFFIX_INTERNAL
+ MODSUFFIX=$_SOSUFFIX
+ AC_MSG_RESULT($MODSUFFIX)
+ AC_SUBST(MODSUFFIX)
+])
+
+# JMODSUFFIX_CONFIG will set the variable JMODSUFFIX to be the
+# shared library extension used for JNI modules opened by Java.
+# To discover this, we set $jnimodule, simulating libtool's -jnimodule option.
+# -jnimodule is currently a Sleepycat local extension to libtool.
+AC_DEFUN(JMODSUFFIX_CONFIG, [
+ AC_MSG_CHECKING([JMODSUFFIX from libtool])
+ module=yes
+ jnimodule=yes
+ _SOSUFFIX_INTERNAL
+ JMODSUFFIX=$_SOSUFFIX
+ AC_MSG_RESULT($JMODSUFFIX)
+ AC_SUBST(JMODSUFFIX)
+])
+
diff --git a/storage/bdb/dist/aclocal/tcl.ac b/storage/bdb/dist/aclocal/tcl.ac
new file mode 100644
index 00000000000..80ed19c5a97
--- /dev/null
+++ b/storage/bdb/dist/aclocal/tcl.ac
@@ -0,0 +1,136 @@
+# $Id: tcl.ac,v 11.14 2002/09/07 17:25:58 dda Exp $
+
+# The SC_* macros in this file are from the unix/tcl.m4 files in the Tcl
+# 8.3.0 distribution, with some minor changes. For this reason, license
+# terms for the Berkeley DB distribution dist/aclocal/tcl.m4 file are as
+# follows (copied from the license.terms file in the Tcl 8.3 distribution):
+#
+# This software is copyrighted by the Regents of the University of
+# California, Sun Microsystems, Inc., Scriptics Corporation,
+# and other parties. The following terms apply to all files associated
+# with the software unless explicitly disclaimed in individual files.
+#
+# The authors hereby grant permission to use, copy, modify, distribute,
+# and license this software and its documentation for any purpose, provided
+# that existing copyright notices are retained in all copies and that this
+# notice is included verbatim in any distributions. No written agreement,
+# license, or royalty fee is required for any of the authorized uses.
+# Modifications to this software may be copyrighted by their authors
+# and need not follow the licensing terms described here, provided that
+# the new terms are clearly indicated on the first page of each file where
+# they apply.
+#
+# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
+# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
+# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
+# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+# MODIFICATIONS.
+#
+# GOVERNMENT USE: If you are acquiring this software on behalf of the
+# U.S. government, the Government shall have only "Restricted Rights"
+# in the software and related documentation as defined in the Federal
+# Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+# are acquiring the software on behalf of the Department of Defense, the
+# software shall be classified as "Commercial Computer Software" and the
+# Government shall have only "Restricted Rights" as defined in Clause
+# 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
+# authors grant the U.S. Government and others acting in its behalf
+# permission to use and distribute the software in accordance with the
+# terms specified in this license.
+
+AC_DEFUN(SC_PATH_TCLCONFIG, [
+ AC_CACHE_VAL(ac_cv_c_tclconfig,[
+
+ # First check to see if --with-tclconfig was specified.
+ if test "${with_tclconfig}" != no; then
+ if test -f "${with_tclconfig}/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)`
+ else
+ AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh])
+ fi
+ fi
+
+ # check in a few common install locations
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ for i in `ls -d /usr/local/lib 2>/dev/null` ; do
+ if test -f "$i/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd $i; pwd)`
+ break
+ fi
+ done
+ fi
+
+ ])
+
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ TCL_BIN_DIR="# no Tcl configs found"
+ AC_MSG_ERROR(can't find Tcl configuration definitions)
+ else
+ TCL_BIN_DIR=${ac_cv_c_tclconfig}
+ fi
+])
+
+AC_DEFUN(SC_LOAD_TCLCONFIG, [
+ AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh])
+
+ if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then
+ AC_MSG_RESULT([loading])
+ . $TCL_BIN_DIR/tclConfig.sh
+ else
+ AC_MSG_RESULT([file not found])
+ fi
+
+ #
+ # The eval is required to do the TCL_DBGX substitution in the
+ # TCL_LIB_FILE variable
+ #
+ eval TCL_LIB_FILE="${TCL_LIB_FILE}"
+ eval TCL_LIB_FLAG="${TCL_LIB_FLAG}"
+ eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\""
+
+ #
+ # If the DB Tcl library isn't loaded with the Tcl spec and library
+ # flags on AIX, the resulting libdb_tcl-X.Y.so.0 will drop core at
+ # load time. [#4843] Furthermore, with Tcl 8.3, the link flags
+ # given by the Tcl spec are insufficient for our use. [#5779]
+ #
+ case "$host_os" in
+ aix4.[[2-9]].*)
+ LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG"
+ LIBTSO_LIBS="$LIBTSO_LIBS -L$TCL_EXEC_PREFIX/lib -ltcl$TCL_VERSION";;
+ aix*)
+ LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG";;
+ esac
+ AC_SUBST(TCL_BIN_DIR)
+ AC_SUBST(TCL_SRC_DIR)
+ AC_SUBST(TCL_LIB_FILE)
+
+ AC_SUBST(TCL_TCLSH)
+ TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}"
+])
+
+# Optional Tcl API.
+AC_DEFUN(AM_TCL_LOAD, [
+if test "$db_cv_tcl" = "yes"; then
+ if test "$enable_shared" = "no"; then
+ AC_MSG_ERROR([Tcl requires shared libraries])
+ fi
+
+ AC_SUBST(TCFLAGS)
+
+ SC_PATH_TCLCONFIG
+ SC_LOAD_TCLCONFIG
+
+ if test x"$TCL_PREFIX" != x && test -f "$TCL_PREFIX/include/tcl.h"; then
+ TCFLAGS="-I$TCL_PREFIX/include"
+ fi
+
+ INSTALL_LIBS="${INSTALL_LIBS} \$(libtso_target)"
+fi])
diff --git a/storage/bdb/dist/aclocal/types.ac b/storage/bdb/dist/aclocal/types.ac
new file mode 100644
index 00000000000..db8aaac6884
--- /dev/null
+++ b/storage/bdb/dist/aclocal/types.ac
@@ -0,0 +1,146 @@
+# $Id: types.ac,v 11.10 2001/12/10 14:16:49 bostic Exp $
+
+# db.h includes <sys/types.h> and <stdio.h>, not the other default includes
+# autoconf usually includes. For that reason, we specify a set of includes
+# for all type checking tests. [#5060]
+AC_DEFUN(DB_INCLUDES, [[
+#include <sys/types.h>
+#include <stdio.h>]])
+
+# Check the sizes we know about, and see if any of them match what's needed.
+#
+# Prefer ints to anything else, because read, write and others historically
+# returned an int.
+AC_DEFUN(AM_SEARCH_USIZES, [
+ case "$3" in
+ "$ac_cv_sizeof_unsigned_int")
+ $1="typedef unsigned int $2;";;
+ "$ac_cv_sizeof_unsigned_char")
+ $1="typedef unsigned char $2;";;
+ "$ac_cv_sizeof_unsigned_short")
+ $1="typedef unsigned short $2;";;
+ "$ac_cv_sizeof_unsigned_long")
+ $1="typedef unsigned long $2;";;
+ *)
+ AC_MSG_ERROR([No unsigned $3-byte integral type]);;
+ esac])
+AC_DEFUN(AM_SEARCH_SSIZES, [
+ case "$3" in
+ "$ac_cv_sizeof_int")
+ $1="typedef int $2;";;
+ "$ac_cv_sizeof_char")
+ $1="typedef char $2;";;
+ "$ac_cv_sizeof_short")
+ $1="typedef short $2;";;
+ "$ac_cv_sizeof_long")
+ $1="typedef long $2;";;
+ *)
+ AC_MSG_ERROR([No signed $3-byte integral type]);;
+ esac])
+
+# Check for the standard system types.
+AC_DEFUN(AM_TYPES, [
+
+# We need to know the sizes of various objects on this system.
+# We don't use the SIZEOF_XXX values created by autoconf.
+AC_CHECK_SIZEOF(char,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned char,, DB_INCLUDES)
+AC_CHECK_SIZEOF(short,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned short,, DB_INCLUDES)
+AC_CHECK_SIZEOF(int,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned int,, DB_INCLUDES)
+AC_CHECK_SIZEOF(long,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned long,, DB_INCLUDES)
+AC_CHECK_SIZEOF(size_t,, DB_INCLUDES)
+AC_CHECK_SIZEOF(char *,, DB_INCLUDES)
+
+# We require off_t and size_t, and we don't try to substitute our own
+# if we can't find them.
+AC_CHECK_TYPE(off_t,,, DB_INCLUDES)
+if test "$ac_cv_type_off_t" = no; then
+ AC_MSG_ERROR([No off_t type.])
+fi
+
+AC_CHECK_TYPE(size_t,,, DB_INCLUDES)
+if test "$ac_cv_type_size_t" = no; then
+ AC_MSG_ERROR([No size_t type.])
+fi
+
+# We look for u_char, u_short, u_int, u_long -- if we can't find them,
+# we create our own.
+AC_SUBST(u_char_decl)
+AC_CHECK_TYPE(u_char,,, DB_INCLUDES)
+if test "$ac_cv_type_u_char" = no; then
+ u_char_decl="typedef unsigned char u_char;"
+fi
+
+AC_SUBST(u_short_decl)
+AC_CHECK_TYPE(u_short,,, DB_INCLUDES)
+if test "$ac_cv_type_u_short" = no; then
+ u_short_decl="typedef unsigned short u_short;"
+fi
+
+AC_SUBST(u_int_decl)
+AC_CHECK_TYPE(u_int,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int" = no; then
+ u_int_decl="typedef unsigned int u_int;"
+fi
+
+AC_SUBST(u_long_decl)
+AC_CHECK_TYPE(u_long,,, DB_INCLUDES)
+if test "$ac_cv_type_u_long" = no; then
+ u_long_decl="typedef unsigned long u_long;"
+fi
+
+AC_SUBST(u_int8_decl)
+AC_CHECK_TYPE(u_int8_t,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int8_t" = no; then
+ AM_SEARCH_USIZES(u_int8_decl, u_int8_t, 1)
+fi
+
+AC_SUBST(u_int16_decl)
+AC_CHECK_TYPE(u_int16_t,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int16_t" = no; then
+ AM_SEARCH_USIZES(u_int16_decl, u_int16_t, 2)
+fi
+
+AC_SUBST(int16_decl)
+AC_CHECK_TYPE(int16_t,,, DB_INCLUDES)
+if test "$ac_cv_type_int16_t" = no; then
+ AM_SEARCH_SSIZES(int16_decl, int16_t, 2)
+fi
+
+AC_SUBST(u_int32_decl)
+AC_CHECK_TYPE(u_int32_t,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int32_t" = no; then
+ AM_SEARCH_USIZES(u_int32_decl, u_int32_t, 4)
+fi
+
+AC_SUBST(int32_decl)
+AC_CHECK_TYPE(int32_t,,, DB_INCLUDES)
+if test "$ac_cv_type_int32_t" = no; then
+ AM_SEARCH_SSIZES(int32_decl, int32_t, 4)
+fi
+
+# Check for ssize_t -- if none exists, find a signed integral type that's
+# the same size as a size_t.
+AC_SUBST(ssize_t_decl)
+AC_CHECK_TYPE(ssize_t,,, DB_INCLUDES)
+if test "$ac_cv_type_ssize_t" = no; then
+ AM_SEARCH_SSIZES(ssize_t_decl, ssize_t, $ac_cv_sizeof_size_t)
+fi
+
+# Find the largest integral type.
+AC_SUBST(db_align_t_decl)
+AC_CHECK_TYPE(unsigned long long,,, DB_INCLUDES)
+if test "$ac_cv_type_unsigned_long_long" = no; then
+ db_align_t_decl="typedef unsigned long db_align_t;"
+else
+ db_align_t_decl="typedef unsigned long long db_align_t;"
+fi
+
+# Find an integral type which is the same size as a pointer.
+AC_SUBST(db_alignp_t_decl)
+AM_SEARCH_USIZES(db_alignp_t_decl, db_alignp_t, $ac_cv_sizeof_char_p)
+
+])
diff --git a/storage/bdb/dist/aclocal_java/ac_check_class.ac b/storage/bdb/dist/aclocal_java/ac_check_class.ac
new file mode 100644
index 00000000000..915198af567
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_check_class.ac
@@ -0,0 +1,107 @@
+dnl @synopsis AC_CHECK_CLASS
+dnl
+dnl AC_CHECK_CLASS tests the existence of a given Java class, either in
+dnl a jar or in a '.class' file.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on other. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_check_class.ac,v 1.1 2001/08/23 16:58:42 dda Exp $
+dnl
+AC_DEFUN([AC_CHECK_CLASS],[
+AC_REQUIRE([AC_PROG_JAVA])
+ac_var_name=`echo $1 | sed 's/\./_/g'`
+dnl Normally I'd use an AC_CACHE_CHECK here but since the variable name is
+dnl dynamic I need an extra level of extraction
+AC_MSG_CHECKING([for $1 class])
+AC_CACHE_VAL(ac_cv_class_$ac_var_name, [
+if test x$ac_cv_prog_uudecode_base64 = xyes; then
+dnl /**
+dnl * Test.java: used to test dynamically if a class exists.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl Class lib;
+dnl if (argv.length < 1)
+dnl {
+dnl System.err.println ("Missing argument");
+dnl System.exit (77);
+dnl }
+dnl try
+dnl {
+dnl lib = Class.forName (argv[0]);
+dnl }
+dnl catch (ClassNotFoundException e)
+dnl {
+dnl System.exit (1);
+dnl }
+dnl lib = null;
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AKQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAANlcnIBABVMamF2YS9pby9QcmludFN0cmVhbTsJ
+AA0ACQcADgEAEGphdmEvbGFuZy9TeXN0ZW0IABABABBNaXNzaW5nIGFyZ3Vt
+ZW50DAASABMBAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWCgAV
+ABEHABYBABNqYXZhL2lvL1ByaW50U3RyZWFtDAAYABkBAARleGl0AQAEKEkp
+VgoADQAXDAAcAB0BAAdmb3JOYW1lAQAlKExqYXZhL2xhbmcvU3RyaW5nOylM
+amF2YS9sYW5nL0NsYXNzOwoAHwAbBwAgAQAPamF2YS9sYW5nL0NsYXNzBwAi
+AQAgamF2YS9sYW5nL0NsYXNzTm90Rm91bmRFeGNlcHRpb24BAAY8aW5pdD4B
+AAMoKVYMACMAJAoAAwAlAQAKU291cmNlRmlsZQEACVRlc3QuamF2YQAhAAEA
+AwAAAAAAAgAJAAUABgABAAcAAABtAAMAAwAAACkqvgSiABCyAAwSD7YAFBBN
+uAAaKgMyuAAeTKcACE0EuAAaAUwDuAAasQABABMAGgAdACEAAQAIAAAAKgAK
+AAAACgAAAAsABgANAA4ADgATABAAEwASAB4AFgAiABgAJAAZACgAGgABACMA
+JAABAAcAAAAhAAEAAQAAAAUqtwAmsQAAAAEACAAAAAoAAgAAAAQABAAEAAEA
+JwAAAAIAKA==
+====
+EOF
+ if uudecode$EXEEXT Test.uue; then
+ :
+ else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC
+ echo "configure: failed file was:" >&AC_FD_CC
+ cat Test.uue >&AC_FD_CC
+ ac_cv_prog_uudecode_base64=no
+ fi
+ rm -f Test.uue
+ if AC_TRY_COMMAND($JAVA $JAVAFLAGS Test $1) >/dev/null 2>&1; then
+ eval "ac_cv_class_$ac_var_name=yes"
+ else
+ eval "ac_cv_class_$ac_var_name=no"
+ fi
+ rm -f Test.class
+else
+ AC_TRY_COMPILE_JAVA([$1], , [eval "ac_cv_class_$ac_var_name=yes"],
+ [eval "ac_cv_class_$ac_var_name=no"])
+fi
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+eval "HAVE_$ac_var_name=$`echo ac_cv_class_$ac_var_val`"
+HAVE_LAST_CLASS=$ac_var_val
+if test x$ac_var_val = xyes; then
+ ifelse([$2], , :, [$2])
+else
+ ifelse([$3], , :, [$3])
+fi
+])
+dnl for some reason the above statement didn't fall through here?
+dnl do scripts have variable scoping?
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+AC_MSG_RESULT($ac_var_val)
+])
diff --git a/storage/bdb/dist/aclocal_java/ac_check_classpath.ac b/storage/bdb/dist/aclocal_java/ac_check_classpath.ac
new file mode 100644
index 00000000000..4a78d0f8785
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_check_classpath.ac
@@ -0,0 +1,23 @@
+dnl @synopsis AC_CHECK_CLASSPATH
+dnl
+dnl AC_CHECK_CLASSPATH just displays the CLASSPATH, for the edification
+dnl of the user.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on other. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_check_classpath.ac,v 1.1 2001/08/23 16:58:42 dda Exp $
+dnl
+AC_DEFUN([AC_CHECK_CLASSPATH],[
+if test "x$CLASSPATH" = x; then
+ echo "You have no CLASSPATH, I hope it is good"
+else
+ echo "You have CLASSPATH $CLASSPATH, hope it is correct"
+fi
+])
diff --git a/storage/bdb/dist/aclocal_java/ac_check_junit.ac b/storage/bdb/dist/aclocal_java/ac_check_junit.ac
new file mode 100644
index 00000000000..3b81d1dc3fc
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_check_junit.ac
@@ -0,0 +1,54 @@
+dnl @synopsis AC_CHECK_JUNIT
+dnl
+dnl AC_CHECK_JUNIT tests the availability of the Junit testing
+dnl framework, and set some variables for conditional compilation
+dnl of the test suite by automake.
+dnl
+dnl If available, JUNIT is set to a command launching the text
+dnl based user interface of Junit, @JAVA_JUNIT@ is set to $JAVA_JUNIT
+dnl and @TESTS_JUNIT@ is set to $TESTS_JUNIT, otherwise they are set
+dnl to empty values.
+dnl
+dnl You can use these variables in your Makefile.am file like this :
+dnl
+dnl # Some of the following classes are built only if junit is available
+dnl JAVA_JUNIT = Class1Test.java Class2Test.java AllJunitTests.java
+dnl
+dnl noinst_JAVA = Example1.java Example2.java @JAVA_JUNIT@
+dnl
+dnl EXTRA_JAVA = $(JAVA_JUNIT)
+dnl
+dnl TESTS_JUNIT = AllJunitTests
+dnl
+dnl TESTS = StandaloneTest1 StandaloneTest2 @TESTS_JUNIT@
+dnl
+dnl EXTRA_TESTS = $(TESTS_JUNIT)
+dnl
+dnl AllJunitTests :
+dnl echo "#! /bin/sh" > $@
+dnl echo "exec @JUNIT@ my.package.name.AllJunitTests" >> $@
+dnl chmod +x $@
+dnl
+dnl @author Luc Maisonobe
+dnl @version $Id: ac_check_junit.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+AC_DEFUN([AC_CHECK_JUNIT],[
+AC_CACHE_VAL(ac_cv_prog_JUNIT,[
+AC_CHECK_CLASS(junit.textui.TestRunner)
+if test x"`eval 'echo $ac_cv_class_junit_textui_TestRunner'`" != xno ; then
+ ac_cv_prog_JUNIT='$(CLASSPATH_ENV) $(JAVA) $(JAVAFLAGS) junit.textui.TestRunner'
+fi])
+AC_MSG_CHECKING([for junit])
+if test x"`eval 'echo $ac_cv_prog_JUNIT'`" != x ; then
+ JUNIT="$ac_cv_prog_JUNIT"
+ JAVA_JUNIT='$(JAVA_JUNIT)'
+ TESTS_JUNIT='$(TESTS_JUNIT)'
+else
+ JUNIT=
+ JAVA_JUNIT=
+ TESTS_JUNIT=
+fi
+AC_MSG_RESULT($JAVA_JUNIT)
+AC_SUBST(JUNIT)
+AC_SUBST(JAVA_JUNIT)
+AC_SUBST(TESTS_JUNIT)])
diff --git a/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac b/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
new file mode 100644
index 00000000000..ab62e33c887
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
@@ -0,0 +1,26 @@
+dnl @synopsis AC_CHECK_RQRD_CLASS
+dnl
+dnl AC_CHECK_RQRD_CLASS tests the existence of a given Java class, either in
+dnl a jar or in a '.class' file and fails if it doesn't exist.
+dnl Its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on other. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_check_rqrd_class.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+
+AC_DEFUN([AC_CHECK_RQRD_CLASS],[
+CLASS=`echo $1|sed 's/\./_/g'`
+AC_CHECK_CLASS($1)
+if test "$HAVE_LAST_CLASS" = "no"; then
+ AC_MSG_ERROR([Required class $1 missing, exiting.])
+fi
+])
diff --git a/storage/bdb/dist/aclocal_java/ac_java_options.ac b/storage/bdb/dist/aclocal_java/ac_java_options.ac
new file mode 100644
index 00000000000..567afca7fa5
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_java_options.ac
@@ -0,0 +1,32 @@
+dnl @synopsis AC_JAVA_OPTIONS
+dnl
+dnl AC_JAVA_OPTIONS adds configure command line options used for Java m4
+dnl macros. This Macro is optional.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on other. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version $Id: ac_java_options.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+AC_DEFUN([AC_JAVA_OPTIONS],[
+AC_ARG_WITH(java-prefix,
+ [ --with-java-prefix=PFX prefix where Java runtime is installed (optional)])
+AC_ARG_WITH(javac-flags,
+ [ --with-javac-flags=FLAGS flags to pass to the Java compiler (optional)])
+AC_ARG_WITH(java-flags,
+ [ --with-java-flags=FLAGS flags to pass to the Java VM (optional)])
+JAVAPREFIX=$with_java_prefix
+JAVACFLAGS=$with_javac_flags
+JAVAFLAGS=$with_java_flags
+AC_SUBST(JAVAPREFIX)dnl
+AC_SUBST(JAVACFLAGS)dnl
+AC_SUBST(JAVAFLAGS)dnl
+AC_SUBST(JAVA)dnl
+AC_SUBST(JAVAC)dnl
+])
diff --git a/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac b/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
new file mode 100644
index 00000000000..65cfbbfd13e
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
@@ -0,0 +1,112 @@
+dnl @synopsis AC_JNI_INCLUDE_DIR
+dnl
+dnl AC_JNI_INCLUDE_DIR finds include directories needed
+dnl for compiling programs using the JNI interface.
+dnl
+dnl JNI include directories are usually in the java distribution
+dnl This is deduced from the value of JAVAC. When this macro
+dnl completes, a list of directories is left in the variable
+dnl JNI_INCLUDE_DIRS.
+dnl
+dnl Example usage follows:
+dnl
+dnl AC_JNI_INCLUDE_DIR
+dnl
+dnl for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
+dnl do
+dnl CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
+dnl done
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVAC=yourcompiler before calling
+dnl AC_JNI_INCLUDE_DIR
+dnl
+dnl - at the configure level, setenv JAVAC
+dnl
+dnl Note: This macro can work with the autoconf M4 macros for Java programs.
+dnl This particular macro is not part of the original set of macros.
+dnl
+dnl @author Don Anderson <dda@sleepycat.com>
+dnl @version $Id: ac_jni_include_dirs.ac,v 1.8 2002/09/04 21:27:30 dda Exp $
+dnl
+AC_DEFUN(AC_JNI_INCLUDE_DIR,[
+
+JNI_INCLUDE_DIRS=""
+
+test "x$JAVAC" = x && AC_MSG_ERROR(['$JAVAC' undefined])
+AC_PATH_PROG(_ACJNI_JAVAC, $JAVAC, $JAVAC)
+test ! -x "$_ACJNI_JAVAC" && AC_MSG_ERROR([$JAVAC could not be found in path])
+AC_MSG_CHECKING(absolute path of $JAVAC)
+case "$_ACJNI_JAVAC" in
+/*) AC_MSG_RESULT($_ACJNI_JAVAC);;
+*) AC_MSG_ERROR([$_ACJNI_JAVAC is not an absolute path name]);;
+esac
+
+_ACJNI_FOLLOW_SYMLINKS("$_ACJNI_JAVAC")
+_JTOPDIR=`echo "$_ACJNI_FOLLOWED" | sed -e 's://*:/:g' -e 's:/[[^/]]*$::'`
+case "$host_os" in
+ darwin*) _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'`
+ _JINC="$_JTOPDIR/Headers";;
+ *) _JINC="$_JTOPDIR/include";;
+esac
+
+# If we find jni.h in /usr/include, then it's not a java-only tree, so
+# don't add /usr/include or subdirectories to the list of includes.
+# An extra -I/usr/include can foul things up with newer gcc's.
+if test -f "$_JINC/jni.h"; then
+ if test "$_JINC" != "/usr/include"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JINC"
+ fi
+else
+ _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'`
+ if test -f "$_JTOPDIR/include/jni.h"; then
+ if test "$_JTOPDIR" != "/usr"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include"
+ fi
+ else
+ AC_MSG_ERROR([cannot find java include files])
+ fi
+fi
+
+# get the likely subdirectories for system specific java includes
+if test "$_JTOPDIR" != "/usr"; then
+ case "$host_os" in
+ aix*) _JNI_INC_SUBDIRS="aix";;
+ bsdi*) _JNI_INC_SUBDIRS="bsdos";;
+ linux*) _JNI_INC_SUBDIRS="linux genunix";;
+ osf*) _JNI_INC_SUBDIRS="alpha";;
+ solaris*) _JNI_INC_SUBDIRS="solaris";;
+ *) _JNI_INC_SUBDIRS="genunix";;
+ esac
+fi
+
+# add any subdirectories that are present
+for _JINCSUBDIR in $_JNI_INC_SUBDIRS
+do
+ if test -d "$_JTOPDIR/include/$_JINCSUBDIR"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include/$_JINCSUBDIR"
+ fi
+done
+])
+
+# _ACJNI_FOLLOW_SYMLINKS <path>
+# Follows symbolic links on <path>,
+# finally setting variable _ACJNI_FOLLOWED
+# --------------------
+AC_DEFUN(_ACJNI_FOLLOW_SYMLINKS,[
+# find the include directory relative to the javac executable
+_cur="$1"
+while ls -ld "$_cur" 2>/dev/null | grep " -> " >/dev/null; do
+ AC_MSG_CHECKING(symlink for $_cur)
+ _slink=`ls -ld "$_cur" | sed 's/.* -> //'`
+ case "$_slink" in
+ /*) _cur="$_slink";;
+ # 'X' avoids triggering unwanted echo options.
+ *) _cur=`echo "X$_cur" | sed -e 's/^X//' -e 's:[[^/]]*$::'`"$_slink";;
+ esac
+ AC_MSG_RESULT($_cur)
+done
+_ACJNI_FOLLOWED="$_cur"
+])# _ACJNI
+
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_jar.ac b/storage/bdb/dist/aclocal_java/ac_prog_jar.ac
new file mode 100644
index 00000000000..9dfa1be6dad
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_prog_jar.ac
@@ -0,0 +1,36 @@
+dnl @synopsis AC_PROG_JAR
+dnl
+dnl AC_PROG_JAR tests for an existing jar program. It uses the environment
+dnl variable JAR then tests in sequence various common jar programs.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAR=yourcompiler before calling
+dnl AC_PROG_JAR
+dnl
+dnl - at the configure level, setenv JAR
+dnl
+dnl You can use the JAR variable in your Makefile.in, with @JAR@.
+dnl
+dnl Note: This macro depends on the autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download that whole set, some
+dnl macros depend on other. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl
+dnl The general documentation of those macros, as well as the sample
+dnl configure.in, is included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Egon Willighagen <egonw@sci.kun.nl>
+dnl @version $Id: ac_prog_jar.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAR],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar$EXEEXT)
+else
+ test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar, $JAVAPREFIX)
+fi
+test "x$JAR" = x && AC_MSG_ERROR([no acceptable jar program found in \$PATH])
+AC_PROVIDE([$0])dnl
+])
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_java.ac b/storage/bdb/dist/aclocal_java/ac_prog_java.ac
new file mode 100644
index 00000000000..8cb24445132
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_prog_java.ac
@@ -0,0 +1,77 @@
+dnl @synopsis AC_PROG_JAVA
+dnl
+dnl Here is a summary of the main macros:
+dnl
+dnl AC_PROG_JAVAC: finds a Java compiler.
+dnl
+dnl AC_PROG_JAVA: finds a Java virtual machine.
+dnl
+dnl AC_CHECK_CLASS: finds if we have the given class (beware of CLASSPATH!).
+dnl
+dnl AC_CHECK_RQRD_CLASS: finds if we have the given class and stops otherwise.
+dnl
+dnl AC_TRY_COMPILE_JAVA: attempt to compile user given source.
+dnl
+dnl AC_TRY_RUN_JAVA: attempt to compile and run user given source.
+dnl
+dnl AC_JAVA_OPTIONS: adds Java configure options.
+dnl
+dnl AC_PROG_JAVA tests an existing Java virtual machine. It uses the
+dnl environment variable JAVA then tests in sequence various common Java
+dnl virtual machines. For political reasons, it starts with the free ones.
+dnl You *must* call [AC_PROG_JAVAC] before.
+dnl
+dnl If you want to force a specific VM:
+dnl
+dnl - at the configure.in level, set JAVA=yourvm before calling AC_PROG_JAVA
+dnl (but after AC_INIT)
+dnl
+dnl - at the configure level, setenv JAVA
+dnl
+dnl You can use the JAVA variable in your Makefile.in, with @JAVA@.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl TODO: allow to exclude virtual machines (rationale: most Java programs
+dnl cannot run with some VM like kaffe).
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on other. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl
+dnl A Web page, with a link to the latest CVS snapshot is at
+dnl <http://www.internatif.org/bortzmeyer/autoconf-Java/>.
+dnl
+dnl This is a sample configure.in
+dnl Process this file with autoconf to produce a configure script.
+dnl
+dnl AC_INIT(UnTag.java)
+dnl
+dnl dnl Checks for programs.
+dnl AC_CHECK_CLASSPATH
+dnl AC_PROG_JAVAC
+dnl AC_PROG_JAVA
+dnl
+dnl dnl Checks for classes
+dnl AC_CHECK_RQRD_CLASS(org.xml.sax.Parser)
+dnl AC_CHECK_RQRD_CLASS(com.jclark.xml.sax.Driver)
+dnl
+dnl AC_OUTPUT(Makefile)
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_prog_java.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVA],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test x$JAVAPREFIX = x; then
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT)
+else
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT, $JAVAPREFIX)
+fi
+test x$JAVA = x && AC_MSG_ERROR([no acceptable Java virtual machine found in \$PATH])
+AC_PROG_JAVA_WORKS
+AC_PROVIDE([$0])dnl
+])
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac b/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac
new file mode 100644
index 00000000000..36acd2676fa
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_prog_java_works.ac
@@ -0,0 +1,97 @@
+dnl @synopsis AC_PROG_JAVA_WORKS
+dnl
+dnl Internal use ONLY.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on other. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_prog_java_works.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVA_WORKS], [
+AC_CHECK_PROG(uudecode, uudecode$EXEEXT, yes)
+if test x$uudecode = xyes; then
+AC_CACHE_CHECK([if uudecode can decode base 64 file], ac_cv_prog_uudecode_base64, [
+dnl /**
+dnl * Test.java: used to test if java compiler works.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AFQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAARleGl0AQAEKEkpVgoADQAJBwAOAQAQamF2YS9s
+YW5nL1N5c3RlbQEABjxpbml0PgEAAygpVgwADwAQCgADABEBAApTb3VyY2VG
+aWxlAQAJVGVzdC5qYXZhACEAAQADAAAAAAACAAkABQAGAAEABwAAACEAAQAB
+AAAABQO4AAyxAAAAAQAIAAAACgACAAAACgAEAAsAAQAPABAAAQAHAAAAIQAB
+AAEAAAAFKrcAErEAAAABAAgAAAAKAAIAAAAEAAQABAABABMAAAACABQ=
+====
+EOF
+if uudecode$EXEEXT Test.uue; then
+ ac_cv_prog_uudecode_base64=yes
+else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC
+ echo "configure: failed file was:" >&AC_FD_CC
+ cat Test.uue >&AC_FD_CC
+ ac_cv_prog_uudecode_base64=no
+fi
+rm -f Test.uue])
+fi
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ rm -f Test.class
+ AC_MSG_WARN([I have to compile Test.class from scratch])
+ if test x$ac_cv_prog_javac_works = xno; then
+ AC_MSG_ERROR([Cannot compile java source. $JAVAC does not work properly])
+ fi
+ if test x$ac_cv_prog_javac_works = x; then
+ AC_PROG_JAVAC
+ fi
+fi
+AC_CACHE_CHECK(if $JAVA works, ac_cv_prog_java_works, [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+TEST=Test
+changequote(, )dnl
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+public class Test {
+public static void main (String args[]) {
+ System.exit (0);
+} }
+EOF
+changequote([, ])dnl
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) && test -s $CLASS_TEST; then
+ :
+ else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR(The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?))
+ fi
+fi
+if AC_TRY_COMMAND($JAVA $JAVAFLAGS $TEST) >/dev/null 2>&1; then
+ ac_cv_prog_java_works=yes
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR(The Java VM $JAVA failed (see config.log, check the CLASSPATH?))
+fi
+rm -fr $JAVA_TEST $CLASS_TEST Test.uue
+])
+AC_PROVIDE([$0])dnl
+]
+)
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javac.ac b/storage/bdb/dist/aclocal_java/ac_prog_javac.ac
new file mode 100644
index 00000000000..5ded7d1b7e6
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javac.ac
@@ -0,0 +1,43 @@
+dnl @synopsis AC_PROG_JAVAC
+dnl
+dnl AC_PROG_JAVAC tests an existing Java compiler. It uses the environment
+dnl variable JAVAC then tests in sequence various common Java compilers. For
+dnl political reasons, it starts with the free ones.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVAC=yourcompiler before calling
+dnl AC_PROG_JAVAC
+dnl
+dnl - at the configure level, setenv JAVAC
+dnl
+dnl You can use the JAVAC variable in your Makefile.in, with @JAVAC@.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl TODO: allow to exclude compilers (rationale: most Java programs cannot compile
+dnl with some compilers like guavac).
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on other. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_prog_javac.ac,v 1.3 2001/08/23 17:08:22 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVAC],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT)
+else
+ test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT, $JAVAPREFIX)
+fi
+test "x$JAVAC" = x && AC_MSG_ERROR([no acceptable Java compiler found in \$PATH])
+AC_PROG_JAVAC_WORKS
+AC_PROVIDE([$0])dnl
+])
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac b/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac
new file mode 100644
index 00000000000..139a99f989b
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac
@@ -0,0 +1,35 @@
+dnl @synopsis AC_PROG_JAVAC_WORKS
+dnl
+dnl Internal use ONLY.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_prog_javac_works.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVAC_WORKS],[
+AC_CACHE_CHECK([if $JAVAC works], ac_cv_prog_javac_works, [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+public class Test {
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) >/dev/null 2>&1; then
+  ac_cv_prog_javac_works=yes
+else
+  echo "configure: failed program was:" >&AC_FD_CC
+  cat $JAVA_TEST >&AC_FD_CC
+  AC_MSG_ERROR([The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)])
+fi
+rm -f $JAVA_TEST $CLASS_TEST
+])
+AC_PROVIDE([$0])dnl
+])
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac b/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac
new file mode 100644
index 00000000000..5154d3f1f3b
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac
@@ -0,0 +1,37 @@
+dnl @synopsis AC_PROG_JAVADOC
+dnl
+dnl AC_PROG_JAVADOC tests for an existing javadoc generator. It uses the environment
+dnl variable JAVADOC then tests in sequence various common javadoc generators.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVADOC=yourgenerator before calling
+dnl AC_PROG_JAVADOC
+dnl
+dnl - at the configure level, setenv JAVADOC
+dnl
+dnl You can use the JAVADOC variable in your Makefile.in, with @JAVADOC@.
+dnl
+dnl Note: This macro depends on the autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download that whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl
+dnl The general documentation of those macros, as well as the sample
+dnl configure.in, is included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Egon Willighagen <egonw@sci.kun.nl>
+dnl @version $Id: ac_prog_javadoc.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVADOC],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+        test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc$EXEEXT)
+else
+        test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc$EXEEXT, $JAVAPREFIX)
+fi
+test "x$JAVADOC" = x && AC_MSG_ERROR([no acceptable javadoc generator found in \$PATH])
+AC_PROVIDE([$0])dnl
+])
+
diff --git a/storage/bdb/dist/aclocal_java/ac_prog_javah.ac b/storage/bdb/dist/aclocal_java/ac_prog_javah.ac
new file mode 100644
index 00000000000..1b16d9e24e5
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_prog_javah.ac
@@ -0,0 +1,26 @@
+dnl @synopsis AC_PROG_JAVAH
+dnl
+dnl AC_PROG_JAVAH tests the availability of the javah header generator
+dnl and looks for the jni.h header file. If available, JAVAH is set to
+dnl the full path of javah and CPPFLAGS is updated accordingly.
+dnl
+dnl @author Luc Maisonobe
+dnl @version $Id: ac_prog_javah.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVAH],[
+AC_REQUIRE([AC_CANONICAL_SYSTEM])dnl
+AC_REQUIRE([AC_PROG_CPP])dnl
+AC_PATH_PROG(JAVAH,javah)
+if test x"`eval 'echo $ac_cv_path_JAVAH'`" != x ; then
+ AC_TRY_CPP([#include <jni.h>],,[
+ ac_save_CPPFLAGS="$CPPFLAGS"
+changequote(, )dnl
+ ac_dir=`echo $ac_cv_path_JAVAH | sed 's,\(.*\)/[^/]*/[^/]*$,\1/include,'`
+ ac_machdep=`echo $build_os | sed 's,[-0-9].*,,'`
+changequote([, ])dnl
+ CPPFLAGS="$ac_save_CPPFLAGS -I$ac_dir -I$ac_dir/$ac_machdep"
+ AC_TRY_CPP([#include <jni.h>],
+ ac_save_CPPFLAGS="$CPPFLAGS",
+ AC_MSG_WARN([unable to include <jni.h>]))
+ CPPFLAGS="$ac_save_CPPFLAGS"])
+fi])
diff --git a/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac b/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac
new file mode 100644
index 00000000000..775569ba054
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_try_compile_java.ac
@@ -0,0 +1,39 @@
+dnl @synopsis AC_TRY_COMPILE_JAVA
+dnl
+dnl AC_TRY_COMPILE_JAVA attempts to compile user-given source.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version $Id: ac_try_compile_java.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_TRY_COMPILE_JAVA],[
+AC_REQUIRE([AC_PROG_JAVAC])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class
+then
+dnl Don't remove the temporary files here, so they can be examined.
+ ifelse([$3], , :, [$3])
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat Test.java >&AC_FD_CC
+ifelse([$4], , , [ rm -fr Test*
+ $4
+])dnl
+fi
+rm -fr Test*])
diff --git a/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac b/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac
new file mode 100644
index 00000000000..cf91306aff6
--- /dev/null
+++ b/storage/bdb/dist/aclocal_java/ac_try_run_javac.ac
@@ -0,0 +1,40 @@
+dnl @synopsis AC_TRY_RUN_JAVA
+dnl
+dnl AC_TRY_RUN_JAVA attempts to compile and run user-given source.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version $Id: ac_try_run_javac.ac,v 1.1 2001/08/23 16:58:45 dda Exp $
+dnl
+AC_DEFUN([AC_TRY_RUN_JAVA],[
+AC_REQUIRE([AC_PROG_JAVAC])dnl
+AC_REQUIRE([AC_PROG_JAVA])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class && ($JAVA $JAVAFLAGS Test; exit) 2>/dev/null
+then
+dnl Don't remove the temporary files here, so they can be examined.
+  ifelse([$3], , :, [$3])
+else
+  echo "configure: failed program was:" >&AC_FD_CC
+  cat Test.java >&AC_FD_CC
+ifelse([$4], , , [ rm -fr Test*
+  $4
+])dnl
+fi
+rm -fr Test*])
diff --git a/storage/bdb/dist/buildrel b/storage/bdb/dist/buildrel
new file mode 100644
index 00000000000..b796169c719
--- /dev/null
+++ b/storage/bdb/dist/buildrel
@@ -0,0 +1,109 @@
+#!/bin/sh -
+# $Id: buildrel,v 1.39 2002/09/06 14:30:31 bostic Exp $
+#
+# Build the distribution archives.
+#
+# A set of commands intended to be cut and pasted into a csh window.
+
+# Development tree, release home.
+setenv D `pwd`
+
+# Update the release number.
+cd $D/dist
+vi RELEASE
+setenv VERSION \
+`sh -c '. RELEASE; echo $DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH'`
+echo "Version: $VERSION"
+
+# Make sure the source tree is up-to-date, generate new support files, and
+# commit anything that's changed.
+cd $D && cvs -q update
+cd $D/dist && sh s_all
+cd $D && cvs -q commit
+
+# Copy a development tree into a release tree.
+setenv R /var/tmp/db-$VERSION
+rm -rf $R && mkdir -p $R
+cd $D && tar cf - \
+`cvs -q status | sed -n -e "/Repository/s;.*/CVSROOT/db/;;" -e "s/,v//p"` | \
+(cd $R && tar xpf -)
+
+# Fix symbolic links and permissions.
+cd $R/dist && sh s_perm
+cd $R/dist && sh s_symlink
+
+# Build a version.
+cd $R && rm -rf build_run && mkdir build_run
+cd $R/build_run && ~bostic/bin/dbconf && make >& mklog
+
+# Smoke test.
+cd $R/build_run && ./ex_access
+
+# Build the documentation.
+cd $R/docs_src && sh build clean
+cd $R/docs_src && sh build |& sed '/.html$/d'
+
+# Check the install
+cd $R/build_run && make prefix=`pwd`/BDB install
+
+# Clean up the tree.
+cd $R && rm -rf build_run docs_src
+cd $R && rm -rf test/TODO test/upgrade test_perf test_purify
+cd $R && rm -rf test_server test_thread test_vxworks test_xa
+
+# ACQUIRE ROOT PRIVILEGES
+cd $R && find . -type d | xargs chmod 775
+cd $R && find . -type f | xargs chmod 444
+cd $R && chmod 664 build_win32/*.dsp
+cd $R/dist && sh s_perm
+chown -R 100.100 $R
+# DISCARD ROOT PRIVILEGES
+
+# Compare this release with the last one.
+set LR=3.1.X
+cd $R/.. && gzcat /a/releases/db-${LR}.tar.gz | tar xf -
+cd $R/../db-${LR} && find . | sort > /tmp/__OLD
+cd $R && find . | sort > /tmp/__NEW
+diff -c /tmp/__OLD /tmp/__NEW
+
+# Create the crypto tar archive release.
+setenv T "$R/../db-$VERSION.tar.gz"
+cd $R/.. && tar cf - db-$VERSION | gzip --best > $T
+chmod 444 $T
+
+# Create the non-crypto tree.
+setenv RNC "$R/../db-$VERSION.NC"
+rm -rf $RNC $R/../__TMP && mkdir $R/../__TMP
+cd $R/../__TMP && gzcat $T | tar xpf - && mv -i db-$VERSION $RNC
+cd $R && rm -rf $R/../__TMP
+cd $RNC/dist && sh s_crypto
+
+# ACQUIRE ROOT PRIVILEGES
+cd $RNC && find . -type d | xargs chmod 775
+cd $RNC && find . -type f | xargs chmod 444
+cd $RNC && chmod 664 build_win32/*.dsp
+cd $RNC/dist && sh s_perm
+chown -R 100.100 $RNC
+# DISCARD ROOT PRIVILEGES
+
+# Create the non-crypto tar archive release.
+setenv T "$R/../db-$VERSION.NC.tar.gz"
+cd $RNC/.. && tar cf - db-$VERSION.NC | gzip --best > $T
+chmod 444 $T
+
+# Remove symbolic links to tags files. They're large and we don't want
+# to store real symbolic links in the zip archive for portability reasons.
+# ACQUIRE ROOT PRIVILEGES
+cd $R && rm -f `find . -type l -name 'tags'`
+cd $RNC && rm -f `find . -type l -name 'tags'`
+# DISCARD ROOT PRIVILEGES
+
+# Create the crypto zip archive release.
+setenv T "$R/../db-$VERSION.zip"
+cd $R/.. && zip -r - db-$VERSION > $T
+chmod 444 $T
+
+# Create the non-crypto zip archive release.
+setenv T "$R/../db-$VERSION.NC.zip"
+cd $RNC/.. && zip -r - db-$VERSION.NC > $T
+chmod 444 $T
diff --git a/storage/bdb/dist/config.guess b/storage/bdb/dist/config.guess
new file mode 100755
index 00000000000..fd30ab0314c
--- /dev/null
+++ b/storage/bdb/dist/config.guess
@@ -0,0 +1,1354 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002 Free Software Foundation, Inc.
+
+timestamp='2002-07-23'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Per Bothner <per@bothner.com>.
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub. If it succeeds, it prints the system name on stdout, and
+# exits with 0. Otherwise, it exits with 1.
+#
+# The plan is that this can be called by configure scripts if you
+# don't specify an explicit build system type.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit 0 ;;
+ --version | -v )
+ echo "$version" ; exit 0 ;;
+ --help | --h* | -h )
+ echo "$usage"; exit 0 ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# This shell variable is my proudest work .. or something. --bje
+
+set_cc_for_build='tmpdir=${TMPDIR-/tmp}/config-guess-$$ ;
+(old=`umask` && umask 077 && mkdir $tmpdir && umask $old && unset old)
+ || (echo "$me: cannot create $tmpdir" >&2 && exit 1) ;
+dummy=$tmpdir/dummy ;
+files="$dummy.c $dummy.o $dummy.rel $dummy" ;
+trap '"'"'rm -f $files; rmdir $tmpdir; exit 1'"'"' 1 2 15 ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c $dummy.c -c -o $dummy.o) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ rm -f $files ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ;
+unset files'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep __ELF__ >/dev/null
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit 0 ;;
+ amiga:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ arc:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ hp300:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mac68k:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ macppc:OpenBSD:*:*)
+ echo powerpc-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvme68k:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvme88k:OpenBSD:*:*)
+ echo m88k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvmeppc:OpenBSD:*:*)
+ echo powerpc-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ pmax:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ sgi:OpenBSD:*:*)
+ echo mipseb-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ sun3:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ wgrisc:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ *:OpenBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ alpha:OSF1:*:*)
+ if test $UNAME_RELEASE = "V4.0"; then
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ fi
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ eval $set_cc_for_build
+ cat <<EOF >$dummy.s
+ .data
+\$Lformat:
+ .byte 37,100,45,37,120,10,0 # "%d-%x\n"
+
+ .text
+ .globl main
+ .align 4
+ .ent main
+main:
+ .frame \$30,16,\$26,0
+ ldgp \$29,0(\$27)
+ .prologue 1
+ .long 0x47e03d80 # implver \$0
+ lda \$2,-1
+ .long 0x47e20c21 # amask \$2,\$1
+ lda \$16,\$Lformat
+ mov \$0,\$17
+ not \$1,\$18
+ jsr \$26,printf
+ ldgp \$29,0(\$26)
+ mov 0,\$16
+ jsr \$26,exit
+ .end main
+EOF
+ $CC_FOR_BUILD $dummy.s -o $dummy 2>/dev/null
+ if test "$?" = 0 ; then
+ case `$dummy` in
+ 0-0)
+ UNAME_MACHINE="alpha"
+ ;;
+ 1-0)
+ UNAME_MACHINE="alphaev5"
+ ;;
+ 1-1)
+ UNAME_MACHINE="alphaev56"
+ ;;
+ 1-101)
+ UNAME_MACHINE="alphapca56"
+ ;;
+ 2-303)
+ UNAME_MACHINE="alphaev6"
+ ;;
+ 2-307)
+ UNAME_MACHINE="alphaev67"
+ ;;
+ 2-1307)
+ UNAME_MACHINE="alphaev68"
+ ;;
+ 3-1307)
+ UNAME_MACHINE="alphaev7"
+ ;;
+ esac
+ fi
+ rm -f $dummy.s $dummy && rmdir $tmpdir
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[VTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ exit 0 ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit 0 ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit 0 ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit 0;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit 0 ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit 0 ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit 0 ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit 0;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit 0;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit 0 ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit 0 ;;
+ DRS?6000:UNIX_SV:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7 && exit 0 ;;
+ esac ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ i86pc:SunOS:5.*:*)
+ echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit 0 ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit 0 ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit 0 ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit 0 ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually everything (everything which is not
+ # "atarist" or "atariste" at least should have a processor
+ # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
+ # to the lowercase version "mint" (or "freemint"). Finally
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible to TOS, so this should
+ # be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit 0 ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit 0 ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit 0 ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit 0 ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit 0 ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit 0 ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit 0 ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit 0 ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy \
+ && $dummy `echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` \
+ && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit 0 ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit 0 ;;
+ Night_Hawk:*:*:PowerMAX_OS)
+ echo powerpc-harris-powermax
+ exit 0 ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit 0 ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit 0 ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit 0 ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit 0 ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit 0 ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit 0 ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit 0 ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit 0 ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit 0 ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit 0 ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit 0 ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit 0 ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy && $dummy && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ echo rs6000-ibm-aix3.2.5
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit 0 ;;
+ *:AIX:*:[45])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit 0 ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit 0 ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit 0 ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit 0 ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit 0 ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit 0 ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit 0 ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit 0 ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null) && HP_ARCH=`$dummy`
+ if test -z "$HP_ARCH"; then HP_ARCH=hppa; fi
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ fi ;;
+ esac
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit 0 ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit 0 ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy && $dummy && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ echo unknown-hitachi-hiuxwe2
+ exit 0 ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit 0 ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit 0 ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit 0 ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit 0 ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit 0 ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit 0 ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit 0 ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit 0 ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit 0 ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit 0 ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit 0 ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit 0 ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*T3D:*:*:*)
+ echo alpha-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit 0 ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ *:FreeBSD:*:*)
+ # Determine whether the default compiler uses glibc.
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <features.h>
+ #if __GLIBC__ >= 2
+ LIBC=gnu
+ #else
+ LIBC=
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=`
+ rm -f $dummy.c && rmdir $tmpdir
+ echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`${LIBC:+-$LIBC}
+ exit 0 ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit 0 ;;
+ i*:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit 0 ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit 0 ;;
+ x86:Interix*:3*)
+ echo i386-pc-interix3
+ exit 0 ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+ # UNAME_MACHINE based on the output of uname instead of i386?
+ echo i386-pc-interix
+ exit 0 ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit 0 ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit 0 ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ *:GNU:*:*)
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit 0 ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit 0 ;;
+ arm*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ mips:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef mips
+ #undef mipsel
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=mipsel
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=mips
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=`
+ rm -f $dummy.c && rmdir $tmpdir
+ test x"${CPU}" != x && echo "${CPU}-pc-linux-gnu" && exit 0
+ ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-gnu
+ exit 0 ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-gnu
+ exit 0 ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
+ if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ exit 0 ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-gnu ;;
+ PA8*) echo hppa2.0-unknown-linux-gnu ;;
+ *) echo hppa-unknown-linux-gnu ;;
+ esac
+ exit 0 ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-gnu
+ exit 0 ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux
+ exit 0 ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ x86_64:Linux:*:*)
+ echo x86_64-unknown-linux-gnu
+ exit 0 ;;
+ i*86:Linux:*:*)
+ # The BFD linker knows what the default object file format is, so
+ # first see if it will tell us. cd to the root directory to prevent
+ # problems with other programs or directories called `ld' in the path.
+ # Set LC_ALL=C to ensure ld outputs messages in English.
+ ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
+ | sed -ne '/supported targets:/!d
+ s/[ ][ ]*/ /g
+ s/.*supported targets: *//
+ s/ .*//
+ p'`
+ case "$ld_supported_targets" in
+ elf32-i386)
+ TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
+ ;;
+ a.out-i386-linux)
+ echo "${UNAME_MACHINE}-pc-linux-gnuaout"
+ exit 0 ;;
+ coff-i386)
+ echo "${UNAME_MACHINE}-pc-linux-gnucoff"
+ exit 0 ;;
+ "")
+ # Either a pre-BFD a.out linker (linux-gnuoldld) or
+ # one that does not give us useful --help.
+ echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
+ exit 0 ;;
+ esac
+ # Determine whether the default compiler is a.out or elf
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <features.h>
+ #ifdef __ELF__
+ # ifdef __GLIBC__
+ # if __GLIBC__ >= 2
+ LIBC=gnu
+ # else
+ LIBC=gnulibc1
+ # endif
+ # else
+ LIBC=gnulibc1
+ # endif
+ #else
+ #ifdef __INTEL_COMPILER
+ LIBC=gnu
+ #else
+ LIBC=gnuaout
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=`
+ rm -f $dummy.c && rmdir $tmpdir
+ test x"${LIBC}" != x && echo "${UNAME_MACHINE}-pc-linux-${LIBC}" && exit 0
+ test x"${TENTATIVE}" != x && echo "${TENTATIVE}" && exit 0
+ ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit 0 ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit 0 ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit 0 ;;
+ i*86:*:5:[78]*)
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit 0 ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit 0 ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit 0 ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i386.
+ echo i386-pc-msdosdjgpp
+ exit 0 ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit 0 ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit 0 ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit 0 ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit 0 ;;
+ M68*:*:R3V[567]*:*)
+ test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;;
+ 3[34]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4.3${OS_REL} && exit 0
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && echo i586-ncr-sysv4.3${OS_REL} && exit 0 ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4 && exit 0 ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit 0 ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit 0 ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit 0 ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit 0 ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit 0 ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit 0 ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit 0 ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit 0 ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit 0 ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit 0 ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit 0 ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit 0 ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit 0 ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit 0 ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit 0 ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit 0 ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit 0 ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit 0 ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit 0 ;;
+ *:Darwin:*:*)
+ echo `uname -p`-apple-darwin${UNAME_RELEASE}
+ exit 0 ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit 0 ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit 0 ;;
+ NSR-[GKLNPTVW]:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit 0 ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit 0 ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit 0 ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit 0 ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit 0 ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit 0 ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit 0 ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit 0 ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit 0 ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit 0 ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit 0 ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit 0 ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit 0 ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit 0 ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+# include <sys/param.h>
+# if defined (BSD)
+# if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+# else
+# if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# endif
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# else
+ printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null && $dummy && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+rm -f $dummy.c $dummy && rmdir $tmpdir
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit 0; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit 0 ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit 0 ;;
+ c34*)
+ echo c34-convex-bsd
+ exit 0 ;;
+ c38*)
+ echo c38-convex-bsd
+ exit 0 ;;
+ c4*)
+ echo c4-convex-bsd
+ exit 0 ;;
+ esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+ ftp://ftp.gnu.org/pub/gnu/config/
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/storage/bdb/dist/config.sub b/storage/bdb/dist/config.sub
new file mode 100755
index 00000000000..9ff085efaf7
--- /dev/null
+++ b/storage/bdb/dist/config.sub
@@ -0,0 +1,1460 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002 Free Software Foundation, Inc.
+
+timestamp='2002-07-03'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine. It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit 0 ;;
+ --version | -v )
+ echo "$version" ; exit 0 ;;
+ --help | --h* | -h )
+ echo "$usage"; exit 0 ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit 0;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | freebsd*-gnu* | storm-chaos* | os2-emx* | windows32-* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis)
+ os=
+ basic_machine=$1
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
+ | c4x | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | fr30 | frv \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k \
+ | m32r | m68000 | m68k | m88k | mcore \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64orion | mips64orionel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | ns16k | ns32k \
+ | openrisc | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
+ | pyramid \
+ | sh | sh[1234] | sh3e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc86x | sparclet | sparclite | sparcv9 | sparcv9b \
+ | strongarm \
+ | tahoe | thumb | tic80 | tron \
+ | v850 | v850e \
+ | we32k \
+ | x86 | xscale | xstormy16 | xtensa \
+ | z8k)
+ basic_machine=$basic_machine-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12)
+ # Motorola 68HC11/12.
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* \
+ | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c54x-* \
+ | clipper-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | elxsi-* \
+ | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* \
+ | m32r-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | mcore-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipstx39 | mipstx39el \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
+ | pyramid-* \
+ | romp-* | rs6000-* \
+ | sh-* | sh[1234]-* | sh3e-* | sh[34]eb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc86x-* | sparclet-* | sparclite-* \
+ | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \
+ | tahoe-* | thumb-* | tic30-* | tic54x-* | tic80-* | tron-* \
+ | v850-* | v850e-* | vax-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xps100-* | xscale-* | xstormy16-* \
+ | xtensa-* \
+ | ymp-* \
+ | z8k-*)
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+# I'm not sure what "Sysv32" means. Should this be sysv3.2?
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ mingw32)
+ basic_machine=i386-pc
+ os=-mingw32
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ mmix*)
+ basic_machine=mmix-knuth
+ os=-mmixware
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ or32 | or32-*)
+ basic_machine=or32-unknown
+ os=-coff
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2)
+ basic_machine=i686-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc) basic_machine=powerpc-unknown
+ ;;
+ ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3d)
+ basic_machine=alpha-cray
+ os=-unicos
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tic54x | c54x*)
+ basic_machine=tic54x-unknown
+ os=-coff
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ windows32)
+ basic_machine=i386-pc
+ os=-windows32-msvcrt
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh3 | sh4 | sh3eb | sh4eb | sh[1234]le | sh3ele)
+ basic_machine=sh-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparc | sparcv9 | sparcv9b)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ c4x*)
+ basic_machine=c4x-none
+ os=-coff
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+ # The portable systems comes first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -netbsd* | -openbsd* | -freebsd* | -riscix* \
+ | -lynxos* | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* \
+ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -linux-gnu* | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* | -powermax*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto*)
+ os=-nto-qnx
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ # This also exists in the configure program, but was not the
+ # default.
+ # os=-sunos4
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit 0
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/storage/bdb/dist/configure.ac b/storage/bdb/dist/configure.ac
new file mode 100644
index 00000000000..98cf0f63b39
--- /dev/null
+++ b/storage/bdb/dist/configure.ac
@@ -0,0 +1,611 @@
+# $Id: configure.ac,v 11.156 2002/09/04 13:51:17 bostic Exp $
+# Process this file with autoconf to produce a configure script.
+
+PACKAGE=db
+AC_INIT(Berkeley DB,
+ __EDIT_DB_VERSION__, support@sleepycat.com, db-__EDIT_DB_VERSION__)
+AC_CONFIG_SRCDIR([../db/db.c])
+AC_CONFIG_HEADER(db_config.h:config.hin)
+
+# Configure setup.
+AC_CANONICAL_HOST()
+AC_ARG_PROGRAM()
+
+# We cannot build in the top-level directory.
+AC_MSG_CHECKING(if building in the top-level directory)
+[ test -d db_archive ] && AC_MSG_ERROR([
+Berkeley DB cannot be built in the top-level distribution directory.])
+AC_MSG_RESULT(no)
+
+# Minimum autoconf version required.
+AC_PREREQ(2.53)
+
+# Substitution variables.
+AC_SUBST(ADDITIONAL_INCS)
+AC_SUBST(ADDITIONAL_LANG)
+AC_SUBST(ADDITIONAL_OBJS)
+AC_SUBST(ADDITIONAL_PROGS)
+AC_SUBST(BUILD_TARGET)
+AC_SUBST(CFLAGS)
+AC_SUBST(CONFIGURATION_ARGS)
+AC_SUBST(CONFIGURATION_PATH)
+AC_SUBST(CPPFLAGS)
+AC_SUBST(CXX)
+AC_SUBST(CXXFLAGS)
+AC_SUBST(DEFAULT_LIB)
+AC_SUBST(DEFAULT_LIB_CXX)
+AC_SUBST(EMBEDIX_ECD_CXX)
+AC_SUBST(EMBEDIX_ECD_RPC)
+AC_SUBST(EMBEDIX_ROOT)
+AC_SUBST(INSTALLER)
+AC_SUBST(INSTALL_LIBS)
+AC_SUBST(INSTALL_TARGET)
+AC_SUBST(JAR)
+AC_SUBST(JAVACFLAGS)
+AC_SUBST(LDFLAGS)
+AC_SUBST(LIBJSO_LIBS)
+AC_SUBST(LIBS)
+AC_SUBST(LIBSO_LIBS)
+AC_SUBST(LIBTOOL)
+AC_SUBST(LIBTSO_LIBS)
+AC_SUBST(LIBXSO_LIBS)
+AC_SUBST(LOAD_LIBS)
+AC_SUBST(MAKEFILE_CC)
+AC_SUBST(MAKEFILE_CCLINK)
+AC_SUBST(MAKEFILE_CXX)
+AC_SUBST(MAKEFILE_CXXLINK)
+AC_SUBST(MAKEFILE_SOLINK)
+AC_SUBST(MAKEFILE_XSOLINK)
+AC_SUBST(POSTLINK)
+AC_SUBST(RPC_CLIENT_OBJS)
+AC_SUBST(RPM_POST_INSTALL)
+AC_SUBST(RPM_POST_UNINSTALL)
+AC_SUBST(SOFLAGS)
+AC_SUBST(db_cv_path_embedix_install)
+AC_SUBST(db_cv_path_rpm_archive)
+AC_SUBST(db_int_def)
+AC_SUBST(o)
+
+# RPM needs the current absolute path.
+# RPM needs the list of original arguments, but we don't include the RPM
+# option itself.
+CONFIGURATION_PATH=${PWD-`pwd`}
+CONFIGURATION_ARGS=`echo "$*" |
+ sed -e 's/--with-embedix[[^ ]]*//' -e 's/--with-rpm[[^ ]]*//'`
+
+# Set the version.
+AM_VERSION_SET
+
+# Set the default installation location.
+AC_PREFIX_DEFAULT(/usr/local/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@)
+
+# Process all options before using them.
+AM_OPTIONS_SET
+
+# Set some #defines based on configuration options.
+if test "$db_cv_diagnostic" = yes; then
+ AC_DEFINE(DIAGNOSTIC)
+ AH_TEMPLATE(DIAGNOSTIC,
+ [Define to 1 if you want a version with run-time diagnostic checking.])
+fi
+if test "$db_cv_debug_rop" = yes; then
+ AC_DEFINE(DEBUG_ROP)
+ AH_TEMPLATE(DEBUG_ROP,
+ [Define to 1 if you want a version that logs read operations.])
+fi
+if test "$db_cv_debug_wop" = yes; then
+ AC_DEFINE(DEBUG_WOP)
+ AH_TEMPLATE(DEBUG_WOP,
+ [Define to 1 if you want a version that logs write operations.])
+fi
+if test "$db_cv_umrw" = yes; then
+ AC_DEFINE(UMRW)
+ AH_TEMPLATE(UMRW,
+ [Define to 1 to mask harmless unitialized memory read/writes.])
+
+fi
+if test "$db_cv_test" = yes; then
+ AC_DEFINE(CONFIG_TEST)
+ AH_TEMPLATE(CONFIG_TEST,
+ [Define to 1 if you want to build a version for running the test suite.])
+fi
+
+# Check for programs used in building and installation.
+AM_PROGRAMS_SET
+AC_PROG_INSTALL
+
+# RPM/Embedix support: change the standard make and install targets
+if test "$db_cv_rpm" = "yes"; then
+ BUILD_TARGET="rpm_build"
+ echo "topdir: $CONFIGURATION_PATH" > rpmrc
+ if test "$db_cv_embedix" = "yes"; then
+ EMBEDIX_ROOT="/usr"
+ INSTALL_TARGET="embedix_install"
+ else
+ INSTALL_TARGET="rpm_install"
+ fi
+else
+ BUILD_TARGET="library_build"
+ INSTALL_TARGET="library_install"
+fi
+
+# This is where we handle stuff that autoconf can't handle: compiler,
+# preprocessor and load flags, libraries that the standard tests don't
+# look for. The default optimization is -O. We would like to set the
+# default optimization for systems using gcc to -O2, but we can't. By
+# the time we know we're using gcc, it's too late to set optimization
+# flags.
+#
+# There are additional libraries we need for some compiler/architecture
+# combinations.
+#
+# Some architectures require DB to be compiled with special flags and/or
+# libraries for threaded applications
+#
+# The makefile CC may be different than the CC used in config testing,
+# because the makefile CC may be set to use $(LIBTOOL).
+#
+# XXX
+# Don't override anything if it's already set from the environment.
+optimize_def="-O"
+case "$host_os" in
+aix4.3.*|aix5*)
+ optimize_def="-O2"
+ CC=${CC-"xlc_r"}
+ CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE"
+ LDFLAGS="$LDFLAGS -Wl,-brtl";;
+bsdi3*) optimize_def="-O2"
+ CC=${CC-"shlicc2"}
+ LIBS="$LIBS -lipc";;
+bsdi*) optimize_def="-O2";;
+freebsd*)
+ optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE"
+ LDFLAGS="$LDFLAGS -pthread";;
+hpux*) CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
+irix*) optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_SGI_MP_SOURCE";;
+linux*) optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";;
+mpeix*) CPPFLAGS="$CPPFLAGS -D_POSIX_SOURCE -D_SOCKET_SOURCE"
+ LIBS="$LIBS -lsocket -lsvipc";;
+osf*) CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+ LDFLAGS="$LDFLAGS -pthread";;
+*qnx) AC_DEFINE(HAVE_QNX)
+ AH_TEMPLATE(HAVE_QNX, [Define to 1 if building on QNX.]);;
+solaris*)
+ CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
+esac
+
+# Set CFLAGS/CXXFLAGS. We MUST set the flags before we call autoconf
+# compiler configuration macros, because if we don't, they set CFLAGS
+# to no optimization and -g, which isn't what we want.
+CFLAGS=${CFLAGS-$optimize_def}
+CXXFLAGS=${CXXFLAGS-"$CFLAGS"}
+
+# If the user wants a debugging environment, add -g to the CFLAGS value.
+#
+# XXX
+# Some compilers can't mix optimizing and debug flags. The only way to
+# handle this is to specify CFLAGS in the environment before configuring.
+if test "$db_cv_debug" = yes; then
+ AC_DEFINE(DEBUG)
+ AH_TEMPLATE(DEBUG, [Define to 1 if you want a debugging version.])
+
+ CFLAGS="$CFLAGS -g"
+ CXXFLAGS="$CXXFLAGS -g"
+fi
+
+# The default compiler is cc (NOT gcc), the default CFLAGS is as specified
+# above, NOT what is set by AC_PROG_CC, as it won't set optimization flags
+# for any compiler other than gcc.
+AC_PROG_CC(cc gcc)
+
+# Because of shared library building, the ${CC} used for config tests
+# may be different than the ${CC} we want to put in the Makefile.
+# The latter is known as ${MAKEFILE_CC} in this script.
+MAKEFILE_CC="${CC}"
+MAKEFILE_CCLINK="${CC}"
+MAKEFILE_CXX="nocxx"
+MAKEFILE_CXXLINK="nocxx"
+
+# See if we need the C++ compiler at all. If so, we'd like to find one that
+# interoperates with the C compiler we chose. Since we prefered cc over gcc,
+# we'll also prefer the vendor's compiler over g++/gcc. If we're wrong, the
+# user can set CC and CXX in their environment before running configure.
+#
+# AC_PROG_CXX sets CXX, but it uses $CXX and $CCC (in that order) as its
+# first choices.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$GCC" != "yes"; then
+ case "$host_os" in
+ aix*) AC_CHECK_TOOL(CCC, xlC_r)
+ LIBXSO_LIBS="-lC_r $LIBXSO_LIBS"
+ LIBS="-lC_r $LIBS";;
+ hpux*) AC_CHECK_TOOL(CCC, aCC);;
+ irix*) AC_CHECK_TOOL(CCC, CC);;
+ osf*) AC_CHECK_TOOL(CCC, cxx);;
+ solaris*) AC_CHECK_TOOL(CCC, CC);;
+ esac
+ fi
+ AC_PROG_CXX
+ AC_CXX_HAVE_STDHEADERS
+ MAKEFILE_CXX="${CXX}"
+ MAKEFILE_CXXLINK="${CXX}"
+fi
+
+# Do some gcc specific configuration.
+AC_GCC_CONFIG1
+AC_GCC_CONFIG2
+
+# We need the -Kthread/-pthread flag when compiling on SCO/Caldera's UnixWare
+# and OpenUNIX releases. We can't make the test until we know which compiler
+# we're using.
+case "$host_os" in
+sysv5UnixWare*|sysv5OpenUNIX8*)
+ if test "$GCC" == "yes"; then
+ CPPFLAGS="$CPPFLAGS -pthread"
+ LDFLAGS="$LDFLAGS -pthread"
+ else
+ CPPFLAGS="$CPPFLAGS -Kthread"
+ LDFLAGS="$LDFLAGS -Kthread"
+ fi;;
+esac
+
+# Export our compiler preferences for the libtool configuration.
+export CC CCC
+CCC=CXX
+
+# Libtool configuration.
+AC_PROG_LIBTOOL
+
+LIBTOOL="\$(SHELL) ./libtool"
+SOFLAGS="-rpath \$(libdir)"
+
+# Set SOSUFFIX and friends
+SOSUFFIX_CONFIG
+MODSUFFIX_CONFIG
+JMODSUFFIX_CONFIG
+
+INSTALLER="\$(LIBTOOL) --mode=install cp -p"
+
+MAKEFILE_CC="\$(LIBTOOL) --mode=compile ${MAKEFILE_CC}"
+MAKEFILE_SOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK} -avoid-version"
+MAKEFILE_CCLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK}"
+MAKEFILE_CXX="\$(LIBTOOL) --mode=compile ${MAKEFILE_CXX}"
+MAKEFILE_XSOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK} -avoid-version"
+MAKEFILE_CXXLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK}"
+
+# Configure for shared libraries, static libraries, or both. If both are
+# configured, build the utilities and example programs with shared versions.
+#
+# $o is set to ".o" or ".lo", and is the file suffix used in the Makefile
+# instead of .o
+if test "$enable_shared" = "no"; then
+ DEFAULT_LIB="\$(libdb)"
+ POSTLINK="@true"
+ o=".o"
+fi
+if test "$enable_shared" = "yes"; then
+ DEFAULT_LIB="\$(libso_target)"
+ POSTLINK="\$(LIBTOOL) --mode=execute true"
+ o=".lo"
+fi
+INSTALL_LIBS="$DEFAULT_LIB"
+
+# Optional C++ API.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$enable_shared" = "no"; then
+ DEFAULT_LIB_CXX="\$(libcxx)"
+ fi
+ if test "$enable_shared" = "yes"; then
+ DEFAULT_LIB_CXX="\$(libxso_target)"
+ fi
+ INSTALL_LIBS="$INSTALL_LIBS $DEFAULT_LIB_CXX"
+
+ # Fill in C++ library for Embedix.
+ EMBEDIX_ECD_CXX='<OPTION db-extra>\
+ TYPE=bool\
+ DEFAULT_VALUE=1\
+ PROMPT=Include BerkeleyDB C++ library?\
+ <KEEPLIST>\
+ /usr/include/db_cxx.h\
+ /usr/lib/libdb_cxx-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so\
+ </KEEPLIST>\
+ <PROVIDES>\
+ libdb_cxx-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so\
+ </PROVIDES>\
+ <REQUIRES>\
+ ld-linux.so.2\
+ libc.so.6\
+ </REQUIRES>\
+ STATIC_SIZE=0\
+ STORAGE_SIZE=523612\
+ STARTUP_TIME=0\
+ </OPTION>'
+fi
+
+# Optional Java API.
+if test "$db_cv_java" = "yes"; then
+ # Java requires shared libraries.
+ if test "$enable_shared" = "no"; then
+ AC_MSG_ERROR([Java requires shared libraries])
+ fi
+
+ AC_PROG_JAVAC
+ AC_PROG_JAR
+ AC_JNI_INCLUDE_DIR
+
+ for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
+ do
+ CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
+ done
+
+ ADDITIONAL_LANG="$ADDITIONAL_LANG java"
+ INSTALL_LIBS="$INSTALL_LIBS \$(libjso_target)"
+else
+ JAVAC=nojavac
+fi
+
+# Optional RPC client/server.
+if test "$db_cv_rpc" = "yes"; then
+ AC_DEFINE(HAVE_RPC)
+ AH_TEMPLATE(HAVE_RPC, [Define to 1 if building RPC client/server.])
+
+ RPC_CLIENT_OBJS="\$(RPC_CLIENT_OBJS)"
+ ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS"
+
+ EMBEDIX_ECD_RPC="/usr/bin/berkeley_db_svc"
+
+ case "$host_os" in
+ hpux*)
+ AC_CHECK_FUNC(svc_run,,
+ AC_CHECK_LIB(nsl, svc_run,
+ LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"));;
+ solaris*)
+ AC_CHECK_FUNC(svc_run,, AC_CHECK_LIB(nsl, svc_run));;
+ esac
+fi
+
+AM_TCL_LOAD
+
+# Optional crypto support.
+if test -d "$srcdir/../crypto"; then
+ AC_DEFINE(HAVE_CRYPTO)
+ AH_TEMPLATE(HAVE_CRYPTO,
+ [Define to 1 if Berkeley DB release includes strong cryptography.])
+ ADDITIONAL_OBJS="aes_method${o} crypto${o} mt19937db${o} rijndael-alg-fst${o} rijndael-api-fst${o} $ADDITIONAL_OBJS"
+fi
+
+# Optional DB 1.85 compatibility API.
+if test "$db_cv_compat185" = "yes"; then
+ ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS"
+ ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS"
+fi
+
+# Optional utilities.
+if test "$db_cv_dump185" = "yes"; then
+ ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS"
+fi
+
+# Checks for compiler characteristics.
+AC_C_CONST
+
+# Checks for include files, structures, C types.
+AC_HEADER_STAT
+AC_HEADER_TIME
+AC_HEADER_DIRENT
+AC_CHECK_HEADERS(sys/select.h sys/time.h)
+AC_CHECK_MEMBERS([struct stat.st_blksize])
+AM_TYPES
+
+AC_CACHE_CHECK([for ANSI C exit success/failure values], db_cv_exit_defines, [
+AC_TRY_COMPILE([#include <stdlib.h>], return (EXIT_SUCCESS);,
+ [db_cv_exit_defines=yes], [db_cv_exit_defines=no])])
+if test "$db_cv_exit_defines" = yes; then
+ AC_DEFINE(HAVE_EXIT_SUCCESS)
+ AH_TEMPLATE(HAVE_EXIT_SUCCESS,
+ [Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines.])
+fi
+
+# Test for various functions/libraries that the test and example programs use:
+# sched_yield function
+# pthreads, socket and math libraries
+AC_CHECK_FUNC(sched_yield,,
+ AC_SEARCH_LIBS(sched_yield, rt, LOAD_LIBS="$LOAD_LIBS -lrt"))
+
+# XXX
+# We can't check for pthreads in the same way we did the test for sched_yield
+# because the Solaris C library includes pthread interfaces which are not
+# thread-safe. For that reason we always add -lpthread if we find a pthread
+# library. Also we can't depend on any specific call existing (pthread_create,
+# for example), as it may be #defined in an include file -- OSF/1 (Tru64) has
+# this problem.
+AC_HAVE_LIBRARY(pthread, LOAD_LIBS="$LOAD_LIBS -lpthread")
+
+# XXX
+# We could be more exact about whether these libraries are needed, but we don't
+# bother -- if they exist, we load them.
+AC_HAVE_LIBRARY(m, LOAD_LIBS="$LOAD_LIBS -lm")
+AC_HAVE_LIBRARY(socket, LOAD_LIBS="$LOAD_LIBS -lsocket")
+AC_HAVE_LIBRARY(nsl, LOAD_LIBS="$LOAD_LIBS -lnsl")
+
+# Check for mutexes.
+# We do this here because it changes $LIBS.
+AM_DEFINE_MUTEXES
+
+# Checks for system functions for which we have replacements.
+#
+# XXX
+# The only portable getcwd call is getcwd(char *, size_t), where the
+# buffer is non-NULL -- Solaris can't handle a NULL buffer, and they
+# deleted getwd().
+AC_REPLACE_FUNCS(getcwd getopt memcmp memcpy memmove raise)
+AC_REPLACE_FUNCS(snprintf strcasecmp strdup strerror vsnprintf)
+
+# Check for system functions we optionally use.
+AC_CHECK_FUNCS(_fstati64 clock_gettime directio gettimeofday getuid)
+AC_CHECK_FUNCS(pstat_getdynamic sched_yield select strtoul sysconf yield)
+
+# Checks for system functions for which we don't have replacements.
+# We require qsort(3).
+AC_CHECK_FUNCS(qsort, , AC_MSG_ERROR([No qsort library function.]))
+
+# Pread/pwrite.
+# HP-UX has pread/pwrite, but it doesn't work with largefile support.
+case "$host_os" in
+hpux*)
+ AC_MSG_WARN([pread/pwrite interfaces ignored on $host_os.]);;
+*) AC_CHECK_FUNCS(pread pwrite)
+esac
+
+# Check for fcntl(2) to deny child process access to file descriptors.
+AC_CACHE_CHECK([for fcntl/F_SETFD], db_cv_fcntl_f_setfd, [
+AC_TRY_LINK([
+#include <sys/types.h>
+#include <fcntl.h>], [
+ fcntl(1, F_SETFD, 1);
+], [db_cv_fcntl_f_setfd=yes], [db_cv_fcntl_f_setfd=no])])
+if test "$db_cv_fcntl_f_setfd" = yes; then
+ AC_DEFINE(HAVE_FCNTL_F_SETFD)
+ AH_TEMPLATE(HAVE_FCNTL_F_SETFD,
+ [Define to 1 if fcntl/F_SETFD denies child access to file descriptors.])
+fi
+
+# A/UX has a broken getopt(3).
+case "$host_os" in
+aux*) ADDITIONAL_OBJS="getopt${o} $ADDITIONAL_OBJS";;
+esac
+
+# Linux has the O_DIRECT flag, but you can't actually use it.
+AC_CACHE_CHECK([for open/O_DIRECT], db_cv_open_o_direct, [
+echo "test for working open/O_DIRECT" > __o_direct_file
+AC_TRY_RUN([
+#include <sys/types.h>
+#include <fcntl.h>
+main() {
+int c, fd = open("__o_direct_file", O_RDONLY | O_DIRECT, 0);
+exit ((fd == -1) || (read(fd, &c, 1) != 1));
+}], [db_cv_open_o_direct=yes], [db_cv_open_o_direct=no],
+AC_TRY_LINK([
+#include <sys/types.h>
+#include <fcntl.h>], [
+ open("__o_direct_file", O_RDONLY | O_DIRECT, 0);
+], [db_cv_open_o_direct=yes], [db_cv_open_o_direct=no]))
+rm -f __o_direct_file])
+if test "$db_cv_open_o_direct" = yes; then
+ AC_DEFINE(HAVE_O_DIRECT)
+ AH_TEMPLATE(HAVE_O_DIRECT, [Define to 1 if you have the O_DIRECT flag.])
+fi
+
+# Check for largefile support.
+AC_SYS_LARGEFILE
+
+# Figure out how to create shared regions.
+#
+# First, we look for mmap.
+#
+# BSD/OS has mlock(2), but it doesn't work until the 4.1 release.
+#
+# Nextstep (version 3.3) apparently supports mmap(2) (the mmap symbol
+# is defined in the C library) but does not support munmap(2). Don't
+# try to use mmap if we can't find munmap.
+#
+# Ultrix has mmap(2), but it doesn't work.
+mmap_ok=no
+case "$host_os" in
+bsdi3*|bsdi4.0)
+ AC_MSG_WARN([mlock(2) interface ignored on BSD/OS 3.X and 4.0.])
+ mmap_ok=yes
+ AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
+ultrix*)
+ AC_MSG_WARN([mmap(2) interface ignored on Ultrix.]);;
+*)
+ mmap_ok=yes
+ AC_CHECK_FUNCS(mlock munlock)
+ AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
+esac
+
+# Second, we look for shmget.
+#
+# SunOS has the shmget(2) interfaces, but there appears to be a missing
+# #include <debug/debug.h> file, so we ignore them.
+shmget_ok=no
+case "$host_os" in
+sunos*)
+ AC_MSG_WARN([shmget(2) interface ignored on SunOS.]);;
+*)
+ shmget_ok=yes
+ AC_CHECK_FUNCS(shmget, , shmget_ok=no);;
+esac
+
+# We require either mmap/munmap(2) or shmget(2).
+if test "$mmap_ok" = no -a "$shmget_ok" = no; then
+ AC_MSG_WARN([Neither mmap/munmap(2) or shmget(2) library functions.])
+fi
+
+# If we're not doing version name substitution, DB_VERSION_UNIQUE_NAME
+# needs to be erased.
+if test "$db_cv_uniquename" = "no"; then
+ DB_VERSION_UNIQUE_NAME=""
+fi
+
+# This is necessary so that .o files in LIBOBJS are also built via
+# the ANSI2KNR-filtering rules.
+LIB@&t@OBJS=`echo "$LIB@&t@OBJS" |
+ sed 's,\.[[^.]]* ,$U&,g;s,\.[[^.]]*$,$U&,'`
+LTLIBOBJS=`echo "$LIB@&t@OBJS" |
+ sed "s,\.[[^.]]* ,$o ,g;s,\.[[^.]]*$,$o,"`
+AC_SUBST(LTLIBOBJS)
+
+# Initial output file list.
+CREATE_LIST="Makefile
+ db_cxx.h:$srcdir/../dbinc/db_cxx.in
+ db_int.h:$srcdir/../dbinc/db_int.in
+ include.tcl:$srcdir/../test/include.tcl"
+
+# Create the db.h file from a source file, a list of global function
+# prototypes, and, if configured for unique names, a list of #defines
+# to do DB_VERSION_UNIQUE_NAME substitution.
+if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_def.in:$srcdir/../dbinc_auto/ext_prot.in"
+else
+ CREATE_LIST="$CREATE_LIST
+ db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_prot.in"
+fi
+
+# If configured for unique names, create the db_int_uext.h file (which
+# does the DB_VERSION_UNIQUE_NAME substitution), which is included by
+# the db_int.h file.
+if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db_int_def.h:$srcdir/../dbinc_auto/int_def.in"
+ db_int_def='#include "db_int_def.h"'
+fi
+
+# Create the db_185.h and db185_int.h files from source files, a list of
+# global function prototypes, and, if configured for unique names, a list
+# of #defines to do DB_VERSION_UNIQUE_NAME substitution.
+if test "$db_cv_compat185" = "yes"; then
+ if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in
+ db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in"
+ else
+ CREATE_LIST="$CREATE_LIST
+ db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_prot.in
+ db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_prot.in"
+ fi
+fi
+
+if test "$db_cv_embedix" = "yes"; then
+ CREATE_LIST="$CREATE_LIST db.ecd:../dist/db.ecd.in"
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ CREATE_LIST="$CREATE_LIST db.spec:../dist/db.spec.in"
+fi
+
+AC_CONFIG_FILES($CREATE_LIST)
+AC_OUTPUT
diff --git a/storage/bdb/dist/db.ecd.in b/storage/bdb/dist/db.ecd.in
new file mode 100644
index 00000000000..92a6a090716
--- /dev/null
+++ b/storage/bdb/dist/db.ecd.in
@@ -0,0 +1,64 @@
+# Embedix Componenet Description (ECD) file for BerkeleyDB.
+#
+# $Id: db.ecd.in,v 11.1 2001/04/04 14:06:13 bostic Exp $
+
+<GROUP System>
+<GROUP Library>
+<COMPONENT BerkeleyDB>
+ SRPM=db
+ <SPECPATCH></SPECPATCH>
+ <HELP>
+ Berkeley DB is Sleepycat Software's programmatic database toolkit.
+ </HELP>
+
+ TYPE=bool
+ DEFAULT_VALUE=1
+ PROMPT=Include BerkeleyDB library?
+ <KEEPLIST>
+ /usr/lib/libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so
+ /usr/include/db.h
+ /usr/lib/libdb.so
+ </KEEPLIST>
+ <PROVIDES>
+ libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so
+ </PROVIDES>
+ <REQUIRES>
+ ld-linux.so.2
+ libc.so.6
+ </REQUIRES>
+ STATIC_SIZE=0
+ STARTUP_TIME=0
+
+ @EMBEDIX_ECD_CXX@
+
+ <OPTION db-extra>
+ TYPE=bool
+ DEFAULT_VALUE=1
+ PROMPT=Include BerkeleyDB Utilities?
+ <KEEPLIST>
+ /usr/bin/db_archive
+ /usr/bin/db_checkpoint
+ /usr/bin/db_deadlock
+ /usr/bin/db_dump
+ /usr/bin/db_load
+ /usr/bin/db_printlog
+ /usr/bin/db_recover
+ /usr/bin/db_stat
+ /usr/bin/db_upgrade
+ /usr/bin/db_verify
+ @EMBEDIX_ECD_RPC@
+ </KEEPLIST>
+ <REQUIRES>
+ libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so
+ ld-linux.so.2
+ libc.so.6
+ libdl.so.2
+ libm.so.6
+ </REQUIRES>
+ STATIC_SIZE=0
+ STARTUP_TIME=0
+ </OPTION>
+
+</COMPONENT>
+</GROUP>
+</GROUP>
diff --git a/storage/bdb/dist/db.spec.in b/storage/bdb/dist/db.spec.in
new file mode 100644
index 00000000000..ef253bcfcf4
--- /dev/null
+++ b/storage/bdb/dist/db.spec.in
@@ -0,0 +1,52 @@
+# Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+
+Summary: Sleepycat Berkeley DB database library
+Name: db
+Version: @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+Release: 1
+Copyright: Freely redistributable, see LICENSE for details.
+Source: http://www.sleepycat.com/update/@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@.tar.gz
+URL: http://www.sleepycat.com
+Group: System Environment/Libraries
+BuildRoot: @CONFIGURATION_PATH@/RPM_INSTALL
+
+%description
+Berkeley DB is a programmatic toolkit that provides fast, reliable,
+mission-critical, and scalable built-in database support for software
+ranging from embedded applications running on hand-held appliances to
+enterprise-scale servers.
+
+The Berkeley DB access methods include B+tree, Extended Linear Hashing,
+Fixed and Variable-length records, and Persistent Queues. Berkeley DB
+provides full transactional support, database recovery, online backups,
+and separate access to locking, logging and shared memory caching
+subsystems.
+
+Berkeley DB supports C, C++, Java, Tcl, Perl, and Python APIs. The
+software is available for Linux, a wide variety of UNIX platforms,
+Windows 95/98, Windows/NT, Windows 2000, VxWorks and QNX.
+
+%prep
+%setup
+
+%build
+cd build_unix
+CFLAGS="$RPM_OPT_FLAGS" ../dist/configure @CONFIGURATION_ARGS@
+make library_build
+
+%install
+cd build_unix
+make prefix=@CONFIGURATION_PATH@/RPM_INSTALL@EMBEDIX_ROOT@ install
+
+@RPM_POST_INSTALL@
+
+@RPM_POST_UNINSTALL@
+
+%files
+%defattr(-,root,root)
+%dir @EMBEDIX_ROOT@/bin
+%dir @EMBEDIX_ROOT@/docs
+%dir @EMBEDIX_ROOT@/include
+%dir @EMBEDIX_ROOT@/lib
+
+%changelog
diff --git a/storage/bdb/dist/gen_inc.awk b/storage/bdb/dist/gen_inc.awk
new file mode 100644
index 00000000000..2f5b491cda1
--- /dev/null
+++ b/storage/bdb/dist/gen_inc.awk
@@ -0,0 +1,73 @@
+# This awk script parses C input files looking for lines marked "PUBLIC:"
+# and "EXTERN:". (PUBLIC lines are DB internal function prototypes and
+# #defines, EXTERN are DB external function prototypes and #defines.)
+#
+# PUBLIC lines are put into two versions of per-directory include files:
+# one file that contains the prototypes, and one file that contains a
+# #define for the name to be processed during configuration when creating
+# unique names for every global symbol in the DB library.
+#
+# The EXTERN lines are put into two files: one of which contains prototypes
+# which are always appended to the db.h file, and one of which contains a
+# #define list for use when creating unique symbol names.
+#
+# Four arguments:
+# e_dfile list of EXTERN #defines
+# e_pfile include file that contains EXTERN prototypes
+# i_dfile list of internal (PUBLIC) #defines
+# i_pfile include file that contains internal (PUBLIC) prototypes
+/PUBLIC:/ {
+ sub("^.*PUBLIC:[ ][ ]*", "")
+ if ($0 ~ /^#(if|ifdef|ifndef|else|endif)/) {
+ print $0 >> i_pfile
+ print $0 >> i_dfile
+ next
+ }
+ pline = sprintf("%s %s", pline, $0)
+ if (pline ~ /\)\);/) {
+ sub("^[ ]*", "", pline)
+ print pline >> i_pfile
+ if (pline !~ db_version_unique_name) {
+ sub("[ ][ ]*__P.*", "", pline)
+ sub("^.*[ ][*]*", "", pline)
+ printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n",
+ pline, pline) >> i_dfile
+ }
+ pline = ""
+ }
+}
+
+# When we switched to methods in 4.0, we guessed txn_{abort,begin,commit}
+# were the interfaces applications would likely use and not be willing to
+# change, due to the sheer volume of the calls. Provide wrappers -- we
+# could do txn_abort and txn_commit using macros, but not txn_begin, as
+# the name of the field is txn_begin, we didn't want to modify it.
+#
+# The issue with txn_begin hits us in another way. If configured with the
+# --with-uniquename option, we use #defines to re-define DB's interfaces
+# to unique names. We can't do that for these functions because txn_begin
+# is also a field name in the DB_ENV structure, and the #defines we use go
+# at the end of the db.h file -- we get control too late to #define a field
+# name. So, modify the script that generates the unique names #defines to
+# not generate them for these three functions, and don't include the three
+# functions in libraries built with that configuration option.
+/EXTERN:/ {
+ sub("^.*EXTERN:[ ][ ]*", "")
+ if ($0 ~ /^#(if|ifdef|ifndef|else|endif)/) {
+ print $0 >> e_pfile
+ print $0 >> e_dfile
+ next
+ }
+ eline = sprintf("%s %s", eline, $0)
+ if (eline ~ /\)\);/) {
+ sub("^[ ]*", "", eline)
+ print eline >> e_pfile
+ if (eline !~ db_version_unique_name && eline !~ /^int txn_/) {
+ sub("[ ][ ]*__P.*", "", eline)
+ sub("^.*[ ][*]*", "", eline)
+ printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n",
+ eline, eline) >> e_dfile
+ }
+ eline = ""
+ }
+}
diff --git a/storage/bdb/dist/gen_rec.awk b/storage/bdb/dist/gen_rec.awk
new file mode 100644
index 00000000000..75f2e86ca9e
--- /dev/null
+++ b/storage/bdb/dist/gen_rec.awk
@@ -0,0 +1,844 @@
+#!/bin/sh -
+#
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: gen_rec.awk,v 11.70 2002/08/08 15:44:47 bostic Exp $
+#
+
+# This awk script generates all the log, print, and read routines for the DB
+# logging. It also generates a template for the recovery functions (these
+# functions must still be edited, but are highly stylized and the initial
+# template gets you a fair way along the path).
+#
+# For a given file prefix.src, we generate a file prefix_auto.c, and a file
+# prefix_auto.h that contains:
+#
+# external declarations for the file's functions
+# defines for the physical record types
+# (logical types are defined in each subsystem manually)
+# structures to contain the data unmarshalled from the log.
+#
+# This awk script requires that four variables be set when it is called:
+#
+# source_file -- the C source file being created
+# header_file -- the C #include file being created
+# template_file -- the template file being created
+#
+# And stdin must be the input file that defines the recovery setup.
+#
+# Within each file prefix.src, we use a number of public keywords (documented
+# in the reference guide) as well as the following ones which are private to
+# DB:
+# DBPRIVATE Indicates that a file will be built as part of DB,
+# rather than compiled independently, and so can use
+# DB-private interfaces (such as DB_NOCOPY).
+# DB A DB handle. Logs the dbreg fileid for that handle,
+# and makes the *_log interface take a DB * instead of a
+# DB_ENV *.
+# PGDBT Just like DBT, only we know it stores a page or page
+# header, so we can byte-swap it (once we write the
+# byte-swapping code, which doesn't exist yet).
+# WRLOCK
+# WRLOCKNZ An ARG that stores a db_pgno_t, which the getpgnos
+# function should acquire a lock on. WRLOCK implies
+# that we should always get the lock; WRLOCKNZ implies
+# that we should do so if and only if the pgno is non-zero
+# (unfortunately, 0 is both PGNO_INVALID and the main
+# metadata page number).
+
+BEGIN {
+ if (source_file == "" ||
+ header_file == "" || template_file == "") {
+ print "Usage: gen_rec.awk requires three variables to be set:"
+ print "\tsource_file\t-- the C source file being created"
+ print "\theader_file\t-- the C #include file being created"
+ print "\ttemplate_file\t-- the template file being created"
+ exit
+ }
+ FS="[\t ][\t ]*"
+ CFILE=source_file
+ HFILE=header_file
+ TFILE=template_file
+ dbprivate = 0
+}
+/^[ ]*DBPRIVATE/ {
+ dbprivate = 1
+}
+/^[ ]*PREFIX/ {
+ prefix = $2
+ num_funcs = 0;
+
+ # Start .c file.
+ printf("/* Do not edit: automatically built by gen_rec.awk. */\n") \
+ > CFILE
+
+ # Start .h file, make the entire file conditional.
+ printf("/* Do not edit: automatically built by gen_rec.awk. */\n\n") \
+ > HFILE
+ printf("#ifndef\t%s_AUTO_H\n#define\t%s_AUTO_H\n", prefix, prefix) \
+ >> HFILE;
+
+ # Write recovery template file headers
+ # This assumes we're doing DB recovery.
+ printf("#include \"db_config.h\"\n\n") > TFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
+ printf("#include <sys/types.h>\n\n") >> TFILE
+ printf("#include <string.h>\n") >> TFILE
+ printf("#endif\n\n") >> TFILE
+ printf("#include \"db_int.h\"\n") >> TFILE
+ printf("#include \"dbinc/db_page.h\"\n") >> TFILE
+ printf("#include \"dbinc/%s.h\"\n", prefix) >> TFILE
+ printf("#include \"dbinc/log.h\"\n\n") >> TFILE
+}
+/^[ ]*INCLUDE/ {
+ if ($3 == "")
+ printf("%s\n", $2) >> CFILE
+ else
+ printf("%s %s\n", $2, $3) >> CFILE
+}
+/^[ ]*(BEGIN|IGNORED)/ {
+ if (in_begin) {
+ print "Invalid format: missing END statement"
+ exit
+ }
+ in_begin = 1;
+ is_dbt = 0;
+ has_dbp = 0;
+ is_uint = 0;
+ need_log_function = ($1 == "BEGIN");
+ nvars = 0;
+
+ # number of locks that the getpgnos functions will return
+ nlocks = 0;
+
+ thisfunc = $2;
+ funcname = sprintf("%s_%s", prefix, $2);
+
+ rectype = $3;
+
+ funcs[num_funcs] = funcname;
+ ++num_funcs;
+}
+/^[ ]*(DB|ARG|DBT|PGDBT|POINTER|WRLOCK|WRLOCKNZ)/ {
+ vars[nvars] = $2;
+ types[nvars] = $3;
+ atypes[nvars] = $1;
+ modes[nvars] = $1;
+ formats[nvars] = $NF;
+ for (i = 4; i < NF; i++)
+ types[nvars] = sprintf("%s %s", types[nvars], $i);
+
+ if ($1 == "DB") {
+ has_dbp = 1;
+ }
+
+ if ($1 == "DB" || $1 == "ARG" || $1 == "WRLOCK" || $1 == "WRLOCKNZ") {
+ sizes[nvars] = sprintf("sizeof(u_int32_t)");
+ is_uint = 1;
+ } else if ($1 == "POINTER")
+ sizes[nvars] = sprintf("sizeof(*%s)", $2);
+ else { # DBT, PGDBT
+ sizes[nvars] = \
+ sprintf("sizeof(u_int32_t) + (%s == NULL ? 0 : %s->size)", \
+ $2, $2);
+ is_dbt = 1;
+ }
+ nvars++;
+}
+/^[ ]*(WRLOCK|WRLOCKNZ)/ {
+ nlocks++;
+
+ if ($1 == "WRLOCK") {
+ lock_if_zero[nlocks] = 1;
+ } else {
+ lock_if_zero[nlocks] = 0;
+ }
+
+ lock_pgnos[nlocks] = $2;
+}
+/^[ ]*END/ {
+ if (!in_begin) {
+ print "Invalid format: missing BEGIN statement"
+ exit;
+ }
+
+ # Declare the record type.
+ printf("#define\tDB_%s\t%d\n", funcname, rectype) >> HFILE
+
+ # Structure declaration.
+ printf("typedef struct _%s_args {\n", funcname) >> HFILE
+
+ # Here are the required fields for every structure
+ printf("\tu_int32_t type;\n\tDB_TXN *txnid;\n") >> HFILE
+ printf("\tDB_LSN prev_lsn;\n") >>HFILE
+
+ # Here are the specified fields.
+ for (i = 0; i < nvars; i++) {
+ t = types[i];
+ if (modes[i] == "POINTER") {
+ ndx = index(t, "*");
+ t = substr(types[i], 0, ndx - 2);
+ }
+ printf("\t%s\t%s;\n", t, vars[i]) >> HFILE
+ }
+ printf("} %s_args;\n\n", funcname) >> HFILE
+
+ # Output the log, print, read, and getpgnos functions.
+ if (need_log_function) {
+ log_function();
+
+ # The getpgnos function calls DB-private (__rep_*) functions,
+ # so we only generate it for our own logging functions,
+ # not application-specific ones.
+ if (dbprivate) {
+ getpgnos_function();
+ }
+ }
+ print_function();
+ read_function();
+
+ # Recovery template
+ cmd = sprintf(\
+ "sed -e s/PREF/%s/ -e s/FUNC/%s/ < template/rec_ctemp >> %s",
+ prefix, thisfunc, TFILE)
+ system(cmd);
+
+ # Done writing stuff, reset and continue.
+ in_begin = 0;
+}
+
+END {
+ # End the conditional for the HFILE
+ printf("#endif\n") >> HFILE;
+
+ # Print initialization routine; function prototype
+ p[1] = sprintf("int %s_init_print %s%s", prefix,
+ "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ",
+ "db_recops, void *), size_t *));");
+ p[2] = "";
+ proto_format(p);
+
+ # Create the routine to call __db_add_recovery(print_fn, id)
+ printf("int\n%s_init_print(dbenv, dtabp, dtabsizep)\n", \
+ prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;;
+ printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE;
+ printf(" db_recops, void *));\n") >> CFILE;
+ printf("\tsize_t *dtabsizep;\n{\n") >> CFILE;
+ # If application-specific, the user will need a prototype for
+ # __db_add_recovery, since they won't have DB's.
+ if (!dbprivate) {
+ printf("\tint __db_add_recovery __P((DB_ENV *,\n") >> CFILE;
+ printf(\
+"\t int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),\n") >> CFILE;
+ printf("\t size_t *,\n") >> CFILE;
+ printf(\
+"\t int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));\n") \
+ >> CFILE;
+ }
+
+ printf("\tint ret;\n\n") >> CFILE;
+ for (i = 0; i < num_funcs; i++) {
+ printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE;
+ printf("dtabp, dtabsizep,\n") >> CFILE;
+ printf("\t %s_print, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\treturn (0);\n}\n\n") >> CFILE;
+
+ # We only want to generate *_init_{getpgnos,recover} functions
+ # if this is a DB-private, rather than application-specific,
+ # set of recovery functions. Application-specific recovery functions
+ # should be dispatched using the DB_ENV->set_app_dispatch callback
+ # rather than a DB dispatch table ("dtab").
+ if (!dbprivate)
+ exit
+
+ # Page number initialization routine; function prototype
+ p[1] = sprintf("int %s_init_getpgnos %s%s", prefix,
+ "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ",
+ "db_recops, void *), size_t *));");
+ p[2] = "";
+ proto_format(p);
+
+ # Create the routine to call db_add_recovery(pgno_fn, id)
+ printf("int\n%s_init_getpgnos(dbenv, dtabp, dtabsizep)\n", \
+ prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE;
+ printf(" db_recops, void *));\n") >> CFILE;
+ printf("\tsize_t *dtabsizep;\n{\n\tint ret;\n\n") >> CFILE;
+ for (i = 0; i < num_funcs; i++) {
+ printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE;
+ printf("dtabp, dtabsizep,\n") >> CFILE;
+ printf("\t %s_getpgnos, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\treturn (0);\n}\n\n") >> CFILE;
+
+ # Recover initialization routine
+ p[1] = sprintf("int %s_init_recover %s%s", prefix,
+ "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ",
+ "db_recops, void *), size_t *));");
+ p[2] = "";
+ proto_format(p);
+
+ # Create the routine to call db_add_recovery(func, id)
+ printf("int\n%s_init_recover(dbenv, dtabp, dtabsizep)\n", \
+ prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE;
+ printf(" db_recops, void *));\n") >> CFILE;
+ printf("\tsize_t *dtabsizep;\n{\n\tint ret;\n\n") >> CFILE;
+ for (i = 0; i < num_funcs; i++) {
+ printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE;
+ printf("dtabp, dtabsizep,\n") >> CFILE;
+ printf("\t %s_recover, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\treturn (0);\n}\n") >> CFILE;
+}
+
+function log_function() {
+ # Write the log function; function prototype
+ pi = 1;
+ p[pi++] = sprintf("int %s_log", funcname);
+ p[pi++] = " ";
+ if (has_dbp == 1) {
+ p[pi++] = "__P((DB *, DB_TXN *, DB_LSN *, u_int32_t";
+ } else {
+ p[pi++] = "__P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t";
+ }
+ for (i = 0; i < nvars; i++) {
+ if (modes[i] == "DB")
+ continue;
+ p[pi++] = ", ";
+ p[pi++] = sprintf("%s%s%s",
+ (modes[i] == "DBT" || modes[i] == "PGDBT") ? "const " : "",
+ types[i],
+ (modes[i] == "DBT" || modes[i] == "PGDBT") ? " *" : "");
+ }
+ p[pi++] = "";
+ p[pi++] = "));";
+ p[pi++] = "";
+ proto_format(p);
+
+ # Function declaration
+ if (has_dbp == 1) {
+ printf("int\n%s_log(dbp, txnid, ret_lsnp, flags", \
+ funcname) >> CFILE;
+ } else {
+ printf("int\n%s_log(dbenv, txnid, ret_lsnp, flags", \
+ funcname) >> CFILE;
+ }
+ for (i = 0; i < nvars; i++) {
+ if (modes[i] == "DB") {
+ # We pass in fileids on the dbp, so if this is one,
+ # skip it.
+ continue;
+ }
+ printf(",") >> CFILE;
+ if ((i % 6) == 0)
+ printf("\n ") >> CFILE;
+ else
+ printf(" ") >> CFILE;
+ printf("%s", vars[i]) >> CFILE;
+ }
+ printf(")\n") >> CFILE;
+
+ # Now print the parameters
+ if (has_dbp == 1) {
+ printf("\tDB *dbp;\n") >> CFILE;
+ } else {
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ }
+ printf("\tDB_TXN *txnid;\n\tDB_LSN *ret_lsnp;\n") >> CFILE;
+ printf("\tu_int32_t flags;\n") >> CFILE;
+ for (i = 0; i < nvars; i++) {
+ # We just skip for modes == DB.
+ if (modes[i] == "DBT" || modes[i] == "PGDBT")
+ printf("\tconst %s *%s;\n", types[i], vars[i]) >> CFILE;
+ else if (modes[i] != "DB")
+ printf("\t%s %s;\n", types[i], vars[i]) >> CFILE;
+ }
+
+ # Function body and local decls
+ printf("{\n") >> CFILE;
+ printf("\tDBT logrec;\n") >> CFILE;
+ if (has_dbp == 1)
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tDB_LSN *lsnp, null_lsn;\n") >> CFILE;
+ if (is_dbt == 1)
+ printf("\tu_int32_t zero;\n") >> CFILE;
+ if (is_uint == 1)
+ printf("\tu_int32_t uinttmp;\n") >> CFILE;
+ printf("\tu_int32_t npad, rectype, txn_num;\n") >> CFILE;
+ printf("\tint ret;\n") >> CFILE;
+ printf("\tu_int8_t *bp;\n\n") >> CFILE;
+
+ # Initialization
+ if (has_dbp == 1)
+ printf("\tdbenv = dbp->dbenv;\n") >> CFILE;
+ printf("\trectype = DB_%s;\n", funcname) >> CFILE;
+ printf("\tnpad = 0;\n\n") >> CFILE;
+
+ printf("\tif (txnid == NULL) {\n") >> CFILE;
+ printf("\t\ttxn_num = 0;\n") >> CFILE;
+ printf("\t\tnull_lsn.file = 0;\n") >> CFILE;
+ printf("\t\tnull_lsn.offset = 0;\n") >> CFILE;
+ printf("\t\tlsnp = &null_lsn;\n") >> CFILE;
+ printf("\t} else {\n") >> CFILE;
+ if (funcname != "__db_debug" && dbprivate) {
+ printf(\
+ "\t\tif (TAILQ_FIRST(&txnid->kids) != NULL &&\n") >> CFILE;
+ printf("\t\t (ret = __txn_activekids(") >> CFILE;
+ printf("dbenv, rectype, txnid)) != 0)\n") >> CFILE;
+ printf("\t\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\t\ttxn_num = txnid->txnid;\n") >> CFILE;
+ printf("\t\tlsnp = &txnid->last_lsn;\n") >> CFILE;
+ printf("\t}\n\n") >> CFILE;
+
+ # Malloc
+ printf("\tlogrec.size = sizeof(rectype) + ") >> CFILE;
+ printf("sizeof(txn_num) + sizeof(DB_LSN)") >> CFILE;
+ for (i = 0; i < nvars; i++)
+ printf("\n\t + %s", sizes[i]) >> CFILE;
+ printf(";\n") >> CFILE
+ if (dbprivate) {
+ printf("\tif (CRYPTO_ON(dbenv)) {\n") >> CFILE;
+ printf("\t\tnpad =\n") >> CFILE
+ printf(\
+"\t\t ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);\n")\
+ >> CFILE;
+ printf("\t\tlogrec.size += npad;\n\t}\n\n") >> CFILE
+ }
+ write_malloc("logrec.data", "logrec.size", CFILE)
+ printf("\tif (npad > 0)\n") >> CFILE;
+ printf("\t\tmemset((u_int8_t *)logrec.data + logrec.size ") >> CFILE;
+ printf("- npad, 0, npad);\n\n") >> CFILE;
+
+ # Copy args into buffer
+ printf("\tbp = logrec.data;\n\n") >> CFILE;
+ printf("\tmemcpy(bp, &rectype, sizeof(rectype));\n") >> CFILE;
+ printf("\tbp += sizeof(rectype);\n\n") >> CFILE;
+ printf("\tmemcpy(bp, &txn_num, sizeof(txn_num));\n") >> CFILE;
+ printf("\tbp += sizeof(txn_num);\n\n") >> CFILE;
+ printf("\tmemcpy(bp, lsnp, sizeof(DB_LSN));\n") >> CFILE;
+ printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE;
+
+ for (i = 0; i < nvars; i ++) {
+ if (modes[i] == "ARG" || modes[i] == "WRLOCK" || \
+ modes[i] == "WRLOCKNZ") {
+ printf("\tuinttmp = (u_int32_t)%s;\n", \
+ vars[i]) >> CFILE;
+ printf("\tmemcpy(bp, &uinttmp, sizeof(uinttmp));\n") \
+ >> CFILE;
+ printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE;
+ } else if (modes[i] == "DBT" || modes[i] == "PGDBT") {
+ printf("\tif (%s == NULL) {\n", vars[i]) >> CFILE;
+ printf("\t\tzero = 0;\n") >> CFILE;
+ printf("\t\tmemcpy(bp, &zero, sizeof(u_int32_t));\n") \
+ >> CFILE;
+ printf("\t\tbp += sizeof(u_int32_t);\n") >> CFILE;
+ printf("\t} else {\n") >> CFILE;
+ printf("\t\tmemcpy(bp, &%s->size, ", vars[i]) >> CFILE;
+ printf("sizeof(%s->size));\n", vars[i]) >> CFILE;
+ printf("\t\tbp += sizeof(%s->size);\n", vars[i]) \
+ >> CFILE;
+ printf("\t\tmemcpy(bp, %s->data, %s->size);\n", \
+ vars[i], vars[i]) >> CFILE;
+ printf("\t\tbp += %s->size;\n\t}\n\n", \
+ vars[i]) >> CFILE;
+ } else if (modes[i] == "DB") {
+ # We need to log a DB handle. To do this, we
+ # actually just log its fileid; from that, we'll
+ # be able to acquire an open handle at recovery time.
+ printf("\tDB_ASSERT(dbp->log_filename != NULL);\n") \
+ >> CFILE;
+ printf("\tif (dbp->log_filename->id == ") >> CFILE;
+ printf("DB_LOGFILEID_INVALID &&\n\t ") >> CFILE
+ printf("(ret = __dbreg_lazy_id(dbp)) != 0)\n") \
+ >> CFILE;
+ printf("\t\treturn (ret);\n\n") >> CFILE;
+
+ printf("\tuinttmp = ") >> CFILE;
+ printf("(u_int32_t)dbp->log_filename->id;\n") >> CFILE;
+ printf("\tmemcpy(bp, &uinttmp, sizeof(uinttmp));\n") \
+ >> CFILE;
+ printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE;
+ } else { # POINTER
+ printf("\tif (%s != NULL)\n", vars[i]) >> CFILE;
+ printf("\t\tmemcpy(bp, %s, %s);\n", vars[i], \
+ sizes[i]) >> CFILE;
+ printf("\telse\n") >> CFILE;
+ printf("\t\tmemset(bp, 0, %s);\n", sizes[i]) >> CFILE;
+ printf("\tbp += %s;\n\n", sizes[i]) >> CFILE;
+ }
+ }
+
+ # Error checking. User code won't have DB_ASSERT available, but
+ # this is a pretty unlikely assertion anyway, so we just leave it out
+ # rather than requiring assert.h.
+ if (dbprivate) {
+ printf("\tDB_ASSERT((u_int32_t)") >> CFILE;
+ printf("(bp - (u_int8_t *)logrec.data) <= logrec.size);\n") \
+ >> CFILE;
+ }
+
+ # Issue log call
+ # We didn't call the crypto alignment function when we created this
+ # log record (because we don't have the right header files to find
+ # the function), so we have to copy the log record to make sure the
+ # alignment is correct.
+ printf(\
+ "\tret = dbenv->log_put(dbenv,\n\t ret_lsnp, (DBT *)&logrec, ") \
+ >> CFILE;
+ if (dbprivate) {
+ printf("flags | DB_NOCOPY);\n") >> CFILE;
+ } else {
+ printf("flags);\n") >> CFILE;
+ }
+
+ # Update the transactions last_lsn
+ printf("\tif (txnid != NULL && ret == 0)\n") >> CFILE;
+ printf("\t\ttxnid->last_lsn = *ret_lsnp;\n") >> CFILE;
+
+ # If out of disk space log writes may fail. If we are debugging
+ # that print out which records did not make it to disk.
+ printf("#ifdef LOG_DIAGNOSTIC\n") >> CFILE
+ printf("\tif (ret != 0)\n") >> CFILE;
+ printf("\t\t(void)%s_print(dbenv,\n", funcname) >> CFILE;
+ printf("\t\t (DBT *)&logrec, ret_lsnp, NULL, NULL);\n") >> CFILE
+ printf("#endif\n") >> CFILE
+
+ # Free and return
+ write_free("logrec.data", CFILE)
+ printf("\treturn (ret);\n}\n\n") >> CFILE;
+}
+
+function print_function() {
+ # Write the print function; function prototype
+ p[1] = sprintf("int %s_print", funcname);
+ p[2] = " ";
+ p[3] = "__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));";
+ p[4] = "";
+ proto_format(p);
+
+ # Function declaration
+ printf("int\n%s_print(dbenv, ", funcname) >> CFILE;
+ printf("dbtp, lsnp, notused2, notused3)\n") >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tDBT *dbtp;\n") >> CFILE;
+ printf("\tDB_LSN *lsnp;\n") >> CFILE;
+ printf("\tdb_recops notused2;\n\tvoid *notused3;\n{\n") >> CFILE;
+
+ # Locals
+ printf("\t%s_args *argp;\n", funcname) >> CFILE;
+ for (i = 0; i < nvars; i ++)
+ if (modes[i] == "DBT" || modes[i] == "PGDBT") {
+ printf("\tu_int32_t i;\n") >> CFILE
+ printf("\tint ch;\n") >> CFILE
+ break;
+ }
+
+ printf("\tint ret;\n\n") >> CFILE;
+
+ # Get rid of complaints about unused parameters.
+ printf("\tnotused2 = DB_TXN_ABORT;\n\tnotused3 = NULL;\n\n") >> CFILE;
+
+ # Call read routine to initialize structure
+ printf("\tif ((ret = %s_read(dbenv, dbtp->data, &argp)) != 0)\n", \
+ funcname) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+
+ # Print values in every record
+ printf("\t(void)printf(\n\t \"[%%lu][%%lu]%s: ", funcname) >> CFILE;
+ printf("rec: %%lu txnid %%lx ") >> CFILE;
+ printf("prevlsn [%%lu][%%lu]\\n\",\n") >> CFILE;
+ printf("\t (u_long)lsnp->file,\n") >> CFILE;
+ printf("\t (u_long)lsnp->offset,\n") >> CFILE;
+ printf("\t (u_long)argp->type,\n") >> CFILE;
+ printf("\t (u_long)argp->txnid->txnid,\n") >> CFILE;
+ printf("\t (u_long)argp->prev_lsn.file,\n") >> CFILE;
+ printf("\t (u_long)argp->prev_lsn.offset);\n") >> CFILE;
+
+ # Now print fields of argp
+ for (i = 0; i < nvars; i ++) {
+ printf("\t(void)printf(\"\\t%s: ", vars[i]) >> CFILE;
+
+ if (modes[i] == "DBT" || modes[i] == "PGDBT") {
+ printf("\");\n") >> CFILE;
+ printf("\tfor (i = 0; i < ") >> CFILE;
+ printf("argp->%s.size; i++) {\n", vars[i]) >> CFILE;
+ printf("\t\tch = ((u_int8_t *)argp->%s.data)[i];\n", \
+ vars[i]) >> CFILE;
+ printf("\t\tprintf(isprint(ch) || ch == 0x0a") >> CFILE;
+ printf(" ? \"%%c\" : \"%%#x \", ch);\n") >> CFILE;
+ printf("\t}\n\t(void)printf(\"\\n\");\n") >> CFILE;
+ } else if (types[i] == "DB_LSN *") {
+ printf("[%%%s][%%%s]\\n\",\n", \
+ formats[i], formats[i]) >> CFILE;
+ printf("\t (u_long)argp->%s.file,", \
+ vars[i]) >> CFILE;
+ printf(" (u_long)argp->%s.offset);\n", \
+ vars[i]) >> CFILE;
+ } else {
+ if (formats[i] == "lx")
+ printf("0x") >> CFILE;
+ printf("%%%s\\n\", ", formats[i]) >> CFILE;
+ if (formats[i] == "lx" || formats[i] == "lu")
+ printf("(u_long)") >> CFILE;
+ if (formats[i] == "ld")
+ printf("(long)") >> CFILE;
+ printf("argp->%s);\n", vars[i]) >> CFILE;
+ }
+ }
+ printf("\t(void)printf(\"\\n\");\n") >> CFILE;
+ write_free("argp", CFILE);
+ printf("\treturn (0);\n") >> CFILE;
+ printf("}\n\n") >> CFILE;
+}
+
+function read_function() {
+ # Write the read function; function prototype
+ p[1] = sprintf("int %s_read __P((DB_ENV *, void *,", funcname);
+ p[2] = " ";
+ p[3] = sprintf("%s_args **));", funcname);
+ p[4] = "";
+ proto_format(p);
+
+ # Function declaration
+ printf("int\n%s_read(dbenv, recbuf, argpp)\n", funcname) >> CFILE;
+
+ # Now print the parameters
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tvoid *recbuf;\n") >> CFILE;
+ printf("\t%s_args **argpp;\n", funcname) >> CFILE;
+
+ # Function body and local decls
+ printf("{\n\t%s_args *argp;\n", funcname) >> CFILE;
+ if (is_uint == 1)
+ printf("\tu_int32_t uinttmp;\n") >> CFILE;
+ printf("\tu_int8_t *bp;\n") >> CFILE;
+
+
+ if (dbprivate) {
+ # We only use dbenv and ret in the private malloc case.
+ printf("\tint ret;\n\n") >> CFILE;
+ } else {
+ printf("\t/* Keep the compiler quiet. */\n") >> CFILE;
+ printf("\n\tdbenv = NULL;\n") >> CFILE;
+ }
+
+ malloc_size = sprintf("sizeof(%s_args) + sizeof(DB_TXN)", funcname)
+ write_malloc("argp", malloc_size, CFILE)
+
+ # Set up the pointers to the txnid.
+ printf("\targp->txnid = (DB_TXN *)&argp[1];\n\n") >> CFILE;
+
+ # First get the record type, prev_lsn, and txnid fields.
+
+ printf("\tbp = recbuf;\n") >> CFILE;
+ printf("\tmemcpy(&argp->type, bp, sizeof(argp->type));\n") >> CFILE;
+ printf("\tbp += sizeof(argp->type);\n\n") >> CFILE;
+ printf("\tmemcpy(&argp->txnid->txnid, bp, ") >> CFILE;
+ printf("sizeof(argp->txnid->txnid));\n") >> CFILE;
+ printf("\tbp += sizeof(argp->txnid->txnid);\n\n") >> CFILE;
+ printf("\tmemcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));\n") >> CFILE;
+ printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE;
+
+ # Now get rest of data.
+ for (i = 0; i < nvars; i ++) {
+ if (modes[i] == "DBT" || modes[i] == "PGDBT") {
+ printf("\tmemset(&argp->%s, 0, sizeof(argp->%s));\n", \
+ vars[i], vars[i]) >> CFILE;
+ printf("\tmemcpy(&argp->%s.size, ", vars[i]) >> CFILE;
+ printf("bp, sizeof(u_int32_t));\n") >> CFILE;
+ printf("\tbp += sizeof(u_int32_t);\n") >> CFILE;
+ printf("\targp->%s.data = bp;\n", vars[i]) >> CFILE;
+ printf("\tbp += argp->%s.size;\n", vars[i]) >> CFILE;
+ } else if (modes[i] == "ARG" || modes[i] == "WRLOCK" || \
+ modes[i] == "WRLOCKNZ" || modes[i] == "DB") {
+ printf("\tmemcpy(&uinttmp, bp, sizeof(uinttmp));\n") \
+ >> CFILE;
+ printf("\targp->%s = (%s)uinttmp;\n", vars[i], \
+ types[i]) >> CFILE;
+ printf("\tbp += sizeof(uinttmp);\n") >> CFILE;
+ } else { # POINTER
+ printf("\tmemcpy(&argp->%s, bp, ", vars[i]) >> CFILE;
+ printf(" sizeof(argp->%s));\n", vars[i]) >> CFILE;
+ printf("\tbp += sizeof(argp->%s);\n", vars[i]) >> CFILE;
+ }
+ printf("\n") >> CFILE;
+ }
+
+ # Free and return
+ printf("\t*argpp = argp;\n") >> CFILE;
+ printf("\treturn (0);\n}\n\n") >> CFILE;
+}
+
+function getpgnos_function() {
+ # Write the getpgnos function; function prototype
+ p[1] = sprintf("int %s_getpgnos", funcname);
+ p[2] = " ";
+ p[3] = "__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));";
+ p[4] = "";
+ proto_format(p);
+
+ # Function declaration
+ printf("int\n%s_getpgnos(dbenv, ", funcname) >> CFILE;
+ printf("rec, lsnp, notused1, summary)\n") >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tDBT *rec;\n") >> CFILE;
+ printf("\tDB_LSN *lsnp;\n") >> CFILE;
+ printf("\tdb_recops notused1;\n") >> CFILE;
+ printf("\tvoid *summary;\n{\n") >> CFILE;
+
+ # If there are no locks, return this fact.
+ if (nlocks == 0) {
+ printf("\tTXN_RECS *t;\n") >> CFILE;
+ printf("\tint ret;\n") >> CFILE;
+ printf("\tCOMPQUIET(rec, NULL);\n") >> CFILE;
+ printf("\tCOMPQUIET(notused1, DB_TXN_ABORT);\n") >> CFILE;
+
+ printf("\n\tt = (TXN_RECS *)summary;\n") >> CFILE;
+ printf("\n\tif ((ret = __rep_check_alloc(dbenv, ") >> CFILE;
+ printf("t, 1)) != 0)\n") >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+
+ printf("\n\tt->array[t->npages].flags = LSN_PAGE_NOLOCK;\n") \
+ >> CFILE;
+ printf("\tt->array[t->npages].lsn = *lsnp;\n") >> CFILE;
+ printf("\tt->array[t->npages].fid = DB_LOGFILEID_INVALID;\n") \
+ >> CFILE;
+ printf("\tmemset(&t->array[t->npages].pgdesc, 0,\n") >> CFILE;
+ printf("\t sizeof(t->array[t->npages].pgdesc));\n") >> CFILE;
+ printf("\n\tt->npages++;\n") >> CFILE;
+
+ printf("\n") >> CFILE;
+ printf("\treturn (0);\n") >> CFILE;
+ printf("}\n\n") >> CFILE;
+ return;
+ }
+
+ # Locals
+ printf("\tDB *dbp;\n") >> CFILE;
+ printf("\tTXN_RECS *t;\n") >> CFILE;
+ printf("\t%s_args *argp;\n", funcname) >> CFILE;
+ printf("\tu_int32_t ret;\n\n") >> CFILE;
+
+ # Shut up compiler.
+ printf("\tCOMPQUIET(notused1, DB_TXN_ABORT);\n\n") >> CFILE;
+
+ printf("\targp = NULL;\n") >> CFILE;
+ printf("\tt = (TXN_RECS *)summary;\n\n") >> CFILE;
+
+ printf("\tif ((ret = %s_read(dbenv, rec->data, &argp)) != 0)\n", \
+ funcname) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+
+ # Get file ID.
+ printf("\n\tif ((ret = __dbreg_id_to_db(dbenv,\n\t ") >> CFILE;
+ printf("argp->txnid, &dbp, argp->fileid, 0)) != 0)\n") >> CFILE;
+ printf("\t\tgoto err;\n") >> CFILE;
+
+ printf("\n\tif ((ret = __rep_check_alloc(dbenv, t, %d)) != 0)\n", \
+ nlocks) >> CFILE;
+ printf("\t\tgoto err;\n\n") >> CFILE;
+
+ for (i = 1; i <= nlocks; i++) {
+ if (lock_if_zero[i]) {
+ indent = "\t";
+ } else {
+ indent = "\t\t";
+ printf("\tif (argp->%s != PGNO_INVALID) {\n", \
+ lock_pgnos[i]) >> CFILE;
+ }
+ printf("%st->array[t->npages].flags = 0;\n", indent) >> CFILE;
+ printf("%st->array[t->npages].fid = argp->fileid;\n", indent) \
+ >> CFILE;
+ printf("%st->array[t->npages].lsn = *lsnp;\n", indent) >> CFILE;
+ printf("%st->array[t->npages].pgdesc.pgno = argp->%s;\n", \
+ indent, lock_pgnos[i]) >> CFILE;
+ printf("%st->array[t->npages].pgdesc.type = DB_PAGE_LOCK;\n", \
+ indent) >> CFILE;
+ printf("%smemcpy(t->array[t->npages].pgdesc.fileid, ", indent) \
+ >> CFILE;
+ printf("dbp->fileid,\n%s DB_FILE_ID_LEN);\n", \
+ indent, indent) >> CFILE;
+ printf("%st->npages++;\n", indent) >> CFILE;
+ if (!lock_if_zero[i]) {
+ printf("\t}\n") >> CFILE;
+ }
+ }
+
+ printf("\nerr:\tif (argp != NULL)\n") >> CFILE;
+ write_free("argp", CFILE);
+
+ printf("\treturn (ret);\n") >> CFILE;
+
+ printf("}\n\n") >> CFILE;
+}
+
+# proto_format --
+# Pretty-print a function prototype.
+function proto_format(p)
+{
+ printf("/*\n") >> CFILE;
+
+ s = "";
+ for (i = 1; i in p; ++i)
+ s = s p[i];
+
+ t = " * PUBLIC: "
+ if (length(s) + length(t) < 80)
+ printf("%s%s", t, s) >> CFILE;
+ else {
+ split(s, p, "__P");
+ len = length(t) + length(p[1]);
+ printf("%s%s", t, p[1]) >> CFILE
+
+ n = split(p[2], comma, ",");
+ comma[1] = "__P" comma[1];
+ for (i = 1; i <= n; i++) {
+ if (len + length(comma[i]) > 70) {
+ printf("\n * PUBLIC: ") >> CFILE;
+ len = 0;
+ }
+ printf("%s%s", comma[i], i == n ? "" : ",") >> CFILE;
+ len += length(comma[i]) + 2;
+ }
+ }
+ printf("\n */\n") >> CFILE;
+ delete p;
+}
+
+function write_malloc(ptr, size, file)
+{
+ if (dbprivate) {
+ printf("\tif ((ret = ") >> file;
+ printf(\
+ "__os_malloc(dbenv,\n\t " size ", &" ptr ")) != 0)\n") \
+ >> file
+ printf("\t\treturn (ret);\n\n") >> file;
+ } else {
+ printf("\tif ((" ptr " = malloc(" size ")) == NULL)\n") >> file
+ printf("\t\treturn (ENOMEM);\n\n") >> file
+ }
+}
+
+function write_free(ptr, file)
+{
+ if (dbprivate) {
+ printf("\t__os_free(dbenv, " ptr ");\n") >> file
+ } else {
+ printf("\tfree(" ptr ");\n") >> file
+ }
+}
diff --git a/storage/bdb/dist/gen_rpc.awk b/storage/bdb/dist/gen_rpc.awk
new file mode 100644
index 00000000000..03975d7321b
--- /dev/null
+++ b/storage/bdb/dist/gen_rpc.awk
@@ -0,0 +1,1214 @@
+#
+# $Id: gen_rpc.awk,v 11.50 2002/07/02 19:26:57 sue Exp $
+# Awk script for generating client/server RPC code.
+#
+# This awk script generates most of the RPC routines for DB client/server
+# use. It also generates a template for server and client procedures. These
+# functions must still be edited, but are highly stylized and the initial
+# template gets you a fair way along the path).
+#
+# This awk script requires that these variables be set when it is called:
+#
+# major -- Major version number
+# minor -- Minor version number
+# xidsize -- size of GIDs
+# client_file -- the C source file being created for client code
+# ctmpl_file -- the C template file being created for client code
+# sed_file -- the sed file created to alter server proc code
+# server_file -- the C source file being created for server code
+# stmpl_file -- the C template file being created for server code
+# xdr_file -- the XDR message file created
+#
+# And stdin must be the input file that defines the RPC setup.
+BEGIN {
+ if (major == "" || minor == "" || xidsize == "" ||
+ client_file == "" || ctmpl_file == "" ||
+ sed_file == "" || server_file == "" ||
+ stmpl_file == "" || xdr_file == "") {
+ print "Usage: gen_rpc.awk requires these variables be set:"
+ print "\tmajor\t-- Major version number"
+ print "\tminor\t-- Minor version number"
+ print "\txidsize\t-- GID size"
+ print "\tclient_file\t-- the client C source file being created"
+ print "\tctmpl_file\t-- the client template file being created"
+ print "\tsed_file\t-- the sed command file being created"
+ print "\tserver_file\t-- the server C source file being created"
+ print "\tstmpl_file\t-- the server template file being created"
+ print "\txdr_file\t-- the XDR message file being created"
+ error = 1; exit
+ }
+
+ FS="\t\t*"
+ CFILE=client_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > CFILE
+
+ TFILE = ctmpl_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > TFILE
+
+ SFILE = server_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > SFILE
+
+ # Server procedure template and a sed file to massage an existing
+ # template source file to change args.
+ # SEDFILE should be same name as PFILE but .c
+ #
+ PFILE = stmpl_file
+ SEDFILE = sed_file
+ printf("") > SEDFILE
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > PFILE
+
+ XFILE = xdr_file
+ printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
+ > XFILE
+ nendlist = 1;
+}
+END {
+ printf("#endif /* HAVE_RPC */\n") >> CFILE
+ printf("#endif /* HAVE_RPC */\n") >> TFILE
+ printf("program DB_RPC_SERVERPROG {\n") >> XFILE
+ printf("\tversion DB_RPC_SERVERVERS {\n") >> XFILE
+
+ for (i = 1; i < nendlist; ++i)
+ printf("\t\t%s;\n", endlist[i]) >> XFILE
+
+ printf("\t} = %d%03d;\n", major, minor) >> XFILE
+ printf("} = 351457;\n") >> XFILE
+}
+
+/^[ ]*BEGIN/ {
+ name = $2;
+ nofunc_code = 0;
+ funcvars = 0;
+ ret_code = 0;
+ if ($3 == "NOFUNC")
+ nofunc_code = 1;
+ if ($3 == "RETCODE")
+ ret_code = 1;
+
+ nvars = 0;
+ rvars = 0;
+ newvars = 0;
+ db_handle = 0;
+ env_handle = 0;
+ dbc_handle = 0;
+ txn_handle = 0;
+ mp_handle = 0;
+ dbt_handle = 0;
+ xdr_free = 0;
+}
+/^[ ]*ARG/ {
+ rpc_type[nvars] = $2;
+ c_type[nvars] = $3;
+ pr_type[nvars] = $3;
+ args[nvars] = $4;
+ func_arg[nvars] = 0;
+ if (rpc_type[nvars] == "LIST") {
+ list_type[nvars] = $5;
+ } else
+ list_type[nvars] = 0;
+
+ if (c_type[nvars] == "DBT *")
+ dbt_handle = 1;
+
+ if (c_type[nvars] == "DB_ENV *") {
+ ctp_type[nvars] = "CT_ENV";
+ env_handle = 1;
+ env_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DB *") {
+ ctp_type[nvars] = "CT_DB";
+ if (db_handle != 1) {
+ db_handle = 1;
+ db_idx = nvars;
+ }
+ }
+
+ if (c_type[nvars] == "DBC *") {
+ ctp_type[nvars] = "CT_CURSOR";
+ dbc_handle = 1;
+ dbc_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DB_TXN *") {
+ ctp_type[nvars] = "CT_TXN";
+ txn_handle = 1;
+ txn_idx = nvars;
+ }
+
+ if (c_type[nvars] == "DB_MPOOLFILE *") {
+ mp_handle = 1;
+ mp_idx = nvars;
+ }
+
+ ++nvars;
+}
+/^[ ]*FUNCPROT/ {
+ pr_type[nvars] = $2;
+}
+/^[ ]*FUNCARG/ {
+ rpc_type[nvars] = "IGNORE";
+ c_type[nvars] = $2;
+ args[nvars] = sprintf("func%d", funcvars);
+ func_arg[nvars] = 1;
+ ++funcvars;
+ ++nvars;
+}
+/^[ ]*RET/ {
+ ret_type[rvars] = $2;
+ retc_type[rvars] = $3;
+ retargs[rvars] = $4;
+ if (ret_type[rvars] == "LIST" || ret_type[rvars] == "DBT") {
+ xdr_free = 1;
+ }
+ if (ret_type[rvars] == "LIST") {
+ retlist_type[rvars] = $5;
+ } else
+ retlist_type[rvars] = 0;
+
+ ++rvars;
+}
+/^[ ]*END/ {
+ #
+ # =====================================================
+ # File headers, if necessary.
+ #
+ if (first == 0) {
+ printf("#include \"db_config.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#ifdef HAVE_RPC\n") >> CFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> CFILE
+ printf("#include <sys/types.h>\n\n") >> CFILE
+ printf("#include <rpc/rpc.h>\n") >> CFILE
+ printf("#include <rpc/xdr.h>\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include <string.h>\n") >> CFILE
+ printf("#endif\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include \"db_int.h\"\n") >> CFILE
+ printf("#include \"dbinc/txn.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> CFILE
+ printf("#include \"dbinc_auto/rpc_client_ext.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+
+ printf("#include \"db_config.h\"\n") >> TFILE
+ printf("\n") >> TFILE
+ printf("#ifdef HAVE_RPC\n") >> TFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
+ printf("#include <sys/types.h>\n") >> TFILE
+ printf("#include <rpc/rpc.h>\n") >> TFILE
+ printf("\n") >> TFILE
+ printf("#include <string.h>\n") >> TFILE
+ printf("#endif\n") >> TFILE
+ printf("#include \"db_int.h\"\n") >> TFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> TFILE
+ printf("#include \"dbinc/txn.h\"\n") >> TFILE
+ printf("\n") >> TFILE
+
+ printf("#include \"db_config.h\"\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> SFILE
+ printf("#include <sys/types.h>\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include <rpc/rpc.h>\n") >> SFILE
+ printf("#include <rpc/xdr.h>\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include <string.h>\n") >> SFILE
+ printf("#endif\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include \"db_int.h\"\n") >> SFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> SFILE
+ printf("#include \"dbinc/db_server_int.h\"\n") >> SFILE
+ printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> SFILE
+ printf("\n") >> SFILE
+
+ printf("#include \"db_config.h\"\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> PFILE
+ printf("#include <sys/types.h>\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include <rpc/rpc.h>\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include <string.h>\n") >> PFILE
+ printf("#endif\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include \"db_int.h\"\n") >> PFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> PFILE
+ printf("#include \"dbinc/db_server_int.h\"\n") >> PFILE
+ printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> PFILE
+ printf("\n") >> PFILE
+
+ first = 1;
+ }
+ #
+ # =====================================================
+ # Generate Client Nofunc code first if necessary
+ # NOTE: This code must be first, because we don't want any
+ # other code other than this function, so before we write
+ # out to the XDR and server files, we just generate this
+ # and move on if this is all we are doing.
+ #
+ if (nofunc_code == 1) {
+ #
+ # First time through, put out the general no server and
+ # illegal functions.
+ #
+ if (first_nofunc == 0) {
+ printf("static int __dbcl_noserver ") >> CFILE
+ printf("__P((DB_ENV *));\n\n") >> CFILE
+ printf("static int\n") >> CFILE
+ printf("__dbcl_noserver(dbenv)\n") >> CFILE
+ printf("\tDB_ENV *dbenv;\n") >> CFILE
+ printf("{\n\t__db_err(dbenv,") >> CFILE
+ printf(" \"No server environment\");\n") >> CFILE
+ printf("\treturn (DB_NOSERVER);\n") >> CFILE
+ printf("}\n\n") >> CFILE
+
+ printf("static int __dbcl_rpc_illegal ") >> CFILE
+ printf("__P((DB_ENV *, char *));\n\n") >> CFILE
+ printf("static int\n") >> CFILE
+ printf("__dbcl_rpc_illegal(dbenv, name)\n") >> CFILE
+ printf("\tDB_ENV *dbenv;\n\tchar *name;\n") >> CFILE
+ printf("{\n\t__db_err(dbenv,") >> CFILE
+ printf(" \"%%s method meaningless in an RPC") >> CFILE
+ printf(" environment\", name);\n") >> CFILE
+ printf("\treturn (__db_eopnotsup(dbenv));\n") >> CFILE
+ printf("}\n\n") >> CFILE
+
+ first_nofunc = 1
+ }
+ #
+ # Spit out PUBLIC prototypes.
+ #
+ pi = 1;
+ p[pi++] = sprintf("int __dbcl_%s __P((", name);
+ p[pi++] = "";
+ for (i = 0; i < nvars; ++i) {
+ p[pi++] = pr_type[i];
+ p[pi++] = ", ";
+ }
+ p[pi - 1] = "";
+ p[pi++] = "));";
+ p[pi] = "";
+ proto_format(p, 0, CFILE);
+
+ #
+ # Spit out function name/args.
+ #
+ printf("int\n") >> CFILE
+ printf("__dbcl_%s(", name) >> CFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> CFILE
+ sep = ", ";
+ }
+ printf(")\n") >> CFILE
+
+ for (i = 0; i < nvars; ++i)
+ if (func_arg[i] == 0)
+ printf("\t%s %s;\n", c_type[i], args[i]) \
+ >> CFILE
+ else
+ printf("\t%s;\n", c_type[i]) >> CFILE
+
+ #
+ # Call error function and return EINVAL
+ #
+ printf("{\n") >> CFILE
+
+ #
+ # If we don't have a local env, set one.
+ #
+ if (env_handle == 0) {
+ printf("\tDB_ENV *dbenv;\n\n") >> CFILE
+ if (db_handle)
+ printf("\tdbenv = %s->dbenv;\n", \
+ args[db_idx]) >> CFILE
+ else if (dbc_handle)
+ printf("\tdbenv = %s->dbp->dbenv;\n", \
+ args[dbc_idx]) >> CFILE
+ else if (txn_handle)
+ printf("\tdbenv = %s->mgrp->dbenv;\n", \
+ args[txn_idx]) >> CFILE
+ else if (mp_handle)
+ printf("\tdbenv = %s->dbmp->dbenv;\n", \
+ args[mp_idx]) >> CFILE
+ else
+ printf("\tdbenv = NULL;\n") >> CFILE
+ }
+ #
+ # Quiet the compiler for all variables.
+ #
+ # NOTE: Index 'i' starts at 1, not 0. Our first arg is
+ # the handle we need to get to the env, and we do not want
+ # to COMPQUIET that one.
+ for (i = 1; i < nvars; ++i) {
+ if (rpc_type[i] == "CONST" || rpc_type[i] == "DBT" ||
+ rpc_type[i] == "LIST" || rpc_type[i] == "STRING" ||
+ rpc_type[i] == "GID") {
+ printf("\tCOMPQUIET(%s, NULL);\n", args[i]) \
+ >> CFILE
+ }
+ if (rpc_type[i] == "INT" || rpc_type[i] == "IGNORE" ||
+ rpc_type[i] == "ID") {
+ printf("\tCOMPQUIET(%s, 0);\n", args[i]) \
+ >> CFILE
+ }
+ }
+
+ if (!env_handle) {
+ printf("\treturn (__dbcl_rpc_illegal(dbenv, ") >> CFILE
+ printf("\"%s\"));\n", name) >> CFILE
+ } else
+ printf("\treturn (__dbcl_rpc_illegal(%s, \"%s\"));\n", \
+ args[env_idx], name) >> CFILE
+ printf("}\n\n") >> CFILE
+
+ next;
+ }
+
+ #
+ # =====================================================
+ # XDR messages.
+ #
+ printf("\n") >> XFILE
+ printf("struct __%s_msg {\n", name) >> XFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ if (list_type[i] == "GID") {
+ printf("\topaque %s<>;\n", args[i]) >> XFILE
+ } else {
+ printf("\tunsigned int %s<>;\n", args[i]) >> XFILE
+ }
+ }
+ if (rpc_type[i] == "ID") {
+ printf("\tunsigned int %scl_id;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("\tstring %s<>;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "GID") {
+ printf("\topaque %s[%d];\n", args[i], xidsize) >> XFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("\tunsigned int %s;\n", args[i]) >> XFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("\tunsigned int %sdlen;\n", args[i]) >> XFILE
+ printf("\tunsigned int %sdoff;\n", args[i]) >> XFILE
+ printf("\tunsigned int %sulen;\n", args[i]) >> XFILE
+ printf("\tunsigned int %sflags;\n", args[i]) >> XFILE
+ printf("\topaque %sdata<>;\n", args[i]) >> XFILE
+ }
+ }
+ printf("};\n") >> XFILE
+
+ printf("\n") >> XFILE
+ #
+ # Generate the reply message
+ #
+ printf("struct __%s_reply {\n", name) >> XFILE
+ printf("\tint status;\n") >> XFILE
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "ID") {
+ printf("\tunsigned int %scl_id;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "STRING") {
+ printf("\tstring %s<>;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "INT") {
+ printf("\tunsigned int %s;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "DBL") {
+ printf("\tdouble %s;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "DBT") {
+ printf("\topaque %sdata<>;\n", retargs[i]) >> XFILE
+ }
+ if (ret_type[i] == "LIST") {
+ if (retlist_type[i] == "GID") {
+ printf("\topaque %s<>;\n", retargs[i]) >> XFILE
+ } else {
+ printf("\tunsigned int %s<>;\n", retargs[i]) >> XFILE
+ }
+ }
+ }
+ printf("};\n") >> XFILE
+
+ endlist[nendlist] = \
+ sprintf("__%s_reply __DB_%s(__%s_msg) = %d", \
+ name, name, name, nendlist);
+ nendlist++;
+ #
+ # =====================================================
+ # Server functions.
+ #
+ # First spit out PUBLIC prototypes for server functions.
+ #
+ p[1] = sprintf("__%s_reply *__db_%s_%d%03d __P((__%s_msg *, struct svc_req *));",
+ name, name, major, minor, name);
+ p[2] = "";
+ proto_format(p, 0, SFILE);
+
+ printf("__%s_reply *\n", name) >> SFILE
+ printf("__db_%s_%d%03d(msg, req)\n", name, major, minor) >> SFILE
+ printf("\t__%s_msg *msg;\n", name) >> SFILE;
+ printf("\tstruct svc_req *req;\n", name) >> SFILE;
+ printf("{\n") >> SFILE
+ printf("\tstatic __%s_reply reply; /* must be static */\n", \
+ name) >> SFILE
+ if (xdr_free) {
+ printf("\tstatic int __%s_free = 0; /* must be static */\n\n", \
+ name) >> SFILE
+ }
+ printf("\tCOMPQUIET(req, NULL);\n", name) >> SFILE
+ if (xdr_free) {
+ printf("\tif (__%s_free)\n", name) >> SFILE
+ printf("\t\txdr_free((xdrproc_t)xdr___%s_reply, (void *)&reply);\n", \
+ name) >> SFILE
+ printf("\t__%s_free = 0;\n", name) >> SFILE
+ printf("\n\t/* Reinitialize allocated fields */\n") >> SFILE
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "LIST") {
+ printf("\treply.%s.%s_val = NULL;\n", \
+ retargs[i], retargs[i]) >> SFILE
+ }
+ if (ret_type[i] == "DBT") {
+ printf("\treply.%sdata.%sdata_val = NULL;\n", \
+ retargs[i], retargs[i]) >> SFILE
+ }
+ }
+ }
+
+ need_out = 0;
+ #
+ # Compose server proc to call. Decompose message components as args.
+ #
+ printf("\n\t__%s_proc(", name) >> SFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "IGNORE") {
+ continue;
+ }
+ if (rpc_type[i] == "ID") {
+ printf("%smsg->%scl_id", sep, args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("%s(*msg->%s == '\\0') ? NULL : msg->%s", \
+ sep, args[i], args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "GID") {
+ printf("%smsg->%s", sep, args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("%smsg->%s", sep, args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("%smsg->%s.%s_val", \
+ sep, args[i], args[i]) >> SFILE
+ printf("%smsg->%s.%s_len", \
+ sep, args[i], args[i]) >> SFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("%smsg->%sdlen", sep, args[i]) >> SFILE
+ sep = ",\n\t ";
+ printf("%smsg->%sdoff", sep, args[i]) >> SFILE
+ printf("%smsg->%sulen", sep, args[i]) >> SFILE
+ printf("%smsg->%sflags", sep, args[i]) >> SFILE
+ printf("%smsg->%sdata.%sdata_val", \
+ sep, args[i], args[i]) >> SFILE
+ printf("%smsg->%sdata.%sdata_len", \
+ sep, args[i], args[i]) >> SFILE
+ }
+ sep = ",\n\t ";
+ }
+ printf("%s&reply", sep) >> SFILE
+ if (xdr_free)
+ printf("%s&__%s_free);\n", sep, name) >> SFILE
+ else
+ printf(");\n\n") >> SFILE
+ if (need_out) {
+ printf("\nout:\n") >> SFILE
+ }
+ printf("\treturn (&reply);\n") >> SFILE
+ printf("}\n\n") >> SFILE
+
+ #
+ # =====================================================
+ # Generate Procedure Template Server code
+ #
+ # Produce SED file commands if needed at the same time
+ #
+ # Spit out comment, prototype, function name and arg list.
+ #
+ printf("/^\\/\\* BEGIN __%s_proc/,/^\\/\\* END __%s_proc/c\\\n", \
+ name, name) >> SEDFILE
+
+ printf("/* BEGIN __%s_proc */\n", name) >> PFILE
+ printf("/* BEGIN __%s_proc */\\\n", name) >> SEDFILE
+
+ pi = 1;
+ p[pi++] = sprintf("void __%s_proc __P((", name);
+ p[pi++] = "";
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "IGNORE")
+ continue;
+ if (rpc_type[i] == "ID") {
+ p[pi++] = "long";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "STRING") {
+ p[pi++] = "char *";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "GID") {
+ p[pi++] = "u_int8_t *";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "INT") {
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "GID") {
+ p[pi++] = "u_int8_t *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "INT") {
+ p[pi++] = "u_int32_t *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "ID") {
+ p[pi++] = "u_int32_t *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "DBT") {
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "void *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ }
+ }
+ p[pi++] = sprintf("__%s_reply *", name);
+ if (xdr_free) {
+ p[pi++] = ", ";
+ p[pi++] = "int *));";
+ } else {
+ p[pi++] = "";
+ p[pi++] = "));";
+ }
+ p[pi++] = "";
+ proto_format(p, 1, SEDFILE);
+
+ printf("void\n") >> PFILE
+ printf("void\\\n") >> SEDFILE
+ printf("__%s_proc(", name) >> PFILE
+ printf("__%s_proc(", name) >> SEDFILE
+ sep = "";
+ argcount = 0;
+ for (i = 0; i < nvars; ++i) {
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ }
+ if (rpc_type[i] == "IGNORE")
+ continue;
+ if (rpc_type[i] == "ID") {
+ printf("%s%scl_id", sep, args[i]) >> PFILE
+ printf("%s%scl_id", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "GID") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%slen", sep, args[i]) >> PFILE
+ printf("%s%slen", sep, args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("%s%sdlen", sep, args[i]) >> PFILE
+ printf("%s%sdlen", sep, args[i]) >> SEDFILE
+ sep = ", ";
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sdoff", sep, args[i]) >> PFILE
+ printf("%s%sdoff", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sulen", sep, args[i]) >> PFILE
+ printf("%s%sulen", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sflags", sep, args[i]) >> PFILE
+ printf("%s%sflags", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sdata", sep, args[i]) >> PFILE
+ printf("%s%sdata", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%ssize", sep, args[i]) >> PFILE
+ printf("%s%ssize", sep, args[i]) >> SEDFILE
+ }
+ sep = ", ";
+ }
+ printf("%sreplyp",sep) >> PFILE
+ printf("%sreplyp",sep) >> SEDFILE
+ if (xdr_free) {
+ printf("%sfreep)\n",sep) >> PFILE
+ printf("%sfreep)\\\n",sep) >> SEDFILE
+ } else {
+ printf(")\n") >> PFILE
+ printf(")\\\n") >> SEDFILE
+ }
+ #
+ # Spit out arg types/names;
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tlong %scl_id;\n", args[i]) >> PFILE
+ printf("\\\tlong %scl_id;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("\tchar *%s;\n", args[i]) >> PFILE
+ printf("\\\tchar *%s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "GID") {
+ printf("\tu_int8_t *%s;\n", args[i]) >> PFILE
+ printf("\\\tu_int8_t *%s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("\tu_int32_t %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "GID") {
+ printf("\tu_int8_t * %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int8_t * %s;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "INT") {
+ printf("\tu_int32_t * %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t * %s;\\\n", \
+ args[i]) >> SEDFILE
+ printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %ssize;\\\n", args[i]) >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST" && list_type[i] == "ID") {
+ printf("\tu_int32_t * %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t * %s;\\\n", args[i]) \
+ >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("\tu_int32_t %slen;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %slen;\\\n", args[i]) \
+ >> SEDFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("\tu_int32_t %sdlen;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sdlen;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %sdoff;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sdoff;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %sulen;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sulen;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %sflags;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sflags;\\\n", args[i]) >> SEDFILE
+ printf("\tvoid *%sdata;\n", args[i]) >> PFILE
+ printf("\\\tvoid *%sdata;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %ssize;\\\n", args[i]) >> SEDFILE
+ }
+ }
+ printf("\t__%s_reply *replyp;\n",name) >> PFILE
+ printf("\\\t__%s_reply *replyp;\\\n",name) >> SEDFILE
+ if (xdr_free) {
+ printf("\tint * freep;\n") >> PFILE
+ printf("\\\tint * freep;\\\n") >> SEDFILE
+ }
+
+ printf("/* END __%s_proc */\n", name) >> PFILE
+ printf("/* END __%s_proc */\n", name) >> SEDFILE
+
+ #
+ # Function body
+ #
+ printf("{\n") >> PFILE
+ printf("\tint ret;\n") >> PFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\t%s %s;\n", c_type[i], args[i]) >> PFILE
+ printf("\tct_entry *%s_ctp;\n", args[i]) >> PFILE
+ }
+ }
+ printf("\n") >> PFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tACTIVATE_CTP(%s_ctp, %scl_id, %s);\n", \
+ args[i], args[i], ctp_type[i]) >> PFILE
+ printf("\t%s = (%s)%s_ctp->ct_anyp;\n", \
+ args[i], c_type[i], args[i]) >> PFILE
+ }
+ }
+ printf("\n\t/*\n\t * XXX Code goes here\n\t */\n\n") >> PFILE
+ printf("\treplyp->status = ret;\n") >> PFILE
+ printf("\treturn;\n") >> PFILE
+ printf("}\n\n") >> PFILE
+
+ #
+ # =====================================================
+ # Generate Client code
+ #
+ # Spit out PUBLIC prototypes.
+ #
+ pi = 1;
+ p[pi++] = sprintf("int __dbcl_%s __P((", name);
+ p[pi++] = "";
+ for (i = 0; i < nvars; ++i) {
+ p[pi++] = pr_type[i];
+ p[pi++] = ", ";
+ }
+ p[pi - 1] = "";
+ p[pi++] = "));";
+ p[pi] = "";
+ proto_format(p, 0, CFILE);
+
+ #
+ # Spit out function name/args.
+ #
+ printf("int\n") >> CFILE
+ printf("__dbcl_%s(", name) >> CFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> CFILE
+ sep = ", ";
+ }
+ printf(")\n") >> CFILE
+
+ for (i = 0; i < nvars; ++i)
+ if (func_arg[i] == 0)
+ printf("\t%s %s;\n", c_type[i], args[i]) >> CFILE
+ else
+ printf("\t%s;\n", c_type[i]) >> CFILE
+
+ printf("{\n") >> CFILE
+ printf("\tCLIENT *cl;\n") >> CFILE
+ printf("\t__%s_msg msg;\n", name) >> CFILE
+ printf("\t__%s_reply *replyp = NULL;\n", name) >> CFILE;
+ printf("\tint ret;\n") >> CFILE
+ if (!env_handle)
+ printf("\tDB_ENV *dbenv;\n") >> CFILE
+ #
+ # If we are managing a list, we need a few more vars.
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\t%s %sp;\n", c_type[i], args[i]) >> CFILE
+ printf("\tint %si;\n", args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf("\tu_int8_t ** %sq;\n", args[i]) >> CFILE
+ else
+ printf("\tu_int32_t * %sq;\n", args[i]) >> CFILE
+ }
+ }
+
+ printf("\n") >> CFILE
+ printf("\tret = 0;\n") >> CFILE
+ if (!env_handle) {
+ if (db_handle)
+ printf("\tdbenv = %s->dbenv;\n", args[db_idx]) >> CFILE
+ else if (dbc_handle)
+ printf("\tdbenv = %s->dbp->dbenv;\n", \
+ args[dbc_idx]) >> CFILE
+ else if (txn_handle)
+ printf("\tdbenv = %s->mgrp->dbenv;\n", \
+ args[txn_idx]) >> CFILE
+ else
+ printf("\tdbenv = NULL;\n") >> CFILE
+ printf("\tif (dbenv == NULL || !RPC_ON(dbenv))\n") \
+ >> CFILE
+ printf("\t\treturn (__dbcl_noserver(NULL));\n") >> CFILE
+ } else {
+ printf("\tif (%s == NULL || !RPC_ON(%s))\n", \
+ args[env_idx], args[env_idx]) >> CFILE
+ printf("\t\treturn (__dbcl_noserver(%s));\n", \
+ args[env_idx]) >> CFILE
+ }
+ printf("\n") >> CFILE
+
+ if (!env_handle)
+ printf("\tcl = (CLIENT *)dbenv->cl_handle;\n") >> CFILE
+ else
+ printf("\tcl = (CLIENT *)%s->cl_handle;\n", \
+ args[env_idx]) >> CFILE
+
+ printf("\n") >> CFILE
+
+ #
+ # If there is a function arg, check that it is NULL
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (func_arg[i] != 1)
+ continue;
+ printf("\tif (%s != NULL) {\n", args[i]) >> CFILE
+ if (!env_handle) {
+ printf("\t\t__db_err(dbenv, ") >> CFILE
+ } else {
+ printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE
+ }
+ printf("\"User functions not supported in RPC\");\n") >> CFILE
+ printf("\t\treturn (EINVAL);\n\t}\n") >> CFILE
+ }
+
+ #
+ # Compose message components
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "ID") {
+ printf("\tif (%s == NULL)\n", args[i]) >> CFILE
+ printf("\t\tmsg.%scl_id = 0;\n\telse\n", \
+ args[i]) >> CFILE
+ if (c_type[i] == "DB_TXN *") {
+ printf("\t\tmsg.%scl_id = %s->txnid;\n", \
+ args[i], args[i]) >> CFILE
+ } else {
+ printf("\t\tmsg.%scl_id = %s->cl_id;\n", \
+ args[i], args[i]) >> CFILE
+ }
+ }
+ if (rpc_type[i] == "GID") {
+ printf("\tmemcpy(msg.%s, %s, %d);\n", \
+ args[i], args[i], xidsize) >> CFILE
+ }
+ if (rpc_type[i] == "INT") {
+ printf("\tmsg.%s = %s;\n", args[i], args[i]) >> CFILE
+ }
+ if (rpc_type[i] == "STRING") {
+ printf("\tif (%s == NULL)\n", args[i]) >> CFILE
+ printf("\t\tmsg.%s = \"\";\n", args[i]) >> CFILE
+ printf("\telse\n") >> CFILE
+ printf("\t\tmsg.%s = (char *)%s;\n", \
+ args[i], args[i]) >> CFILE
+ }
+ if (rpc_type[i] == "DBT") {
+ printf("\tmsg.%sdlen = %s->dlen;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\tmsg.%sdoff = %s->doff;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\tmsg.%sulen = %s->ulen;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\tmsg.%sflags = %s->flags;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\tmsg.%sdata.%sdata_val = %s->data;\n", \
+ args[i], args[i], args[i]) >> CFILE
+ printf("\tmsg.%sdata.%sdata_len = %s->size;\n", \
+ args[i], args[i], args[i]) >> CFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("\tfor (%si = 0, %sp = %s; *%sp != 0; ", \
+ args[i], args[i], args[i], args[i]) >> CFILE
+ printf(" %si++, %sp++)\n\t\t;\n", args[i], args[i]) \
+ >> CFILE
+
+ #
+ # If we are an array of ints, *_len is how many
+ # elements. If we are a GID, *_len is total bytes.
+ #
+ printf("\tmsg.%s.%s_len = %si",args[i], args[i], \
+ args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf(" * %d;\n", xidsize) >> CFILE
+ else
+ printf(";\n") >> CFILE
+ printf("\tif ((ret = __os_calloc(") >> CFILE
+ if (!env_handle)
+ printf("dbenv,\n") >> CFILE
+ else
+ printf("%s,\n", args[env_idx]) >> CFILE
+ printf("\t msg.%s.%s_len,", \
+ args[i], args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf(" 1,") >> CFILE
+ else
+ printf(" sizeof(u_int32_t),") >> CFILE
+ printf(" &msg.%s.%s_val)) != 0)\n",\
+ args[i], args[i], args[i], args[i]) >> CFILE
+ printf("\t\treturn (ret);\n") >> CFILE
+ printf("\tfor (%sq = msg.%s.%s_val, %sp = %s; ", \
+ args[i], args[i], args[i], \
+ args[i], args[i]) >> CFILE
+ printf("%si--; %sq++, %sp++)\n", \
+ args[i], args[i], args[i]) >> CFILE
+ printf("\t\t*%sq = ", args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf("*%sp;\n", args[i]) >> CFILE
+ if (list_type[i] == "ID")
+ printf("(*%sp)->cl_id;\n", args[i]) >> CFILE
+ if (list_type[i] == "INT")
+ printf("*%sp;\n", args[i]) >> CFILE
+ }
+ }
+
+ printf("\n") >> CFILE
+ printf("\treplyp = __db_%s_%d%03d(&msg, cl);\n", name, major, minor) \
+ >> CFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\t__os_free(") >> CFILE
+ if (!env_handle)
+ printf("dbenv, ") >> CFILE
+ else
+ printf("%s, ", args[env_idx]) >> CFILE
+ printf("msg.%s.%s_val);\n", args[i], args[i]) >> CFILE
+ }
+ }
+ printf("\tif (replyp == NULL) {\n") >> CFILE
+ if (!env_handle) {
+ printf("\t\t__db_err(dbenv, ") >> CFILE
+ printf("clnt_sperror(cl, \"Berkeley DB\"));\n") >> CFILE
+ } else {
+ printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE
+ printf("clnt_sperror(cl, \"Berkeley DB\"));\n") >> CFILE
+ }
+ printf("\t\tret = DB_NOSERVER;\n") >> CFILE
+ printf("\t\tgoto out;\n") >> CFILE
+ printf("\t}\n") >> CFILE
+
+ if (ret_code == 0) {
+ printf("\tret = replyp->status;\n") >> CFILE
+ } else {
+ printf("\tret = __dbcl_%s_ret(", name) >> CFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> CFILE
+ sep = ", ";
+ }
+ printf("%sreplyp);\n", sep) >> CFILE
+ }
+ printf("out:\n") >> CFILE
+ #
+ # Free reply if there was one.
+ #
+ printf("\tif (replyp != NULL)\n") >> CFILE
+ printf("\t\txdr_free((xdrproc_t)xdr___%s_reply,",name) >> CFILE
+ printf(" (void *)replyp);\n") >> CFILE
+ printf("\treturn (ret);\n") >> CFILE
+ printf("}\n\n") >> CFILE
+
+ #
+ # Generate Client Template code
+ #
+ if (ret_code) {
+ #
+ # If we are doing a list, write prototypes
+ #
+ pi = 1;
+ p[pi++] = sprintf("int __dbcl_%s_ret __P((", name);
+ p[pi++] = "";
+ for (i = 0; i < nvars; ++i) {
+ p[pi++] = pr_type[i];
+ p[pi++] = ", ";
+ }
+ p[pi++] = sprintf("__%s_reply *));", name);
+ p[pi++] = "";
+ proto_format(p, 0, TFILE);
+
+ printf("int\n") >> TFILE
+ printf("__dbcl_%s_ret(", name) >> TFILE
+ sep = "";
+ for (i = 0; i < nvars; ++i) {
+ printf("%s%s", sep, args[i]) >> TFILE
+ sep = ", ";
+ }
+ printf("%sreplyp)\n",sep) >> TFILE
+
+ for (i = 0; i < nvars; ++i)
+ if (func_arg[i] == 0)
+ printf("\t%s %s;\n", c_type[i], args[i]) \
+ >> TFILE
+ else
+ printf("\t%s;\n", c_type[i]) >> TFILE
+ printf("\t__%s_reply *replyp;\n", name) >> TFILE;
+ printf("{\n") >> TFILE
+ printf("\tint ret;\n") >> TFILE
+ #
+ # Local vars in template
+ #
+ for (i = 0; i < rvars; ++i) {
+ if (ret_type[i] == "ID" || ret_type[i] == "STRING" ||
+ ret_type[i] == "INT" || ret_type[i] == "DBL") {
+ printf("\t%s %s;\n", \
+ retc_type[i], retargs[i]) >> TFILE
+ } else if (ret_type[i] == "LIST") {
+ if (retlist_type[i] == "GID")
+ printf("\tu_int8_t *__db_%s;\n", \
+ retargs[i]) >> TFILE
+ if (retlist_type[i] == "ID" ||
+ retlist_type[i] == "INT")
+ printf("\tu_int32_t *__db_%s;\n", \
+ retargs[i]) >> TFILE
+ } else {
+ printf("\t/* %s %s; */\n", \
+ ret_type[i], retargs[i]) >> TFILE
+ }
+ }
+ #
+ # Client return code
+ #
+ printf("\n") >> TFILE
+ printf("\tif (replyp->status != 0)\n") >> TFILE
+ printf("\t\treturn (replyp->status);\n") >> TFILE
+ for (i = 0; i < rvars; ++i) {
+ varname = "";
+ if (ret_type[i] == "ID") {
+ varname = sprintf("%scl_id", retargs[i]);
+ }
+ if (ret_type[i] == "STRING") {
+ varname = retargs[i];
+ }
+ if (ret_type[i] == "INT" || ret_type[i] == "DBL") {
+ varname = retargs[i];
+ }
+ if (ret_type[i] == "DBT") {
+ varname = sprintf("%sdata", retargs[i]);
+ }
+ if (ret_type[i] == "ID" || ret_type[i] == "STRING" ||
+ ret_type[i] == "INT" || ret_type[i] == "DBL") {
+ printf("\t%s = replyp->%s;\n", \
+ retargs[i], varname) >> TFILE
+ } else if (ret_type[i] == "LIST") {
+ printf("\n\t/*\n") >> TFILE
+ printf("\t * XXX Handle list\n") >> TFILE
+ printf("\t */\n\n") >> TFILE
+ } else {
+ printf("\t/* Handle replyp->%s; */\n", \
+ varname) >> TFILE
+ }
+ }
+ printf("\n\t/*\n\t * XXX Code goes here\n\t */\n\n") >> TFILE
+ printf("\treturn (replyp->status);\n") >> TFILE
+ printf("}\n\n") >> TFILE
+ }
+}
+
+#
+# split_lines --
+# Add line separators to pretty-print the output.
+#
+# Operates on globals: "argcount" counts arguments emitted on the
+# current output line and "sep" holds the pending separator text.
+# Once more than three arguments have accumulated, the counter is
+# reset and a line break plus two-tab continuation indent is written
+# to PFILE and, in backslash-escaped form, to SEDFILE (presumably the
+# prototype and sed-script outputs set up earlier in this generator —
+# confirm against the script's initialization code).
+function split_lines() {
+ if (argcount > 3) {
+ # Reset the counter, remove any trailing whitespace from
+ # the separator.
+ argcount = 0;
+ sub("[ ]$", "", sep)
+
+ # The SEDFILE copy escapes the newline and tabs with backslashes
+ # so the text survives being embedded in a sed script.
+ printf("%s\n\t\t", sep) >> PFILE
+ printf("%s\\\n\\\t\\\t", sep) >> SEDFILE
+ }
+}
+
+# proto_format --
+# Pretty-print a function prototype.
+#
+# p:       1-indexed array of prototype text fragments, concatenated
+#          in order.  NOTE(review): the array is clobbered by the
+#          internal split() below and deleted on exit, so the caller's
+#          copy is destroyed — callers must not reuse it.
+# sedfile: nonzero when the comment block is destined for a sed
+#          script, in which case newlines get backslash escapes.
+# OUTPUT:  file the "/* PUBLIC: ... */" comment block is appended to.
+function proto_format(p, sedfile, OUTPUT)
+{
+ if (sedfile)
+ printf("/*\\\n") >> OUTPUT;
+ else
+ printf("/*\n") >> OUTPUT;
+
+ # Concatenate the prototype fragments into one string.
+ s = "";
+ for (i = 1; i in p; ++i)
+ s = s p[i];
+
+ if (sedfile)
+ t = "\\ * PUBLIC: "
+ else
+ t = " * PUBLIC: "
+ # Short prototypes fit on a single line.
+ # NOTE(review): the two branches below are identical; the sedfile
+ # escaping only matters on the multi-line path.
+ if (length(s) + length(t) < 80)
+ if (sedfile)
+ printf("%s%s", t, s) >> OUTPUT;
+ else
+ printf("%s%s", t, s) >> OUTPUT;
+ else {
+ # Too long: break at the "__P" argument-list marker, then
+ # wrap the comma-separated argument list at column 75.
+ split(s, p, "__P");
+ len = length(t) + length(p[1]);
+ printf("%s%s", t, p[1]) >> OUTPUT
+
+ n = split(p[2], comma, ",");
+ # Restore the "__P" consumed by the split above.
+ comma[1] = "__P" comma[1];
+ for (i = 1; i <= n; i++) {
+ if (len + length(comma[i]) > 75) {
+ if (sedfile)
+ printf(\
+ "\\\n\\ * PUBLIC: ") >> OUTPUT;
+ else
+ printf("\n * PUBLIC: ") >> OUTPUT;
+ len = 0;
+ }
+ printf("%s%s", comma[i], i == n ? "" : ",") >> OUTPUT;
+ len += length(comma[i]);
+ }
+ }
+ if (sedfile)
+ printf("\\\n\\ */\\\n") >> OUTPUT;
+ else
+ printf("\n */\n") >> OUTPUT;
+ # The fragment array is not needed again; free it.
+ delete p;
+}
diff --git a/storage/bdb/dist/install-sh b/storage/bdb/dist/install-sh
new file mode 100755
index 00000000000..b41a2459161
--- /dev/null
+++ b/storage/bdb/dist/install-sh
@@ -0,0 +1,251 @@
+#!/bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5 (mit/util/scripts/install.sh).
+#
+# Copyright 1991 by the Massachusetts Institute of Technology
+#
+# Permission to use, copy, modify, distribute, and sell this software and its
+# documentation for any purpose is hereby granted without fee, provided that
+# the above copyright notice appear in all copies and that both that
+# copyright notice and this permission notice appear in supporting
+# documentation, and that the name of M.I.T. not be used in advertising or
+# publicity pertaining to distribution of the software without specific,
+# written prior permission. M.I.T. makes no representations about the
+# suitability of this software for any purpose. It is provided "as is"
+# without express or implied warranty.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch. It can only install one file at a time, a restriction
+# shared with many OS's install programs.
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+# Per-invocation state, overridden by the command-line options parsed
+# below.  Default action: move the source into place with mode 0755,
+# no chown/chgrp/strip.
+# NOTE(review): "transform_arg" is never read anywhere in this script;
+# the option parser assigns "transformarg" (no underscore) instead, so
+# this initializer is dead.
+transformbasename=""
+transform_arg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+# Parse the command line:
+#   -c       copy rather than move the source into place
+#   -d       treat the single argument as a directory to create
+#   -m MODE  chmod mode; -o OWNER chown owner; -g GROUP chgrp group
+#   -s       strip the installed binary
+#   -t=ARG   sed program applied to the destination basename
+#   -b=ARG   suffix stripped before / re-appended after the transform
+# The first non-option argument is the source, the second the target.
+while [ x"$1" != x ]; do
+ case $1 in
+ -c) instcmd="$cpprog"
+ shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -m) chmodcmd="$chmodprog $2"
+ shift
+ shift
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd="$stripprog"
+ shift
+ continue;;
+
+ -t=*) transformarg=`echo $1 | sed 's/-t=//'`
+ shift
+ continue;;
+
+ -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+ shift
+ continue;;
+
+ *) if [ x"$src" = x ]
+ then
+ src=$1
+ else
+ # this colon is to work around a 386BSD /bin/sh bug
+ :
+ dst=$1
+ fi
+ shift
+ continue;;
+ esac
+done
+
+# A source argument is mandatory in both file and -d modes.
+if [ x"$src" = x ]
+then
+ echo "install: no input file specified"
+ exit 1
+else
+ true
+fi
+
+# -d mode: the single argument names a directory to create.  If it
+# already exists there is nothing to install and no mode to set.
+if [ x"$dir_arg" != x ]; then
+ dst=$src
+ src=""
+
+ if [ -d $dst ]; then
+ instcmd=:
+ chmodcmd=""
+ else
+ instcmd=$mkdirprog
+ fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad
+# if $src (and thus $dsttmp) contains '*'.
+
+ if [ -f $src -o -d $src ]
+ then
+ true
+ else
+ echo "install: $src does not exist"
+ exit 1
+ fi
+
+ if [ x"$dst" = x ]
+ then
+ echo "install: no destination specified"
+ exit 1
+ else
+ true
+ fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic
+
+ if [ -d $dst ]
+ then
+ dst="$dst"/`basename $src`
+ else
+ true
+ fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
+
+# Make sure that the destination directory exists.
+# this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='
+ '
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+# Split $dstdir into path components: '/' is first rewritten to '%'
+# so '%' can serve as the field separator, with a leading '/' restored
+# afterwards so absolute paths keep their root.
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+# mkdir each successive prefix of the path ("mkdir -p" emulation for
+# systems whose mkdir lacks -p).
+while [ $# -ne 0 ] ; do
+ pathcomp="${pathcomp}${1}"
+ shift
+
+ if [ ! -d "${pathcomp}" ] ;
+ then
+ $mkdirprog "${pathcomp}"
+ else
+ true
+ fi
+
+ pathcomp="${pathcomp}/"
+done
+fi
+
+# Perform the installation.  In -d mode just create the directory and
+# apply ownership/mode to it; otherwise install through a temp file in
+# the destination directory so the final rm+mv replaces the target in
+# one step rather than leaving a partially-written file visible.
+if [ x"$dir_arg" != x ]
+then
+ $doit $instcmd $dst &&
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
+else
+
+# If we're going to rename the final executable, determine the name now.
+
+ if [ x"$transformarg" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ dstfile=`basename $dst $transformbasename |
+ sed $transformarg`$transformbasename
+ fi
+
+# don't allow the sed command to completely eliminate the filename
+
+ if [ x"$dstfile" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ true
+ fi
+
+# Make a temp file name in the proper directory.
+
+ dsttmp=$dstdir/#inst.$$#
+
+# Move or copy the file name to the temp name
+
+ $doit $instcmd $src $dsttmp &&
+
+ trap "rm -f ${dsttmp}" 0 &&
+
+# and set any options; do chmod last to preserve setuid bits
+
+# If any of these fail, we abort the whole thing. If we want to
+# ignore errors from any of these, just make sure not to ignore
+# errors from the above "$doit $instcmd $src $dsttmp" command.
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
+
+# Now rename the file to the real destination.
+# NOTE(review): $rmcmd already contains -f, so -f appears twice on the
+# rm line below (harmless).
+
+ $doit $rmcmd -f $dstdir/$dstfile &&
+ $doit $mvcmd $dsttmp $dstdir/$dstfile
+
+fi &&
+
+
+exit 0
diff --git a/storage/bdb/dist/ltmain.sh b/storage/bdb/dist/ltmain.sh
new file mode 100644
index 00000000000..f07d424527d
--- /dev/null
+++ b/storage/bdb/dist/ltmain.sh
@@ -0,0 +1,4999 @@
+# ltmain.sh - Provide generalized library-building support services.
+# NOTE: Changing this file will not affect anything until you rerun configure.
+#
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001
+# Free Software Foundation, Inc.
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Check that we have a working $echo.
+if test "X$1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X$1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
+ # Yippee, $echo works!
+ :
+else
+ # Restart under the correct shell, and then maybe $echo will work.
+ exec $SHELL "$0" --no-reexec ${1+"$@"}
+fi
+
+if test "X$1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+$*
+EOF
+ exit 0
+fi
+
+# The name of this program.
+progname=`$echo "$0" | sed 's%^.*/%%'`
+modename="$progname"
+
+# Constants.
+PROGRAM=ltmain.sh
+PACKAGE=libtool
+VERSION=1.4.2
+TIMESTAMP=" (1.922.2.53 2001/09/11 03:18:52)"
+
+default_mode=
+help="Try \`$progname --help' for more information."
+magic="%%%MAGIC variable%%%"
+mkdir="mkdir"
+mv="mv -f"
+rm="rm -f"
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e 1s/^X//'
+sed_quote_subst='s/\([\\`\\"$\\\\]\)/\\\1/g'
+SP2NL='tr \040 \012'
+NL2SP='tr \015\012 \040\040'
+
+# NLS nuisances.
+# Only set LANG and LC_ALL to C if already set.
+# These must not be set unconditionally because not all systems understand
+# e.g. LANG=C (notably SCO).
+# We save the old values to restore during execute mode.
+if test "${LC_ALL+set}" = set; then
+ save_LC_ALL="$LC_ALL"; LC_ALL=C; export LC_ALL
+fi
+if test "${LANG+set}" = set; then
+ save_LANG="$LANG"; LANG=C; export LANG
+fi
+
+# Make sure IFS has a sensible default
+: ${IFS=" "}
+
+if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
+ echo "$modename: not configured to build any kind of library" 1>&2
+ echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
+ exit 1
+fi
+
+# Global variables.
+mode=$default_mode
+nonopt=
+prev=
+prevopt=
+run=
+show="$echo"
+show_help=
+execute_dlfiles=
+lo2o="s/\\.lo\$/.${objext}/"
+o2lo="s/\\.${objext}\$/.lo/"
+
+# Parse our command line options once, thoroughly.
+while test $# -gt 0
+do
+ arg="$1"
+ shift
+
+ case $arg in
+ -*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;;
+ *) optarg= ;;
+ esac
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$prev"; then
+ case $prev in
+ execute_dlfiles)
+ execute_dlfiles="$execute_dlfiles $arg"
+ ;;
+ *)
+ eval "$prev=\$arg"
+ ;;
+ esac
+
+ prev=
+ prevopt=
+ continue
+ fi
+
+ # Have we seen a non-optional argument yet?
+ case $arg in
+ --help)
+ show_help=yes
+ ;;
+
+ --version)
+ echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"
+ exit 0
+ ;;
+
+ --config)
+ sed -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $0
+ exit 0
+ ;;
+
+ --debug)
+ echo "$progname: enabling shell trace mode"
+ set -x
+ ;;
+
+ --dry-run | -n)
+ run=:
+ ;;
+
+ --features)
+ echo "host: $host"
+ if test "$build_libtool_libs" = yes; then
+ echo "enable shared libraries"
+ else
+ echo "disable shared libraries"
+ fi
+ if test "$build_old_libs" = yes; then
+ echo "enable static libraries"
+ else
+ echo "disable static libraries"
+ fi
+ exit 0
+ ;;
+
+ --finish) mode="finish" ;;
+
+ --mode) prevopt="--mode" prev=mode ;;
+ --mode=*) mode="$optarg" ;;
+
+ --quiet | --silent)
+ show=:
+ ;;
+
+ -dlopen)
+ prevopt="-dlopen"
+ prev=execute_dlfiles
+ ;;
+
+ -*)
+ $echo "$modename: unrecognized option \`$arg'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+
+ *)
+ nonopt="$arg"
+ break
+ ;;
+ esac
+done
+
+if test -n "$prevopt"; then
+ $echo "$modename: option \`$prevopt' requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+fi
+
+# If this variable is set in any of the actions, the command in it
+# will be execed at the end. This prevents here-documents from being
+# left over by shells.
+exec_cmd=
+
+if test -z "$show_help"; then
+
+ # Infer the operation mode.
+ if test -z "$mode"; then
+ case $nonopt in
+ *cc | *++ | gcc* | *-gcc*)
+ mode=link
+ for arg
+ do
+ case $arg in
+ -c)
+ mode=compile
+ break
+ ;;
+ esac
+ done
+ ;;
+ *db | *dbx | *strace | *truss)
+ mode=execute
+ ;;
+ *install*|cp|mv)
+ mode=install
+ ;;
+ *rm)
+ mode=uninstall
+ ;;
+ *)
+ # If we have no mode, but dlfiles were specified, then do execute mode.
+ test -n "$execute_dlfiles" && mode=execute
+
+ # Just use the default operation mode.
+ if test -z "$mode"; then
+ if test -n "$nonopt"; then
+ $echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2
+ else
+ $echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2
+ fi
+ fi
+ ;;
+ esac
+ fi
+
+ # Only execute mode is allowed to have -dlopen flags.
+ if test -n "$execute_dlfiles" && test "$mode" != execute; then
+ $echo "$modename: unrecognized option \`-dlopen'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Change the help message to a mode-specific one.
+ generic_help="$help"
+ help="Try \`$modename --help --mode=$mode' for more information."
+
+ # These modes are in order of execution frequency so that they run quickly.
+ case $mode in
+ # libtool compile mode
+ compile)
+ modename="$modename: compile"
+ # Get the compilation command and the source file.
+ base_compile=
+ prev=
+ lastarg=
+ srcfile="$nonopt"
+ suppress_output=
+
+ user_target=no
+ for arg
+ do
+ case $prev in
+ "") ;;
+ xcompiler)
+ # Aesthetically quote the previous argument.
+ prev=
+ lastarg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+
+ case $arg in
+ # Double-quote args containing other shell metacharacters.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+
+ # Add the previous argument to base_compile.
+ if test -z "$base_compile"; then
+ base_compile="$lastarg"
+ else
+ base_compile="$base_compile $lastarg"
+ fi
+ continue
+ ;;
+ esac
+
+ # Accept any command-line options.
+ case $arg in
+ -o)
+ if test "$user_target" != "no"; then
+ $echo "$modename: you cannot specify \`-o' more than once" 1>&2
+ exit 1
+ fi
+ user_target=next
+ ;;
+
+ -static)
+ build_old_libs=yes
+ continue
+ ;;
+
+ -prefer-pic)
+ pic_mode=yes
+ continue
+ ;;
+
+ -prefer-non-pic)
+ pic_mode=no
+ continue
+ ;;
+
+ -Xcompiler)
+ prev=xcompiler
+ continue
+ ;;
+
+ -Wc,*)
+ args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"`
+ lastarg=
+ save_ifs="$IFS"; IFS=','
+ for arg in $args; do
+ IFS="$save_ifs"
+
+ # Double-quote args containing other shell metacharacters.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+ lastarg="$lastarg $arg"
+ done
+ IFS="$save_ifs"
+ lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"`
+
+ # Add the arguments to base_compile.
+ if test -z "$base_compile"; then
+ base_compile="$lastarg"
+ else
+ base_compile="$base_compile $lastarg"
+ fi
+ continue
+ ;;
+ esac
+
+ case $user_target in
+ next)
+ # The next one is the -o target name
+ user_target=yes
+ continue
+ ;;
+ yes)
+ # We got the output file
+ user_target=set
+ libobj="$arg"
+ continue
+ ;;
+ esac
+
+ # Accept the current argument as the source file.
+ lastarg="$srcfile"
+ srcfile="$arg"
+
+ # Aesthetically quote the previous argument.
+
+ # Backslashify any backslashes, double quotes, and dollar signs.
+ # These are the only characters that are still specially
+ # interpreted inside of double-quoted strings.
+ lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"`
+
+ # Double-quote args containing other shell metacharacters.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ case $lastarg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ lastarg="\"$lastarg\""
+ ;;
+ esac
+
+ # Add the previous argument to base_compile.
+ if test -z "$base_compile"; then
+ base_compile="$lastarg"
+ else
+ base_compile="$base_compile $lastarg"
+ fi
+ done
+
+ case $user_target in
+ set)
+ ;;
+ no)
+ # Get the name of the library object.
+ libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'`
+ ;;
+ *)
+ $echo "$modename: you must specify a target with \`-o'" 1>&2
+ exit 1
+ ;;
+ esac
+
+ # Recognize several different file suffixes.
+ # If the user specifies -o file.o, it is replaced with file.lo
+ xform='[cCFSfmso]'
+ case $libobj in
+ *.ada) xform=ada ;;
+ *.adb) xform=adb ;;
+ *.ads) xform=ads ;;
+ *.asm) xform=asm ;;
+ *.c++) xform=c++ ;;
+ *.cc) xform=cc ;;
+ *.cpp) xform=cpp ;;
+ *.cxx) xform=cxx ;;
+ *.f90) xform=f90 ;;
+ *.for) xform=for ;;
+ esac
+
+ libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"`
+
+ case $libobj in
+ *.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;;
+ *)
+ $echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2
+ exit 1
+ ;;
+ esac
+
+ if test -z "$base_compile"; then
+ $echo "$modename: you must specify a compilation command" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Delete any leftover library objects.
+ if test "$build_old_libs" = yes; then
+ removelist="$obj $libobj"
+ else
+ removelist="$libobj"
+ fi
+
+ $run $rm $removelist
+ trap "$run $rm $removelist; exit 1" 1 2 15
+
+ # On Cygwin there's no "real" PIC flag so we must build both object types
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2*)
+ pic_mode=default
+ ;;
+ esac
+ if test $pic_mode = no && test "$deplibs_check_method" != pass_all; then
+ # non-PIC code in shared libraries is not supported
+ pic_mode=default
+ fi
+
+ # Calculate the filename of the output object if compiler does
+ # not support -o with -c
+ if test "$compiler_c_o" = no; then
+ output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
+ lockfile="$output_obj.lock"
+ removelist="$removelist $output_obj $lockfile"
+ trap "$run $rm $removelist; exit 1" 1 2 15
+ else
+ need_locks=no
+ lockfile=
+ fi
+
+ # Lock this critical section if it is needed
+ # We use this script file to make the link, it avoids creating a new file
+ if test "$need_locks" = yes; then
+ until $run ln "$0" "$lockfile" 2>/dev/null; do
+ $show "Waiting for $lockfile to be removed"
+ sleep 2
+ done
+ elif test "$need_locks" = warn; then
+ if test -f "$lockfile"; then
+ echo "\
+*** ERROR, $lockfile exists and contains:
+`cat $lockfile 2>/dev/null`
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) in this platform, or get a better
+compiler."
+
+ $run $rm $removelist
+ exit 1
+ fi
+ echo $srcfile > "$lockfile"
+ fi
+
+ if test -n "$fix_srcfile_path"; then
+ eval srcfile=\"$fix_srcfile_path\"
+ fi
+
+ # Only build a PIC object if we are building libtool libraries.
+ if test "$build_libtool_libs" = yes; then
+ # Without this assignment, base_compile gets emptied.
+ fbsd_hideous_sh_bug=$base_compile
+
+ if test "$pic_mode" != no; then
+ # All platforms use -DPIC, to notify preprocessed assembler code.
+ command="$base_compile $srcfile $pic_flag -DPIC"
+ else
+ # Don't build PIC code
+ command="$base_compile $srcfile"
+ fi
+ if test "$build_old_libs" = yes; then
+ lo_libobj="$libobj"
+ dir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$dir" = "X$libobj"; then
+ dir="$objdir"
+ else
+ dir="$dir/$objdir"
+ fi
+ libobj="$dir/"`$echo "X$libobj" | $Xsed -e 's%^.*/%%'`
+
+ if test -d "$dir"; then
+ $show "$rm $libobj"
+ $run $rm $libobj
+ else
+ $show "$mkdir $dir"
+ $run $mkdir $dir
+ status=$?
+ if test $status -ne 0 && test ! -d $dir; then
+ exit $status
+ fi
+ fi
+ fi
+ if test "$compiler_o_lo" = yes; then
+ output_obj="$libobj"
+ command="$command -o $output_obj"
+ elif test "$compiler_c_o" = yes; then
+ output_obj="$obj"
+ command="$command -o $output_obj"
+ fi
+
+ $run $rm "$output_obj"
+ $show "$command"
+ if $run eval "$command"; then :
+ else
+ test -n "$output_obj" && $run $rm $removelist
+ exit 1
+ fi
+
+ if test "$need_locks" = warn &&
+ test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
+ echo "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) in this platform, or get a better
+compiler."
+
+ $run $rm $removelist
+ exit 1
+ fi
+
+ # Just move the object if needed, then go on to compile the next one
+ if test x"$output_obj" != x"$libobj"; then
+ $show "$mv $output_obj $libobj"
+ if $run $mv $output_obj $libobj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+
+ # If we have no pic_flag, then copy the object into place and finish.
+ if (test -z "$pic_flag" || test "$pic_mode" != default) &&
+ test "$build_old_libs" = yes; then
+ # Rename the .lo from within objdir to obj
+ if test -f $obj; then
+ $show $rm $obj
+ $run $rm $obj
+ fi
+
+ $show "$mv $libobj $obj"
+ if $run $mv $libobj $obj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+
+ xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$obj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$obj" | $Xsed -e "s%.*/%%"`
+ libobj=`$echo "X$baseobj" | $Xsed -e "$o2lo"`
+ # Now arrange that obj and lo_libobj become the same file
+ $show "(cd $xdir && $LN_S $baseobj $libobj)"
+ if $run eval '(cd $xdir && $LN_S $baseobj $libobj)'; then
+ # Unlock the critical section if it was locked
+ if test "$need_locks" != no; then
+ $run $rm "$lockfile"
+ fi
+ exit 0
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+
+ # Allow error messages only from the first compilation.
+ suppress_output=' >/dev/null 2>&1'
+ fi
+
+ # Only build a position-dependent object if we build old libraries.
+ if test "$build_old_libs" = yes; then
+ if test "$pic_mode" != yes; then
+ # Don't build PIC code
+ command="$base_compile $srcfile"
+ else
+ # All platforms use -DPIC, to notify preprocessed assembler code.
+ command="$base_compile $srcfile $pic_flag -DPIC"
+ fi
+ if test "$compiler_c_o" = yes; then
+ command="$command -o $obj"
+ output_obj="$obj"
+ fi
+
+ # Suppress compiler output if we already did a PIC compilation.
+ command="$command$suppress_output"
+ $run $rm "$output_obj"
+ $show "$command"
+ if $run eval "$command"; then :
+ else
+ $run $rm $removelist
+ exit 1
+ fi
+
+ if test "$need_locks" = warn &&
+ test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
+ echo "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) in this platform, or get a better
+compiler."
+
+ $run $rm $removelist
+ exit 1
+ fi
+
+ # Just move the object if needed
+ if test x"$output_obj" != x"$obj"; then
+ $show "$mv $output_obj $obj"
+ if $run $mv $output_obj $obj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+
+ # Create an invalid libtool object if no PIC, so that we do not
+ # accidentally link it into a program.
+ if test "$build_libtool_libs" != yes; then
+ $show "echo timestamp > $libobj"
+ $run eval "echo timestamp > \$libobj" || exit $?
+ else
+ # Move the .lo from within objdir
+ $show "$mv $libobj $lo_libobj"
+ if $run $mv $libobj $lo_libobj; then :
+ else
+ error=$?
+ $run $rm $removelist
+ exit $error
+ fi
+ fi
+ fi
+
+ # Unlock the critical section if it was locked
+ if test "$need_locks" != no; then
+ $run $rm "$lockfile"
+ fi
+
+ exit 0
+ ;;
+
+ # libtool link mode
+ link | relink)
+ modename="$modename: link"
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ # It is impossible to link a dll without this setting, and
+ # we shouldn't force the makefile maintainer to figure out
+ # which system we are compiling for in order to pass an extra
+ # flag for every libtool invocation.
+ # allow_undefined=no
+
+ # FIXME: Unfortunately, there are problems with the above when trying
+ # to make a dll which has undefined symbols, in which case not
+ # even a static library is built. For now, we need to specify
+ # -no-undefined on the libtool link line when we can be certain
+ # that all symbols are satisfied, otherwise we get a static library.
+ allow_undefined=yes
+ ;;
+ *)
+ allow_undefined=yes
+ ;;
+ esac
+ libtool_args="$nonopt"
+ compile_command="$nonopt"
+ finalize_command="$nonopt"
+
+ compile_rpath=
+ finalize_rpath=
+ compile_shlibpath=
+ finalize_shlibpath=
+ convenience=
+ old_convenience=
+ deplibs=
+ old_deplibs=
+ compiler_flags=
+ linker_flags=
+ dllsearchpath=
+ lib_search_path=`pwd`
+
+ avoid_version=no
+ dlfiles=
+ dlprefiles=
+ dlself=no
+ export_dynamic=no
+ export_symbols=
+ export_symbols_regex=
+ generated=
+ libobjs=
+ ltlibs=
+ module=no
+ no_install=no
+ objs=
+ prefer_static_libs=no
+ preload=no
+ prev=
+ prevarg=
+ release=
+ rpath=
+ xrpath=
+ perm_rpath=
+ temp_rpath=
+ thread_safe=no
+ vinfo=
+
+ # We need to know -static, to get the right output filenames.
+ for arg
+ do
+ case $arg in
+ -all-static | -static)
+ if test "X$arg" = "X-all-static"; then
+ if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
+ $echo "$modename: warning: complete static linking is impossible in this configuration" 1>&2
+ fi
+ if test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ else
+ if test -z "$pic_flag" && test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ fi
+ build_libtool_libs=no
+ build_old_libs=yes
+ prefer_static_libs=yes
+ break
+ ;;
+ esac
+ done
+
+ # See if our shared archives depend on static archives.
+ test -n "$old_archive_from_new_cmds" && build_old_libs=yes
+
+ # Go through the arguments, transforming them on the way.
+ while test $# -gt 0; do
+ arg="$1"
+ shift
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test
+ ;;
+ *) qarg=$arg ;;
+ esac
+ libtool_args="$libtool_args $qarg"
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$prev"; then
+ case $prev in
+ output)
+ compile_command="$compile_command @OUTPUT@"
+ finalize_command="$finalize_command @OUTPUT@"
+ ;;
+ esac
+
+ case $prev in
+ dlfiles|dlprefiles)
+ if test "$preload" = no; then
+ # Add the symbol object into the linking commands.
+ compile_command="$compile_command @SYMFILE@"
+ finalize_command="$finalize_command @SYMFILE@"
+ preload=yes
+ fi
+ case $arg in
+ *.la | *.lo) ;; # We handle these cases below.
+ force)
+ if test "$dlself" = no; then
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ self)
+ if test "$prev" = dlprefiles; then
+ dlself=yes
+ elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
+ dlself=yes
+ else
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ *)
+ if test "$prev" = dlfiles; then
+ dlfiles="$dlfiles $arg"
+ else
+ dlprefiles="$dlprefiles $arg"
+ fi
+ prev=
+ continue
+ ;;
+ esac
+ ;;
+ expsyms)
+ export_symbols="$arg"
+ if test ! -f "$arg"; then
+ $echo "$modename: symbol file \`$arg' does not exist"
+ exit 1
+ fi
+ prev=
+ continue
+ ;;
+ expsyms_regex)
+ export_symbols_regex="$arg"
+ prev=
+ continue
+ ;;
+ release)
+ release="-$arg"
+ prev=
+ continue
+ ;;
+ rpath | xrpath)
+ # We need an absolute path.
+ case $arg in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ $echo "$modename: only absolute run-paths are allowed" 1>&2
+ exit 1
+ ;;
+ esac
+ if test "$prev" = rpath; then
+ case "$rpath " in
+ *" $arg "*) ;;
+ *) rpath="$rpath $arg" ;;
+ esac
+ else
+ case "$xrpath " in
+ *" $arg "*) ;;
+ *) xrpath="$xrpath $arg" ;;
+ esac
+ fi
+ prev=
+ continue
+ ;;
+ xcompiler)
+ compiler_flags="$compiler_flags $qarg"
+ prev=
+ compile_command="$compile_command $qarg"
+ finalize_command="$finalize_command $qarg"
+ continue
+ ;;
+ xlinker)
+ linker_flags="$linker_flags $qarg"
+ compiler_flags="$compiler_flags $wl$qarg"
+ prev=
+ compile_command="$compile_command $wl$qarg"
+ finalize_command="$finalize_command $wl$qarg"
+ continue
+ ;;
+ *)
+ eval "$prev=\"\$arg\""
+ prev=
+ continue
+ ;;
+ esac
+ fi # test -n $prev
+
+ prevarg="$arg"
+
+ case $arg in
+ -all-static)
+ if test -n "$link_static_flag"; then
+ compile_command="$compile_command $link_static_flag"
+ finalize_command="$finalize_command $link_static_flag"
+ fi
+ continue
+ ;;
+
+ -allow-undefined)
+ # FIXME: remove this flag sometime in the future.
+ $echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2
+ continue
+ ;;
+
+ -avoid-version)
+ avoid_version=yes
+ continue
+ ;;
+
+ -dlopen)
+ prev=dlfiles
+ continue
+ ;;
+
+ -dlpreopen)
+ prev=dlprefiles
+ continue
+ ;;
+
+ -export-dynamic)
+ export_dynamic=yes
+ continue
+ ;;
+
+ -export-symbols | -export-symbols-regex)
+ if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+ $echo "$modename: more than one -exported-symbols argument is not allowed"
+ exit 1
+ fi
+ if test "X$arg" = "X-export-symbols"; then
+ prev=expsyms
+ else
+ prev=expsyms_regex
+ fi
+ continue
+ ;;
+
+ # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
+ # so, if we see these flags be careful not to treat them like -L
+ -L[A-Z][A-Z]*:*)
+ case $with_gcc/$host in
+ no/*-*-irix*)
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ ;;
+ esac
+ continue
+ ;;
+
+ -L*)
+ dir=`$echo "X$arg" | $Xsed -e 's/^-L//'`
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ if test -z "$absdir"; then
+ $echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2
+ exit 1
+ fi
+ dir="$absdir"
+ ;;
+ esac
+ case "$deplibs " in
+ *" -L$dir "*) ;;
+ *)
+ deplibs="$deplibs -L$dir"
+ lib_search_path="$lib_search_path $dir"
+ ;;
+ esac
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ case :$dllsearchpath: in
+ *":$dir:"*) ;;
+ *) dllsearchpath="$dllsearchpath:$dir";;
+ esac
+ ;;
+ esac
+ continue
+ ;;
+
+ -l*)
+ if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
+ case $host in
+ *-*-cygwin* | *-*-pw32* | *-*-beos*)
+ # These systems don't actually have a C or math library (as such)
+ continue
+ ;;
+ *-*-mingw* | *-*-os2*)
+ # These systems don't actually have a C library (as such)
+ test "X$arg" = "X-lc" && continue
+ ;;
+ *-*-openbsd*)
+ # Do not include libc due to us having libc/libc_r.
+ test "X$arg" = "X-lc" && continue
+ ;;
+ esac
+ elif test "X$arg" = "X-lc_r"; then
+ case $host in
+ *-*-openbsd*)
+ # Do not include libc_r directly, use -pthread flag.
+ continue
+ ;;
+ esac
+ fi
+ deplibs="$deplibs $arg"
+ continue
+ ;;
+
+ -module)
+ module=yes
+ continue
+ ;;
+
+ #### Local change for Sleepycat's Berkeley DB [#6117]:
+ -jnimodule)
+ module=yes
+ jnimodule=yes
+ continue
+ ;;
+
+ -no-fast-install)
+ fast_install=no
+ continue
+ ;;
+
+ -no-install)
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ # The PATH hackery in wrapper scripts is required on Windows
+ # in order for the loader to find any dlls it needs.
+ $echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2
+ $echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2
+ fast_install=no
+ ;;
+ *) no_install=yes ;;
+ esac
+ continue
+ ;;
+
+ -no-undefined)
+ allow_undefined=no
+ continue
+ ;;
+
+ -o) prev=output ;;
+
+ -release)
+ prev=release
+ continue
+ ;;
+
+ -rpath)
+ prev=rpath
+ continue
+ ;;
+
+ -R)
+ prev=xrpath
+ continue
+ ;;
+
+ -R*)
+ dir=`$echo "X$arg" | $Xsed -e 's/^-R//'`
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ $echo "$modename: only absolute run-paths are allowed" 1>&2
+ exit 1
+ ;;
+ esac
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) xrpath="$xrpath $dir" ;;
+ esac
+ continue
+ ;;
+
+ -static)
+ # The effects of -static are defined in a previous loop.
+ # We used to do the same as -all-static on platforms that
+ # didn't have a PIC flag, but the assumption that the effects
+ # would be equivalent was wrong. It would break on at least
+ # Digital Unix and AIX.
+ continue
+ ;;
+
+ -thread-safe)
+ thread_safe=yes
+ continue
+ ;;
+
+ -version-info)
+ prev=vinfo
+ continue
+ ;;
+
+ -Wc,*)
+ args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'`
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ case $flag in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ flag="\"$flag\""
+ ;;
+ esac
+ arg="$arg $wl$flag"
+ compiler_flags="$compiler_flags $flag"
+ done
+ IFS="$save_ifs"
+ arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
+ ;;
+
+ -Wl,*)
+ args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'`
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ case $flag in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ flag="\"$flag\""
+ ;;
+ esac
+ arg="$arg $wl$flag"
+ compiler_flags="$compiler_flags $wl$flag"
+ linker_flags="$linker_flags $flag"
+ done
+ IFS="$save_ifs"
+ arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
+ ;;
+
+ -Xcompiler)
+ prev=xcompiler
+ continue
+ ;;
+
+ -Xlinker)
+ prev=xlinker
+ continue
+ ;;
+
+ # Some other compiler flag.
+ -* | +*)
+ # Unknown arguments in both finalize_command and compile_command need
+ # to be aesthetically quoted because they are evaled later.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+ ;;
+
+ *.lo | *.$objext)
+ # A library or standard object.
+ if test "$prev" = dlfiles; then
+ # This file was specified with -dlopen.
+ if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
+ dlfiles="$dlfiles $arg"
+ prev=
+ continue
+ else
+ # If libtool objects are unsupported, then we need to preload.
+ prev=dlprefiles
+ fi
+ fi
+
+ if test "$prev" = dlprefiles; then
+ # Preload the old-style object.
+ dlprefiles="$dlprefiles "`$echo "X$arg" | $Xsed -e "$lo2o"`
+ prev=
+ else
+ case $arg in
+ *.lo) libobjs="$libobjs $arg" ;;
+ *) objs="$objs $arg" ;;
+ esac
+ fi
+ ;;
+
+ *.$libext)
+ # An archive.
+ deplibs="$deplibs $arg"
+ old_deplibs="$old_deplibs $arg"
+ continue
+ ;;
+
+ *.la)
+ # A libtool-controlled library.
+
+ if test "$prev" = dlfiles; then
+ # This library was specified with -dlopen.
+ dlfiles="$dlfiles $arg"
+ prev=
+ elif test "$prev" = dlprefiles; then
+ # The library was specified with -dlpreopen.
+ dlprefiles="$dlprefiles $arg"
+ prev=
+ else
+ deplibs="$deplibs $arg"
+ fi
+ continue
+ ;;
+
+ # Some other compiler argument.
+ *)
+ # Unknown arguments in both finalize_command and compile_command need
+ # to be aesthetically quoted because they are evaled later.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+ ;;
+ esac # arg
+
+ # Now actually substitute the argument into the commands.
+ if test -n "$arg"; then
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ fi
+ done # argument parsing loop
+
+ if test -n "$prev"; then
+ $echo "$modename: the \`$prevarg' option requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
+ eval arg=\"$export_dynamic_flag_spec\"
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ fi
+
+ # calculate the name of the file, without its directory
+ outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
+ libobjs_save="$libobjs"
+
+ if test -n "$shlibpath_var"; then
+ # get the directories listed in $shlibpath_var
+ eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
+ else
+ shlib_search_path=
+ fi
+ eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
+ eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
+
+ output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$output_objdir" = "X$output"; then
+ output_objdir="$objdir"
+ else
+ output_objdir="$output_objdir/$objdir"
+ fi
+ # Create the object directory.
+ if test ! -d $output_objdir; then
+ $show "$mkdir $output_objdir"
+ $run $mkdir $output_objdir
+ status=$?
+ if test $status -ne 0 && test ! -d $output_objdir; then
+ exit $status
+ fi
+ fi
+
+ # Determine the type of output
+ case $output in
+ "")
+ $echo "$modename: you must specify an output file" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+ *.$libext) linkmode=oldlib ;;
+ *.lo | *.$objext) linkmode=obj ;;
+ *.la) linkmode=lib ;;
+ *) linkmode=prog ;; # Anything else should be a program.
+ esac
+
+ specialdeplibs=
+ libs=
+ # Find all interdependent deplibs by searching for libraries
+ # that are linked more than once (e.g. -la -lb -la)
+ for deplib in $deplibs; do
+ case "$libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ libs="$libs $deplib"
+ done
+ deplibs=
+ newdependency_libs=
+ newlib_search_path=
+ need_relink=no # whether we're linking any uninstalled libtool libraries
+ notinst_deplibs= # not-installed libtool libraries
+ notinst_path= # paths that contain not-installed libtool libraries
+ case $linkmode in
+ lib)
+ passes="conv link"
+ for file in $dlfiles $dlprefiles; do
+ case $file in
+ *.la) ;;
+ *)
+ $echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2
+ exit 1
+ ;;
+ esac
+ done
+ ;;
+ prog)
+ compile_deplibs=
+ finalize_deplibs=
+ alldeplibs=no
+ newdlfiles=
+ newdlprefiles=
+ passes="conv scan dlopen dlpreopen link"
+ ;;
+ *) passes="conv"
+ ;;
+ esac
+ for pass in $passes; do
+ if test $linkmode = prog; then
+ # Determine which files to process
+ case $pass in
+ dlopen)
+ libs="$dlfiles"
+ save_deplibs="$deplibs" # Collect dlpreopened libraries
+ deplibs=
+ ;;
+ dlpreopen) libs="$dlprefiles" ;;
+ link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
+ esac
+ fi
+ for deplib in $libs; do
+ lib=
+ found=no
+ case $deplib in
+ -l*)
+ if test $linkmode = oldlib && test $linkmode = obj; then
+ $echo "$modename: warning: \`-l' is ignored for archives/objects: $deplib" 1>&2
+ continue
+ fi
+ if test $pass = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ name=`$echo "X$deplib" | $Xsed -e 's/^-l//'`
+ for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ # Search the libtool library
+ lib="$searchdir/lib${name}.la"
+ if test -f "$lib"; then
+ found=yes
+ break
+ fi
+ done
+ if test "$found" != yes; then
+ # deplib doesn't seem to be a libtool library
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ deplibs="$deplib $deplibs"
+ test $linkmode = lib && newdependency_libs="$deplib $newdependency_libs"
+ fi
+ continue
+ fi
+ ;; # -l
+ -L*)
+ case $linkmode in
+ lib)
+ deplibs="$deplib $deplibs"
+ test $pass = conv && continue
+ newdependency_libs="$deplib $newdependency_libs"
+ newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
+ ;;
+ prog)
+ if test $pass = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ if test $pass = scan; then
+ deplibs="$deplib $deplibs"
+ newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ ;;
+ *)
+ $echo "$modename: warning: \`-L' is ignored for archives/objects: $deplib" 1>&2
+ ;;
+ esac # linkmode
+ continue
+ ;; # -L
+ -R*)
+ if test $pass = link; then
+ dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
+ # Make sure the xrpath contains only unique directories.
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) xrpath="$xrpath $dir" ;;
+ esac
+ fi
+ deplibs="$deplib $deplibs"
+ continue
+ ;;
+ *.la) lib="$deplib" ;;
+ *.$libext)
+ if test $pass = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ case $linkmode in
+ lib)
+ if test "$deplibs_check_method" != pass_all; then
+ echo
+ echo "*** Warning: This library needs some functionality provided by $deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ else
+ echo
+ echo "*** Warning: Linking the shared library $output against the"
+ echo "*** static library $deplib is not portable!"
+ deplibs="$deplib $deplibs"
+ fi
+ continue
+ ;;
+ prog)
+ if test $pass != link; then
+ deplibs="$deplib $deplibs"
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ continue
+ ;;
+ esac # linkmode
+ ;; # *.$libext
+ *.lo | *.$objext)
+ if test $pass = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
+ # If there is no dlopen support or we're linking statically,
+ # we need to preload.
+ newdlprefiles="$newdlprefiles $deplib"
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ newdlfiles="$newdlfiles $deplib"
+ fi
+ continue
+ ;;
+ %DEPLIBS%)
+ alldeplibs=yes
+ continue
+ ;;
+ esac # case $deplib
+ if test $found = yes || test -f "$lib"; then :
+ else
+ $echo "$modename: cannot find the library \`$lib'" 1>&2
+ exit 1
+ fi
+
+ # Check to see that this really is a libtool archive.
+ if (sed -e '2q' $lib | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ else
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+
+ ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$ladir" = "X$lib" && ladir="."
+
+ dlname=
+ dlopen=
+ dlpreopen=
+ libdir=
+ library_names=
+ old_library=
+ # If the library was installed with an old release of libtool,
+ # it will not redefine variable installed.
+ installed=yes
+
+ # Read the .la file
+ case $lib in
+ */* | *\\*) . $lib ;;
+ *) . ./$lib ;;
+ esac
+
+ if test "$linkmode,$pass" = "lib,link" ||
+ test "$linkmode,$pass" = "prog,scan" ||
+ { test $linkmode = oldlib && test $linkmode = obj; }; then
+ # Add dl[pre]opened files of deplib
+ test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
+ test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
+ fi
+
+ if test $pass = conv; then
+ # Only check for convenience libraries
+ deplibs="$lib $deplibs"
+ if test -z "$libdir"; then
+ if test -z "$old_library"; then
+ $echo "$modename: cannot find name of link library for \`$lib'" 1>&2
+ exit 1
+ fi
+ # It is a libtool convenience library, so add in its objects.
+ convenience="$convenience $ladir/$objdir/$old_library"
+ old_convenience="$old_convenience $ladir/$objdir/$old_library"
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ deplibs="$deplib $deplibs"
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ tmp_libs="$tmp_libs $deplib"
+ done
+ elif test $linkmode != prog && test $linkmode != lib; then
+ $echo "$modename: \`$lib' is not a convenience library" 1>&2
+ exit 1
+ fi
+ continue
+ fi # $pass = conv
+
+ # Get the name of the library we link against.
+ linklib=
+ for l in $old_library $library_names; do
+ linklib="$l"
+ done
+ if test -z "$linklib"; then
+ $echo "$modename: cannot find name of link library for \`$lib'" 1>&2
+ exit 1
+ fi
+
+ # This library was specified with -dlopen.
+ if test $pass = dlopen; then
+ if test -z "$libdir"; then
+ $echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2
+ exit 1
+ fi
+ if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
+ # If there is no dlname, no dlopen support or we're linking
+ # statically, we need to preload.
+ dlprefiles="$dlprefiles $lib"
+ else
+ newdlfiles="$newdlfiles $lib"
+ fi
+ continue
+ fi # $pass = dlopen
+
+ # We need an absolute path.
+ case $ladir in
+ [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
+ *)
+ abs_ladir=`cd "$ladir" && pwd`
+ if test -z "$abs_ladir"; then
+ $echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2
+ $echo "$modename: passing it literally to the linker, although it might fail" 1>&2
+ abs_ladir="$ladir"
+ fi
+ ;;
+ esac
+ laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
+
+ # Find the relevant object directory and library name.
+ if test "X$installed" = Xyes; then
+ if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+ $echo "$modename: warning: library \`$lib' was moved." 1>&2
+ dir="$ladir"
+ absdir="$abs_ladir"
+ libdir="$abs_ladir"
+ else
+ dir="$libdir"
+ absdir="$libdir"
+ fi
+ else
+ dir="$ladir/$objdir"
+ absdir="$abs_ladir/$objdir"
+ # Remove this search path later
+ notinst_path="$notinst_path $abs_ladir"
+ fi # $installed = yes
+ name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
+
+ # This library was specified with -dlpreopen.
+ if test $pass = dlpreopen; then
+ if test -z "$libdir"; then
+ $echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2
+ exit 1
+ fi
+ # Prefer using a static library (so that no silly _DYNAMIC symbols
+ # are required to link).
+ if test -n "$old_library"; then
+ newdlprefiles="$newdlprefiles $dir/$old_library"
+ # Otherwise, use the dlname, so that lt_dlopen finds it.
+ elif test -n "$dlname"; then
+ newdlprefiles="$newdlprefiles $dir/$dlname"
+ else
+ newdlprefiles="$newdlprefiles $dir/$linklib"
+ fi
+ fi # $pass = dlpreopen
+
+ if test -z "$libdir"; then
+ # Link the convenience library
+ if test $linkmode = lib; then
+ deplibs="$dir/$old_library $deplibs"
+ elif test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$dir/$old_library $compile_deplibs"
+ finalize_deplibs="$dir/$old_library $finalize_deplibs"
+ else
+ deplibs="$lib $deplibs"
+ fi
+ continue
+ fi
+
+ if test $linkmode = prog && test $pass != link; then
+ newlib_search_path="$newlib_search_path $ladir"
+ deplibs="$lib $deplibs"
+
+ linkalldeplibs=no
+ if test "$link_all_deplibs" != no || test -z "$library_names" ||
+ test "$build_libtool_libs" = no; then
+ linkalldeplibs=yes
+ fi
+
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ -L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test
+ esac
+ # Need to link against all dependency_libs?
+ if test $linkalldeplibs = yes; then
+ deplibs="$deplib $deplibs"
+ else
+ # Need to hardcode shared library paths
+ # or/and link against static libraries
+ newdependency_libs="$deplib $newdependency_libs"
+ fi
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ tmp_libs="$tmp_libs $deplib"
+ done # for deplib
+ continue
+ fi # $linkmode = prog...
+
+ link_static=no # Whether the deplib will be linked statically
+ if test -n "$library_names" &&
+ { test "$prefer_static_libs" = no || test -z "$old_library"; }; then
+ # Link against this shared library
+
+ if test "$linkmode,$pass" = "prog,link" ||
+ { test $linkmode = lib && test $hardcode_into_libs = yes; }; then
+ # Hardcode the library path.
+ # Skip directories that are in the system default run-time
+ # search path.
+ case " $sys_lib_dlsearch_path " in
+ *" $absdir "*) ;;
+ *)
+ case "$compile_rpath " in
+ *" $absdir "*) ;;
+ *) compile_rpath="$compile_rpath $absdir"
+ esac
+ ;;
+ esac
+ case " $sys_lib_dlsearch_path " in
+ *" $libdir "*) ;;
+ *)
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir"
+ esac
+ ;;
+ esac
+ if test $linkmode = prog; then
+ # We need to hardcode the library path
+ if test -n "$shlibpath_var"; then
+ # Make sure the rpath contains only unique directories.
+ case "$temp_rpath " in
+ *" $dir "*) ;;
+ *" $absdir "*) ;;
+ *) temp_rpath="$temp_rpath $dir" ;;
+ esac
+ fi
+ fi
+ fi # $linkmode,$pass = prog,link...
+
+ if test "$alldeplibs" = yes &&
+ { test "$deplibs_check_method" = pass_all ||
+ { test "$build_libtool_libs" = yes &&
+ test -n "$library_names"; }; }; then
+ # We only need to search for static libraries
+ continue
+ fi
+
+ if test "$installed" = no; then
+ notinst_deplibs="$notinst_deplibs $lib"
+ need_relink=yes
+ fi
+
+ if test -n "$old_archive_from_expsyms_cmds"; then
+ # figure out the soname
+ set dummy $library_names
+ realname="$2"
+ shift; shift
+ libname=`eval \\$echo \"$libname_spec\"`
+ # use dlname if we got it. it's perfectly good, no?
+ if test -n "$dlname"; then
+ soname="$dlname"
+ elif test -n "$soname_spec"; then
+ # bleh windows
+ case $host in
+ *cygwin*)
+ major=`expr $current - $age`
+ versuffix="-$major"
+ ;;
+ esac
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+
+ # Make a new name for the extract_expsyms_cmds to use
+ soroot="$soname"
+ soname=`echo $soroot | sed -e 's/^.*\///'`
+ newlib="libimp-`echo $soname | sed 's/^lib//;s/\.dll$//'`.a"
+
+ # If the library has no export list, then create one now
+ if test -f "$output_objdir/$soname-def"; then :
+ else
+ $show "extracting exported symbol list from \`$soname'"
+ save_ifs="$IFS"; IFS='~'
+ eval cmds=\"$extract_expsyms_cmds\"
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ fi
+
+ # Create $newlib
+ if test -f "$output_objdir/$newlib"; then :; else
+ $show "generating import library for \`$soname'"
+ save_ifs="$IFS"; IFS='~'
+ eval cmds=\"$old_archive_from_expsyms_cmds\"
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ fi
+ # make sure the library variables are pointing to the new library
+ dir=$output_objdir
+ linklib=$newlib
+ fi # test -n $old_archive_from_expsyms_cmds
+
+ if test $linkmode = prog || test "$mode" != relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ lib_linked=yes
+ case $hardcode_action in
+ immediate | unsupported)
+ if test "$hardcode_direct" = no; then
+ add="$dir/$linklib"
+ elif test "$hardcode_minus_L" = no; then
+ case $host in
+ *-*-sunos*) add_shlibpath="$dir" ;;
+ esac
+ add_dir="-L$dir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = no; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ relink)
+ if test "$hardcode_direct" = yes; then
+ add="$dir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ add_dir="-L$dir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ *) lib_linked=no ;;
+ esac
+
+ if test "$lib_linked" != yes; then
+ $echo "$modename: configuration error: unsupported hardcode properties"
+ exit 1
+ fi
+
+ if test -n "$add_shlibpath"; then
+ case :$compile_shlibpath: in
+ *":$add_shlibpath:"*) ;;
+ *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
+ esac
+ fi
+ if test $linkmode = prog; then
+ test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
+ test -n "$add" && compile_deplibs="$add $compile_deplibs"
+ else
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ if test "$hardcode_direct" != yes && \
+ test "$hardcode_minus_L" != yes && \
+ test "$hardcode_shlibpath_var" = yes; then
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
+ esac
+ fi
+ fi
+ fi
+
+ if test $linkmode = prog || test "$mode" = relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ # Finalize command for both is simple: just hardcode it.
+ if test "$hardcode_direct" = yes; then
+ add="$libdir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ add_dir="-L$libdir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
+ esac
+ add="-l$name"
+ else
+ # We cannot seem to hardcode it, guess we'll fake it.
+ add_dir="-L$libdir"
+ add="-l$name"
+ fi
+
+ if test $linkmode = prog; then
+ test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
+ test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
+ else
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ fi
+ fi
+ elif test $linkmode = prog; then
+ if test "$alldeplibs" = yes &&
+ { test "$deplibs_check_method" = pass_all ||
+ { test "$build_libtool_libs" = yes &&
+ test -n "$library_names"; }; }; then
+ # We only need to search for static libraries
+ continue
+ fi
+
+ # Try to link the static library
+ # Here we assume that one of hardcode_direct or hardcode_minus_L
+ # is not unsupported. This is valid on all known static and
+ # shared platforms.
+ if test "$hardcode_direct" != unsupported; then
+ test -n "$old_library" && linklib="$old_library"
+ compile_deplibs="$dir/$linklib $compile_deplibs"
+ finalize_deplibs="$dir/$linklib $finalize_deplibs"
+ else
+ compile_deplibs="-l$name -L$dir $compile_deplibs"
+ finalize_deplibs="-l$name -L$dir $finalize_deplibs"
+ fi
+ elif test "$build_libtool_libs" = yes; then
+ # Not a shared library
+ if test "$deplibs_check_method" != pass_all; then
+ # We're trying link a shared library against a static one
+ # but the system doesn't support it.
+
+ # Just print a warning and add the library to dependency_libs so
+ # that the program can be linked against the static library.
+ echo
+ echo "*** Warning: This library needs some functionality provided by $lib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ if test "$module" = yes; then
+ echo "*** Therefore, libtool will create a static module, that should work "
+ echo "*** as long as the dlopening application is linked with the -dlopen flag."
+ if test -z "$global_symbol_pipe"; then
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
+ else
+ convenience="$convenience $dir/$old_library"
+ old_convenience="$old_convenience $dir/$old_library"
+ deplibs="$dir/$old_library $deplibs"
+ link_static=yes
+ fi
+ fi # link shared/static library?
+
+ if test $linkmode = lib; then
+ if test -n "$dependency_libs" &&
+ { test $hardcode_into_libs != yes || test $build_old_libs = yes ||
+ test $link_static = yes; }; then
+ # Extract -R from dependency_libs
+ temp_deplibs=
+ for libdir in $dependency_libs; do
+ case $libdir in
+ -R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'`
+ case " $xrpath " in
+ *" $temp_xrpath "*) ;;
+ *) xrpath="$xrpath $temp_xrpath";;
+ esac;;
+ *) temp_deplibs="$temp_deplibs $libdir";;
+ esac
+ done
+ dependency_libs="$temp_deplibs"
+ fi
+
+ newlib_search_path="$newlib_search_path $absdir"
+ # Link against this library
+ test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
+ # ... and its dependency_libs
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ newdependency_libs="$deplib $newdependency_libs"
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ tmp_libs="$tmp_libs $deplib"
+ done
+
+ if test $link_all_deplibs != no; then
+ # Add the search paths of all dependency libraries
+ for deplib in $dependency_libs; do
+ case $deplib in
+ -L*) path="$deplib" ;;
+ *.la)
+ dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$deplib" && dir="."
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ if test -z "$absdir"; then
+ $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
+ absdir="$dir"
+ fi
+ ;;
+ esac
+ if grep "^installed=no" $deplib > /dev/null; then
+ path="-L$absdir/$objdir"
+ else
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ if test "$absdir" != "$libdir"; then
+ $echo "$modename: warning: \`$deplib' seems to be moved" 1>&2
+ fi
+ path="-L$absdir"
+ fi
+ ;;
+ *) continue ;;
+ esac
+ case " $deplibs " in
+ *" $path "*) ;;
+ *) deplibs="$deplibs $path" ;;
+ esac
+ done
+ fi # link_all_deplibs != no
+ fi # linkmode = lib
+ done # for deplib in $libs
+ if test $pass = dlpreopen; then
+ # Link the dlpreopened libraries before other libraries
+ for deplib in $save_deplibs; do
+ deplibs="$deplib $deplibs"
+ done
+ fi
+ if test $pass != dlopen; then
+ test $pass != scan && dependency_libs="$newdependency_libs"
+ if test $pass != conv; then
+ # Make sure lib_search_path contains only unique directories.
+ lib_search_path=
+ for dir in $newlib_search_path; do
+ case "$lib_search_path " in
+ *" $dir "*) ;;
+ *) lib_search_path="$lib_search_path $dir" ;;
+ esac
+ done
+ newlib_search_path=
+ fi
+
+ if test "$linkmode,$pass" != "prog,link"; then
+ vars="deplibs"
+ else
+ vars="compile_deplibs finalize_deplibs"
+ fi
+ for var in $vars dependency_libs; do
+ # Add libraries to $var in reverse order
+ eval tmp_libs=\"\$$var\"
+ new_libs=
+ for deplib in $tmp_libs; do
+ case $deplib in
+ -L*) new_libs="$deplib $new_libs" ;;
+ *)
+ case " $specialdeplibs " in
+ *" $deplib "*) new_libs="$deplib $new_libs" ;;
+ *)
+ case " $new_libs " in
+ *" $deplib "*) ;;
+ *) new_libs="$deplib $new_libs" ;;
+ esac
+ ;;
+ esac
+ ;;
+ esac
+ done
+ tmp_libs=
+ for deplib in $new_libs; do
+ case $deplib in
+ -L*)
+ case " $tmp_libs " in
+ *" $deplib "*) ;;
+ *) tmp_libs="$tmp_libs $deplib" ;;
+ esac
+ ;;
+ *) tmp_libs="$tmp_libs $deplib" ;;
+ esac
+ done
+ eval $var=\"$tmp_libs\"
+ done # for var
+ fi
+ if test "$pass" = "conv" &&
+ { test "$linkmode" = "lib" || test "$linkmode" = "prog"; }; then
+ libs="$deplibs" # reset libs
+ deplibs=
+ fi
+ done # for pass
+ if test $linkmode = prog; then
+ dlfiles="$newdlfiles"
+ dlprefiles="$newdlprefiles"
+ fi
+
+ case $linkmode in
+ oldlib)
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$rpath"; then
+ $echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$xrpath"; then
+ $echo "$modename: warning: \`-R' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for archives" 1>&2
+ fi
+
+ if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+ $echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2
+ fi
+
+ # Now set the variables for building old libraries.
+ build_libtool_libs=no
+ oldlibs="$output"
+ objs="$objs$old_deplibs"
+ ;;
+
+ lib)
+ # Make sure we only generate libraries of the form `libNAME.la'.
+ case $outputname in
+ lib*)
+ name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
+ eval libname=\"$libname_spec\"
+ ;;
+ *)
+ if test "$module" = no; then
+ $echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+ if test "$need_lib_prefix" != no; then
+ # Add the "lib" prefix for modules if required
+ name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
+ eval libname=\"$libname_spec\"
+ else
+ libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
+ fi
+ ;;
+ esac
+
+ if test -n "$objs"; then
+ if test "$deplibs_check_method" != pass_all; then
+ $echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1
+ exit 1
+ else
+ echo
+ echo "*** Warning: Linking the shared library $output against the non-libtool"
+ echo "*** objects $objs is not portable!"
+ libobjs="$libobjs $objs"
+ fi
+ fi
+
+ if test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2
+ fi
+
+ set dummy $rpath
+ if test $# -gt 2; then
+ $echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2
+ fi
+ install_libdir="$2"
+
+ oldlibs=
+ if test -z "$rpath"; then
+ if test "$build_libtool_libs" = yes; then
+ # Building a libtool convenience library.
+ libext=al
+ oldlibs="$output_objdir/$libname.$libext $oldlibs"
+ build_libtool_libs=convenience
+ build_old_libs=yes
+ fi
+
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for convenience libraries" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2
+ fi
+ else
+
+ # Parse the version information argument.
+ save_ifs="$IFS"; IFS=':'
+ set dummy $vinfo 0 0 0
+ IFS="$save_ifs"
+
+ if test -n "$8"; then
+ $echo "$modename: too many parameters to \`-version-info'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ current="$2"
+ revision="$3"
+ age="$4"
+
+ # Check that each of the things are valid numbers.
+ case $current in
+ 0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
+ *)
+ $echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ ;;
+ esac
+
+ case $revision in
+ 0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
+ *)
+ $echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ ;;
+ esac
+
+ case $age in
+ 0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
+ *)
+ $echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ ;;
+ esac
+
+ if test $age -gt $current; then
+ $echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2
+ $echo "$modename: \`$vinfo' is not valid version information" 1>&2
+ exit 1
+ fi
+
+ # Calculate the version variables.
+ major=
+ versuffix=
+ verstring=
+ case $version_type in
+ none) ;;
+
+ darwin)
+ # Like Linux, but with the current version available in
+ # verstring for coding it into the library header
+ major=.`expr $current - $age`
+ versuffix="$major.$age.$revision"
+ # Darwin ld doesn't like 0 for these options...
+ minor_current=`expr $current + 1`
+ verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+ ;;
+
+ freebsd-aout)
+ major=".$current"
+ versuffix=".$current.$revision";
+ ;;
+
+ freebsd-elf)
+ major=".$current"
+ versuffix=".$current";
+ ;;
+
+ irix)
+ major=`expr $current - $age + 1`
+ verstring="sgi$major.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$revision
+ while test $loop != 0; do
+ iface=`expr $revision - $loop`
+ loop=`expr $loop - 1`
+ verstring="sgi$major.$iface:$verstring"
+ done
+
+ # Before this point, $major must not contain `.'.
+ major=.$major
+ versuffix="$major.$revision"
+ ;;
+
+ linux)
+ major=.`expr $current - $age`
+ versuffix="$major.$age.$revision"
+ ;;
+
+ osf)
+ major=`expr $current - $age`
+ versuffix=".$current.$age.$revision"
+ verstring="$current.$age.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$age
+ while test $loop != 0; do
+ iface=`expr $current - $loop`
+ loop=`expr $loop - 1`
+ verstring="$verstring:${iface}.0"
+ done
+
+ # Make executables depend on our current version.
+ verstring="$verstring:${current}.0"
+ ;;
+
+ sunos)
+ major=".$current"
+ versuffix=".$current.$revision"
+ ;;
+
+ windows)
+ # Use '-' rather than '.', since we only want one
+ # extension on DOS 8.3 filesystems.
+ major=`expr $current - $age`
+ versuffix="-$major"
+ ;;
+
+ *)
+ $echo "$modename: unknown library version type \`$version_type'" 1>&2
+ echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
+ exit 1
+ ;;
+ esac
+
+ # Clear the version info if we defaulted, and they specified a release.
+ if test -z "$vinfo" && test -n "$release"; then
+ major=
+ verstring="0.0"
+ case $version_type in
+ darwin)
+ # we can't check for "0.0" in archive_cmds due to quoting
+ # problems, so we reset it completely
+ verstring=""
+ ;;
+ *)
+ verstring="0.0"
+ ;;
+ esac
+ if test "$need_version" = no; then
+ versuffix=
+ else
+ versuffix=".0.0"
+ fi
+ fi
+
+ # Remove version info from name if versioning should be avoided
+ if test "$avoid_version" = yes && test "$need_version" = no; then
+ major=
+ versuffix=
+ verstring=""
+ fi
+
+ # Check to see if the archive will have undefined symbols.
+ if test "$allow_undefined" = yes; then
+ if test "$allow_undefined_flag" = unsupported; then
+ $echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2
+ build_libtool_libs=no
+ build_old_libs=yes
+ fi
+ else
+ # Don't allow undefined symbols.
+ allow_undefined_flag="$no_undefined_flag"
+ fi
+ fi
+
+ if test "$mode" != relink; then
+ # Remove our outputs.
+ $show "${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*"
+ $run ${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*
+ fi
+
+ # Now set the variables for building old libraries.
+ if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
+ oldlibs="$oldlibs $output_objdir/$libname.$libext"
+
+ # Transform .lo files to .o files.
+ oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
+ fi
+
+ # Eliminate all temporary directories.
+ for path in $notinst_path; do
+ lib_search_path=`echo "$lib_search_path " | sed -e 's% $path % %g'`
+ deplibs=`echo "$deplibs " | sed -e 's% -L$path % %g'`
+ dependency_libs=`echo "$dependency_libs " | sed -e 's% -L$path % %g'`
+ done
+
+ if test -n "$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ temp_xrpath=
+ for libdir in $xrpath; do
+ temp_xrpath="$temp_xrpath -R$libdir"
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir" ;;
+ esac
+ done
+ if test $hardcode_into_libs != yes || test $build_old_libs = yes; then
+ dependency_libs="$temp_xrpath $dependency_libs"
+ fi
+ fi
+
+ # Make sure dlfiles contains only unique files that won't be dlpreopened
+ old_dlfiles="$dlfiles"
+ dlfiles=
+ for lib in $old_dlfiles; do
+ case " $dlprefiles $dlfiles " in
+ *" $lib "*) ;;
+ *) dlfiles="$dlfiles $lib" ;;
+ esac
+ done
+
+ # Make sure dlprefiles contains only unique files
+ old_dlprefiles="$dlprefiles"
+ dlprefiles=
+ for lib in $old_dlprefiles; do
+ case "$dlprefiles " in
+ *" $lib "*) ;;
+ *) dlprefiles="$dlprefiles $lib" ;;
+ esac
+ done
+
+ if test "$build_libtool_libs" = yes; then
+ if test -n "$rpath"; then
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
+ # these systems don't actually have a c library (as such)!
+ ;;
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # Rhapsody C library is in the System framework
+ deplibs="$deplibs -framework System"
+ ;;
+ *-*-netbsd*)
+ # Don't link with libc until the a.out ld.so is fixed.
+ ;;
+ *-*-openbsd*)
+ # Do not include libc due to us having libc/libc_r.
+ ;;
+ #### Local change for Sleepycat's Berkeley DB [#2380]:
+ # FreeBSD, like OpenBSD, uses libc/libc_r and should not
+ # link against libc/c_r explicitly; the -pthread linker flag
+ # implicitly controls use of -lc and -lc_r.
+ *-*-freebsd*)
+ # Do not include libc due to us having libc/libc_r.
+ ;;
+ *)
+ # Add libc to deplibs on all other systems if necessary.
+ if test $build_libtool_need_lc = "yes"; then
+ deplibs="$deplibs -lc"
+ fi
+ ;;
+ esac
+ fi
+
+ # Transform deplibs into only deplibs that can be linked in shared.
+ name_save=$name
+ libname_save=$libname
+ release_save=$release
+ versuffix_save=$versuffix
+ major_save=$major
+ # I'm not sure if I'm treating the release correctly. I think
+ # release should show up in the -l (ie -lgmp5) so we don't want to
+ # add it in twice. Is that correct?
+ release=""
+ versuffix=""
+ major=""
+ newdeplibs=
+ droppeddeps=no
+ case $deplibs_check_method in
+ pass_all)
+ # Don't check for shared/static. Everything works.
+ # This might be a little naive. We might want to check
+ # whether the library exists or not. But this is on
+ # osf3 & osf4 and I'm not really sure... Just
+ # implementing what was already the behaviour.
+ newdeplibs=$deplibs
+ ;;
+ test_compile)
+ # This code stresses the "libraries are programs" paradigm to its
+ # limits. Maybe even breaks it. We compile a program, linking it
+ # against the deplibs as a proxy for the library. Then we can check
+ # whether they linked in statically or dynamically with ldd.
+ $rm conftest.c
+ cat > conftest.c <<EOF
+ int main() { return 0; }
+EOF
+ $rm conftest
+ $CC -o conftest conftest.c $deplibs
+ if test $? -eq 0 ; then
+ ldd_output=`ldd conftest`
+ for i in $deplibs; do
+ name="`expr $i : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test -n "$name" && test "$name" != "0"; then
+ libname=`eval \\$echo \"$libname_spec\"`
+ deplib_matches=`eval \\$echo \"$library_names_spec\"`
+ set dummy $deplib_matches
+ deplib_match=$2
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ newdeplibs="$newdeplibs $i"
+ else
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $i."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ newdeplibs="$newdeplibs $i"
+ fi
+ done
+ else
+ # Error occurred in the first compile. Let's try to salvage the situation:
+ # Compile a separate program for each library.
+ for i in $deplibs; do
+ name="`expr $i : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test -n "$name" && test "$name" != "0"; then
+ $rm conftest
+ $CC -o conftest conftest.c $i
+ # Did it work?
+ if test $? -eq 0 ; then
+ ldd_output=`ldd conftest`
+ libname=`eval \\$echo \"$libname_spec\"`
+ deplib_matches=`eval \\$echo \"$library_names_spec\"`
+ set dummy $deplib_matches
+ deplib_match=$2
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ newdeplibs="$newdeplibs $i"
+ else
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $i."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ droppeddeps=yes
+ echo
+ echo "*** Warning! Library $i is needed by this library but I was not able to"
+ echo "*** make it link in! You will probably need to install it or some"
+ echo "*** library that it depends on before this library will be fully"
+ echo "*** functional. Installing it before continuing would be even better."
+ fi
+ else
+ newdeplibs="$newdeplibs $i"
+ fi
+ done
+ fi
+ ;;
+ file_magic*)
+ set dummy $deplibs_check_method
+ file_magic_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
+ for a_deplib in $deplibs; do
+ name="`expr $a_deplib : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test -n "$name" && test "$name" != "0"; then
+ libname=`eval \\$echo \"$libname_spec\"`
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ for potent_lib in $potential_libs; do
+ # Follow soft links.
+ if ls -lLd "$potent_lib" 2>/dev/null \
+ | grep " -> " >/dev/null; then
+ continue
+ fi
+ # The statement above tries to avoid entering an
+ # endless loop below, in case of cyclic links.
+ # We might still enter an endless loop, since a link
+ # loop can be closed while we follow links,
+ # but so what?
+ potlib="$potent_lib"
+ while test -h "$potlib" 2>/dev/null; do
+ potliblink=`ls -ld $potlib | sed 's/.* -> //'`
+ case $potliblink in
+ [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
+ *) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
+ esac
+ done
+ if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \
+ | sed 10q \
+ | egrep "$file_magic_regex" > /dev/null; then
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $a_deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ # Add a -L argument.
+ newdeplibs="$newdeplibs $a_deplib"
+ fi
+ done # Gone through all deplibs.
+ ;;
+ match_pattern*)
+ set dummy $deplibs_check_method
+ match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
+ for a_deplib in $deplibs; do
+ name="`expr $a_deplib : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test -n "$name" && test "$name" != "0"; then
+ libname=`eval \\$echo \"$libname_spec\"`
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ for potent_lib in $potential_libs; do
+ if eval echo \"$potent_lib\" 2>/dev/null \
+ | sed 10q \
+ | egrep "$match_pattern_regex" > /dev/null; then
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $a_deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ # Add a -L argument.
+ newdeplibs="$newdeplibs $a_deplib"
+ fi
+ done # Gone through all deplibs.
+ ;;
+ none | unknown | *)
+ newdeplibs=""
+ if $echo "X $deplibs" | $Xsed -e 's/ -lc$//' \
+ -e 's/ -[LR][^ ]*//g' -e 's/[ ]//g' |
+ grep . >/dev/null; then
+ echo
+ if test "X$deplibs_check_method" = "Xnone"; then
+ echo "*** Warning: inter-library dependencies are not supported in this platform."
+ else
+ echo "*** Warning: inter-library dependencies are not known to be supported."
+ fi
+ echo "*** All declared inter-library dependencies are being dropped."
+ droppeddeps=yes
+ fi
+ ;;
+ esac
+ versuffix=$versuffix_save
+ major=$major_save
+ release=$release_save
+ libname=$libname_save
+ name=$name_save
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # On Rhapsody, replace the C library with the System framework
+ newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'`
+ ;;
+ esac
+
+ if test "$droppeddeps" = yes; then
+ if test "$module" = yes; then
+ echo
+ echo "*** Warning: libtool could not satisfy all declared inter-library"
+ echo "*** dependencies of module $libname. Therefore, libtool will create"
+ echo "*** a static module, that should work as long as the dlopening"
+ echo "*** application is linked with the -dlopen flag."
+ if test -z "$global_symbol_pipe"; then
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ else
+ echo "*** The inter-library dependencies that have been dropped here will be"
+ echo "*** automatically added whenever a program is linked with this library"
+ echo "*** or is declared to -dlopen it."
+
+ if test $allow_undefined = no; then
+ echo
+ echo "*** Since this library must not contain undefined symbols,"
+ echo "*** because either the platform does not support them or"
+ echo "*** it was explicitly requested with -no-undefined,"
+ echo "*** libtool will only create a static version of it."
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
+ fi
+ fi
+ # Done checking deplibs!
+ deplibs=$newdeplibs
+ fi
+
+ # All the library-specific variables (install_libdir is set above).
+ library_names=
+ old_library=
+ dlname=
+
+ # Test again, we may have decided not to build it any more
+ if test "$build_libtool_libs" = yes; then
+ if test $hardcode_into_libs = yes; then
+ # Hardcode the library paths
+ hardcode_libdirs=
+ dep_rpath=
+ rpath="$finalize_rpath"
+ test "$mode" != relink && rpath="$compile_rpath$rpath"
+ for libdir in $rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ dep_rpath="$dep_rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) perm_rpath="$perm_rpath $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval dep_rpath=\"$hardcode_libdir_flag_spec\"
+ fi
+ if test -n "$runpath_var" && test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
+ fi
+ test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
+ fi
+
+ shlibpath="$finalize_shlibpath"
+ test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
+ if test -n "$shlibpath"; then
+ eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
+ fi
+
+ # Get the real and link names of the library.
+ eval library_names=\"$library_names_spec\"
+ set dummy $library_names
+ realname="$2"
+ shift; shift
+
+ if test -n "$soname_spec"; then
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+ test -z "$dlname" && dlname=$soname
+
+ lib="$output_objdir/$realname"
+ for link
+ do
+ linknames="$linknames $link"
+ done
+
+ # Ensure that we have .o objects for linkers which dislike .lo
+ # (e.g. aix) in case we are running --disable-static
+ for obj in $libobjs; do
+ xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$obj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
+ oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
+ if test ! -f $xdir/$oldobj; then
+ $show "(cd $xdir && ${LN_S} $baseobj $oldobj)"
+ $run eval '(cd $xdir && ${LN_S} $baseobj $oldobj)' || exit $?
+ fi
+ done
+
+ # Use standard objects if they are pic
+ test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
+ $show "generating symbol list for \`$libname.la'"
+ export_symbols="$output_objdir/$libname.exp"
+ $run $rm $export_symbols
+ eval cmds=\"$export_symbols_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ if test -n "$export_symbols_regex"; then
+ $show "egrep -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\""
+ $run eval 'egrep -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+ $show "$mv \"${export_symbols}T\" \"$export_symbols\""
+ $run eval '$mv "${export_symbols}T" "$export_symbols"'
+ fi
+ fi
+ fi
+
+ if test -n "$export_symbols" && test -n "$include_expsyms"; then
+ $run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"'
+ fi
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec"; then
+ eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+ else
+ gentop="$output_objdir/${outputname}x"
+ $show "${rm}r $gentop"
+ $run ${rm}r "$gentop"
+ $show "mkdir $gentop"
+ $run mkdir "$gentop"
+ status=$?
+ if test $status -ne 0 && test ! -d "$gentop"; then
+ exit $status
+ fi
+ generated="$generated $gentop"
+
+ for xlib in $convenience; do
+ # Extract the objects.
+ case $xlib in
+ [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
+ *) xabs=`pwd`"/$xlib" ;;
+ esac
+ xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
+ xdir="$gentop/$xlib"
+
+ $show "${rm}r $xdir"
+ $run ${rm}r "$xdir"
+ $show "mkdir $xdir"
+ $run mkdir "$xdir"
+ status=$?
+ if test $status -ne 0 && test ! -d "$xdir"; then
+ exit $status
+ fi
+ $show "(cd $xdir && $AR x $xabs)"
+ $run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
+
+ libobjs="$libobjs "`find $xdir -name \*.o -print -o -name \*.lo -print | $NL2SP`
+ done
+ fi
+ fi
+
+ if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
+ eval flag=\"$thread_safe_flag_spec\"
+ linker_flags="$linker_flags $flag"
+ fi
+
+ # Make a backup of the uninstalled library when relinking
+ if test "$mode" = relink; then
+ $run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $?
+ fi
+
+ # Do each of the archive commands.
+ if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+ eval cmds=\"$archive_expsym_cmds\"
+ else
+ eval cmds=\"$archive_cmds\"
+ fi
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+
+ # Restore the uninstalled library and exit
+ if test "$mode" = relink; then
+ $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $?
+ exit 0
+ fi
+
+ # Create links to the real library.
+ for linkname in $linknames; do
+ if test "$realname" != "$linkname"; then
+ $show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)"
+ $run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $?
+ fi
+ done
+
+ # If -module or -export-dynamic was specified, set the dlname.
+ if test "$module" = yes || test "$export_dynamic" = yes; then
+ # On all known operating systems, these are identical.
+ dlname="$soname"
+ fi
+ fi
+ ;;
+
+ obj)
+ if test -n "$deplibs"; then
+ $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2
+ fi
+
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$rpath"; then
+ $echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$xrpath"; then
+ $echo "$modename: warning: \`-R' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for objects" 1>&2
+ fi
+
+ case $output in
+ *.lo)
+ if test -n "$objs$old_deplibs"; then
+ $echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2
+ exit 1
+ fi
+ libobj="$output"
+ obj=`$echo "X$output" | $Xsed -e "$lo2o"`
+ ;;
+ *)
+ libobj=
+ obj="$output"
+ ;;
+ esac
+
+ # Delete the old objects.
+ $run $rm $obj $libobj
+
+ # Objects from convenience libraries. This assumes
+ # single-version convenience libraries. Whenever we create
+ # different ones for PIC/non-PIC, this we'll have to duplicate
+ # the extraction.
+ reload_conv_objs=
+ gentop=
+ # reload_cmds runs $LD directly, so let us get rid of
+ # -Wl from whole_archive_flag_spec
+ wl=
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec"; then
+ eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\"
+ else
+ gentop="$output_objdir/${obj}x"
+ $show "${rm}r $gentop"
+ $run ${rm}r "$gentop"
+ $show "mkdir $gentop"
+ $run mkdir "$gentop"
+ status=$?
+ if test $status -ne 0 && test ! -d "$gentop"; then
+ exit $status
+ fi
+ generated="$generated $gentop"
+
+ for xlib in $convenience; do
+ # Extract the objects.
+ case $xlib in
+ [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
+ *) xabs=`pwd`"/$xlib" ;;
+ esac
+ xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
+ xdir="$gentop/$xlib"
+
+ $show "${rm}r $xdir"
+ $run ${rm}r "$xdir"
+ $show "mkdir $xdir"
+ $run mkdir "$xdir"
+ status=$?
+ if test $status -ne 0 && test ! -d "$xdir"; then
+ exit $status
+ fi
+ $show "(cd $xdir && $AR x $xabs)"
+ $run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
+
+ reload_conv_objs="$reload_objs "`find $xdir -name \*.o -print -o -name \*.lo -print | $NL2SP`
+ done
+ fi
+ fi
+
+ # Create the old-style object.
+ reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
+
+ output="$obj"
+ eval cmds=\"$reload_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+
+ # Exit if we aren't doing a library object file.
+ if test -z "$libobj"; then
+ if test -n "$gentop"; then
+ $show "${rm}r $gentop"
+ $run ${rm}r $gentop
+ fi
+
+ exit 0
+ fi
+
+ if test "$build_libtool_libs" != yes; then
+ if test -n "$gentop"; then
+ $show "${rm}r $gentop"
+ $run ${rm}r $gentop
+ fi
+
+ # Create an invalid libtool object if no PIC, so that we don't
+ # accidentally link it into a program.
+ $show "echo timestamp > $libobj"
+ $run eval "echo timestamp > $libobj" || exit $?
+ exit 0
+ fi
+
+ if test -n "$pic_flag" || test "$pic_mode" != default; then
+ # Only do commands if we really have different PIC objects.
+ reload_objs="$libobjs $reload_conv_objs"
+ output="$libobj"
+ eval cmds=\"$reload_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ else
+ # Just create a symlink.
+ $show $rm $libobj
+ $run $rm $libobj
+ xdir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$libobj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$libobj" | $Xsed -e 's%^.*/%%'`
+ oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
+ $show "(cd $xdir && $LN_S $oldobj $baseobj)"
+ $run eval '(cd $xdir && $LN_S $oldobj $baseobj)' || exit $?
+ fi
+
+ if test -n "$gentop"; then
+ $show "${rm}r $gentop"
+ $run ${rm}r $gentop
+ fi
+
+ exit 0
+ ;;
+
+ prog)
+ case $host in
+ *cygwin*) output=`echo $output | sed -e 's,.exe$,,;s,$,.exe,'` ;;
+ esac
+ if test -n "$vinfo"; then
+ $echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2
+ fi
+
+ if test -n "$release"; then
+ $echo "$modename: warning: \`-release' is ignored for programs" 1>&2
+ fi
+
+ if test "$preload" = yes; then
+ if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown &&
+ test "$dlopen_self_static" = unknown; then
+ $echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support."
+ fi
+ fi
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # On Rhapsody, replace the C library with the System framework
+ compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
+ finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
+ ;;
+ esac
+
+ compile_command="$compile_command $compile_deplibs"
+ finalize_command="$finalize_command $finalize_deplibs"
+
+ if test -n "$rpath$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ for libdir in $rpath $xrpath; do
+ # This is the magic to use -rpath.
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir" ;;
+ esac
+ done
+ fi
+
+ # Now hardcode the library paths
+ rpath=
+ hardcode_libdirs=
+ for libdir in $compile_rpath $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ rpath="$rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) perm_rpath="$perm_rpath $libdir" ;;
+ esac
+ fi
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ case :$dllsearchpath: in
+ *":$libdir:"*) ;;
+ *) dllsearchpath="$dllsearchpath:$libdir";;
+ esac
+ ;;
+ esac
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ compile_rpath="$rpath"
+
+ rpath=
+ hardcode_libdirs=
+ for libdir in $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ rpath="$rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$finalize_perm_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ finalize_rpath="$rpath"
+
+ if test -n "$libobjs" && test "$build_old_libs" = yes; then
+ # Transform all the library objects into standard objects.
+ compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ fi
+
+ dlsyms=
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ if test -n "$NM" && test -n "$global_symbol_pipe"; then
+ dlsyms="${outputname}S.c"
+ else
+ $echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2
+ fi
+ fi
+
+ if test -n "$dlsyms"; then
+ case $dlsyms in
+ "") ;;
+ *.c)
+ # Discover the nlist of each of the dlfiles.
+ nlist="$output_objdir/${outputname}.nm"
+
+ $show "$rm $nlist ${nlist}S ${nlist}T"
+ $run $rm "$nlist" "${nlist}S" "${nlist}T"
+
+ # Parse the name list into a source file.
+ $show "creating $output_objdir/$dlsyms"
+
+ test -z "$run" && $echo > "$output_objdir/$dlsyms" "\
+/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */
+/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+/* Prevent the only kind of declaration conflicts we can make. */
+#define lt_preloaded_symbols some_other_symbol
+
+/* External symbol declarations for the compiler. */\
+"
+
+ if test "$dlself" = yes; then
+ $show "generating symbol list for \`$output'"
+
+ test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist"
+
+ # Add our own program objects to the symbol list.
+ progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ for arg in $progfiles; do
+ $show "extracting global C symbols from \`$arg'"
+ $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -n "$exclude_expsyms"; then
+ $run eval 'egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+ $run eval '$mv "$nlist"T "$nlist"'
+ fi
+
+ if test -n "$export_symbols_regex"; then
+ $run eval 'egrep -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+ $run eval '$mv "$nlist"T "$nlist"'
+ fi
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ export_symbols="$output_objdir/$output.exp"
+ $run $rm $export_symbols
+ $run eval "sed -n -e '/^: @PROGRAM@$/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+ else
+ $run eval "sed -e 's/\([][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$output.exp"'
+ $run eval 'grep -f "$output_objdir/$output.exp" < "$nlist" > "$nlist"T'
+ $run eval 'mv "$nlist"T "$nlist"'
+ fi
+ fi
+
+ for arg in $dlprefiles; do
+ $show "extracting global C symbols from \`$arg'"
+ name=`echo "$arg" | sed -e 's%^.*/%%'`
+ $run eval 'echo ": $name " >> "$nlist"'
+ $run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -z "$run"; then
+ # Make sure we have at least an empty file.
+ test -f "$nlist" || : > "$nlist"
+
+ if test -n "$exclude_expsyms"; then
+ egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+ $mv "$nlist"T "$nlist"
+ fi
+
+ # Try sorting and uniquifying the output.
+ if grep -v "^: " < "$nlist" | sort +2 | uniq > "$nlist"S; then
+ :
+ else
+ grep -v "^: " < "$nlist" > "$nlist"S
+ fi
+
+ if test -f "$nlist"S; then
+ eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"'
+ else
+ echo '/* NONE */' >> "$output_objdir/$dlsyms"
+ fi
+
+ $echo >> "$output_objdir/$dlsyms" "\
+
+#undef lt_preloaded_symbols
+
+#if defined (__STDC__) && __STDC__
+# define lt_ptr void *
+#else
+# define lt_ptr char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ lt_ptr address;
+}
+lt_preloaded_symbols[] =
+{\
+"
+
+ eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms"
+
+ $echo >> "$output_objdir/$dlsyms" "\
+ {0, (lt_ptr) 0}
+};
+
+/* This works around a problem in FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+ fi
+
+ pic_flag_for_symtable=
+ case $host in
+ # compiling the symbol table file with pic_flag works around
+ # a FreeBSD bug that causes programs to crash when -lm is
+ # linked before any other PIC object. But we must not use
+ # pic_flag when linking with -static. The problem exists in
+ # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
+ *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
+ case "$compile_command " in
+ *" -static "*) ;;
+ *) pic_flag_for_symtable=" $pic_flag -DPIC -DFREEBSD_WORKAROUND";;
+ esac;;
+ *-*-hpux*)
+ case "$compile_command " in
+ *" -static "*) ;;
+ *) pic_flag_for_symtable=" $pic_flag -DPIC";;
+ esac
+ esac
+
+ # Now compile the dynamic symbol file.
+ $show "(cd $output_objdir && $CC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")"
+ $run eval '(cd $output_objdir && $CC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $?
+
+ # Clean up the generated files.
+ $show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T"
+ $run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T"
+
+ # Transform the symbol file into the correct name.
+ compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
+ finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
+ ;;
+ *)
+ $echo "$modename: unknown suffix for \`$dlsyms'" 1>&2
+ exit 1
+ ;;
+ esac
+ else
+ # We keep going just in case the user didn't refer to
+ # lt_preloaded_symbols. The linker will fail if global_symbol_pipe
+ # really was required.
+
+ # Nullify the symbol file.
+ compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
+ finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
+ fi
+
+ if test $need_relink = no || test "$build_libtool_libs" != yes; then
+ # Replace the output file specification.
+ compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ link_command="$compile_command$compile_rpath"
+
+ # We have no uninstalled library dependencies, so finalize right now.
+ $show "$link_command"
+ $run eval "$link_command"
+ status=$?
+
+ # Delete the generated files.
+ if test -n "$dlsyms"; then
+ $show "$rm $output_objdir/${outputname}S.${objext}"
+ $run $rm "$output_objdir/${outputname}S.${objext}"
+ fi
+
+ exit $status
+ fi
+
+ if test -n "$shlibpath_var"; then
+ # We should set the shlibpath_var
+ rpath=
+ for dir in $temp_rpath; do
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*)
+ # Absolute path.
+ rpath="$rpath$dir:"
+ ;;
+ *)
+ # Relative path: add a thisdir entry.
+ rpath="$rpath\$thisdir/$dir:"
+ ;;
+ esac
+ done
+ temp_rpath="$rpath"
+ fi
+
+ if test -n "$compile_shlibpath$finalize_shlibpath"; then
+ compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
+ fi
+ if test -n "$finalize_shlibpath"; then
+ finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
+ fi
+
+ compile_var=
+ finalize_var=
+ if test -n "$runpath_var"; then
+ if test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ if test -n "$finalize_perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $finalize_perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ fi
+
+ if test "$no_install" = yes; then
+ # We don't need to create a wrapper script.
+ link_command="$compile_var$compile_command$compile_rpath"
+ # Replace the output file specification.
+ link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ # Delete the old output file.
+ $run $rm $output
+ # Link the executable and exit
+ $show "$link_command"
+ $run eval "$link_command" || exit $?
+ exit 0
+ fi
+
+ if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+
+ $echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2
+ $echo "$modename: \`$output' will be relinked during installation" 1>&2
+ else
+ if test "$fast_install" != no; then
+ link_command="$finalize_var$compile_command$finalize_rpath"
+ if test "$fast_install" = yes; then
+ relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
+ else
+ # fast_install is set to needless
+ relink_command=
+ fi
+ else
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+ fi
+ fi
+
+ # Replace the output file specification.
+ link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
+
+ # Delete the old output files.
+ $run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname
+
+ $show "$link_command"
+ $run eval "$link_command" || exit $?
+
+ # Now create the wrapper script.
+ $show "creating $output"
+
+ # Quote the relink command for shipping.
+ if test -n "$relink_command"; then
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
+ relink_command="$var=\"$var_value\"; export $var; $relink_command"
+ fi
+ done
+ relink_command="cd `pwd`; $relink_command"
+ relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
+ fi
+
+ # Quote $echo for shipping.
+ if test "X$echo" = "X$SHELL $0 --fallback-echo"; then
+ case $0 in
+ [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $0 --fallback-echo";;
+ *) qecho="$SHELL `pwd`/$0 --fallback-echo";;
+ esac
+ qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"`
+ else
+ qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"`
+ fi
+
+ # Only actually do things if our run command is non-null.
+ if test -z "$run"; then
+ # win32 will think the script is a binary if it has
+ # a .exe suffix, so we strip it off here.
+ case $output in
+ *.exe) output=`echo $output|sed 's,.exe$,,'` ;;
+ esac
+ # test for cygwin because mv fails w/o .exe extensions
+ case $host in
+ *cygwin*) exeext=.exe ;;
+ *) exeext= ;;
+ esac
+ $rm $output
+ trap "$rm $output; exit 1" 1 2 15
+
+ $echo > $output "\
+#! $SHELL
+
+# $output - temporary wrapper script for $objdir/$outputname
+# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
+#
+# The $output program cannot be directly executed until all the libtool
+# libraries that it depends on are installed.
+#
+# This wrapper script should never be moved out of the build directory.
+# If it is, it will not operate correctly.
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e 1s/^X//'
+sed_quote_subst='$sed_quote_subst'
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test \"\${CDPATH+set}\" = set; then CDPATH=:; export CDPATH; fi
+
+relink_command=\"$relink_command\"
+
+# This environment variable determines our operation mode.
+if test \"\$libtool_install_magic\" = \"$magic\"; then
+ # install mode needs the following variable:
+ notinst_deplibs='$notinst_deplibs'
+else
+ # When we are sourced in execute mode, \$file and \$echo are already set.
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ echo=\"$qecho\"
+ file=\"\$0\"
+ # Make sure echo works.
+ if test \"X\$1\" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+ elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then
+ # Yippee, \$echo works!
+ :
+ else
+ # Restart under the correct shell, and then maybe \$echo will work.
+ exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
+ fi
+ fi\
+"
+ $echo >> $output "\
+
+ # Find the directory that this script lives in.
+ thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
+ test \"x\$thisdir\" = \"x\$file\" && thisdir=.
+
+ # Follow symbolic links until we get to the real thisdir.
+ file=\`ls -ld \"\$file\" | sed -n 's/.*-> //p'\`
+ while test -n \"\$file\"; do
+ destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
+
+ # If there was a directory component, then change thisdir.
+ if test \"x\$destdir\" != \"x\$file\"; then
+ case \"\$destdir\" in
+ [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
+ *) thisdir=\"\$thisdir/\$destdir\" ;;
+ esac
+ fi
+
+ file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
+ file=\`ls -ld \"\$thisdir/\$file\" | sed -n 's/.*-> //p'\`
+ done
+
+ # Try to get the absolute directory name.
+ absdir=\`cd \"\$thisdir\" && pwd\`
+ test -n \"\$absdir\" && thisdir=\"\$absdir\"
+"
+
+ if test "$fast_install" = yes; then
+ echo >> $output "\
+ program=lt-'$outputname'$exeext
+ progdir=\"\$thisdir/$objdir\"
+
+ if test ! -f \"\$progdir/\$program\" || \\
+ { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | sed 1q\`; \\
+ test \"X\$file\" != \"X\$progdir/\$program\"; }; then
+
+ file=\"\$\$-\$program\"
+
+ if test ! -d \"\$progdir\"; then
+ $mkdir \"\$progdir\"
+ else
+ $rm \"\$progdir/\$file\"
+ fi"
+
+ echo >> $output "\
+
+ # relink executable if necessary
+ if test -n \"\$relink_command\"; then
+ if relink_command_output=\`eval \$relink_command 2>&1\`; then :
+ else
+ $echo \"\$relink_command_output\" >&2
+ $rm \"\$progdir/\$file\"
+ exit 1
+ fi
+ fi
+
+ $mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
+ { $rm \"\$progdir/\$program\";
+ $mv \"\$progdir/\$file\" \"\$progdir/\$program\"; }
+ $rm \"\$progdir/\$file\"
+ fi"
+ else
+ echo >> $output "\
+ program='$outputname'
+ progdir=\"\$thisdir/$objdir\"
+"
+ fi
+
+ echo >> $output "\
+
+ if test -f \"\$progdir/\$program\"; then"
+
+ # Export our shlibpath_var if we have one.
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+ $echo >> $output "\
+ # Add our own library path to $shlibpath_var
+ $shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
+
+ # Some systems cannot cope with colon-terminated $shlibpath_var
+ # The second colon is a workaround for a bug in BeOS R4 sed
+ $shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
+
+ export $shlibpath_var
+"
+ fi
+
+ # fixup the dll searchpath if we need to.
+ if test -n "$dllsearchpath"; then
+ $echo >> $output "\
+ # Add the dll search path components to the executable PATH
+ PATH=$dllsearchpath:\$PATH
+"
+ fi
+
+ $echo >> $output "\
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ # Run the actual program with our arguments.
+"
+ case $host in
+ # win32 systems need to use the prog path for dll
+ # lookup to work
+ *-*-cygwin* | *-*-pw32*)
+ $echo >> $output "\
+ exec \$progdir/\$program \${1+\"\$@\"}
+"
+ ;;
+
+ # Backslashes separate directories on plain windows
+ *-*-mingw | *-*-os2*)
+ $echo >> $output "\
+ exec \$progdir\\\\\$program \${1+\"\$@\"}
+"
+ ;;
+
+ *)
+ $echo >> $output "\
+ # Export the path to the program.
+ PATH=\"\$progdir:\$PATH\"
+ export PATH
+
+ exec \$program \${1+\"\$@\"}
+"
+ ;;
+ esac
+ $echo >> $output "\
+ \$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\"
+ exit 1
+ fi
+ else
+ # The program doesn't exist.
+ \$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2
+ \$echo \"This script is just a wrapper for \$program.\" 1>&2
+ echo \"See the $PACKAGE documentation for more information.\" 1>&2
+ exit 1
+ fi
+fi\
+"
+ chmod +x $output
+ fi
+ exit 0
+ ;;
+ esac
+
+ # See if we need to build an old-fashioned archive.
+ for oldlib in $oldlibs; do
+
+ if test "$build_libtool_libs" = convenience; then
+ oldobjs="$libobjs_save"
+ addlibs="$convenience"
+ build_libtool_libs=no
+ else
+ if test "$build_libtool_libs" = module; then
+ oldobjs="$libobjs_save"
+ build_libtool_libs=no
+ else
+ oldobjs="$objs$old_deplibs "`$echo "X$libobjs_save" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`
+ fi
+ addlibs="$old_convenience"
+ fi
+
+ if test -n "$addlibs"; then
+ gentop="$output_objdir/${outputname}x"
+ $show "${rm}r $gentop"
+ $run ${rm}r "$gentop"
+ $show "mkdir $gentop"
+ $run mkdir "$gentop"
+ status=$?
+ if test $status -ne 0 && test ! -d "$gentop"; then
+ exit $status
+ fi
+ generated="$generated $gentop"
+
+ # Add in members from convenience archives.
+ for xlib in $addlibs; do
+ # Extract the objects.
+ case $xlib in
+ [\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
+ *) xabs=`pwd`"/$xlib" ;;
+ esac
+ xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
+ xdir="$gentop/$xlib"
+
+ $show "${rm}r $xdir"
+ $run ${rm}r "$xdir"
+ $show "mkdir $xdir"
+ $run mkdir "$xdir"
+ status=$?
+ if test $status -ne 0 && test ! -d "$xdir"; then
+ exit $status
+ fi
+ $show "(cd $xdir && $AR x $xabs)"
+ $run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
+
+ oldobjs="$oldobjs "`find $xdir -name \*.${objext} -print -o -name \*.lo -print | $NL2SP`
+ done
+ fi
+
+ # Do each command in the archive commands.
+ if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
+ eval cmds=\"$old_archive_from_new_cmds\"
+ else
+ # Ensure that we have .o objects in place in case we decided
+ # not to build a shared library, and have fallen back to building
+ # static libs even though --disable-static was passed!
+ for oldobj in $oldobjs; do
+ if test ! -f $oldobj; then
+ xdir=`$echo "X$oldobj" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$xdir" = "X$oldobj"; then
+ xdir="."
+ else
+ xdir="$xdir"
+ fi
+ baseobj=`$echo "X$oldobj" | $Xsed -e 's%^.*/%%'`
+ obj=`$echo "X$baseobj" | $Xsed -e "$o2lo"`
+ $show "(cd $xdir && ${LN_S} $obj $baseobj)"
+ $run eval '(cd $xdir && ${LN_S} $obj $baseobj)' || exit $?
+ fi
+ done
+
+ eval cmds=\"$old_archive_cmds\"
+ fi
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ done
+
+ if test -n "$generated"; then
+ $show "${rm}r$generated"
+ $run ${rm}r$generated
+ fi
+
+ # Now create the libtool archive.
+ case $output in
+ *.la)
+ old_library=
+ test "$build_old_libs" = yes && old_library="$libname.$libext"
+ $show "creating $output"
+
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
+ relink_command="$var=\"$var_value\"; export $var; $relink_command"
+ fi
+ done
+ # Quote the link command for shipping.
+ relink_command="cd `pwd`; $SHELL $0 --mode=relink $libtool_args"
+ relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
+
+ # Only create the output if not a dry run.
+ if test -z "$run"; then
+ for installed in no yes; do
+ if test "$installed" = yes; then
+ if test -z "$install_libdir"; then
+ break
+ fi
+ output="$output_objdir/$outputname"i
+ # Replace all uninstalled libtool libraries with the installed ones
+ newdependency_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ *.la)
+ name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'`
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ newdependency_libs="$newdependency_libs $libdir/$name"
+ ;;
+ *) newdependency_libs="$newdependency_libs $deplib" ;;
+ esac
+ done
+ dependency_libs="$newdependency_libs"
+ newdlfiles=
+ for lib in $dlfiles; do
+ name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ newdlfiles="$newdlfiles $libdir/$name"
+ done
+ dlfiles="$newdlfiles"
+ newdlprefiles=
+ for lib in $dlprefiles; do
+ name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ newdlprefiles="$newdlprefiles $libdir/$name"
+ done
+ dlprefiles="$newdlprefiles"
+ fi
+ $rm $output
+ # place dlname in correct position for cygwin
+ tdlname=$dlname
+ case $host,$output,$installed,$module,$dlname in
+ *cygwin*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
+ esac
+ $echo > $output "\
+# $outputname - a libtool library file
+# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='$tdlname'
+
+# Names of this library.
+library_names='$library_names'
+
+# The name of the static archive.
+old_library='$old_library'
+
+# Libraries that this one depends upon.
+dependency_libs='$dependency_libs'
+
+# Version information for $libname.
+current=$current
+age=$age
+revision=$revision
+
+# Is this an already installed library?
+installed=$installed
+
+# Files to dlopen/dlpreopen
+dlopen='$dlfiles'
+dlpreopen='$dlprefiles'
+
+# Directory that this library needs to be installed in:
+libdir='$install_libdir'"
+ if test "$installed" = no && test $need_relink = yes; then
+ $echo >> $output "\
+relink_command=\"$relink_command\""
+ fi
+ done
+ fi
+
+ # Do a symbolic link so that the libtool archive can be found in
+ # LD_LIBRARY_PATH before the program is installed.
+ $show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)"
+ $run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $?
+ ;;
+ esac
+ exit 0
+ ;;
+
+ # libtool install mode
+ install)
+ modename="$modename: install"
+
+ # There may be an optional sh(1) argument at the beginning of
+ # install_prog (especially on Windows NT).
+ if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
+ # Allow the use of GNU shtool's install command.
+ $echo "X$nonopt" | $Xsed | grep shtool > /dev/null; then
+ # Aesthetically quote it.
+ arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ install_prog="$arg "
+ arg="$1"
+ shift
+ else
+ install_prog=
+ arg="$nonopt"
+ fi
+
+ # The real first argument should be the name of the installation program.
+ # Aesthetically quote it.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ install_prog="$install_prog$arg"
+
+ # We need to accept at least all the BSD install flags.
+ dest=
+ files=
+ opts=
+ prev=
+ install_type=
+ isdir=no
+ stripme=
+ for arg
+ do
+ if test -n "$dest"; then
+ files="$files $dest"
+ dest="$arg"
+ continue
+ fi
+
+ case $arg in
+ -d) isdir=yes ;;
+ -f) prev="-f" ;;
+ -g) prev="-g" ;;
+ -m) prev="-m" ;;
+ -o) prev="-o" ;;
+ -s)
+ stripme=" -s"
+ continue
+ ;;
+ -*) ;;
+
+ *)
+ # If the previous option needed an argument, then skip it.
+ if test -n "$prev"; then
+ prev=
+ else
+ dest="$arg"
+ continue
+ fi
+ ;;
+ esac
+
+ # Aesthetically quote the argument.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ arg="\"$arg\""
+ ;;
+ esac
+ install_prog="$install_prog $arg"
+ done
+
+ if test -z "$install_prog"; then
+ $echo "$modename: you must specify an install program" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test -n "$prev"; then
+ $echo "$modename: the \`$prev' option requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test -z "$files"; then
+ if test -z "$dest"; then
+ $echo "$modename: no file or destination specified" 1>&2
+ else
+ $echo "$modename: you must specify a destination" 1>&2
+ fi
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Strip any trailing slash from the destination.
+ dest=`$echo "X$dest" | $Xsed -e 's%/$%%'`
+
+ # Check to see that the destination is a directory.
+ test -d "$dest" && isdir=yes
+ if test "$isdir" = yes; then
+ destdir="$dest"
+ destname=
+ else
+ destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$destdir" = "X$dest" && destdir=.
+ destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'`
+
+ # Not a directory, so check to see that there is only one file specified.
+ set dummy $files
+ if test $# -gt 2; then
+ $echo "$modename: \`$dest' is not a directory" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+ fi
+ case $destdir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ for file in $files; do
+ case $file in
+ *.lo) ;;
+ *)
+ $echo "$modename: \`$destdir' must be an absolute directory name" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+ esac
+ done
+ ;;
+ esac
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
+
+ staticlibs=
+ future_libdirs=
+ current_libdirs=
+ for file in $files; do
+
+ # Do each installation.
+ case $file in
+ *.$libext)
+ # Do the static libraries later.
+ staticlibs="$staticlibs $file"
+ ;;
+
+ *.la)
+ # Check to see that this really is a libtool archive.
+ if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ else
+ $echo "$modename: \`$file' is not a valid libtool archive" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ library_names=
+ old_library=
+ relink_command=
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Add the libdir to current_libdirs if it is the destination.
+ if test "X$destdir" = "X$libdir"; then
+ case "$current_libdirs " in
+ *" $libdir "*) ;;
+ *) current_libdirs="$current_libdirs $libdir" ;;
+ esac
+ else
+ # Note the libdir as a future libdir.
+ case "$future_libdirs " in
+ *" $libdir "*) ;;
+ *) future_libdirs="$future_libdirs $libdir" ;;
+ esac
+ fi
+
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/
+ test "X$dir" = "X$file/" && dir=
+ dir="$dir$objdir"
+
+ if test -n "$relink_command"; then
+ $echo "$modename: warning: relinking \`$file'" 1>&2
+ $show "$relink_command"
+ if $run eval "$relink_command"; then :
+ else
+ $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
+ continue
+ fi
+ fi
+
+ # See the names of the shared library.
+ set dummy $library_names
+ if test -n "$2"; then
+ realname="$2"
+ shift
+ shift
+
+ srcname="$realname"
+ test -n "$relink_command" && srcname="$realname"T
+
+ # Install the shared library and build the symlinks.
+ $show "$install_prog $dir/$srcname $destdir/$realname"
+ $run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $?
+ if test -n "$stripme" && test -n "$striplib"; then
+ $show "$striplib $destdir/$realname"
+ $run eval "$striplib $destdir/$realname" || exit $?
+ fi
+
+ if test $# -gt 0; then
+ # Delete the old symlinks, and create new ones.
+ for linkname
+ do
+ if test "$linkname" != "$realname"; then
+ $show "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
+ $run eval "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
+ fi
+ done
+ fi
+
+ # Do each command in the postinstall commands.
+ lib="$destdir/$realname"
+ eval cmds=\"$postinstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ fi
+
+ # Install the pseudo-library for information purposes.
+ name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ instname="$dir/$name"i
+ $show "$install_prog $instname $destdir/$name"
+ $run eval "$install_prog $instname $destdir/$name" || exit $?
+
+ # Maybe install the static library, too.
+ test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
+ ;;
+
+ *.lo)
+ # Install (i.e. copy) a libtool object.
+
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ destfile="$destdir/$destfile"
+ fi
+
+ # Deduce the name of the destination old-style object file.
+ case $destfile in
+ *.lo)
+ staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"`
+ ;;
+ *.$objext)
+ staticdest="$destfile"
+ destfile=
+ ;;
+ *)
+ $echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+ esac
+
+ # Install the libtool object if requested.
+ if test -n "$destfile"; then
+ $show "$install_prog $file $destfile"
+ $run eval "$install_prog $file $destfile" || exit $?
+ fi
+
+ # Install the old object if enabled.
+ if test "$build_old_libs" = yes; then
+ # Deduce the name of the old-style object file.
+ staticobj=`$echo "X$file" | $Xsed -e "$lo2o"`
+
+ $show "$install_prog $staticobj $staticdest"
+ $run eval "$install_prog \$staticobj \$staticdest" || exit $?
+ fi
+ exit 0
+ ;;
+
+ *)
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ destfile="$destdir/$destfile"
+ fi
+
+ # Do a test to see if this is really a libtool program.
+ if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ notinst_deplibs=
+ relink_command=
+
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Check the variables that should have been set.
+ if test -z "$notinst_deplibs"; then
+ $echo "$modename: invalid libtool wrapper script \`$file'" 1>&2
+ exit 1
+ fi
+
+ finalize=yes
+ for lib in $notinst_deplibs; do
+ # Check to see that each library is installed.
+ libdir=
+ if test -f "$lib"; then
+ # If there is no directory component, then add one.
+ case $lib in
+ */* | *\\*) . $lib ;;
+ *) . ./$lib ;;
+ esac
+ fi
+ libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
+ if test -n "$libdir" && test ! -f "$libfile"; then
+ $echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2
+ finalize=no
+ fi
+ done
+
+ relink_command=
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ outputname=
+ if test "$fast_install" = no && test -n "$relink_command"; then
+ if test "$finalize" = yes && test -z "$run"; then
+ tmpdir="/tmp"
+ test -n "$TMPDIR" && tmpdir="$TMPDIR"
+ tmpdir="$tmpdir/libtool-$$"
+ if $mkdir -p "$tmpdir" && chmod 700 "$tmpdir"; then :
+ else
+ $echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
+ continue
+ fi
+ file=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ outputname="$tmpdir/$file"
+ # Replace the output file specification.
+ relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
+
+ $show "$relink_command"
+ if $run eval "$relink_command"; then :
+ else
+ $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
+ ${rm}r "$tmpdir"
+ continue
+ fi
+ file="$outputname"
+ else
+ $echo "$modename: warning: cannot relink \`$file'" 1>&2
+ fi
+ else
+ # Install the binary that we compiled earlier.
+ file=`$echo "X$file" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
+ fi
+ fi
+
+ # remove .exe since cygwin /usr/bin/install will append another
+ # one anyways
+ case $install_prog,$host in
+ /usr/bin/install*,*cygwin*)
+ case $file:$destfile in
+ *.exe:*.exe)
+ # this is ok
+ ;;
+ *.exe:*)
+ destfile=$destfile.exe
+ ;;
+ *:*.exe)
+ destfile=`echo $destfile | sed -e 's,.exe$,,'`
+ ;;
+ esac
+ ;;
+ esac
+ $show "$install_prog$stripme $file $destfile"
+ $run eval "$install_prog\$stripme \$file \$destfile" || exit $?
+ test -n "$outputname" && ${rm}r "$tmpdir"
+ ;;
+ esac
+ done
+
+ for file in $staticlibs; do
+ name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+
+ # Set up the ranlib parameters.
+ oldlib="$destdir/$name"
+
+ $show "$install_prog $file $oldlib"
+ $run eval "$install_prog \$file \$oldlib" || exit $?
+
+ if test -n "$stripme" && test -n "$striplib"; then
+ $show "$old_striplib $oldlib"
+ $run eval "$old_striplib $oldlib" || exit $?
+ fi
+
+ # Do each command in the postinstall commands.
+ eval cmds=\"$old_postinstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ done
+
+ if test -n "$future_libdirs"; then
+ $echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2
+ fi
+
+ if test -n "$current_libdirs"; then
+ # Maybe just do a dry run.
+ test -n "$run" && current_libdirs=" -n$current_libdirs"
+ exec_cmd='$SHELL $0 --finish$current_libdirs'
+ else
+ exit 0
+ fi
+ ;;
+
+ # libtool finish mode
+ finish)
+ modename="$modename: finish"
+ libdirs="$nonopt"
+ admincmds=
+
+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+ for dir
+ do
+ libdirs="$libdirs $dir"
+ done
+
+ for libdir in $libdirs; do
+ if test -n "$finish_cmds"; then
+ # Do each command in the finish commands.
+ eval cmds=\"$finish_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || admincmds="$admincmds
+ $cmd"
+ done
+ IFS="$save_ifs"
+ fi
+ if test -n "$finish_eval"; then
+ # Do the single finish_eval.
+ eval cmds=\"$finish_eval\"
+ $run eval "$cmds" || admincmds="$admincmds
+ $cmds"
+ fi
+ done
+ fi
+
+ # Exit here if they wanted silent mode.
+ test "$show" = ":" && exit 0
+
+ echo "----------------------------------------------------------------------"
+ echo "Libraries have been installed in:"
+ for libdir in $libdirs; do
+ echo " $libdir"
+ done
+ echo
+ echo "If you ever happen to want to link against installed libraries"
+ echo "in a given directory, LIBDIR, you must either use libtool, and"
+ echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
+ echo "flag during linking and do at least one of the following:"
+ if test -n "$shlibpath_var"; then
+ echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
+ echo " during execution"
+ fi
+ if test -n "$runpath_var"; then
+ echo " - add LIBDIR to the \`$runpath_var' environment variable"
+ echo " during linking"
+ fi
+ if test -n "$hardcode_libdir_flag_spec"; then
+ libdir=LIBDIR
+ eval flag=\"$hardcode_libdir_flag_spec\"
+
+ echo " - use the \`$flag' linker flag"
+ fi
+ if test -n "$admincmds"; then
+ echo " - have your system administrator run these commands:$admincmds"
+ fi
+ if test -f /etc/ld.so.conf; then
+ echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
+ fi
+ echo
+ echo "See any operating system documentation about shared libraries for"
+ echo "more information, such as the ld(1) and ld.so(8) manual pages."
+ echo "----------------------------------------------------------------------"
+ exit 0
+ ;;
+
+ # libtool execute mode
+ execute)
+ modename="$modename: execute"
+
+ # The first argument is the command name.
+ cmd="$nonopt"
+ if test -z "$cmd"; then
+ $echo "$modename: you must specify a COMMAND" 1>&2
+ $echo "$help"
+ exit 1
+ fi
+
+ # Handle -dlopen flags immediately.
+ for file in $execute_dlfiles; do
+ if test ! -f "$file"; then
+ $echo "$modename: \`$file' is not a file" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ dir=
+ case $file in
+ *.la)
+ # Check to see that this really is a libtool archive.
+ if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ else
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ # Read the libtool library.
+ dlname=
+ library_names=
+
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Skip this library if it cannot be dlopened.
+ if test -z "$dlname"; then
+ # Warn if it was a shared library.
+ test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'"
+ continue
+ fi
+
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$file" && dir=.
+
+ if test -f "$dir/$objdir/$dlname"; then
+ dir="$dir/$objdir"
+ else
+ $echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2
+ exit 1
+ fi
+ ;;
+
+ *.lo)
+ # Just add the directory containing the .lo file.
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$file" && dir=.
+ ;;
+
+ *)
+ $echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2
+ continue
+ ;;
+ esac
+
+ # Get the absolute pathname.
+ absdir=`cd "$dir" && pwd`
+ test -n "$absdir" && dir="$absdir"
+
+ # Now add the directory to shlibpath_var.
+ if eval "test -z \"\$$shlibpath_var\""; then
+ eval "$shlibpath_var=\"\$dir\""
+ else
+ eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
+ fi
+ done
+
+ # This variable tells wrapper scripts just to set shlibpath_var
+ # rather than running their programs.
+ libtool_execute_magic="$magic"
+
+ # Check if any of the arguments is a wrapper script.
+ args=
+ for file
+ do
+ case $file in
+ -*) ;;
+ *)
+ # Do a test to see if this is really a libtool program.
+ if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
+ # Transform arg to wrapped name.
+ file="$progdir/$program"
+ fi
+ ;;
+ esac
+ # Quote arguments (to preserve shell metacharacters).
+ file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"`
+ args="$args \"$file\""
+ done
+
+ if test -z "$run"; then
+ if test -n "$shlibpath_var"; then
+ # Export the shlibpath_var.
+ eval "export $shlibpath_var"
+ fi
+
+ # Restore saved enviroment variables
+ if test "${save_LC_ALL+set}" = set; then
+ LC_ALL="$save_LC_ALL"; export LC_ALL
+ fi
+ if test "${save_LANG+set}" = set; then
+ LANG="$save_LANG"; export LANG
+ fi
+
+ # Now prepare to actually exec the command.
+ exec_cmd='"$cmd"$args'
+ else
+ # Display what would be done.
+ if test -n "$shlibpath_var"; then
+ eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
+ $echo "export $shlibpath_var"
+ fi
+ $echo "$cmd$args"
+ exit 0
+ fi
+ ;;
+
+ # libtool clean and uninstall mode
+ clean | uninstall)
+ modename="$modename: $mode"
+ rm="$nonopt"
+ files=
+ rmforce=
+ exit_status=0
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
+
+ for arg
+ do
+ case $arg in
+ -f) rm="$rm $arg"; rmforce=yes ;;
+ -*) rm="$rm $arg" ;;
+ *) files="$files $arg" ;;
+ esac
+ done
+
+ if test -z "$rm"; then
+ $echo "$modename: you must specify an RM program" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ rmdirs=
+
+ for file in $files; do
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$dir" = "X$file"; then
+ dir=.
+ objdir="$objdir"
+ else
+ objdir="$dir/$objdir"
+ fi
+ name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ test $mode = uninstall && objdir="$dir"
+
+ # Remember objdir for removal later, being careful to avoid duplicates
+ if test $mode = clean; then
+ case " $rmdirs " in
+ *" $objdir "*) ;;
+ *) rmdirs="$rmdirs $objdir" ;;
+ esac
+ fi
+
+ # Don't error if the file doesn't exist and rm -f was used.
+ if (test -L "$file") >/dev/null 2>&1 \
+ || (test -h "$file") >/dev/null 2>&1 \
+ || test -f "$file"; then
+ :
+ elif test -d "$file"; then
+ exit_status=1
+ continue
+ elif test "$rmforce" = yes; then
+ continue
+ fi
+
+ rmfiles="$file"
+
+ case $name in
+ *.la)
+ # Possibly a libtool archive, so verify it.
+ if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ . $dir/$name
+
+ # Delete the libtool libraries and symlinks.
+ for n in $library_names; do
+ rmfiles="$rmfiles $objdir/$n"
+ done
+ test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
+ test $mode = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
+
+ if test $mode = uninstall; then
+ if test -n "$library_names"; then
+ # Do each command in the postuninstall commands.
+ eval cmds=\"$postuninstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd"
+ if test $? != 0 && test "$rmforce" != yes; then
+ exit_status=1
+ fi
+ done
+ IFS="$save_ifs"
+ fi
+
+ if test -n "$old_library"; then
+ # Do each command in the old_postuninstall commands.
+ eval cmds=\"$old_postuninstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd"
+ if test $? != 0 && test "$rmforce" != yes; then
+ exit_status=1
+ fi
+ done
+ IFS="$save_ifs"
+ fi
+ # FIXME: should reinstall the best remaining shared library.
+ fi
+ fi
+ ;;
+
+ *.lo)
+ if test "$build_old_libs" = yes; then
+ oldobj=`$echo "X$name" | $Xsed -e "$lo2o"`
+ rmfiles="$rmfiles $dir/$oldobj"
+ fi
+ ;;
+
+ *)
+ # Do a test to see if this is a libtool program.
+ if test $mode = clean &&
+ (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ relink_command=
+ . $dir/$file
+
+ rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
+ if test "$fast_install" = yes && test -n "$relink_command"; then
+ rmfiles="$rmfiles $objdir/lt-$name"
+ fi
+ fi
+ ;;
+ esac
+ $show "$rm $rmfiles"
+ $run $rm $rmfiles || exit_status=1
+ done
+
+ # Try to remove the ${objdir}s in the directories where we deleted files
+ for dir in $rmdirs; do
+ if test -d "$dir"; then
+ $show "rmdir $dir"
+ $run rmdir $dir >/dev/null 2>&1
+ fi
+ done
+
+ exit $exit_status
+ ;;
+
+ "")
+ $echo "$modename: you must specify a MODE" 1>&2
+ $echo "$generic_help" 1>&2
+ exit 1
+ ;;
+ esac
+
+ if test -z "$exec_cmd"; then
+ $echo "$modename: invalid operation mode \`$mode'" 1>&2
+ $echo "$generic_help" 1>&2
+ exit 1
+ fi
+fi # test -z "$show_help"
+
+if test -n "$exec_cmd"; then
+ eval exec $exec_cmd
+ exit 1
+fi
+
+# We need to display help for each of the modes.
+case $mode in
+"") $echo \
+"Usage: $modename [OPTION]... [MODE-ARG]...
+
+Provide generalized library-building support services.
+
+ --config show all configuration variables
+ --debug enable verbose shell tracing
+-n, --dry-run display commands without modifying any files
+ --features display basic configuration information and exit
+ --finish same as \`--mode=finish'
+ --help display this help message and exit
+ --mode=MODE use operation mode MODE [default=inferred from MODE-ARGS]
+ --quiet same as \`--silent'
+ --silent don't print informational messages
+ --version print version information
+
+MODE must be one of the following:
+
+ clean remove files from the build directory
+ compile compile a source file into a libtool object
+ execute automatically set library path, then run a program
+ finish complete the installation of libtool libraries
+ install install libraries or executables
+ link create a library or an executable
+ uninstall remove libraries from an installed directory
+
+MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for
+a more detailed description of MODE."
+ exit 0
+ ;;
+
+clean)
+ $echo \
+"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
+
+Remove files from the build directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, object or program, all the files associated
+with it are deleted. Otherwise, only FILE itself is deleted using RM."
+ ;;
+
+compile)
+ $echo \
+"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
+
+Compile a source file into a libtool library object.
+
+This mode accepts the following additional options:
+
+ -o OUTPUT-FILE set the output file name to OUTPUT-FILE
+ -prefer-pic try to building PIC objects only
+ -prefer-non-pic try to building non-PIC objects only
+ -static always build a \`.o' file suitable for static linking
+
+COMPILE-COMMAND is a command to be used in creating a \`standard' object file
+from the given SOURCEFILE.
+
+The output file name is determined by removing the directory component from
+SOURCEFILE, then substituting the C source code suffix \`.c' with the
+library object suffix, \`.lo'."
+ ;;
+
+execute)
+ $echo \
+"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]...
+
+Automatically set library path, then run a program.
+
+This mode accepts the following additional options:
+
+ -dlopen FILE add the directory containing FILE to the library path
+
+This mode sets the library path environment variable according to \`-dlopen'
+flags.
+
+If any of the ARGS are libtool executable wrappers, then they are translated
+into their corresponding uninstalled binary, and any of their required library
+directories are added to the library path.
+
+Then, COMMAND is executed, with ARGS as arguments."
+ ;;
+
+finish)
+ $echo \
+"Usage: $modename [OPTION]... --mode=finish [LIBDIR]...
+
+Complete the installation of libtool libraries.
+
+Each LIBDIR is a directory that contains libtool libraries.
+
+The commands that this mode executes may require superuser privileges. Use
+the \`--dry-run' option if you just want to see what would be executed."
+ ;;
+
+install)
+ $echo \
+"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND...
+
+Install executables or libraries.
+
+INSTALL-COMMAND is the installation command. The first component should be
+either the \`install' or \`cp' program.
+
+The rest of the components are interpreted as arguments to that command (only
+BSD-compatible install options are recognized)."
+ ;;
+
+link)
+ $echo \
+"Usage: $modename [OPTION]... --mode=link LINK-COMMAND...
+
+Link object files or libraries together to form another library, or to
+create an executable program.
+
+LINK-COMMAND is a command using the C compiler that you would use to create
+a program from several object files.
+
+The following components of LINK-COMMAND are treated specially:
+
+ -all-static do not do any dynamic linking at all
+ -avoid-version do not add a version suffix if possible
+ -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
+ -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
+ -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
+ -export-symbols SYMFILE
+ try to export only the symbols listed in SYMFILE
+ -export-symbols-regex REGEX
+ try to export only the symbols matching REGEX
+ -LLIBDIR search LIBDIR for required installed libraries
+ -lNAME OUTPUT-FILE requires the installed library libNAME
+ -jnimodule build a library that can dlopened via Java JNI
+ -module build a library that can dlopened
+ -no-fast-install disable the fast-install mode
+ -no-install link a not-installable executable
+ -no-undefined declare that a library does not refer to external symbols
+ -o OUTPUT-FILE create OUTPUT-FILE from the specified objects
+ -release RELEASE specify package release information
+ -rpath LIBDIR the created library will eventually be installed in LIBDIR
+ -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
+ -static do not do any dynamic linking of libtool libraries
+ -version-info CURRENT[:REVISION[:AGE]]
+ specify library version info [each variable defaults to 0]
+
+All other options (arguments beginning with \`-') are ignored.
+
+Every other argument is treated as a filename. Files ending in \`.la' are
+treated as uninstalled libtool libraries, other files are standard or library
+object files.
+
+If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
+only library objects (\`.lo' files) may be specified, and \`-rpath' is
+required, except when creating a convenience library.
+
+If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
+using \`ar' and \`ranlib', or on Windows using \`lib'.
+
+If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
+is created, otherwise an executable program is created."
+ ;;
+
+uninstall)
+ $echo \
+"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
+
+Remove libraries from an installation directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, all the files associated with it are deleted.
+Otherwise, only FILE itself is deleted using RM."
+ ;;
+
+*)
+ $echo "$modename: invalid operation mode \`$mode'" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+esac
+
+echo
+$echo "Try \`$modename --help' for more information about other modes."
+
+exit 0
+
+# Local Variables:
+# mode:shell-script
+# sh-indentation:2
+# End:
diff --git a/storage/bdb/dist/pubdef.in b/storage/bdb/dist/pubdef.in
new file mode 100644
index 00000000000..f42363022cd
--- /dev/null
+++ b/storage/bdb/dist/pubdef.in
@@ -0,0 +1,350 @@
+# Name
+# D == documentation
+# I == include file
+# C == Java case value (declared and initialized)
+# J == Java constant (declared only)
+DB_AFTER D I J
+DB_AGGRESSIVE D I J
+DB_ALREADY_ABORTED * I *
+DB_AM_CHKSUM * I *
+DB_AM_CL_WRITER * I *
+DB_AM_COMPENSATE * I *
+DB_AM_CREATED * I *
+DB_AM_CREATED_MSTR * I *
+DB_AM_DBM_ERROR * I *
+DB_AM_DELIMITER * I *
+DB_AM_DIRTY * I *
+DB_AM_DISCARD * I *
+DB_AM_DUP * I *
+DB_AM_DUPSORT * I *
+DB_AM_ENCRYPT * I *
+DB_AM_FIXEDLEN * I *
+DB_AM_INMEM * I *
+DB_AM_IN_RENAME * I *
+DB_AM_OPEN_CALLED * I *
+DB_AM_PAD * I *
+DB_AM_PGDEF * I *
+DB_AM_RDONLY * I *
+DB_AM_RECNUM * I *
+DB_AM_RECOVER * I *
+DB_AM_RENUMBER * I *
+DB_AM_REVSPLITOFF * I *
+DB_AM_SECONDARY * I *
+DB_AM_SNAPSHOT * I *
+DB_AM_SUBDB * I *
+DB_AM_SWAP * I *
+DB_AM_TXN * I *
+DB_AM_VERIFYING * I *
+DB_APPEND D I J
+DB_ARCH_ABS D I J
+DB_ARCH_DATA D I J
+DB_ARCH_LOG D I J
+DB_AUTO_COMMIT D I J
+DB_BEFORE D I J
+DB_BTREE D I C
+DB_BTREEMAGIC * I *
+DB_BTREEOLDVER * I *
+DB_BTREEVERSION * I *
+DB_CACHED_COUNTS * I J
+DB_CDB_ALLDB D I J
+DB_CHKSUM_SHA1 D I J
+DB_CLIENT D I J
+DB_COMMIT * I *
+DB_CONFIG D * *
+DB_CONSUME D I J
+DB_CONSUME_WAIT D I J
+DB_CREATE D I J
+DB_CURRENT D I J
+DB_CXX_NO_EXCEPTIONS D I J
+DB_DBM_HSEARCH * I *
+DB_DBT_APPMALLOC D I *
+DB_DBT_DUPOK * I *
+DB_DBT_ISSET * I *
+DB_DBT_MALLOC D I J
+DB_DBT_PARTIAL D I J
+DB_DBT_REALLOC D I J
+DB_DBT_USERMEM D I J
+DB_DELETED * I *
+DB_DIRECT D I J
+DB_DIRECT_DB D I J
+DB_DIRECT_LOG D I J
+DB_DIRTY_READ D I J
+DB_DONOTINDEX D I C
+DB_DUP D I J
+DB_DUPSORT D I J
+DB_EID_BROADCAST D I J
+DB_EID_INVALID D I J
+DB_ENCRYPT D I J
+DB_ENCRYPT_AES D I J
+DB_ENV_AUTO_COMMIT * I *
+DB_ENV_CDB * I *
+DB_ENV_CDB_ALLDB * I *
+DB_ENV_CREATE * I *
+DB_ENV_DBLOCAL * I *
+DB_ENV_DIRECT_DB * I *
+DB_ENV_DIRECT_LOG * I *
+DB_ENV_FATAL * I *
+DB_ENV_LOCKDOWN * I *
+DB_ENV_NOLOCKING * I *
+DB_ENV_NOMMAP * I *
+DB_ENV_NOPANIC * I *
+DB_ENV_OPEN_CALLED * I *
+DB_ENV_OVERWRITE * I *
+DB_ENV_PRIVATE * I *
+DB_ENV_REGION_INIT * I *
+DB_ENV_REP_CLIENT * I *
+DB_ENV_REP_LOGSONLY * I *
+DB_ENV_REP_MASTER * I *
+DB_ENV_RPCCLIENT * I *
+DB_ENV_RPCCLIENT_GIVEN * I *
+DB_ENV_SYSTEM_MEM * I *
+DB_ENV_THREAD * I *
+DB_ENV_TXN_NOSYNC * I *
+DB_ENV_TXN_WRITE_NOSYNC * I *
+DB_ENV_YIELDCPU * I *
+DB_EXCL D I J
+DB_EXTENT * I *
+DB_FAST_STAT D I J
+DB_FCNTL_LOCKING * I *
+DB_FILE_ID_LEN * I *
+DB_FIRST D I J
+DB_FLUSH D I J
+DB_FORCE D I J
+DB_GET_BOTH D I J
+DB_GET_BOTHC * I *
+DB_GET_BOTH_RANGE D I J
+DB_GET_RECNO D I J
+DB_HANDLE_LOCK * I *
+DB_HASH D I C
+DB_HASHMAGIC * I *
+DB_HASHOLDVER * I *
+DB_HASHVERSION * I *
+DB_HOME D * *
+DB_INIT_CDB D I J
+DB_INIT_LOCK D I J
+DB_INIT_LOG D I J
+DB_INIT_MPOOL D I J
+DB_INIT_TXN D I J
+DB_JAVA_CALLBACK * I *
+DB_JOINENV D I J
+DB_JOIN_ITEM D I J
+DB_JOIN_NOSORT D I J
+DB_KEYEMPTY D I C
+DB_KEYEXIST D I C
+DB_KEYFIRST D I J
+DB_KEYLAST D I J
+DB_LAST D I J
+DB_LOCKDOWN D I J
+DB_LOCKVERSION * I *
+DB_LOCK_DEADLOCK D I C
+DB_LOCK_DEFAULT D I J
+DB_LOCK_DIRTY * I *
+DB_LOCK_DUMP * I *
+DB_LOCK_EXPIRE D I J
+DB_LOCK_FREE_LOCKER * I *
+DB_LOCK_GET D I J
+DB_LOCK_GET_TIMEOUT D I J
+DB_LOCK_INHERIT * I *
+DB_LOCK_IREAD D I J
+DB_LOCK_IWR D I J
+DB_LOCK_IWRITE D I J
+DB_LOCK_MAXLOCKS D I J
+DB_LOCK_MINLOCKS D I J
+DB_LOCK_MINWRITE D I J
+DB_LOCK_NG * I *
+DB_LOCK_NORUN * I *
+DB_LOCK_NOTEXIST * I *
+DB_LOCK_NOTGRANTED D I C
+DB_LOCK_NOWAIT D I J
+DB_LOCK_OLDEST D I J
+DB_LOCK_PUT D I J
+DB_LOCK_PUT_ALL D I J
+DB_LOCK_PUT_OBJ D I J
+DB_LOCK_PUT_READ * I *
+DB_LOCK_RANDOM D I J
+DB_LOCK_READ D I J
+DB_LOCK_RECORD * I *
+DB_LOCK_REMOVE * I *
+DB_LOCK_SET_TIMEOUT * I *
+DB_LOCK_SWITCH * I *
+DB_LOCK_TIMEOUT D I J
+DB_LOCK_TRADE * I *
+DB_LOCK_UPGRADE * I *
+DB_LOCK_UPGRADE_WRITE * I *
+DB_LOCK_WAIT * I *
+DB_LOCK_WRITE D I J
+DB_LOCK_WWRITE * I *
+DB_LOCK_YOUNGEST D I J
+DB_LOGC_BUF_SIZE * I *
+DB_LOGFILEID_INVALID * I *
+DB_LOGMAGIC * I *
+DB_LOGOLDVER * I *
+DB_LOGVERSION * I *
+DB_LOG_DISK * I *
+DB_LOG_LOCKED * I *
+DB_LOG_SILENT_ERR * I *
+DB_LSTAT_ABORTED * I *
+DB_LSTAT_ERR * I *
+DB_LSTAT_EXPIRED * I *
+DB_LSTAT_FREE * I *
+DB_LSTAT_HELD * I *
+DB_LSTAT_NOTEXIST * I *
+DB_LSTAT_PENDING * I *
+DB_LSTAT_WAITING * I *
+DB_MAX_PAGES * I *
+DB_MAX_RECORDS * I *
+DB_MPOOL_CLEAN D I *
+DB_MPOOL_CREATE D I *
+DB_MPOOL_DIRTY D I *
+DB_MPOOL_DISCARD D I *
+DB_MPOOL_LAST D I *
+DB_MPOOL_NEW D I *
+DB_MULTIPLE D I J
+DB_MULTIPLE_INIT D I *
+DB_MULTIPLE_KEY D I J
+DB_MULTIPLE_KEY_NEXT D I *
+DB_MULTIPLE_NEXT D I *
+DB_MULTIPLE_RECNO_NEXT D I *
+DB_NEEDSPLIT * I *
+DB_NEXT D I J
+DB_NEXT_DUP D I J
+DB_NEXT_NODUP D I J
+DB_NOCOPY * I *
+DB_NODUPDATA D I J
+DB_NOLOCKING D I J
+DB_NOMMAP D I J
+DB_NOORDERCHK D I J
+DB_NOOVERWRITE D I J
+DB_NOPANIC D I J
+DB_NOSERVER D I C
+DB_NOSERVER_HOME D I C
+DB_NOSERVER_ID D I C
+DB_NOSYNC D I J
+DB_NOTFOUND D I C
+DB_ODDFILESIZE D I J
+DB_OK_BTREE * I *
+DB_OK_HASH * I *
+DB_OK_QUEUE * I *
+DB_OK_RECNO * I *
+DB_OLD_VERSION D I C
+DB_OPFLAGS_MASK * I *
+DB_ORDERCHKONLY D I J
+DB_OVERWRITE D I J
+DB_PAGE_LOCK * I *
+DB_PAGE_NOTFOUND D I C
+DB_PANIC_ENVIRONMENT D I J
+DB_PERMANENT * I *
+DB_POSITION D I J
+DB_POSITIONI * I *
+DB_PREV D I J
+DB_PREV_NODUP D I J
+DB_PRINTABLE D I J
+DB_PRIORITY_DEFAULT D I J
+DB_PRIORITY_HIGH D I J
+DB_PRIORITY_LOW D I J
+DB_PRIORITY_VERY_HIGH D I J
+DB_PRIORITY_VERY_LOW D I J
+DB_PRIVATE D I J
+DB_PR_PAGE * I *
+DB_PR_RECOVERYTEST * I *
+DB_QAMMAGIC * I *
+DB_QAMOLDVER * I *
+DB_QAMVERSION * I *
+DB_QUEUE D I C
+DB_RDONLY D I J
+DB_RDWRMASTER * I *
+DB_RECNO D I C
+DB_RECNUM D I J
+DB_RECORDCOUNT * I J
+DB_RECORD_LOCK * I *
+DB_RECOVER D I J
+DB_RECOVER_FATAL D I J
+DB_REDO * I *
+DB_REGION_INIT D I J
+DB_REGION_MAGIC * I *
+DB_RENAMEMAGIC * I *
+DB_RENUMBER D I J
+DB_REP_CLIENT D I J
+DB_REP_DUPMASTER D I C
+DB_REP_HOLDELECTION D I C
+DB_REP_LOGSONLY D I J
+DB_REP_MASTER D I J
+DB_REP_NEWMASTER D I C
+DB_REP_NEWSITE D I C
+DB_REP_OUTDATED D I C
+DB_REP_PERMANENT D I J
+DB_REP_UNAVAIL D I J
+DB_REVSPLITOFF D I J
+DB_RMW D I J
+DB_RUNRECOVERY D I C
+DB_SALVAGE D I J
+DB_SECONDARY_BAD D I C
+DB_SET D I J
+DB_SET_LOCK_TIMEOUT D I J
+DB_SET_RANGE D I J
+DB_SET_RECNO D I J
+DB_SET_TXN_NOW * I *
+DB_SET_TXN_TIMEOUT D I J
+DB_SNAPSHOT D I J
+DB_STAT_CLEAR D I J
+DB_SURPRISE_KID * I *
+DB_SWAPBYTES * I *
+DB_SYSTEM_MEM D I J
+DB_TEST_ELECTINIT * I *
+DB_TEST_ELECTSEND * I *
+DB_TEST_ELECTVOTE1 * I *
+DB_TEST_ELECTVOTE2 * I *
+DB_TEST_ELECTWAIT1 * I *
+DB_TEST_ELECTWAIT2 * I *
+DB_TEST_POSTDESTROY * I *
+DB_TEST_POSTLOG * I *
+DB_TEST_POSTLOGMETA * I *
+DB_TEST_POSTOPEN * I *
+DB_TEST_POSTSYNC * I *
+DB_TEST_PREDESTROY * I *
+DB_TEST_PREOPEN * I *
+DB_TEST_SUBDB_LOCKS * I *
+DB_THREAD D I J
+DB_TIMEOUT * I *
+DB_TRUNCATE D I J
+DB_TXNVERSION * I *
+DB_TXN_ABORT D I C
+DB_TXN_APPLY D I C
+DB_TXN_BACKWARD_ALLOC * I *
+DB_TXN_BACKWARD_ROLL D I C
+DB_TXN_CKP * I *
+DB_TXN_FORWARD_ROLL D I C
+DB_TXN_GETPGNOS * I *
+DB_TXN_LOCK * I *
+DB_TXN_NOSYNC D I J
+DB_TXN_NOWAIT D I J
+DB_TXN_OPENFILES * I *
+DB_TXN_POPENFILES * I *
+DB_TXN_PRINT D I C
+DB_TXN_SYNC D I J
+DB_TXN_WRITE_NOSYNC D I J
+DB_UNDO * I *
+DB_UNKNOWN D I C
+DB_UPDATE_SECONDARY * I *
+DB_UPGRADE D I J
+DB_USE_ENVIRON D I J
+DB_USE_ENVIRON_ROOT D I J
+DB_VERB_CHKPOINT D I J
+DB_VERB_DEADLOCK D I J
+DB_VERB_RECOVERY D I J
+DB_VERB_REPLICATION D I J
+DB_VERB_WAITSFOR D I J
+DB_VERIFY D I J
+DB_VERIFY_BAD D I C
+DB_VERIFY_FATAL * I *
+DB_VERSION_MAJOR * I J
+DB_VERSION_MINOR * I J
+DB_VERSION_PATCH * I J
+DB_VERSION_STRING * I *
+DB_WRITECURSOR D I J
+DB_WRITELOCK * I *
+DB_WRITEOPEN * I *
+DB_WRNOSYNC * I *
+DB_XA_CREATE D I J
+DB_XIDDATASIZE D I J
+DB_YIELDCPU D I J
diff --git a/storage/bdb/dist/s_all b/storage/bdb/dist/s_all
new file mode 100644
index 00000000000..132017def3c
--- /dev/null
+++ b/storage/bdb/dist/s_all
@@ -0,0 +1,23 @@
+#!/bin/sh -
+# $Id: s_all,v 1.10 2001/08/04 14:01:44 bostic Exp $
+
+sh s_dir
+
+#sh s_perm # permissions.
+sh s_symlink # symbolic links.
+sh s_readme # db/README file.
+
+#
+# The following order is important, s_include must run last.
+#
+sh s_config # autoconf.
+sh s_recover # logging/recovery files.
+sh s_rpc # RPC files.
+sh s_include # standard include files.
+
+sh s_win32 # Win32 include files.
+sh s_win32_dsp # Win32 build environment.
+#sh s_vxworks # VxWorks include files.
+#sh s_java # Java support.
+sh s_test # Test suite support.
+sh s_tags # Tags files.
diff --git a/storage/bdb/dist/s_config b/storage/bdb/dist/s_config
new file mode 100755
index 00000000000..3e033da81ab
--- /dev/null
+++ b/storage/bdb/dist/s_config
@@ -0,0 +1,45 @@
+#!/bin/sh -
+# $Id: s_config,v 1.9 2002/05/20 19:18:13 bostic Exp $
+#
+# Build the autoconfiguration files.
+
+trap 'rm -f aclocal.m4 ; exit 0' 0 1 2 3 13 15
+
+msgac="# DO NOT EDIT: automatically built by dist/s_config."
+
+. ./RELEASE
+
+echo "Building aclocal.m4"
+(echo "$msgac" &&
+ echo "AC_DEFUN(AM_VERSION_SET, [" &&
+ echo "AC_SUBST(DB_VERSION_MAJOR)" &&
+ echo "DB_VERSION_MAJOR=$DB_VERSION_MAJOR" &&
+ echo "AC_SUBST(DB_VERSION_MINOR)" &&
+ echo "DB_VERSION_MINOR=$DB_VERSION_MINOR" &&
+ echo "AC_SUBST(DB_VERSION_PATCH)" &&
+ echo "DB_VERSION_PATCH=$DB_VERSION_PATCH" &&
+ echo "AC_SUBST(DB_VERSION_UNIQUE_NAME)" &&
+ echo "DB_VERSION_UNIQUE_NAME=$DB_VERSION_UNIQUE_NAME" &&
+ echo "AC_SUBST(DB_VERSION_STRING)" &&
+ echo "DB_VERSION_STRING=\"\\\"$DB_VERSION_STRING\\\"\"" &&
+ echo "])" &&
+ cat aclocal/*.ac aclocal_java/*.ac) > aclocal.m4
+
+echo "Running autoheader to build config.hin"
+rm -f config.hin
+autoheader
+chmod 444 config.hin
+
+echo "Running autoconf to build configure"
+rm -f configure
+autoconf
+
+# Edit version information we couldn't pre-compute.
+(echo "1,\$s/__EDIT_DB_VERSION__/$DB_VERSION/g" &&
+ echo "w" &&
+ echo "q") | ed configure
+
+rm -rf autom4te.cache
+chmod 555 configure
+
+chmod 555 config.guess config.sub install-sh
diff --git a/storage/bdb/dist/s_crypto b/storage/bdb/dist/s_crypto
new file mode 100644
index 00000000000..f7947cb3e10
--- /dev/null
+++ b/storage/bdb/dist/s_crypto
@@ -0,0 +1,59 @@
+#!/bin/sh -
+# $Id: s_crypto,v 11.5 2002/09/13 13:14:14 bostic Exp $
+
+# Remove crypto from the DB source tree.
+
+. ./RELEASE
+
+d=..
+
+t=/tmp/__db_a
+trap 'rm -f $t ; exit 0' 0
+trap 'rm -f $t ; exit 1' 1 2 3 13 15
+
+if ! test -d $d/crypto; then
+ echo "s_crypto: no crypto sources found in the source tree."
+ exit 1
+fi
+
+# Remove the crypto.
+rm -rf $d/crypto
+
+# Update the docs.
+f=$d/docs/ref/env/encrypt.html
+chmod 664 $f
+(echo '/DOES/' &&
+ echo 's/DOES/DOES NOT/' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+# Win/32.
+f=win_config.in
+chmod 664 $f
+(echo '/#define.HAVE_CRYPTO/' &&
+ echo 'c' &&
+ echo '/* #undef HAVE_CRYPTO */'
+ echo '.' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+f=srcfiles.in
+chmod 664 $f
+(echo 'g/^crypto\//d' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+ sh ./s_win32
+ sh ./s_win32_dsp
+
+# VxWorks
+f=vx_config.in
+chmod 664 $f
+(echo '/#define.HAVE_CRYPTO/' &&
+ echo 'c' &&
+ echo '/* #undef HAVE_CRYPTO */'
+ echo '.' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+ sh ./s_vxworks
diff --git a/storage/bdb/dist/s_dir b/storage/bdb/dist/s_dir
new file mode 100644
index 00000000000..58513a8321d
--- /dev/null
+++ b/storage/bdb/dist/s_dir
@@ -0,0 +1,42 @@
+#!/bin/sh -
+
+make_dir()
+{
+ if test ! -d $1; then
+ echo "mkdir $1"
+ mkdir $1
+ status=$?
+ if test $status -ne 0 && test ! -d $1; then
+ echo "error: $status"
+ fi
+ fi
+}
+
+echo "Creating directories..."
+
+make_dir ../test_server
+make_dir ../dbinc_auto
+make_dir ../build_vxworks/BerkeleyDB
+make_dir ../build_vxworks/db_archive
+make_dir ../build_vxworks/db_archive/db_archive
+make_dir ../build_vxworks/db_checkpoint
+make_dir ../build_vxworks/db_checkpoint/db_checkpoint
+make_dir ../build_vxworks/db_deadlock
+make_dir ../build_vxworks/db_deadlock/db_deadlock
+make_dir ../build_vxworks/db_dump
+make_dir ../build_vxworks/db_dump/db_dump
+make_dir ../build_vxworks/db_load
+make_dir ../build_vxworks/db_load/db_load
+make_dir ../build_vxworks/db_printlog
+make_dir ../build_vxworks/db_printlog/db_printlog
+make_dir ../build_vxworks/db_recover
+make_dir ../build_vxworks/db_recover/db_recover
+make_dir ../build_vxworks/db_stat
+make_dir ../build_vxworks/db_stat/db_stat
+make_dir ../build_vxworks/db_upgrade
+make_dir ../build_vxworks/db_upgrade/db_upgrade
+make_dir ../build_vxworks/db_verify
+make_dir ../build_vxworks/db_verify/db_verify
+make_dir ../build_vxworks/dbdemo/dbdemo
+make_dir ../dbinc_auto
+
diff --git a/storage/bdb/dist/s_include b/storage/bdb/dist/s_include
new file mode 100755
index 00000000000..878b4a38af1
--- /dev/null
+++ b/storage/bdb/dist/s_include
@@ -0,0 +1,160 @@
+#!/bin/sh -
+# $Id: s_include,v 1.19 2002/03/27 04:31:50 bostic Exp $
+#
+# Build the automatically generated function prototype files.
+
+msgc="/* DO NOT EDIT: automatically built by dist/s_include. */"
+
+. ./RELEASE
+
+head()
+{
+ defonly=0
+ while :
+ do case "$1" in
+ space)
+ echo ""; shift;;
+ defonly)
+ defonly=1; shift;;
+ *)
+ name="$1"; break;;
+ esac
+ done
+
+ echo "$msgc"
+ echo "#ifndef $name"
+ echo "#define $name"
+ echo ""
+ if [ $defonly -eq 0 ]; then
+ echo "#if defined(__cplusplus)"
+ echo "extern \"C\" {"
+ echo "#endif"
+ echo ""
+ fi
+}
+
+tail()
+{
+ defonly=0
+ while :
+ do case "$1" in
+ defonly)
+ defonly=1; shift;;
+ *)
+ name="$1"; break;;
+ esac
+ done
+
+ echo ""
+ if [ $defonly -eq 0 ]; then
+ echo "#if defined(__cplusplus)"
+ echo "}"
+ echo "#endif"
+ fi
+ echo "#endif /* !$name */"
+}
+
+# We are building several files:
+# 1 external #define file
+# 1 external prototype file
+# 1 internal #define file
+# N internal prototype files
+e_dfile=/tmp/__db_c.$$
+e_pfile=/tmp/__db_a.$$
+i_dfile=/tmp/__db_d.$$
+i_pfile=/tmp/__db_b.$$
+trap 'rm -f $e_dfile $e_pfile $i_dfile $i_pfile; exit 0' 0 1 2 3 13 15
+
+head defonly space _DB_EXT_DEF_IN_ > $e_dfile
+head space _DB_EXT_PROT_IN_ > $e_pfile
+head defonly _DB_INT_DEF_IN_ > $i_dfile
+
+# Process the standard directories, creating per-directory prototype
+# files and adding to the external prototype and #define files.
+for i in db btree clib common dbreg env fileops hash hmac \
+ lock log mp mutex os qam rep rpc_client rpc_server tcl txn xa; do
+ head "_${i}_ext_h_" > $i_pfile
+
+ f="../$i/*.c"
+ [ $i = os ] && f="$f ../os_win32/*.c"
+ [ $i = rpc_server ] && f="../$i/c/*.c"
+ [ $i = crypto ] && f="../$i/*.c ../$i/*/*.c"
+ awk -f gen_inc.awk \
+ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
+ -v e_dfile=$e_dfile \
+ -v e_pfile=$e_pfile \
+ -v i_dfile=$i_dfile \
+ -v i_pfile=$i_pfile $f
+
+ tail "_${i}_ext_h_" >> $i_pfile
+
+ f=../dbinc_auto/${i}_ext.h
+ cmp $i_pfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $i_pfile $f && chmod 444 $f)
+done
+
+# Process directories which only add to the external prototype and #define
+# files.
+for i in dbm hsearch; do
+ f="../$i/*.c"
+ awk -f gen_inc.awk \
+ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
+ -v e_dfile=$e_dfile \
+ -v e_pfile=$e_pfile \
+ -v i_dfile="" \
+ -v i_pfile="" $f
+done
+
+# RPC uses rpcgen to generate a header file; post-process it to add more
+# interfaces to the internal #define file.
+sed -e '/extern bool_t xdr___/{' \
+ -e 's/.* //' \
+ -e 's/();//' \
+ -e 's/.*/#define & &@DB_VERSION_UNIQUE_NAME@/' \
+ -e 'p' \
+ -e '}' \
+ -e d < ../dbinc_auto/db_server.h >> $i_dfile
+
+# There are a few globals in DB -- add them to the external/internal
+# #define files.
+(echo "#define __db_global_values __db_global_values@DB_VERSION_UNIQUE_NAME@";
+ echo "#define __db_jump __db_jump@DB_VERSION_UNIQUE_NAME@") >> $i_dfile
+(echo "#define db_xa_switch db_xa_switch@DB_VERSION_UNIQUE_NAME@") >> $e_dfile
+
+# Wrap up the external #defines/prototypes, and internal #defines.
+tail defonly _DB_EXT_DEF_IN_ >> $e_dfile
+f=../dbinc_auto/ext_def.in
+cmp $e_dfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_dfile $f && chmod 444 $f)
+
+tail _DB_EXT_PROT_IN_ >> $e_pfile
+f=../dbinc_auto/ext_prot.in
+cmp $e_pfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_pfile $f && chmod 444 $f)
+
+tail defonly _DB_INT_DEF_IN_ >> $i_dfile
+f=../dbinc_auto/int_def.in
+cmp $i_dfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $i_dfile $f && chmod 444 $f)
+
+# DB185 compatibility support.
+head space defonly _DB_EXT_185_DEF_IN_ > $e_dfile
+head space _DB_EXT_185_PROT_IN_ > $e_pfile
+
+f="../db185/*.c"
+awk -f gen_inc.awk \
+ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
+ -v e_dfile=$e_dfile \
+ -v e_pfile=$e_pfile \
+ -v i_dfile="" \
+ -v i_pfile="" $f
+
+tail defonly _DB_EXT_185_DEF_IN_ >> $e_dfile
+f=../dbinc_auto/ext_185_def.in
+cmp $e_dfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_dfile $f && chmod 444 $f)
+
+tail _DB_EXT_185_PROT_IN_ >> $e_pfile
+f=../dbinc_auto/ext_185_prot.in
+cmp $e_pfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_pfile $f && chmod 444 $f)
diff --git a/storage/bdb/dist/s_java b/storage/bdb/dist/s_java
new file mode 100755
index 00000000000..f7c96e823a1
--- /dev/null
+++ b/storage/bdb/dist/s_java
@@ -0,0 +1,273 @@
+#!/bin/sh -
+# $Id: s_java,v 1.13 2002/09/09 20:47:30 bostic Exp $
+#
+# Build the Java files.
+
+msgjava="/* DO NOT EDIT: automatically built by dist/s_java. */"
+
+. ./RELEASE
+
+t=/tmp/__java
+c=/tmp/__javajnic
+h=/tmp/__javajnih
+trap 'rm -f $t $c $h; exit 0' 0 1 2 3 13 15
+
+# Build DbConstants.java.
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo 'class DbConstants' &&
+ echo '{' &&
+ for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/" \
+ -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/" \
+ -e "s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/" \
+ -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' \
+ -e 's/\/\*/ /' | \
+ awk '{ print " static final int " $1 " = " $2 ";" }' &&
+ echo '}' &&
+ echo &&
+ echo '// end of DbConstants.java') > $t
+
+f=../java/src/com/sleepycat/db/DbConstants.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build Db.java.
+f=../java/src/com/sleepycat/db/Db.java.in
+sed '/BEGIN-JAVA-SPECIAL-CONSTANTS/q' < $f > $t
+(echo " $msgjava" &&
+ for i in `egrep '^DB_.*C$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' |
+ awk '{ print " public static final int " $1 " = " $2 ";" }') >> $t
+(for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' |
+ awk '{ print " public static final int " $1 ";" }') >> $t
+sed -n \
+ '/END-JAVA-SPECIAL-CONSTANTS/,/BEGIN-JAVA-CONSTANT-INITIALIZATION/p' \
+ < $f >> $t
+(echo " $msgjava" &&
+ for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' \
+ -e 's/\/\*/ /' | \
+ awk '{ print " " $1 " = DbConstants." $1 ";" }') >> $t
+sed -n '/END-JAVA-CONSTANT-INITIALIZATION/,$p' < $f >> $t
+
+f=../java/src/com/sleepycat/db/Db.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Script to convert DB C structure declarations into Java declarations.
+jclass()
+{
+ cat ../dbinc/db.in |
+ sed -n \
+ -e "/struct $1 {/,/^}/{" \
+ -e "/$1/d" \
+ -e '/;/!d' \
+ -e '/^}/d' \
+ -e '/char[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/$2 public String \1;/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/time_t/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public long \1;/p" \
+ -e 'd' \
+ -e '}' \
+ -e '/DB_LSN[ ]*/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public DbLsn \1;/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/DB_TXN_ACTIVE[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/$2 public Active \1[];/p"\
+ -e 'd' \
+ -e '}' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public int \1;/p" \
+ -e '}'
+}
+
+# Script to convert DB C structure declarations into Java declarations.
+jclass_jni()
+{
+ c=$3
+ h=$4
+ echo "extern int $2(JNIEnv *jnienv, jclass cl, jobject jobj, struct $1 *statp);" >> $h
+ echo "int $2(JNIEnv *jnienv, jclass cl," >> $c
+ echo " jobject jobj, struct $1 *statp) {" >> $c
+ cat ../dbinc/db.in |
+ sed -n \
+ -e "/struct $1 {/,/^}/{" \
+ -e "/$1/d" \
+ -e '/;/!d' \
+ -e '/^}/d' \
+ -e '/char[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/ JAVADB_STAT_STRING(jnienv, cl, jobj, statp, \1);/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/time_t/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_LONG(jnienv, cl, jobj, statp, \1);/p" \
+ -e 'd' \
+ -e '}' \
+ -e '/DB_LSN[ ]*/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, \1);/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/DB_TXN_ACTIVE[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/ JAVADB_STAT_ACTIVE(jnienv, cl, jobj, statp, \1);/p"\
+ -e 'd' \
+ -e '}' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_INT(jnienv, cl, jobj, statp, \1);/p" \
+ -e '}' >> $c
+ echo ' return (0);' >> $c
+ echo '}' >> $c
+}
+
+echo "$msgjava" >> $c
+echo "$msgjava" >> $h
+echo '#include "java_util.h"' >> $c
+
+# Build DbBtreeStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbBtreeStat"
+ echo '{'
+ jclass __db_bt_stat &&
+ echo '}' &&
+ echo '// end of DbBtreeStat.java') > $t
+jclass_jni __db_bt_stat __jv_fill_bt_stat $c $h
+f=../java/src/com/sleepycat/db/DbBtreeStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbHashStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbHashStat"
+ echo '{'
+ jclass __db_h_stat &&
+ echo '}' &&
+ echo '// end of DbHashStat.java') > $t
+jclass_jni __db_h_stat __jv_fill_h_stat $c $h
+f=../java/src/com/sleepycat/db/DbHashStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbLockStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbLockStat"
+ echo '{'
+ jclass __db_lock_stat &&
+ echo '}' &&
+ echo '// end of DbLockStat.java') > $t
+jclass_jni __db_lock_stat __jv_fill_lock_stat $c $h
+f=../java/src/com/sleepycat/db/DbLockStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbLogStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbLogStat"
+ echo '{'
+ jclass __db_log_stat &&
+ echo '}' &&
+ echo '// end of DbLogStat.java') > $t
+jclass_jni __db_log_stat __jv_fill_log_stat $c $h
+f=../java/src/com/sleepycat/db/DbLogStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbMpoolFStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbMpoolFStat"
+ echo '{'
+ jclass __db_mpool_fstat &&
+ echo '}' &&
+ echo '// end of DbMpoolFStat.java') > $t
+jclass_jni __db_mpool_stat __jv_fill_mpool_stat $c $h
+f=../java/src/com/sleepycat/db/DbMpoolFStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbQueueStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbQueueStat"
+ echo '{'
+ jclass __db_qam_stat &&
+ echo '}' &&
+ echo '// end of DbQueueStat.java') > $t
+jclass_jni __db_qam_stat __jv_fill_qam_stat $c $h
+f=../java/src/com/sleepycat/db/DbQueueStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbRepStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbRepStat"
+ echo '{'
+ jclass __db_rep_stat &&
+ echo '}' &&
+ echo '// end of DbRepStat.java') > $t
+jclass_jni __db_rep_stat __jv_fill_rep_stat $c $h
+f=../java/src/com/sleepycat/db/DbRepStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbTxnStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbTxnStat"
+ echo '{'
+ echo " public static class Active {"
+ jclass __db_txn_active " " &&
+ echo ' };' &&
+ jclass __db_txn_stat &&
+ echo '}' &&
+ echo '// end of DbTxnStat.java') > $t
+jclass_jni __db_txn_stat __jv_fill_txn_stat $c $h
+f=../java/src/com/sleepycat/db/DbTxnStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+mv $c $t
+f=../libdb_java/java_stat_auto.c
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+mv $h $t
+f=../libdb_java/java_stat_auto.h
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/storage/bdb/dist/s_javah b/storage/bdb/dist/s_javah
new file mode 100755
index 00000000000..67c41d09c4d
--- /dev/null
+++ b/storage/bdb/dist/s_javah
@@ -0,0 +1,55 @@
+#!/bin/sh -
+# $Id: s_javah,v 1.1 2002/08/14 17:14:24 dda Exp $
+#
+# Use javah to build the libdb_java/com_*.h header files.
+#
+# To run this, you will need a javac and javah in your PATH.
+# If possible, install tools with a recent vintage, JDK 1.3 or higher is good.
+# Using Sun's JDK rather than some other installation ensures
+# that the header files will not be constantly changed.
+
+. ./RELEASE
+
+JAVAC=javac
+JAVAH=javah
+export CLASSPATH
+CLASSPATH=
+
+# CLASSES are only those classes for which we have native methods.
+D=com.sleepycat.db
+CLASSES="$D.Dbc $D.DbEnv $D.Db $D.DbLock $D.DbLogc $D.DbLsn $D.Dbt $D.DbTxn $D.xa.DbXAResource"
+
+d=/tmp/__javah
+c=$d/classes
+trap 'rm -rf $d; exit 0' 0 1 2 3 13 15
+
+rm -rf $d
+mkdir $d || exit 1
+mkdir $c || exit 1
+
+# Make skeleton versions of XA classes and interfaces
+# We only need to compile them, not run them.
+pkg="package javax.transaction.xa"
+echo "$pkg; public interface XAResource {}" > $d/XAResource.java
+echo "$pkg; public interface Xid {}" > $d/Xid.java
+echo "$pkg; public class XAException extends Exception {}" \
+ > $d/XAException.java
+
+
+# Create the .class files and use them with javah to create the .h files
+${JAVAC} -d $c $d/*.java \
+ ../java/src/com/sleepycat/db/*.java \
+ ../java/src/com/sleepycat/db/xa/*.java || exit 1
+${JAVAH} -classpath $c -d $d ${CLASSES} || exit 1
+
+for cl in ${CLASSES}; do
+ h=`echo $cl | sed -e 's/\./_/g'`.h
+ t=$d/$h
+ f=../libdb_java/$h
+ if [ ! -f $t ]; then
+ echo "ERROR: $t does not exist"
+ exit 1
+ fi
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+done
diff --git a/storage/bdb/dist/s_perm b/storage/bdb/dist/s_perm
new file mode 100755
index 00000000000..c35278b8c83
--- /dev/null
+++ b/storage/bdb/dist/s_perm
@@ -0,0 +1,49 @@
+#!/bin/sh -
+# $Id: s_perm,v 1.23 2002/09/09 15:03:06 bostic Exp $
+
+d=..
+echo 'Updating Berkeley DB source tree permissions...'
+
+. ./RELEASE
+
+run()
+{
+ echo " $1 ($2)"
+ if [ -f "$d/$1" ]; then
+ chmod "$2" "$d/$1"
+ else
+ echo "$d/$1: no such file or directory"
+ exit 1
+ fi
+}
+
+run build_win32/include.tcl 664
+run dist/config.guess 555
+run dist/config.sub 555
+run dist/configure 555
+run dist/install-sh 555
+run dist/s_all 555
+run dist/s_config 555
+run dist/s_include 555
+run dist/s_java 555
+run dist/s_javah 555
+run dist/s_perm 555
+run dist/s_readme 555
+run dist/s_recover 555
+run dist/s_rpc 555
+run dist/s_symlink 555
+run dist/s_tags 555
+run dist/s_test 555
+run dist/s_vxworks 555
+run dist/s_win32 555
+run dist/s_win32_dsp 555
+run dist/vx_buildcd 555
+
+run perl/BerkeleyDB/dbinfo 555
+run perl/BerkeleyDB/mkpod 555
+
+for i in `cd $d && find build_vxworks \
+ -name '*.wsp' -o -name '*.cdf' -o -name '*.wpj'`; do
+ echo " $i (775)"
+ chmod 775 $d/$i
+done
diff --git a/storage/bdb/dist/s_readme b/storage/bdb/dist/s_readme
new file mode 100755
index 00000000000..1da9f9681c0
--- /dev/null
+++ b/storage/bdb/dist/s_readme
@@ -0,0 +1,23 @@
+#!/bin/sh -
+# $Id: s_readme,v 1.6 2002/02/14 02:50:26 bostic Exp $
+#
+# Build the README.
+
+d=..
+
+t=/tmp/__t
+trap 'rm -f $t; exit 0' 0 1 2 3 13 15
+
+. ./RELEASE
+
+cat << END_OF_README>$t
+$DB_VERSION_STRING
+
+This is version $DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH of Berkeley DB from Sleepycat Software. To view
+the release and installation documentation, load the distribution file
+docs/index.html into your web browser.
+END_OF_README
+
+f=../README
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/storage/bdb/dist/s_recover b/storage/bdb/dist/s_recover
new file mode 100755
index 00000000000..fc2e160c083
--- /dev/null
+++ b/storage/bdb/dist/s_recover
@@ -0,0 +1,69 @@
+#!/bin/sh -
+# $Id: s_recover,v 1.14 2002/03/27 04:31:51 bostic Exp $
+#
+# Build the automatically generated logging/recovery files.
+
+. ./RELEASE
+
+tmp=/tmp/__db_a
+loglist=/tmp/__db_b
+source=/tmp/__db_c
+header=/tmp/__db_d
+template=/tmp/__db_e
+
+trap 'rm -f $tmp $loglist $source $header $template; exit 1' 1 2 3 13 15
+trap 'rm -f $tmp $loglist $source $header $template; exit 0' 0
+
+DIR="db dbreg btree hash qam txn"
+
+# Check to make sure we haven't duplicated a log record entry, and build
+# the list of log record types that the test suite uses.
+for i in $DIR; do
+ p=none
+ for f in ../$i/*.src; do
+ # Grab the PREFIX; there should only be one per file, and
+ # so it's okay to just take the first.
+ grep '^PREFIX' $f | sed q
+ egrep '^BEGIN[ ]|^IGNORED[ ]|^DEPRECATED[ ]' $f |
+ awk '{print $1 "\t" $2 "\t" $3}'
+
+ done
+done > $loglist
+grep -v '^PREFIX' $loglist |
+ awk '{print $2 "\t" $3}' | sort -n -k 2 | uniq -d -f 1 > $tmp
+[ -s $tmp ] && {
+ echo "DUPLICATE LOG VALUES:"
+ cat $tmp
+ rm -f $tmp
+ exit 1
+}
+f=../test/logtrack.list
+cmp $loglist $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $loglist $f && chmod 444 $f)
+
+# Build DB's recovery routines.
+for i in db dbreg btree fileops hash qam txn; do
+ for f in ../$i/*.src; do
+ subsystem=`basename $f .src`
+ awk -f gen_rec.awk \
+ -v source_file=$source \
+ -v header_file=$header \
+ -v template_file=$template < $f
+
+ f=../dbinc_auto/${subsystem}_auto.h
+ cmp $header $f > /dev/null 2>&1 ||
+ (echo "Building $f" &&
+ rm -f $f && cp $header $f && chmod 444 $f)
+ f=../$i/${subsystem}_auto.c
+ cmp $source $f > /dev/null 2>&1 ||
+ (echo "Building $f" &&
+ rm -f $f && cp $source $f && chmod 444 $f)
+ f=template/rec_${subsystem}
+ cmp $template $f > /dev/null 2>&1 ||
+ (echo "Building $f" &&
+ rm -f $f && cp $template $f && chmod 444 $f)
+ done
+done
+
+# Build the example application's recovery routines.
+#(cd ../examples_c/ex_apprec && sh auto_rebuild)
diff --git a/storage/bdb/dist/s_rpc b/storage/bdb/dist/s_rpc
new file mode 100644
index 00000000000..cdafa669d85
--- /dev/null
+++ b/storage/bdb/dist/s_rpc
@@ -0,0 +1,134 @@
+#!/bin/sh -
+# $Id: s_rpc,v 11.18 2002/08/15 15:22:09 bostic Exp $
+#
+# Build the automatically generated RPC files
+
+echo "Building RPC client/server files"
+
+. ./RELEASE
+
+t=/tmp/__db_a
+trap 'rm -f $t ; exit 0' 0
+trap 'rm -f $t ; exit 1' 1 2 3 13 15
+
+client_file=../rpc_client/gen_client.c
+ctmpl_file=./template/gen_client_ret
+dbinc_file=../dbinc/db.in
+defs_file=../dbinc_auto/rpc_defs.in
+header_file=../dbinc_auto/db_server.h
+proc_file=../rpc_server/c/db_server_proc.c
+rpcclnt_file=../rpc_client/db_server_clnt.c
+rpcsvc_file=../rpc_server/c/db_server_svc.c
+rpcxdr_file=../rpc_server/c/db_server_xdr.c
+sed_file=../rpc_server/c/db_server_proc.sed
+server_file=../rpc_server/c/gen_db_server.c
+stmpl_file=./template/db_server_proc
+xdr_file=../rpc_server/db_server.x
+
+rm -f $client_file \
+ $ctmpl_file \
+ $header_file \
+ $rpcclnt_file \
+ $rpcsvc_file \
+ $proc_file \
+ $rpcxdr_file \
+ $sed_file \
+ $server_file \
+ $stmpl_file \
+ $xdr_file
+
+#
+# Generate client/server/XDR code
+#
+xidsize=\
+`awk '/^#define/ { if ($2 == "DB_XIDDATASIZE") { print $3 }}' $dbinc_file`
+
+awk -f gen_rpc.awk \
+ -v major=$DB_VERSION_MAJOR \
+ -v minor=$DB_VERSION_MINOR \
+ -v xidsize=$xidsize \
+ -v client_file=$client_file \
+ -v ctmpl_file=$ctmpl_file \
+ -v sed_file=$sed_file \
+ -v server_file=$server_file \
+ -v stmpl_file=$stmpl_file \
+ -v xdr_file=$xdr_file < ../rpc_server/rpc.src
+chmod 444 $client_file $server_file
+
+#
+# Now run rpcgen to generate all our sources from the XDR file
+#
+rpcgen -h $xdr_file > $header_file
+rpcgen -l $xdr_file > $rpcclnt_file
+rpcgen -s tcp $xdr_file > $rpcsvc_file
+rpcgen -c $xdr_file > $rpcxdr_file
+
+#
+# Run various server files through sed.
+#
+cat <<ENDOFSEDTEXT>$t
+s/^#include[ ]"db_server.h"/#include "db_config.h"\\
+\\
+\\#ifndef NO_SYSTEM_INCLUDES\\
+\\#include <rpc\\/rpc.h>\\
+\\#include <rpc\\/pmap_clnt.h>/
+/^#include <netinet.in.h>/a\\
+\\#endif\\
+\\
+\\#include "db_int.h"\\
+\\#include "dbinc_auto/db_server.h"\\
+\\#include "dbinc/db_server_int.h"\\
+\\#include "dbinc_auto/rpc_server_ext.h"
+/^ return;/i\\
+\\ __dbsrv_timeout(0);
+s/svc_sendreply(transp, xdr_void,/svc_sendreply(transp, (xdrproc_t)xdr_void,/
+s/svc_getargs(transp, xdr_argument, &argument)/svc_getargs(transp, (xdrproc_t)xdr_argument, (char *)\&argument)/
+s/svc_sendreply(transp, xdr_result, result)/svc_sendreply(transp, (xdrproc_t)xdr_result, result)/
+s/svc_freeargs(transp, xdr_argument, &argument)/svc_freeargs(transp, (xdrproc_t)xdr_argument, (char *)\&argument)/
+s/^main/void __dbsrv_main/
+ENDOFSEDTEXT
+sed -f $t $rpcsvc_file > ${rpcsvc_file}.new
+mv ${rpcsvc_file}.new $rpcsvc_file
+
+sed -f $sed_file ${proc_file}.in > ${proc_file}
+
+# Run rpcgen files through sed to add HAVE_RPC ifdef and appropriate
+# includes.
+cat <<ENDOFSEDTEXT>$t
+1i\\
+\\#include "db_config.h"\\
+\\
+\\#ifdef HAVE_RPC
+/^#include "db_server.h"/c\\
+\\#ifndef NO_SYSTEM_INCLUDES\\
+\\#include <rpc/rpc.h>\\
+\\
+\\#include <strings.h>\\
+\\#endif\\
+\\
+\\#include "db_int.h"\\
+\\#include "dbinc_auto/db_server.h"
+\$a\\
+\\#endif /* HAVE_RPC */
+ENDOFSEDTEXT
+
+sed -f $t $rpcxdr_file > ${rpcxdr_file}.new
+mv ${rpcxdr_file}.new $rpcxdr_file
+sed -f $t $rpcclnt_file > ${rpcclnt_file}.new
+mv ${rpcclnt_file}.new $rpcclnt_file
+
+# Copy the DB_RPC SERVER #defines into a separate file so
+# they can be part of db.h.
+msgc="/* DO NOT EDIT: automatically built by dist/s_rpc. */"
+(echo "" && echo "$msgc" &&
+ sed -n -e "/DB_RPC_SERVER/p" $header_file) > $defs_file
+
+# Fix up the header file:
+# Remove the DB_RPC_SERVER #defines.
+# Remove the <rpc/rpc.h> include, it needs to be included earlier
+# than that.
+sed -e "/DB_RPC_SERVER/d"\
+ -e "/^#include.*<rpc\/rpc.h>/d" $header_file > ${header_file}.new
+mv ${header_file}.new $header_file
+
+chmod 444 $header_file $rpcclnt_file $rpcsvc_file $rpcxdr_file
diff --git a/storage/bdb/dist/s_symlink b/storage/bdb/dist/s_symlink
new file mode 100755
index 00000000000..8da49ca0c75
--- /dev/null
+++ b/storage/bdb/dist/s_symlink
@@ -0,0 +1,59 @@
+#!/bin/sh -
+# $Id: s_symlink,v 1.28 2002/08/18 21:15:45 bostic Exp $
+
+echo 'Creating Berkeley DB source tree symbolic links...'
+. ./RELEASE
+
+build()
+{
+ echo " $1 -> $2"
+ (cd ../`dirname $1` && rm -f `basename $1` && ln -s $2 `basename $1`)
+}
+
+build btree/tags ../dist/tags
+build build_unix/tags ../dist/tags
+build clib/tags ../dist/tags
+build common/tags ../dist/tags
+#build crypto/tags ../dist/tags
+build cxx/tags ../dist/tags
+build db/tags ../dist/tags
+build db185/tags ../dist/tags
+build db_archive/tags ../dist/tags
+build db_checkpoint/tags ../dist/tags
+build db_deadlock/tags ../dist/tags
+build db_dump/tags ../dist/tags
+build db_dump185/tags ../dist/tags
+build db_load/tags ../dist/tags
+build db_printlog/tags ../dist/tags
+build db_recover/tags ../dist/tags
+build db_stat/tags ../dist/tags
+build db_upgrade/tags ../dist/tags
+build db_verify/tags ../dist/tags
+build dbinc/tags ../dist/tags
+build dbinc_auto/tags ../dist/tags
+build dbm/tags ../dist/tags
+build dbreg/tags ../dist/tags
+build env/tags ../dist/tags
+#build examples_c/tags ../dist/tags
+#build examples_cxx/tags ../dist/tags
+#build examples_java java/src/com/sleepycat/examples
+build fileops/tags ../dist/tags
+build hash/tags ../dist/tags
+build hmac/tags ../dist/tags
+build hsearch/tags ../dist/tags
+build libdb_java/tags ../dist/tags
+build lock/tags ../dist/tags
+build log/tags ../dist/tags
+build mp/tags ../dist/tags
+build mutex/tags ../dist/tags
+build os/tags ../dist/tags
+build os_vxworks/tags ../dist/tags
+build os_win32/tags ../dist/tags
+build qam/tags ../dist/tags
+build rep/tags ../dist/tags
+build rpc_client/tags ../dist/tags
+build rpc_server/tags ../dist/tags
+build tcl/tags ../dist/tags
+build test_server/tags ../dist/tags
+build txn/tags ../dist/tags
+build xa/tags ../dist/tags
diff --git a/storage/bdb/dist/s_tags b/storage/bdb/dist/s_tags
new file mode 100755
index 00000000000..18b6025aa86
--- /dev/null
+++ b/storage/bdb/dist/s_tags
@@ -0,0 +1,62 @@
+#!/bin/sh -
+# $Id: s_tags,v 1.16 2002/03/28 20:13:07 krinsky Exp $
+#
+# Build tags files.
+
+. ./RELEASE
+
+files="../dbinc/*.h \
+ ../dbinc/*.in \
+ ../btree/*.[ch] \
+ ../clib/*.[ch] \
+ ../common/*.[ch] \
+# ../crypto/*.[ch] \
+ ../crypto/mersenne/*.[ch] \
+ ../crypto/rijndael/*.[ch] \
+ ../db/*.[ch] \
+ ../db185/*.[ch] \
+ ../dbm/*.[ch] \
+ ../dbreg/*.[ch] \
+ ../env/*.[ch] \
+ ../fileops/*.[ch] \
+ ../hash/*.[ch] \
+ ../hmac/*.[ch] \
+ ../hsearch/*.[ch] \
+ ../lock/*.[ch] \
+ ../log/*.[ch] \
+ ../mp/*.[ch] \
+ ../mutex/*.[ch] \
+ ../os/*.[ch] \
+ ../qam/*.[ch] \
+ ../rep/*.[ch] \
+ ../rpc_client/*.[ch] \
+ ../rpc_server/c/*.[ch] \
+ ../tcl/*.[ch] \
+ ../txn/*.[ch] \
+ ../xa/*.[ch] \
+ ../cxx/*.cpp \
+ ../libdb_java/*.[ch]"
+
+f=tags
+echo "Building $f"
+rm -f $f
+
+# Figure out what flags this ctags accepts.
+flags=""
+if ctags -d ../db/db.c 2>/dev/null; then
+ flags="-d $flags"
+fi
+if ctags -t ../db/db.c 2>/dev/null; then
+ flags="-t $flags"
+fi
+if ctags -w ../db/db.c 2>/dev/null; then
+ flags="-w $flags"
+fi
+
+ctags $flags $files 2>/dev/null
+chmod 444 $f
+
+#f=../test_perf/tags
+#echo "Building $f"
+#(cd ../test_perf && ctags $flags *.[ch] 2>/dev/null)
+#chmod 444 $f
diff --git a/storage/bdb/dist/s_test b/storage/bdb/dist/s_test
new file mode 100755
index 00000000000..16f3b9712d0
--- /dev/null
+++ b/storage/bdb/dist/s_test
@@ -0,0 +1,92 @@
+#!/bin/sh -
+# $Id: s_test,v 1.24 2002/08/09 02:24:58 bostic Exp $
+#
+# Build the Tcl test files.
+
+msg1="# Automatically built by dist/s_test; may require local editing."
+msg2="# Automatically built by dist/s_test; may require local editing."
+
+t=/tmp/__t
+trap 'rm -f $t; exit 0' 0 1 2 3 13 15
+
+. ./RELEASE
+
+(echo "$msg1" && \
+ echo "" && \
+ echo "set tclsh_path @TCL_TCLSH@" && \
+ echo "set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@MODSUFFIX@" && \
+ echo "" && \
+ echo "set rpc_server localhost" && \
+ echo "set rpc_path ." && \
+ echo "set rpc_testdir \$rpc_path/TESTDIR" && \
+ echo "" && \
+ echo "set src_root @srcdir@/.." && \
+ echo "set test_path @srcdir@/../test" && \
+ echo "" && \
+ echo "global testdir" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global util_path" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test" && \
+ echo "" && \
+ echo "set KILL \"@db_cv_path_kill@\"") > $t
+
+f=../test/include.tcl
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(echo "$msg1" && \
+ echo "" && \
+ echo "set tclsh_path SET_YOUR_TCLSH_PATH" && \
+ echo "set tcllib ./Debug/libdb_tcl${DB_VERSION_MAJOR}${DB_VERSION_MINOR}d.dll" && \
+ echo "" && \
+ echo "set src_root .." && \
+ echo "set test_path ../test" && \
+ echo "" && \
+ echo "global testdir" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global util_path" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test" && \
+ echo "" && \
+ echo "set KILL ./dbkill.exe") > $t
+
+f=../build_win32/include.tcl
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build the test directory TESTS file.
+(echo $msg2;
+cat `egrep -l '^#[ ][ ]*TEST' ../test/*.tcl` |
+sed -e '/^#[ ][ ]*TEST/!{' \
+ -e 's/.*//' \
+ -e '}' |
+cat -s |
+sed -e '/TEST/{' \
+ -e 's/^#[ ][ ]*TEST[ ]*//' \
+ -e 's/^ //' \
+ -e 'H' \
+ -e 'd' \
+ -e '}' \
+ -e 's/.*//' \
+ -e x \
+ -e 's/\n/__LINEBREAK__/g' |
+sort |
+sed -e 's/__LINEBREAK__/\
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\
+/' \
+ -e 's/__LINEBREAK__/\
+ /g' |
+sed -e 's/^[ ][ ]*$//') > $t
+
+f=../test/TESTS
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/storage/bdb/dist/s_vxworks b/storage/bdb/dist/s_vxworks
new file mode 100644
index 00000000000..05c2599d02c
--- /dev/null
+++ b/storage/bdb/dist/s_vxworks
@@ -0,0 +1,324 @@
+#!/bin/sh -
+# $Id: s_vxworks,v 1.37 2002/08/19 21:27:06 bostic Exp $
+#
+# Build the VxWorks files.
+
+msgc="/* DO NOT EDIT: automatically built by dist/s_vxworks. */"
+
+. ./RELEASE
+
+s=/tmp/__db_a
+t=/tmp/__db_b
+vxfilelist=/tmp/__db_c
+
+trap 'rm -f $s $t $vxfilelist ; exit 0' 0
+trap 'rm -f $s $t $vxfilelist ; exit 1' 1 2 3 13 15
+
+# Build the VxWorks automatically generated files.
+f=../build_vxworks/db.h
+cat <<ENDOFSEDTEXT > $s
+/extern "C" {/{
+n
+n
+i\\
+\\
+/* Tornado 2 does not provide a standard C pre-processor #define. */\\
+#ifndef __vxworks\\
+#define __vxworks\\
+#endif
+}
+s/@u_int8_decl@/typedef unsigned char u_int8_t;/
+s/@u_int16_decl@/typedef unsigned short u_int16_t;/
+s/@u_int32_decl@/typedef unsigned int u_int32_t;/
+/@int16_decl@/d
+/@int32_decl@/d
+/@u_char_decl@/d
+/@u_short_decl@/d
+/@u_int_decl@/d
+/@u_long_decl@/d
+/@ssize_t_decl@/d
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/
+s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/
+s/@DB_VERSION_UNIQUE_NAME@//
+ENDOFSEDTEXT
+(echo "$msgc" &&
+ sed -f $s ../dbinc/db.in &&
+ cat ../dbinc_auto/rpc_defs.in &&
+ cat ../dbinc_auto/ext_prot.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_vxworks/db_int.h
+cat <<ENDOFSEDTEXT > $s
+s/\(PATH_SEPARATOR[^"]*"\)\/"/\1\/\\\\\\\\\\"/
+s/@db_align_t_decl@/typedef unsigned long db_align_t;/
+s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/
+s/@db_int_def@//
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_vxworks/db_config.h
+(echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" vx_config.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build a sed script that will change a "standard" DB utility into
+# VxWorks-compatible code.
+transform()
+{
+ # Build a sed script that will add argument parsing support and
+ # rename all of the functions to be private to this file.
+cat <<ENDOFSEDTEXT
+/^main(argc, argv)$/{
+i\\
+$1(args)\\
+\\ char *args;\\
+{\\
+\\ int argc;\\
+\\ char **argv;\\
+\\
+\\ __db_util_arg("$1", args, &argc, &argv);\\
+\\ return ($1_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);\\
+}\\
+\\
+#include <stdio.h>\\
+#define ERROR_RETURN ERROR\\
+\\
+int\\
+$1_main(argc, argv)
+d
+}
+/^ while ((ch = getopt/i\\
+\\ __db_getopt_reset = 1;
+/^[ ]*extern int optind;/s/;/, __db_getopt_reset;/
+ENDOFSEDTEXT
+
+ # Replace all function names with VxWorks safe names.
+ # Function names are:
+ # Tokens starting at the beginning of the line, immediately
+ # followed by an opening parenthesis.
+ # Replace:
+ # Matches preceded by a non-C-token character and immediately
+ # followed by an opening parenthesis.
+ # Matches preceded by a non-C-token character and immediately
+ # followed by " __P".
+ # Matches starting at the beginning of the line, immediately
+ # followed by an opening parenthesis.
+ for k in `sed -e 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)(.*$/\1/p' -e d $2`; do
+ echo "s/\([^a-zA-Z0-9_]\)\($k(\)/\1$1_\2/g"
+ echo "s/\([^a-zA-Z0-9_]\)\($k[ ]__P\)/\1$1_\2/g"
+ echo "s/^\($k(\)/$1_\1/g"
+ done
+
+ # There is a special case the rules above don't catch:
+ # a txn_compare function used as an argument to qsort(3).
+ # a print_app_record function used as argument to
+	#	dbenv->set_app_dispatch().
+ echo "s/, txn_compare);/, db_stat_txn_compare);/"
+ echo "s/, print_app_record)) /, db_printlog_print_app_record)) /"
+
+ # We convert the ex_access sample into dbdemo for VxWorks.
+ echo 's/progname = "ex_access";/progname = "dbdemo";/'
+
+ # The example programs have to load db_int.h, not db.h -- else
+ # they won't have the right Berkeley DB prototypes for getopt
+ # and friends.
+ echo '/#include.*db.h/c\'
+ echo '#include <db_config.h>\'
+ echo '#include <db_int.h>'
+}
+
+PROGRAM_LIST="db_archive db_checkpoint db_deadlock db_dump db_load \
+ db_printlog db_recover db_stat db_upgrade db_verify ex_access"
+
+# Build VxWorks versions of the utilities.
+for i in $PROGRAM_LIST; do
+ if [ $i = "ex_access" ]; then
+ target=dbdemo
+ dir=../examples_c
+ else
+ target=$i
+ dir=../$i
+ fi
+
+ transform $target $dir/$i.c > $s
+ sed -f $s < $dir/$i.c > $t
+
+ f=../build_vxworks/$target/$target.c
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+done
+
+# Build VxWorks Tornado 2.0 project files for the utilities.
+for i in $PROGRAM_LIST; do
+ if [ $i = "ex_access" ]; then
+ target=dbdemo
+ dir=../examples_c
+ else
+ target=$i
+ dir=../$i
+ fi
+
+ sed "s/__DB_APPLICATION_NAME__/$target/g" < vx_2.0/wpj.in > $t
+ f=../build_vxworks/$target/$target.wpj
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+done
+
+# Build the list of files VxWorks knows about.
+sed -e '/^$/d' -e '/^[ #]/d' srcfiles.in |
+ egrep -w vx |
+ sed 's/[ ].*//' > $vxfilelist
+
+# Build VxWorks Tornado 2.0 project files for the library itself.
+(cat vx_2.0/BerkeleyDB.wpj
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> FILE_\$(PRJ_DIR)/../$i"
+ echo "${o}_dependDone"
+ echo "TRUE"
+ echo "<END>"
+ echo
+ echo "${o}_dependencies"
+ echo "\$(PRJ_DIR)/db_config.h \\"
+ echo " \$(PRJ_DIR)/db_int.h \\"
+ echo " \$(PRJ_DIR)/db.h"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_tool"
+ echo "C/C++ compiler"
+ echo "<END>"
+ echo
+done
+echo "<BEGIN> PROJECT_FILES"
+sed -e '$!s/$/ \\/' \
+ -e 's/^/$(PRJ_DIR)\/..\//' \
+ -e '1!s/^/ /' < $vxfilelist
+echo "<END>"
+echo
+echo "<BEGIN> userComments"
+echo "BerkeleyDB"
+echo "<END>") > $t
+f=../build_vxworks/BerkeleyDB.wpj
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build VxWorks Tornado 3.1 project files for the utilities.
+for i in $PROGRAM_LIST; do
+ if [ $i = "ex_access" ]; then
+ target=dbdemo
+ dir=../examples_c
+ else
+ target=$i
+ dir=../$i
+ fi
+
+ cp vx_3.1/Makefile.custom $t
+ f=../build_vxworks/$target/$target/Makefile.custom
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+ for j in component.cdf component.wpj; do
+ #
+	# Some parts of the component files need to have the
+ # name in all capitals. Sigh.
+ #
+ z=`echo $target | tr "a-z" "A-Z"`
+ sed -e "s/__DB_APPLICATION_NAME__/$target/g" \
+ -e "s/__DB_CAPAPPL_NAME__/$z/g" < vx_3.1/$j > $t
+ f=../build_vxworks/$target/$target/$j
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+ done
+done
+
+# Build VxWorks Tornado 3.1 project files for the library itself.
+cp vx_3.1/Makefile.custom $t
+f=../build_vxworks/BerkeleyDB/Makefile.custom
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(cat vx_3.1/cdf.1
+echo -n " MODULES"
+for i in `cat $vxfilelist`; do
+ echo " `basename $i .c`.o"
+done | sort | sed -e '$!s/$/ \\/'
+cat vx_3.1/cdf.2
+for i in `cat $vxfilelist`; do
+ b="`basename $i .c`.o"
+ echo "Module $b {"
+ echo
+ echo " NAME $b"
+ echo " SRC_PATH_NAME \$(PRJ_DIR)/../../$i"
+ echo "}"
+ echo
+done
+cat vx_3.1/cdf.3)> $t
+f=../build_vxworks/BerkeleyDB/component.cdf
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(cat vx_3.1/wpj.1
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_\$(PRJ_DIR)/../../$i"
+ echo "${o}_infoTags"
+ echo "toolMacro objects"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_toolMacro"
+ echo "CC"
+ echo "<END>"
+ echo
+done
+cat vx_3.1/wpj.2
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> BUILD_PENTIUM2gnu.release_FILE_\$(PRJ_DIR)/../../$i"
+ echo "${o}_infoTags"
+ echo "toolMacro objects"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_toolMacro"
+ echo "CC"
+ echo "<END>"
+ echo
+done
+cat vx_3.1/wpj.3
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> BUILD_PENTIUMgnu.debug_FILE_\$(PRJ_DIR)/../../$i"
+ echo "${o}_infoTags"
+ echo "toolMacro objects"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_toolMacro"
+ echo "CC"
+ echo "<END>"
+ echo
+done
+cat vx_3.1/wpj.4
+sort $vxfilelist |
+sed -e 's/^/$(PRJ_DIR)\/..\/..\//' \
+ -e '1!s/^/ /' \
+ -e '$!s/$/ \\/'
+cat vx_3.1/wpj.5) > $t
+f=../build_vxworks/BerkeleyDB/component.wpj
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/storage/bdb/dist/s_win32 b/storage/bdb/dist/s_win32
new file mode 100755
index 00000000000..207978b82bb
--- /dev/null
+++ b/storage/bdb/dist/s_win32
@@ -0,0 +1,108 @@
+#!/bin/sh -
+# $Id: s_win32,v 1.25 2002/05/20 19:18:14 bostic Exp $
+#
+# Build Windows/32 include files.
+
+msgc="/* DO NOT EDIT: automatically built by dist/s_win32. */"
+msgw="; DO NOT EDIT: automatically built by dist/s_win32."
+
+. ./RELEASE
+
+s=/tmp/__db_a$$
+t=/tmp/__db_b$$
+rm -f $s $t
+
+trap 'rm -f $s $t ; exit 1' 1 2 3 13 15
+
+# Build the Win32 automatically generated files.
+f=../build_win32/db.h
+cat <<ENDOFSEDTEXT > $s
+s/@u_int8_decl@/typedef unsigned char u_int8_t;/
+s/@int16_decl@/typedef short int16_t;/
+s/@u_int16_decl@/typedef unsigned short u_int16_t;/
+s/@int32_decl@/typedef int int32_t;/
+s/@u_int32_decl@/typedef unsigned int u_int32_t;/
+/@u_char_decl@/{
+ i\\
+#if !defined(_WINSOCKAPI_)
+ s/@u_char_decl@/typedef unsigned char u_char;/
+}
+s/@u_short_decl@/typedef unsigned short u_short;/
+s/@u_int_decl@/typedef unsigned int u_int;/
+/@u_long_decl@/{
+ s/@u_long_decl@/typedef unsigned long u_long;/
+ a\\
+#endif
+}
+/@ssize_t_decl@/{
+ i\\
+#if defined(_WIN64)\\
+typedef __int64 ssize_t;\\
+#else\\
+typedef int ssize_t;\\
+#endif
+ d
+}
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/
+s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/
+s/@DB_VERSION_UNIQUE_NAME@//
+ENDOFSEDTEXT
+(echo "$msgc" &&
+ sed -f $s ../dbinc/db.in &&
+ cat ../dbinc_auto/rpc_defs.in &&
+ cat ../dbinc_auto/ext_prot.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/db_cxx.h
+cat <<ENDOFSEDTEXT > $s
+s/@cxx_have_stdheaders@/#define HAVE_CXX_STDHEADERS 1/
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $s ../dbinc/db_cxx.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/db_int.h
+cat <<ENDOFSEDTEXT > $s
+s/\(PATH_SEPARATOR[^"]*"\)\/"/\1\\\\\\\\\\/:\"/
+s/@db_align_t_decl@/typedef unsigned long db_align_t;/
+s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/
+s/@db_int_def@//
+ENDOFSEDTEXT
+(echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/db_config.h
+(echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" win_config.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/libdb.rc
+cat <<ENDOFSEDTEXT > $s
+s/%MAJOR%/$DB_VERSION_MAJOR/
+s/%MINOR%/$DB_VERSION_MINOR/
+s/%PATCH%/$DB_VERSION_PATCH/
+ENDOFSEDTEXT
+sed -f $s ../build_win32/libdbrc.src > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/libdb.def
+(echo $msgw &&
+ echo &&
+ echo \
+ "DESCRIPTION 'Berkeley DB $DB_VERSION_MAJOR.$DB_VERSION_MINOR Library'" &&
+ echo &&
+ echo EXPORTS;
+a=1
+for i in `sed -e '/^$/d' -e '/^#/d' win_exports.in`; do
+ echo " $i @$a"
+ a=`expr $a + 1`
+done) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+rm -f $s $t
diff --git a/storage/bdb/dist/s_win32_dsp b/storage/bdb/dist/s_win32_dsp
new file mode 100644
index 00000000000..af5551ec248
--- /dev/null
+++ b/storage/bdb/dist/s_win32_dsp
@@ -0,0 +1,109 @@
+#!/bin/sh -
+# $Id: s_win32_dsp,v 1.8 2002/03/26 23:37:55 bostic Exp $
+#
+# Build Windows/32 .dsp files.
+
+. ./RELEASE
+
+BUILDDIR=../build_win32
+SRCFILES=srcfiles.in
+
+create_dsp()
+{
+ projname="$1" # name of the .dsp file
+ match="$2" # the string used to egrep the $sources file
+ sources="$3" # a modified version of $SRCFILES to facilitate matches
+ dsptemplate="$4" # overall template file for the .dsp
+ srctemplate="$5" # template file for the src file fragments
+
+ dspoutput=$BUILDDIR/$projname.dsp
+
+ rm -f $dspoutput.insert
+ for srcpath in `egrep "$match" $sources | sed -e 's/[ ].*//'`
+ do
+ # take the path name and break it up, converting / to \\.
+ # so many backslashes needed because of shell quoting and
+ # sed quoting -- we'll end up with two backslashes for every
+ # forward slash, but we need that when feeding that to the
+ # later sed command.
+ set - `echo $srcpath | sed -e 's;\(.*\)/;../\\1 ;' \
+ -e 's;../build_win32;.;' \
+ -e 's;/;\\\\\\\\;g'`
+ srcdir="$1"
+ srcfile="$2"
+ sed -e "s/@srcdir@/$srcdir/g" \
+ -e "s/@srcfile@/$srcfile/g" \
+ < $srctemplate >> $dspoutput.insert
+ done
+ # We need exactly one space after the 'r' modifier
+ # See 5.9 in http://www.student.northpark.edu/pemente/sed/sedfaq.txt
+ sed -e "/@SOURCE_FILES@/r $dspoutput.insert" \
+ -e "/@SOURCE_FILES@/d" \
+ -e "s/@project_name@/$projname/g" \
+ -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g" \
+ -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g" \
+ < $dsptemplate > $dspoutput.new
+
+	# Set the file mode to 664 because the VC++ IDE needs a writeable file
+ # in our development environment.
+ cmp $dspoutput.new $dspoutput > /dev/null 2>&1 ||
+ (echo "Building $dspoutput" && rm -f $dspoutput &&
+ cp $dspoutput.new $dspoutput && chmod 664 $dspoutput)
+ rm -f $dspoutput.insert $dspoutput.new
+}
+
+TMPA=/tmp/swin32dsp$$a
+trap "rm -f $TMPA; exit 1" 1 2 3 15
+
+# create a copy of the srcfiles with comments and empty lines removed.
+# add a space at the end of each list of modules so that each module
+# can be unambiguously matched e.g. ' dynamic '
+sed -e "s/#.*$//" \
+ -e "/^[ ]*$/d" \
+ -e "s/[ ][ ]*/ /" \
+ -e "s/[ ]*$//" \
+ -e "/[ ]/!d" \
+ -e "s/$/ /" < $SRCFILES > $TMPA
+
+# get a list of all modules mentioned
+#
+MODULES="`sed -e 's/^[^ ]* //' < $TMPA \
+ | tr ' ' '\012' | sort | uniq`"
+
+for module in $MODULES
+do
+ case "$module" in
+ dynamic )
+ create_dsp db_dll " $module " $TMPA \
+ $BUILDDIR/dynamic_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ java )
+ create_dsp db_java " $module " $TMPA \
+ $BUILDDIR/java_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ tcl )
+ create_dsp db_tcl " $module " $TMPA \
+ $BUILDDIR/tcl_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ testutil )
+ create_dsp db_test " $module " $TMPA \
+ $BUILDDIR/db_test.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ static )
+ create_dsp db_static " $module " $TMPA \
+ $BUILDDIR/static_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ app=* )
+ appname=`echo $module | sed -e 's/^app=//'`
+ create_dsp $appname " $module " $TMPA \
+ $BUILDDIR/app_dsp.src $BUILDDIR/srcfile_dsp.src
+ ;;
+ vx )
+ ;;
+ * )
+ echo "s_win32_dsp: module name $module in $SRCFILES is unknown type"
+ ;;
+ esac
+done
+
+rm -f $TMPA
diff --git a/storage/bdb/dist/srcfiles.in b/storage/bdb/dist/srcfiles.in
new file mode 100644
index 00000000000..54aeea0c1bc
--- /dev/null
+++ b/storage/bdb/dist/srcfiles.in
@@ -0,0 +1,332 @@
+# $Id: srcfiles.in,v 1.59 2002/08/29 14:22:21 margo Exp $
+#
+# This is an input file for the s_win32_dsp and s_vxworks scripts. It lists
+# the source files in the Berkeley DB tree and notes which are used to build
+# the Win/32 and VxWorks libraries.
+#
+# Please keep this list sorted alphabetically!
+#
+# Each non-blank, non-comment line is of the form
+# filename module [ module ...]
+#
+# The possible modules, including the name of the project (.dsp) file:
+#
+# app=NAME Linked into application NAME.exe (db_NAME.dsp)
+# dynamic File is in the Windows DLL (db_dll.dsp)
+# java File is in the Windows Java DLL (db_java.dsp)
+# static File is in the Windows static library (db_static.dsp)
+# tcl File is in the Windows tcl DLL (db_tcl.dsp)
+# testutil File is used for Windows testing (db_test.dsp)
+# vx File is in the VxWorks library.
+
+btree/bt_compare.c dynamic static vx
+btree/bt_conv.c dynamic static vx
+btree/bt_curadj.c dynamic static vx
+btree/bt_cursor.c dynamic static vx
+btree/bt_delete.c dynamic static vx
+btree/bt_method.c dynamic static vx
+btree/bt_open.c dynamic static vx
+btree/bt_put.c dynamic static vx
+btree/bt_rec.c dynamic static vx
+btree/bt_reclaim.c dynamic static vx
+btree/bt_recno.c dynamic static vx
+btree/bt_rsearch.c dynamic static vx
+btree/bt_search.c dynamic static vx
+btree/bt_split.c dynamic static vx
+btree/bt_stat.c dynamic static vx
+btree/bt_upgrade.c dynamic static vx
+btree/bt_verify.c dynamic static vx
+btree/btree_auto.c dynamic static vx
+build_vxworks/db_archive/db_archive.c
+build_vxworks/db_checkpoint/db_checkpoint.c
+build_vxworks/db_deadlock/db_deadlock.c
+build_vxworks/db_dump/db_dump.c
+build_vxworks/db_load/db_load.c
+build_vxworks/db_printlog/db_printlog.c
+build_vxworks/db_recover/db_recover.c
+build_vxworks/db_stat/db_stat.c
+build_vxworks/db_upgrade/db_upgrade.c
+build_vxworks/db_verify/db_verify.c
+build_vxworks/dbdemo/dbdemo.c
+build_win32/dbkill.cpp testutil
+build_win32/dllmain.c dynamic
+build_win32/libdb.def dynamic
+build_win32/libdb.rc dynamic
+build_win32/libdb_tcl.def tcl
+clib/getcwd.c
+clib/getopt.c vx
+clib/memcmp.c
+clib/memmove.c
+clib/raise.c
+clib/snprintf.c vx
+clib/strcasecmp.c dynamic static vx
+clib/strdup.c vx
+clib/strerror.c
+clib/vsnprintf.c vx
+common/db_byteorder.c dynamic static vx
+common/db_err.c dynamic static vx
+common/db_getlong.c dynamic static vx
+common/db_idspace.c dynamic static vx
+common/db_log2.c dynamic static vx
+common/util_arg.c vx
+common/util_cache.c dynamic static vx
+common/util_log.c dynamic static vx
+common/util_sig.c dynamic static vx
+cxx/cxx_db.cpp dynamic static
+cxx/cxx_dbc.cpp dynamic static
+cxx/cxx_dbt.cpp dynamic static
+cxx/cxx_env.cpp dynamic static
+cxx/cxx_except.cpp dynamic static
+cxx/cxx_lock.cpp dynamic static
+cxx/cxx_logc.cpp dynamic static
+cxx/cxx_mpool.cpp dynamic static
+cxx/cxx_txn.cpp dynamic static
+db/crdel_auto.c dynamic static vx
+db/crdel_rec.c dynamic static vx
+db/db.c dynamic static vx
+db/db_am.c dynamic static vx
+db/db_auto.c dynamic static vx
+db/db_cam.c dynamic static vx
+db/db_conv.c dynamic static vx
+db/db_dispatch.c dynamic static vx
+db/db_dup.c dynamic static vx
+db/db_iface.c dynamic static vx
+db/db_join.c dynamic static vx
+db/db_meta.c dynamic static vx
+db/db_method.c dynamic static vx
+db/db_open.c dynamic static vx
+db/db_overflow.c dynamic static vx
+db/db_pr.c dynamic static vx
+db/db_rec.c dynamic static vx
+db/db_reclaim.c dynamic static vx
+db/db_remove.c dynamic static vx
+db/db_rename.c dynamic static vx
+db/db_ret.c dynamic static vx
+db/db_truncate.c dynamic static vx
+db/db_upg.c dynamic static vx
+db/db_upg_opd.c dynamic static vx
+db/db_vrfy.c dynamic static vx
+db/db_vrfyutil.c dynamic static vx
+db185/db185.c
+db_archive/db_archive.c app=db_archive
+db_checkpoint/db_checkpoint.c app=db_checkpoint
+db_deadlock/db_deadlock.c app=db_deadlock
+db_dump/db_dump.c app=db_dump
+db_dump185/db_dump185.c
+db_load/db_load.c app=db_load
+db_printlog/db_printlog.c app=db_printlog
+db_recover/db_recover.c app=db_recover
+db_stat/db_stat.c app=db_stat
+db_upgrade/db_upgrade.c app=db_upgrade
+db_verify/db_verify.c app=db_verify
+dbm/dbm.c dynamic static
+dbreg/dbreg.c dynamic static vx
+dbreg/dbreg_auto.c dynamic static vx
+dbreg/dbreg_rec.c dynamic static vx
+dbreg/dbreg_util.c dynamic static vx
+env/db_salloc.c dynamic static vx
+env/db_shash.c dynamic static vx
+env/env_file.c dynamic static vx
+env/env_method.c dynamic static vx
+env/env_open.c dynamic static vx
+env/env_recover.c dynamic static vx
+env/env_region.c dynamic static vx
+examples_c/bench_001.c
+examples_c/ex_access.c app=ex_access
+examples_c/ex_apprec/ex_apprec.c
+examples_c/ex_apprec/ex_apprec_auto.c
+examples_c/ex_apprec/ex_apprec_rec.c
+examples_c/ex_btrec.c app=ex_btrec
+examples_c/ex_dbclient.c
+examples_c/ex_env.c app=ex_env
+examples_c/ex_lock.c app=ex_lock
+examples_c/ex_mpool.c app=ex_mpool
+examples_c/ex_repquote/ex_rq_client.c
+examples_c/ex_repquote/ex_rq_main.c
+examples_c/ex_repquote/ex_rq_master.c
+examples_c/ex_repquote/ex_rq_net.c
+examples_c/ex_repquote/ex_rq_util.c
+examples_c/ex_thread.c
+examples_c/ex_tpcb.c app=ex_tpcb
+examples_cxx/AccessExample.cpp app=excxx_access
+examples_cxx/BtRecExample.cpp app=excxx_btrec
+examples_cxx/EnvExample.cpp app=excxx_env
+examples_cxx/LockExample.cpp app=excxx_lock
+examples_cxx/MpoolExample.cpp app=excxx_mpool
+examples_cxx/TpcbExample.cpp app=excxx_tpcb
+fileops/fileops_auto.c dynamic static vx
+fileops/fop_basic.c dynamic static vx
+fileops/fop_rec.c dynamic static vx
+fileops/fop_util.c dynamic static vx
+hash/hash.c dynamic static vx
+hash/hash_auto.c dynamic static vx
+hash/hash_conv.c dynamic static vx
+hash/hash_dup.c dynamic static vx
+hash/hash_func.c dynamic static vx
+hash/hash_meta.c dynamic static vx
+hash/hash_method.c dynamic static vx
+hash/hash_open.c dynamic static vx
+hash/hash_page.c dynamic static vx
+hash/hash_rec.c dynamic static vx
+hash/hash_reclaim.c dynamic static vx
+hash/hash_stat.c dynamic static vx
+hash/hash_upgrade.c dynamic static vx
+hash/hash_verify.c dynamic static vx
+hmac/hmac.c dynamic static vx
+hmac/sha1.c dynamic static vx
+hsearch/hsearch.c dynamic static vx
+libdb_java/java_Db.c java
+libdb_java/java_DbEnv.c java
+libdb_java/java_DbLock.c java
+libdb_java/java_DbLogc.c java
+libdb_java/java_DbLsn.c java
+libdb_java/java_DbTxn.c java
+libdb_java/java_DbUtil.c java
+libdb_java/java_Dbc.c java
+libdb_java/java_Dbt.c java
+libdb_java/java_info.c java
+libdb_java/java_locked.c java
+libdb_java/java_stat_auto.c java
+libdb_java/java_util.c java
+lock/lock.c dynamic static vx
+lock/lock_deadlock.c dynamic static vx
+lock/lock_method.c dynamic static vx
+lock/lock_region.c dynamic static vx
+lock/lock_stat.c dynamic static vx
+lock/lock_util.c dynamic static vx
+log/log.c dynamic static vx
+log/log_archive.c dynamic static vx
+log/log_compare.c dynamic static vx
+log/log_get.c dynamic static vx
+log/log_method.c dynamic static vx
+log/log_put.c dynamic static vx
+mp/mp_alloc.c dynamic static vx
+mp/mp_bh.c dynamic static vx
+mp/mp_fget.c dynamic static vx
+mp/mp_fopen.c dynamic static vx
+mp/mp_fput.c dynamic static vx
+mp/mp_fset.c dynamic static vx
+mp/mp_method.c dynamic static vx
+mp/mp_region.c dynamic static vx
+mp/mp_register.c dynamic static vx
+mp/mp_stat.c dynamic static vx
+mp/mp_sync.c dynamic static vx
+mp/mp_trickle.c dynamic static vx
+mutex/mut_fcntl.c
+mutex/mut_pthread.c
+mutex/mut_tas.c vx
+mutex/mut_win32.c dynamic static
+mutex/mutex.c dynamic static vx
+mutex/tm.c
+os/os_abs.c
+os/os_alloc.c dynamic static vx
+os/os_clock.c vx
+os/os_config.c
+os/os_dir.c vx
+os/os_errno.c vx
+os/os_fid.c vx
+os/os_fsync.c vx
+os/os_handle.c vx
+os/os_id.c dynamic static vx
+os/os_map.c
+os/os_method.c dynamic static vx
+os/os_oflags.c dynamic static vx
+os/os_open.c vx
+os/os_region.c dynamic static vx
+os/os_rename.c vx
+os/os_root.c dynamic static vx
+os/os_rpath.c dynamic static vx
+os/os_rw.c vx
+os/os_seek.c vx
+os/os_sleep.c vx
+os/os_spin.c vx
+os/os_stat.c vx
+os/os_tmpdir.c dynamic static vx
+os/os_unlink.c dynamic static vx
+os_vxworks/os_vx_abs.c vx
+os_vxworks/os_vx_config.c vx
+os_vxworks/os_vx_map.c vx
+os_win32/os_abs.c dynamic static
+os_win32/os_clock.c dynamic static
+os_win32/os_config.c dynamic static
+os_win32/os_dir.c dynamic static
+os_win32/os_errno.c dynamic static
+os_win32/os_fid.c dynamic static
+os_win32/os_fsync.c dynamic static
+os_win32/os_handle.c dynamic static
+os_win32/os_map.c dynamic static
+os_win32/os_open.c dynamic static
+os_win32/os_rename.c dynamic static
+os_win32/os_rw.c dynamic static
+os_win32/os_seek.c dynamic static
+os_win32/os_sleep.c dynamic static
+os_win32/os_spin.c dynamic static
+os_win32/os_stat.c dynamic static
+os_win32/os_type.c dynamic static
+qam/qam.c dynamic static vx
+qam/qam_auto.c dynamic static vx
+qam/qam_conv.c dynamic static vx
+qam/qam_files.c dynamic static vx
+qam/qam_method.c dynamic static vx
+qam/qam_open.c dynamic static vx
+qam/qam_rec.c dynamic static vx
+qam/qam_stat.c dynamic static vx
+qam/qam_upgrade.c dynamic static vx
+qam/qam_verify.c dynamic static vx
+rep/rep_method.c dynamic static vx
+rep/rep_record.c dynamic static vx
+rep/rep_region.c dynamic static vx
+rep/rep_util.c dynamic static vx
+rpc_client/client.c vx
+rpc_client/db_server_clnt.c vx
+rpc_client/gen_client.c vx
+rpc_client/gen_client_ret.c vx
+rpc_server/c/db_server_proc.c
+rpc_server/c/db_server_svc.c
+rpc_server/c/db_server_util.c
+rpc_server/c/db_server_xdr.c vx
+rpc_server/c/gen_db_server.c
+rpc_server/cxx/db_server_cxxproc.cpp
+rpc_server/cxx/db_server_cxxutil.cpp
+tcl/tcl_compat.c tcl
+tcl/tcl_db.c tcl
+tcl/tcl_db_pkg.c tcl
+tcl/tcl_dbcursor.c tcl
+tcl/tcl_env.c tcl
+tcl/tcl_internal.c tcl
+tcl/tcl_lock.c tcl
+tcl/tcl_log.c tcl
+tcl/tcl_mp.c tcl
+tcl/tcl_rep.c tcl
+tcl/tcl_txn.c tcl
+tcl/tcl_util.c tcl
+test_perf/db_perf.c app=db_perf
+test_perf/perf_cache_check.c app=db_perf
+test_perf/perf_checkpoint.c app=db_perf
+test_perf/perf_config.c app=db_perf
+test_perf/perf_dbs.c app=db_perf
+test_perf/perf_debug.c app=db_perf
+test_perf/perf_file.c app=db_perf
+test_perf/perf_key.c app=db_perf
+test_perf/perf_log.c app=db_perf
+test_perf/perf_misc.c app=db_perf
+test_perf/perf_op.c app=db_perf
+test_perf/perf_parse.c app=db_perf
+test_perf/perf_rand.c app=db_perf
+test_perf/perf_spawn.c app=db_perf
+test_perf/perf_thread.c app=db_perf
+test_perf/perf_trickle.c app=db_perf
+test_perf/perf_txn.c app=db_perf
+test_perf/perf_util.c app=db_perf
+test_perf/perf_vx.c
+txn/txn.c dynamic static vx
+txn/txn_auto.c dynamic static vx
+txn/txn_method.c dynamic static vx
+txn/txn_rec.c dynamic static vx
+txn/txn_recover.c dynamic static vx
+txn/txn_region.c dynamic static vx
+txn/txn_stat.c dynamic static vx
+txn/txn_util.c dynamic static vx
+xa/xa.c dynamic static vx
+xa/xa_db.c dynamic static vx
+xa/xa_map.c dynamic static vx
diff --git a/storage/bdb/dist/template/rec_ctemp b/storage/bdb/dist/template/rec_ctemp
new file mode 100644
index 00000000000..2951189c5bd
--- /dev/null
+++ b/storage/bdb/dist/template/rec_ctemp
@@ -0,0 +1,62 @@
+/*
+ * PREF_FUNC_recover --
+ * Recovery function for FUNC.
+ *
+ * PUBLIC: int PREF_FUNC_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+PREF_FUNC_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ PREF_FUNC_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(PREF_FUNC_print);
+ REC_INTRO(PREF_FUNC_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/storage/bdb/dist/vx_2.0/BerkeleyDB.wpj b/storage/bdb/dist/vx_2.0/BerkeleyDB.wpj
new file mode 100644
index 00000000000..78684d90067
--- /dev/null
+++ b/storage/bdb/dist/vx_2.0/BerkeleyDB.wpj
@@ -0,0 +1,251 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUM_debug_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O0 \
+ -I$(PRJ_DIR) \
+ -I$(PRJ_DIR)/.. \
+ -DDIAGNOSTIC \
+ -DDEBUG
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS
+-mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O2 \
+ -I$(PRJ_DIR) \
+ -I$(PRJ_DIR)/..
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB.out
+
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB_sim.out
+
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM_debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM_release PENTIUM_debug
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
diff --git a/storage/bdb/dist/vx_2.0/wpj.in b/storage/bdb/dist/vx_2.0/wpj.in
new file mode 100644
index 00000000000..2b942bb562c
--- /dev/null
+++ b/storage/bdb/dist/vx_2.0/wpj.in
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+__DB_APPLICATION_NAME__.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/__DB_APPLICATION_NAME__.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE___DB_APPLICATION_NAME__.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_dependencies
+
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/__DB_APPLICATION_NAME__.c
+<END>
+
+<BEGIN> userComments
+__DB_APPLICATION_NAME__
+<END>
diff --git a/storage/bdb/dist/vx_3.1/Makefile.custom b/storage/bdb/dist/vx_3.1/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/storage/bdb/dist/vx_3.1/cdf.1 b/storage/bdb/dist/vx_3.1/cdf.1
new file mode 100644
index 00000000000..17db06f7e61
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/cdf.1
@@ -0,0 +1,12 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_BERKELEYDB {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
diff --git a/storage/bdb/dist/vx_3.1/cdf.2 b/storage/bdb/dist/vx_3.1/cdf.2
new file mode 100644
index 00000000000..76f123af9fb
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/cdf.2
@@ -0,0 +1,9 @@
+ NAME BerkeleyDB
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
diff --git a/storage/bdb/dist/vx_3.1/cdf.3 b/storage/bdb/dist/vx_3.1/cdf.3
new file mode 100644
index 00000000000..a3146ced95a
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/cdf.3
@@ -0,0 +1,2 @@
+/* Parameter information */
+
diff --git a/storage/bdb/dist/vx_3.1/component.cdf b/storage/bdb/dist/vx_3.1/component.cdf
new file mode 100644
index 00000000000..91edaa87853
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE___DB_CAPAPPL_NAME__ {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES __DB_APPLICATION_NAME__.o
+ NAME __DB_APPLICATION_NAME__
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module __DB_APPLICATION_NAME__.o {
+
+ NAME __DB_APPLICATION_NAME__.o
+ SRC_PATH_NAME $PRJ_DIR/../__DB_APPLICATION_NAME__.c
+}
+
+/* Parameter information */
+
diff --git a/storage/bdb/dist/vx_3.1/component.wpj b/storage/bdb/dist/vx_3.1/component.wpj
new file mode 100644
index 00000000000..01c51c1b97f
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/storage/bdb/dist/vx_3.1/wpj.1 b/storage/bdb/dist/vx_3.1/wpj.1
new file mode 100644
index 00000000000..414b4e8fa35
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/wpj.1
@@ -0,0 +1,22 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.0
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+0
+<END>
+
diff --git a/storage/bdb/dist/vx_3.1/wpj.2 b/storage/bdb/dist/vx_3.1/wpj.2
new file mode 100644
index 00000000000..0294f763ef7
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/wpj.2
@@ -0,0 +1,130 @@
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
diff --git a/storage/bdb/dist/vx_3.1/wpj.3 b/storage/bdb/dist/vx_3.1/wpj.3
new file mode 100644
index 00000000000..f06e6253923
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/wpj.3
@@ -0,0 +1,128 @@
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../..
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
diff --git a/storage/bdb/dist/vx_3.1/wpj.4 b/storage/bdb/dist/vx_3.1/wpj.4
new file mode 100644
index 00000000000..84de6ebf359
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/wpj.4
@@ -0,0 +1,135 @@
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu.debug PENTIUM2gnu.debug PENTIUM2gnu.release
+<END>
+
+<BEGIN> COMPONENT_COM_TYPE
+
+<END>
+
+<BEGIN> PROJECT_FILES
diff --git a/storage/bdb/dist/vx_3.1/wpj.5 b/storage/bdb/dist/vx_3.1/wpj.5
new file mode 100644
index 00000000000..f4056e7e22a
--- /dev/null
+++ b/storage/bdb/dist/vx_3.1/wpj.5
@@ -0,0 +1,22 @@
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/storage/bdb/dist/vx_buildcd b/storage/bdb/dist/vx_buildcd
new file mode 100755
index 00000000000..a94d78db974
--- /dev/null
+++ b/storage/bdb/dist/vx_buildcd
@@ -0,0 +1,119 @@
+#!/bin/sh
+# $Id: vx_buildcd,v 1.6 2001/11/05 21:05:58 sue Exp $
+#
+# Build the Setup SDK CD image on the VxWorks host machine.
+
+. ./RELEASE
+
+B=`pwd`
+B=$B/..
+D=$B/dist/vx_setup
+C=$D/db.CD
+Q=/export/home/sue/SetupSDK
+S=$Q/resource/mfg/setup
+W=sun4-solaris2
+
+symdoc=$D/docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH
+symdb=$D/windlink/sleepycat/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH
+rm -rf $D/docs $D/windlink
+mkdir $D/docs $D/windlink $D/windlink/sleepycat
+ln -s $B/docs $symdoc
+ln -s $B $symdb
+
+s=/tmp/__db_a
+t=/tmp/__db_b
+
+#
+# Remove the old CD directory if it is there.
+if test -d $C; then
+ echo "$C cannot exist."
+ echo "As root, execute 'rm -rf $C'"
+ echo "and then rerun the script"
+ exit 1
+fi
+
+#
+# Check for absolute pathnames in the project files.
+# That is bad, but Tornado insists on putting them in
+# whenever you add new files.
+#
+rm -f $t
+f=`find $B/build_vxworks -name \*.wpj -print`
+for i in $f; do
+ grep -l -- "$B" $i >> $t
+done
+if test -s $t; then
+ echo "The following files contain absolute pathnames."
+ echo "They must be fixed before building the CD image:"
+ cat $t
+ exit 1
+fi
+
+#
+# NOTE: We reuse the same sed script over several files.
+#
+cat <<ENDOFSEDTEXT > $s
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/g
+s#@DB_SETUP_DIR@#$D#g
+ENDOFSEDTEXT
+
+f=$D/setup.pool
+(sed -f $s $D/vx_setup.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/README.TXT
+(sed -f $s $D/README.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/CONFIG.TCL
+(sed -f $s $D/CONFIG.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/filelist.demo
+(sed -f $s $D/vx_demofile.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+# Copy the Sleepycat specific files into the SetupSDK area.
+(cd $D && cp README.TXT $S)
+(cd $D && cp LICENSE.TXT $S)
+(cd $D && cp CONFIG.TCL $S/RESOURCE/TCL)
+(cd $D && cp SETUP.BMP $S/RESOURCE/BITMAPS)
+
+#
+# NOTE: The contents of LIB must be on one, long, single line.
+# Even preserving it with a \ doesn't work for htmlBook.
+#
+f=../docs/LIB
+(echo "Building $f" && rm -f $f)
+cat <<ENDOFLIBTEXT >> $f
+{BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH} {Sleepycat Software Berkeley DB} {<b>BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH</b>} {<b><a href="./index.html">BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH</a></b>} {Sleepycat BerkeleyDB} {} {} {}
+ENDOFLIBTEXT
+
+#
+# Start generating the file list.
+f=$D/filelist.all
+
+#
+# Just put everything into the image. But we only want to find regular
+# files; we cannot have all the directories listed too.
+#
+# NOTE: This find is overly aggressive in getting files, particularly
+# for the 'windlink/sleepycat' files. We actually end up with 3 sets of the
+# documentation, the "real" ones in 'docs/BerkeleyDB*', the set found
+# via 'windlink/sleepycat/Berk*/docs' and the one found via our symlink in
+# 'windlink/sleepycat/Berk*/dist/vx_setup/docs/Berk*'.
+#
+# However, we waste a little disk space so that the expression below
+# is trivial and we don't have to maintain it as new files/directories
+# are added to DB.
+#
+(cd $D && find docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name \* -type f -print) > $t
+(cd $D && find windlink/sleepycat/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name docs -prune -o -type f -print) >> $t
+(echo "Building $f" && rm -f $f && cp $t $f)
+#
+# Finally build the CD image!
+#
+env PATH=$Q/$W/bin:$PATH QMS_BASE=$Q WIND_HOST_TYPE=$W \
+pool mfg -d $C -v -nokey BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR < $D/setup.pool
diff --git a/storage/bdb/dist/vx_config.in b/storage/bdb/dist/vx_config.in
new file mode 100644
index 00000000000..43fc8eb71f3
--- /dev/null
+++ b/storage/bdb/dist/vx_config.in
@@ -0,0 +1,381 @@
+/* !!!
+ * The CONFIG_TEST option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want to build a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* !!!
+ * The DEBUG option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want a debugging version. */
+/* #undef DEBUG */
+
+/* Define to 1 if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define to 1 if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* !!!
+ * The DIAGNOSTIC option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
+
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+#define HAVE_DIRENT_H 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
+/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+
+/* Define to 1 if mutexes hold system resources. */
+#define HAVE_MUTEX_SYSTEM_RESOURCES 1
+
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
+
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
+
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
+
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
+
+/* Define to 1 to use VxWorks mutexes. */
+#define HAVE_MUTEX_VXWORKS 1
+
+/* Define to 1 to use Windows mutexes. */
+/* #undef HAVE_MUTEX_WIN32 */
+
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
+
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
+
+/* Define to 1 if you have the `pread' function. */
+/* #undef HAVE_PREAD */
+
+/* Define to 1 if you have the `pstat_getdynamic' function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define to 1 if you have the `pwrite' function. */
+/* #undef HAVE_PWRITE */
+
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
+#define HAVE_QSORT 1
+
+/* Define to 1 if you have the `raise' function. */
+#define HAVE_RAISE 1
+
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
+#define HAVE_SCHED_YIELD 1
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define to 1 if you have the `shmget' function. */
+/* #undef HAVE_SHMGET */
+
+/* Define to 1 if you have the `snprintf' function. */
+/* #undef HAVE_SNPRINTF */
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define to 1 if you have the `strdup' function. */
+/* #undef HAVE_STRDUP */
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
+#define HAVE_STRTOUL 1
+
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+#define HAVE_STRUCT_STAT_ST_BLKSIZE 1
+
+/* Define to 1 if you have the `sysconf' function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+/* #undef HAVE_SYS_STAT_H */
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+/* #undef HAVE_SYS_TYPES_H */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+#define HAVE_UNLINK_WITH_OPEN_FAILURE 1
+
+/* Define to 1 if you have the `vsnprintf' function. */
+/* #undef HAVE_VSNPRINTF */
+
+/* Define to 1 if building VxWorks. */
+#define HAVE_VXWORKS 1
+
+/* Define to 1 if you have the `yield' function. */
+/* #undef HAVE_YIELD */
+
+/* Define to 1 if you have the `_fstati64' function. */
+/* #undef HAVE__FSTATI64 */
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB __EDIT_DB_VERSION__"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-__EDIT_DB_VERSION__"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "__EDIT_DB_VERSION__"
+
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions, we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_GETOPT
+#define getopt __db_Cgetopt
+#define optarg __db_Coptarg
+#define opterr __db_Copterr
+#define optind __db_Coptind
+#define optopt __db_Coptopt
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
+
+/*
+ * !!!
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on VxWorks.
+ */
+#include "vxWorks.h"
diff --git a/storage/bdb/dist/vx_setup/CONFIG.in b/storage/bdb/dist/vx_setup/CONFIG.in
new file mode 100644
index 00000000000..6ccceee7034
--- /dev/null
+++ b/storage/bdb/dist/vx_setup/CONFIG.in
@@ -0,0 +1,10 @@
+#
+# Install configuration file.
+#
+# Note: This file may be modified during the pool manufacturing process to
+# add additional configuration statements. This file is sourced by
+# INSTW32.TCL.
+#
+
+cdromDescSet "Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@"
+
diff --git a/storage/bdb/dist/vx_setup/LICENSE.TXT b/storage/bdb/dist/vx_setup/LICENSE.TXT
new file mode 100644
index 00000000000..7814c679cd7
--- /dev/null
+++ b/storage/bdb/dist/vx_setup/LICENSE.TXT
@@ -0,0 +1,3 @@
+Copyright (c) 1996-2002
+ Sleepycat Software. All rights reserved.
+See the file LICENSE for redistribution information.
diff --git a/storage/bdb/dist/vx_setup/MESSAGES.TCL b/storage/bdb/dist/vx_setup/MESSAGES.TCL
new file mode 100644
index 00000000000..718a67fbc50
--- /dev/null
+++ b/storage/bdb/dist/vx_setup/MESSAGES.TCL
@@ -0,0 +1,651 @@
+# MESSAGES.TCL - All setup strings.
+
+# modification history
+# --------------------
+# 03q,20apr99,bjl added release notes message for backward compatibility
+# page.
+# 03p,12apr99,wmd Add word about simulator in message about the drivers
+# object product.
+# 03o,03mar99,tcy Adjust setup directory size based on platform (fix for
+# SPR 25228)
+# 03n,24feb99,tcy modified DLL update messages
+# 03m,22feb99,tcy modified to align messages
+# 03l,17feb99,tcy modified message in the finish page for program group
+# installation
+# 03k,11feb99,tcy added messages for backward compatibility page
+# 03j,25jan99,tcy added messages from INSTW32.TCL
+# 03i,25jan99,wmd Reword the message for 5010_DRIVERS_INFO.
+# 03h,09dec98,bjl added messages about manufacturers updating patches.
+# 03g,01dec98,wmd Fix typos.
+# 03f,23nov98,tcy warn user to disable virus protection on Welcome screen
+# 03e,19nov98,wmd fixed minor nits in wording.
+# 03d,19nov98,bjl added web site locations for patchinfo.
+# 03c,18nov98,bjl added formatted patch messages for patchinfo file.
+# 03b,12nov98,tcy added message for not saving installation key
+# 03a,10nov98,tcy added warning message for space in destination directory
+# removed message for checking temporary disk space
+# 02z,27oct98,bjl added recommended patch messages, modified required msg.
+# 02y,26oct98,tcy added message for checking temporary disk space
+# 02x,22oct98,wmd fix messages for clarity.
+# 02w,21oct98,wmd fix message for drv/obj.
+# 02v,20oct98,tcy added message for updating system and changed dcom message
+# 02u,20oct98,bjl added tornado registry name entry message.
+# 02t,19oct98,bjl added tornado registry description message.
+# 02s,16oct98,wmd add new message for driver product warning.
+# 02r,16oct98,wmd fixed README.TXT description.
+# 02q,12oct98,tcy removed extraneous "the" from messages
+# 02p,06oct98,tcy added CD description to Welcome page
+# 02o,29sep98,bjl added required patches message 5000_PATCHES_TEXT.
+# 02n,29sep98,wmd add text for readme page
+# 02m,29sep98,tcy refined DLL registration page text
+# 02l,29sep98,tcy changed message for DCOM
+# 02k,26sep98,tcy added messages for DLL and DCOM pages
+# 02j,24sep98,tcy removed "following" from 1080_WARN_4 message.
+# 02i,17sep98,tcy added comment on size of SETUP files to 1140_COMP_SELECT.
+# 02h,17sep98,wmd reword message 1080_WARN_4.
+# 02g,14sep98,tcy changed 1210_FINISH and 1550_USAGE messages
+# 02f,08sep98,tcy warn user library update may take several minutes
+# 02e,01sep98,wmd reword message for installing over tree.
+# added new messages for license agreement pages.
+# 02d,20aug98,wmd added message for license agreement.
+# 02c,18aug98,tcy added message for zip-file dialog box
+# 02d,04aug98,wmd added newer/older duplicate file warnings.
+# 02c,24jul98,tcy added system check messages
+# 02b,16jul98,wmd add new messages for T-2.
+# 02a,22jul98,tcy moved license messages to LICW32.TCL;
+# removed portMapper messages
+# 01n,09feb98,pdn updated string 1080_WARN_4
+# 01m,08apr97,pdn added new string for remote icon installing
+# fixed spr#8334
+# 01l,08mar97,tcy fixed language in string id 3340
+# 01k,07mar97,tcy added string id 3340
+# 01j,10feb97,pdn added more license messages.
+# 01i,09feb97,pdn implemented variable argument list for strTableGet(),
+# clean up.
+# 01h,17jan97,jmo fixed language in strings
+# 01g,12dec96,tcy merged in TEXT-only strings
+# 01f,12dec96,pdn added 1080_WARN_4 string warning that CD-ROM
+# revision is older than expected.
+# 01e,27nov96,sj added string for warning against installing in
+# the root of windows drive.
+# 01d,18nov96,tcy added strings for text-based installation script
+# 01c,14nov96,pdn substituted function for some global variables
+# 01b,14nov96,sj added strings from Windows installation script
+# 01a,11nov96,pdn written
+
+proc strTableGet {strId args} {
+ global strTable
+ global setupVals
+ global current_file
+
+ if [regexp {^format.*$} $strTable($strId) junk] {
+ return [eval $strTable($strId)]
+ } {
+ return $strTable($strId)
+ }
+}
+
+set strTable(1000_WELCOME_CD) \
+ "format %s \"[cdNameGet description]\""
+
+set strTable(1000_WELCOME1) \
+ "format %s \"Welcome to the SETUP program. This program will\
+ install \[cdromDescGet\] on your computer.\""
+
+set strTable(1010_WELCOME2) \
+ "It is strongly recommended that you exit all programs and disable virus\
+ protection before running this SETUP program."
+
+set strTable(1020_WELCOME3) \
+ "At any time, you can quit the SETUP program by clicking the <Cancel>\
+ button. You also can go back to previous dialog boxes by clicking the\
+ <Back> button. To accept the current settings for a dialog box and go on\
+ with the installation process, click the <Next> button."
+
+set strTable(3020_WELCOME3) \
+ "format %s \"At any prompt, you can cancel installation \[cdromDescGet\]\
+ by typing \'exit\'. You can also go to the previous question\
+ by typing \'-\'. To accept current settings and go on with\
+ the installation process, press <Return>.\""
+
+set strTable(1030_WELCOME4) \
+ "WARNING: This program is protected by copyright law and international\
+ treaties."
+
+set strTable(1040_WELCOME5) \
+ "Unauthorized reproduction or distribution of this program, or any portion\
+ of it, may result in severe civil and criminal penalties, and will be\
+ prosecuted to the maximum extent possible under law."
+
+set strTable(1050_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\] is not\
+ recommended. We suggest that you logoff and logon as a normal\
+ user before running this program.\
+ \n\nClick Next to continue with SETUP anyway.\""
+
+set strTable(3050_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\]\
+ is not recommended. We suggest that you logoff and \
+ logon as a normal user before running this program.\
+ \n\nPress <Return> to continue with SETUP anyway.\""
+
+set strTable(1051_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] without System Administrator\
+ privileges is not recommended. Under your present privileges,\
+ SETUP will not offer certain installation options, such as \
+ the installation of some services, etc. Also, the software\
+ will be installed as a personal copy and will not be visible\
+ to other users on this machine.\
+ \n\nTo install \[cdromDescGet\] with access to all its\
+ installation features and options, we suggest that you exit\
+ the installation now and rerun it later with System\
+ Administrator\'s privileges.\n\nClick <Next> to continue with\
+ SETUP anyway.\""
+
+set strTable(1060_REGISTRATION) \
+ "Below, type your name, the name of your company."
+
+set strTable(1070_WARN_1) \
+ "The installation key you entered is invalid. Please enter a valid\
+ installation key."
+
+set strTable(1071_WARN_1) \
+ "Please enter the requested information."
+
+set strTable(1080_WARN_2) \
+ "You entered a key that was not created for this CD-ROM. Please verify\
+ that you are using the appropriate key. If this problem persists, contact\
+ Wind River Systems Sales department for help."
+
+set strTable(1080_WARN_3) \
+ "The installation key you entered is meant for other vendor's CD-ROM.\
+ Please contact the vendor who issued the CD-ROM for a proper key."
+
+set strTable(1085_WARN_4) \
+ "This CD-ROM does not require an installation key. Click the \"Next\"\
+ button to continue the installation."
+
+set strTable(1090_WARN_3) \
+ "format %s \"Can\'t initiate SETUP: \[lindex \$args 0\]. Please correct\
+ the problem then run SETUP again.\""
+
+set strTable(1095_WARN_NO_TCPIP) \
+ "SETUP has detected that your system does not have TCP-IP installed.\
+ To correct the problem, please contact your administrator and then\
+ run SETUP again.\nAborting setup."
+
+set strTable(1097_WARN_NO_LONGFILENAME_SUP) \
+ "SETUP has detected that your system does not have long filename\
+ support. To correct the problem, please contact your administrator\
+ and then run SETUP again.\nAborting setup."
+
+set strTable(1105_FULL_INSTALL) \
+ "Installs the Tornado products, tools, compilers, and other optional\
+ components that you may have purchased."
+
+set strTable(1107_PROGRAM_GROUP) \
+"Installs only the Tornado program group and tools icons for access to\
+ Tornado tools installed on a remote server."
+
+set strTable(1100_DEST_DIR) \
+ "format %s \"Please type the name of the directory where you want SETUP to\
+ install \[cdromDescGet\].\
+ \n\nClick the <Browse> button to choose the directory\
+ interactively.\""
+
+set strTable(1100_REMOTE_DIR) \
+ "format %s \"Please type the name of the directory where Tornado has\
+ already been installed.\
+ \n\nClick the <Browse> button to choose the directory\
+ interactively.\""
+
+set strTable(3100_DEST_DIR) \
+ "format %s \"Please type the name of the directory where you want SETUP\
+ to install \[cdromDescGet\].\""
+
+set strTable(1110_DEST_DIR_WARN) \
+ "The installation directory you entered does not exist.\
+ \nDo you want to create it now?"
+
+set strTable(3110_DEST_DIR_WARN) \
+ "The installation directory you entered does not exist."
+
+set strTable(3115_DEST_DIR_QUESTION) \
+ "Do you want to create it now? \[y\]"
+
+set strTable(1111_DEST_DIR_WARN) \
+ "format %s \"Installing \[cdromDescGet\] in the root directory is not\
+ recommended.\nClick <Yes> to select another directory.\""
+
+set strTable(1120_DEST_DIR_WARN2) \
+ "format %s \"Creating \[destDirGet\] failed: file exists.\""
+
+set strTable(1121_DEST_DIR_WARN2) \
+ "format %s \"Installing in \[destDirGet\] is not recommended.\
+ \nDo you want to change the installation directory?\""
+
+set strTable(1122_DEST_DIR_WARN2) \
+ "format %s \"Unable to create \[destDirGet\].\""
+
+set strTable(1130_DEST_DIR_WARN3) \
+ "You do not have permission to write files into the installation directory\
+ you entered.\
+ \n\nPlease choose a writable directory."
+
+set strTable(1135_DEST_DIR_WARN4) \
+ "format %s \"The installation directory you entered contains white\
+ space(s). Please select another directory.\""
+
+set strTable(1137_DUP_PRODUCT_WARN) \
+ "format %s \"Reinstalling products may potentially destroy any\
+ modifications you may have made to previously installed files.\
+ Do you wish to continue with the installation or go back to the\
+ '\[strTableGet 1450_TITLE_OPTION\]' page to reconsider your choices?\""
+
+set strTable(3155_COMP_SELECT_QUESTION) \
+ "Do you want to go back and specify a directory on a bigger partition?\
+ \[y\]"
+
+set strTable(1140_COMP_SELECT) \
+ "format %s \"In the option list below, please check all items you wish\
+ to install. SETUP files will be copied to your selected directory and\
+ take up \[setupSizeGet\] MB of disk space.\n\""
+
+set strTable(3140_COMP_SELECT) \
+ "In the option list below, select the item(s) you want to install."
+
+set strTable(3145_COMP_SELECT_CHANGE) \
+ "Press <Return> to accept the setting. To change the setting, enter a\
+ list of item numbers separated by spaces."
+
+set strTable(3145_COMP_SELECT_CHANGE_INVALID) \
+ "The item number(s) you entered is not valid."
+
+set strTable(1150_COMP_SELECT_WARN) \
+ "There is not enough disk space to install the selected component(s).\
+ \n\nDo you want to go back and specify a directory on a bigger disk or\
+ partition?"
+
+set strTable(3150_COMP_SELECT_WARN) \
+ "There is not enough space to install the selected component(s)."
+
+set strTable(1151_COMP_SELECT_WARN) \
+ "At least one component must be selected to continue installation."
+
+set strTable(1160_PERMISSION) \
+ "SETUP is about to install the component(s) you have requested.\
+ \n\nThe selected button(s) below indicate the file permissions which\
+ will be set during the installation process.\
+ \n\nPlease adjust these to suit your site requirements."
+
+set strTable(3160_PERMISSION) \
+ "SETUP is about to install the component(s) you have requested."
+
+set strTable(3162_PERMISSION) \
+ "The list below indicates the file permissions which will be set during\
+ the installation process. Please adjust these to suit your site\
+ requirements."
+
+set strTable(3165_PERMISSION_QUESTION) \
+ "Press <Return> to accept the setting. To change the setting, enter a\
+ list of item numbers separated by spaces."
+
+set strTable(1161_FOLDER_SELECT) \
+ "SETUP will add program icons to the Program Folder listed below. You may\
+ type a new folder name, or select one from the existing Folders list."
+
+set strTable(1162_FOLDER_SELECT) \
+ "Please enter a valid folder name."
+
+set strTable(1170_FILE_COPY) \
+ "format %s \"SETUP is copying the selected component(s) to the directory\
+ \[destDirGet\].\""
+
+set strTable(1171_FILE_COPY) \
+ "format %s \"SETUP cannot read \[setupFileNameGet 0\] from the CD-ROM.\
+ Please ensure that the CD-ROM is properly mounted.\""
+
+set strTable(1180_LIB_UPDATE) \
+ "SETUP is updating the VxWorks libraries. We recommend that you let\
+ SETUP finish this step, or the libraries will be in an inconsistent\
+ state. Please be patient as the process may take several minutes. \
+ If you want to quit the SETUP program, click <Cancel> and run\
+ the SETUP program again at a later time."
+
+set strTable(3180_LIB_UPDATE) \
+ "SETUP is updating the VxWorks libraries."
+
+set strTable(1190_REGISTRY_HOST) \
+ "The Tornado Registry is a daemon that keeps track of all available\
+ targets by name. Only one registry is required on your network, \
+ and it can run on any networked host.\
+ \n\nPlease enter the name of the host where the Tornado Registry will\
+ be running."
+
+set strTable(1191_REGISTRY_DESC) \
+ "The Tornado Registry is a daemon that keeps track of all available\
+ targets by name. Only one registry is required on your network, \
+ and it can run on any networked host."
+
+set strTable(1192_REGISTRY_NAME) \
+ "Please enter the name of the host where the Tornado Registry will\
+ be running."
+
+set strTable(1200_FINISH_WARN) \
+ "format %s \"However, there were \[errorCountGet\] error(s) which occured\
+ during the process. Please review the log file\
+ \[destDirDispGet\]/setup.log for more information.\""
+
+set strTable(1210_FINISH) \
+ "format %s \"SETUP has completed installing the selected product(s).\""
+
+set strTable(1212_FINISH) \
+ "SETUP has completed installing the program folders and icons."
+
+set strTable(1213_FINISH) \
+ "Terminating SETUP program."
+
+set strTable(1360_QUIT_CALLBACK) \
+ "format %s \"SETUP is not complete. If you quit the SETUP program now,\
+ \[cdromDescGet\] will not be installed.\n\nYou may run\
+ the SETUP program at a later time to complete the\
+ installation.\
+ \n\nTo continue installing the program, click <Resume>. \
+ To quit the SETUP program, click <Exit SETUP>.\""
+
+set strTable(3360_QUIT_CALLBACK) \
+ "format %s \"SETUP is not complete. If you quit the SETUP program now,\
+ \[cdromDescGet\] will not be installed.\n\nYou may run the\
+ SETUP program at a later time to complete the installation.\
+ \n\nTo continue installing the program, Press <Return>. \
+ To quit the SETUP program, type \'exit\'.\""
+
+set strTable(1370_FILE_ACCESS_ERROR) \
+ "format %s \"SETUP cannot create/update file \[lindex \$args 0\]:\
+ \[lindex \$args 1\]\""
+
+set strTable(1380_DEFLATE_ERROR) \
+ "format %s \"SETUP isn\'t able to deflate \[setupFileNameGet 0\]\
+ \n\nPlease select one of the following options\
+ to continue with the SETUP process.\""
+
+set strTable(1390_MEMORY_LOW) \
+ "The system is running out of memory. To continue, close applications\
+ or increase the system swap space."
+
+set strTable(1400_DISK_FULL) \
+ "No disk space left. To continue, free up some disk space."
+
+set strTable(1550_USAGE) \
+ "Usage: SETUP /I\[con\]\]\t\n\
+ /I : Add standard Tornado icons \n\
+ from a remote installation"
+
+set strTable(1410_TITLE_WELCOME) "Welcome"
+set strTable(1420_TITLE_WARNING) "Warning"
+set strTable(1430_TITLE_REGISTRATION) "User Registration"
+set strTable(1440_TITLE_DESTDIR) "Select Directory"
+set strTable(1450_TITLE_OPTION) "Select Products"
+set strTable(1460_TITLE_PERMISSION) "Permission"
+set strTable(1470_TITLE_FILECOPY) "Copying Files"
+set strTable(1480_TITLE_LIBUPDATE) "Update Libraries"
+set strTable(1490_TITLE_REGISTRY_HOST) "Tornado Registry"
+set strTable(1495_TITLE_BACKWARD_COMPATIBILITY) "Backward Compatibility"
+set strTable(1500_TITLE_FINISH) "Finish"
+set strTable(1560_TITLE_FOLDER) "Select Folder"
+set strTable(1563_TITLE_DLL_REG) "Software Registration"
+set strTable(1567_TITLE_DCOM) "DCOM Installation"
+
+set strTable(1570_OPTION_SELECT) \
+ "Choose one of the options listed below, then click the\
+ <Next> button to continue the installation."
+
+set strTable(1576_OPTION_MANUAL) \
+ "Install Tornado Registry manually"
+
+set strTable(1577_OPTION_STARTUP) \
+ "Install Tornado Registry locally in the Startup Group"
+
+set strTable(1578_OPTION_SERVICE) \
+ "Install Tornado Registry locally as a Service"
+
+set strTable(1579_OPTION_REMOTE) \
+ "Configure to use a remote Tornado Registry"
+
+set strTable(1580_OPTION_DESC) \
+ "If you plan on running Tornado in a non-networked environment, we\
+ recommend that you install the registry in your Startup Group or as an\
+ NT Service. For more information, consult your Tornado User\'s Guide."
+
+set strTable(1581_OPTION_DESC) \
+ "If you plan on running Tornado in a non-networked environment, we\
+ recommend that you install the registry in your Startup Group. For more\
+ information, consult your Tornado User\'s Guide."
+
+set strTable(3000_RETURN_QUESTION) \
+ "Press <Return> to continue"
+
+set strTable(3055_EXIT_QUESTION) \
+ "Type \'exit\' to quit the program or press <Return> to continue"
+
+set strTable(3370_BACK_CALLBACK) \
+ "Cannot go back further."
+
+set strTable(1080_WARN_4) \
+ "The installation key you entered attempted to unlock one or more \
+ products that may have been removed from our product line. \
+ Please compare the unlocked product list on the\
+ \"[strTableGet 1450_TITLE_OPTION]\" screen with your purchased order\
+ list, and contact us if you discover any differences."
+
+set strTable(4000_BASE_INSTALL_WARN) \
+ "format %s \"Warning! Re-installing Tornado over an existing \
+ tree will overwrite any installed patches. \
+ If you proceed with the installation, please \
+ re-install patches if any.\""
+
+set strTable(4000_BASE_INSTALL_WARN_1) \
+ "Select <Install> to overwrite existing Tornado installation,\
+ or choose <Select Path> to enable you to back up to the \'Select\
+ Directory\' page to enter an alternate path."
+
+set strTable(4010_FILE_EXISTS_OLDER_WARN) \
+ "format %s \"The file \'\$current_file\' exists in your destination\
+ directory path \'\[destDirGet\]\' and is older. You can\
+ set the policy for handling duplicate files by\
+ selecting one of the following buttons. All files to be\
+ overwritten will be backed up.\""
+
+set strTable(4010_FILE_EXISTS_NEWER_WARN) \
+ "format %s \"The file \'\$current_file\' exists in your destination\
+ directory path \'\[destDirGet\]\' and is newer. You can\
+ set the policy for handling duplicate files by\
+ selecting one of the following buttons. All files to be\
+ overwritten will be backed up.\""
+
+set strTable(4010_FILE_EXISTS_WARN_1) \
+ "Overwrite the existing file."
+
+set strTable(4010_FILE_EXISTS_WARN_2) \
+ "Do not overwrite the existing file."
+
+set strTable(4010_FILE_EXISTS_WARN_3) \
+ "Overwrite ALL files, do not show this dialog again."
+
+set strTable(4020_ANALYZING_BANNER) \
+ "Analyzing installation files, please wait..."
+
+set strTable(4030_NO_ZIP_FILE) \
+ "format %s \"SETUP cannot find the ZIP files for installing\
+ \[cdromDescGet\] in the default directory.\n\n\
+ Please type the name of the WIND\
+ directory containing the ZIP files.\n\nClick the\
+ <Browse> button to choose the directory interactively.\""
+
+set strTable(4040_LIC_TEXT) \
+ "Attention: By clicking on the \"I accept\" button or by\
+ Installing the software you are consenting to be bound by\
+ the terms of this agreement (this \"Agreement\"). If you do\
+ not agree to all of the terms, click the \"I don't Accept\" button\
+ and do not install this software. A copy of this Agreement can be viewed\
+ in the Setup directory under the destination path that you have\
+ designated after the installation is completed."
+
+set strTable(4050_PROJECT_TEXT) \
+ "Please enter your project name, and the number of licensed\
+ users on the project in the spaces below."
+
+set strTable(4060_LICENSE_TEXT) \
+ "By clicking on the \"I accept\" button \
+ you are consenting to be bound by the terms of this agreement.\
+ If you do not agree to all of the terms, click the \"Cancel\"\
+ button and do not install this software."
+
+set strTable(4070_DLL_TEXT) \
+ "SETUP is registering software on your machine. This will take a few\
+ minutes."
+
+set strTable(4080_DCOM_TEXT) \
+ "Setup has detected that your COM/DCOM DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 to update your\
+ DLLs.\
+ \n\n\
+ You will have to reboot your system after DLL files have been\
+ installed. Please rerun SETUP to continue with installation\
+ after your system has rebooted.\
+ \n\n\
+ Note: The DCOM95 installation programs update your\
+ system DLLs. You should save all open documents and close all\
+ programs before proceeding.\
+ \n\nWould you like to install \"DCOM95\" now?"
+
+set strTable(4082_DCOM95_AND_COMCTL_TEXT) \
+ "Setup has detected that your COM/DCOM and Common Control DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 and 401comupd.exe to update your\
+ DLLs.\
+ \n\n\
+ You must reboot your system after DLL files have been\
+ installed. After rebooting, please rerun SETUP to continue with\
+ installation.\
+ \n\n\
+ Note: 401comupd.exe and DCOM95 installation programs update your\
+ system DLLs. You should save all open documents and close all\
+ programs before proceeding.\
+ \n\nWould you like to install \"401comupd.exe\" and \"DCOM95\" now?"
+
+set strTable(4085_COMCTL_UPDATE_TEXT) \
+ "Setup has detected that your Common Control DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 and 401comupd.exe to update your\
+ DLLs.\
+ \n\n\
+ You will have to reboot your system after DLL files have been\
+ installed. Please rerun SETUP to continue with installation\
+ after your system has rebooted.\
+ \n\n\
+ Note: The 401comupd.exe installation program updates your system DLLs. You\
+ should save all open documents and close all programs before installing\
+ 401comupd.exe.\
+ \n\nWould you like to install \"401comupd.exe\" now?"
+
+set strTable(4090_README_TEXT) \
+ "Please read the README file contents that are displayed below.\
+ It contains important information that will enable you to install\
+ and successfully run the BerkeleyDB product. For your convenience\
+ this file is copied to your installation directory path."
+
+set strTable(5000_PATCHES_REQUIRED_TEXT) \
+ "SETUP has detected that required operating system patches\
+ have not been installed on this machine. These patches are\
+ necessary for the correct operation of SETUP and Tornado. Please refer\
+ to the Tornado Release Notes for details.\n\n\
+ The following operating system patches must be installed before\
+ you can continue with installation:\n\n"
+
+set strTable(5001_PATCHES_RECOMMENDED_TEXT) \
+ "\n\nSETUP has also detected that recommended operating system patches\
+ have not been installed. It is recommended that these patches are\
+ installed before starting Tornado to ensure correct operation.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5002_PATCHES_RECOMMENDED_TEXT) \
+ "SETUP has detected that some operating system patches have not been\
+ installed on this machine. It is recommended that these\
+ patches are installed before starting Tornado to ensure correct\
+ operation. Please refer to the Tornado Release Notes\
+ for details.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5003_PATCHES_REQUIRED_FORMATTED_TEXT) \
+ "\n SETUP has detected that required operating system patches\n\
+ have not been installed on this machine. These patches are\n\
+ necessary for the correct operation of SETUP and Tornado. Please refer\n\
+ to the Tornado Release Notes for details.\n\n\
+ The following operating system patches must be installed before\n\
+ you can continue with installation:\n\n"
+
+set strTable(5004_PATCHES_RECOMMENDED_FORMATTED_TEXT) \
+ "\n\n SETUP has also detected that recommended operating system patches\n\
+ have not been installed. It is recommended that these patches are\n\
+ installed before starting Tornado to ensure correct operation.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5005_PATCHES_RECOMMENDED_FORMATTED_TEXT) \
+ "\n SETUP has detected that some operating system patches have not been\n\
+ installed on this machine. It is recommended that these\n\
+ patches are installed before starting Tornado to ensure correct\n\
+ operation. Please refer to the Tornado Release Notes\n\
+ for details.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5006_PATCHES_SUN_LOCATION) \
+ "\nPatches for Sun machines are available at http://sunsolve.sun.com.\n"
+
+set strTable(5007_PATCHES_HP_LOCATION) \
+ "\nPatches for HP machines are available at:\n\
+ http://us-support.external.hp.com (US, Canada, Asia-Pacific, and\
+ Latin-America)\n\
+ http://europe-support.external.hp.com (Europe)\n"
+
+set strTable(5008_PATCHES_UPDATE) \
+ "\nNote: System vendors very frequently update and replace patches.\
+ If a specific patch is no longer available, please use the\
+ replacement patch suggested by the system vendor.\n"
+
+set strTable(5009_PATCHES_UPDATE_FORMATTED) \
+ "\n Note: System vendors very frequently update and replace patches.\n\
+ If a specific patch is no longer available, please use the\n\
+ replacement patch suggested by the system vendor.\n"
+
+set strTable(5010_DRIVERS_INFO) \
+ "The installation of the Driver component is required because\n\
+ you have selected the basic Tornado product for installation.\n\n\
+ If you wish to uncheck this item you must uncheck either the\n\
+ basic Tornado and/or Tornado Simulator product(s) or go to the\n\
+ 'Details' button for Tornado and uncheck both the Simulator and\n\
+ the Tornado Object parts."
+
+set strTable(5020_DO_NOT_SAVE_KEY_FOR_FAE) \
+ "The installation key you are about to enter will NOT\
+ be saved in the system registry.\nIs this what you want?"
+
+set strTable(5030_BACKWARD_COMPATIBILITY) \
+ "While the portmapper is not needed for Tornado 2.0, it is\
+ included in this release for development environments in\
+ which both Tornado 2.0 and Tornado 1.0.1 are in use.\
+ \n\nWould you like to use your Tornado 1.0.x tools with Tornado 2.0?"
+
+set strTable(5040_BACKWARD_COMPATIBILITY) \
+ "Note:\
+ \n\nIf you have selected to install the Tornado Registry as\
+ a service, there is no way to retain backward compatibility\
+ with Tornado 1.0.x."
+
+set strTable(5050_BACKWARD_COMPATIBILITY) \
+ "For more information on backward compatibility,\
+ please consult the Tornado 2.0 Release Notes."
diff --git a/storage/bdb/dist/vx_setup/README.in b/storage/bdb/dist/vx_setup/README.in
new file mode 100644
index 00000000000..f96948c37ba
--- /dev/null
+++ b/storage/bdb/dist/vx_setup/README.in
@@ -0,0 +1,7 @@
+README.TXT: Sleepycat Software Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@ Release v@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+
+Information on known problems, changes introduced with the
+current revision of the CD-ROM, and other product bulletins
+can be obtained from the Sleepycat Software web site:
+
+ http://www.sleepycat.com/
diff --git a/storage/bdb/dist/vx_setup/SETUP.BMP b/storage/bdb/dist/vx_setup/SETUP.BMP
new file mode 100644
index 00000000000..2918480b8c2
--- /dev/null
+++ b/storage/bdb/dist/vx_setup/SETUP.BMP
Binary files differ
diff --git a/storage/bdb/dist/vx_setup/vx_allfile.in b/storage/bdb/dist/vx_setup/vx_allfile.in
new file mode 100644
index 00000000000..61a1b8ee805
--- /dev/null
+++ b/storage/bdb/dist/vx_setup/vx_allfile.in
@@ -0,0 +1,5 @@
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/BerkeleyDB.wpj
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/BerkeleyDB.wsp
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db.h
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db_config.h
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db_int.h
diff --git a/storage/bdb/dist/vx_setup/vx_demofile.in b/storage/bdb/dist/vx_setup/vx_demofile.in
new file mode 100644
index 00000000000..42a698ea367
--- /dev/null
+++ b/storage/bdb/dist/vx_setup/vx_demofile.in
@@ -0,0 +1,3 @@
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/dbdemo/dbdemo.wpj
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/dbdemo/README
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/dbdemo/dbdemo.c
diff --git a/storage/bdb/dist/vx_setup/vx_setup.in b/storage/bdb/dist/vx_setup/vx_setup.in
new file mode 100644
index 00000000000..7bc3f510cfa
--- /dev/null
+++ b/storage/bdb/dist/vx_setup/vx_setup.in
@@ -0,0 +1,13 @@
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ demo-db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@/filelist.all
+BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@-Demo
+@DB_SETUP_DIR@
+BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ Demo program
+demo-db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@/filelist.demo
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
diff --git a/storage/bdb/dist/win_config.in b/storage/bdb/dist/win_config.in
new file mode 100644
index 00000000000..09acab28806
--- /dev/null
+++ b/storage/bdb/dist/win_config.in
@@ -0,0 +1,439 @@
+/* Define to 1 if you want to build a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* Define to 1 if you want a debugging version. */
+/* #undef DEBUG */
+#if defined(_DEBUG)
+#if !defined(DEBUG)
+#define DEBUG 1
+#endif
+#endif
+
+/* Define to 1 if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define to 1 if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* Define to 1 if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef HAVE_CLOCK_GETTIME */
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
+
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_DIRENT_H */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
+/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+
+/* Define to 1 if mutexes hold system resources. */
+/* #undef HAVE_MUTEX_SYSTEM_RESOURCES */
+
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
+
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
+
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
+
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
+
+/* Define to 1 to use VxWorks mutexes. */
+/* #undef HAVE_MUTEX_VXWORKS */
+
+/* Define to 1 to use Windows mutexes. */
+#define HAVE_MUTEX_WIN32 1
+
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
+
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
+
+/* Define to 1 if you have the `pread' function. */
+/* #undef HAVE_PREAD */
+
+/* Define to 1 if you have the `pstat_getdynamic' function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define to 1 if you have the `pwrite' function. */
+/* #undef HAVE_PWRITE */
+
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
+#define HAVE_QSORT 1
+
+/* Define to 1 if you have the `raise' function. */
+#define HAVE_RAISE 1
+
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
+/* #undef HAVE_SCHED_YIELD */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef HAVE_SELECT */
+
+/* Define to 1 if you have the `shmget' function. */
+/* #undef HAVE_SHMGET */
+
+/* Define to 1 if you have the `snprintf' function. */
+#define HAVE_SNPRINTF 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define to 1 if you have the `strdup' function. */
+#define HAVE_STRDUP 1
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
+#define HAVE_STRTOUL 1
+
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+/* #undef HAVE_STRUCT_STAT_ST_BLKSIZE */
+
+/* Define to 1 if you have the `sysconf' function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+/* #undef HAVE_UNISTD_H */
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+/* #undef HAVE_UNLINK_WITH_OPEN_FAILURE */
+
+/* Define to 1 if you have the `vsnprintf' function. */
+#define HAVE_VSNPRINTF 1
+
+/* Define to 1 if building VxWorks. */
+/* #undef HAVE_VXWORKS */
+
+/* Define to 1 if you have the `yield' function. */
+/* #undef HAVE_YIELD */
+
+/* Define to 1 if you have the `_fstati64' function. */
+#define HAVE__FSTATI64 1
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB __EDIT_DB_VERSION__"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-__EDIT_DB_VERSION__"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "__EDIT_DB_VERSION__"
+
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions, we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
+
+/*
+ * XXX
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on Windows.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <direct.h>
+#include <fcntl.h>
+#include <io.h>
+#include <limits.h>
+#include <memory.h>
+#include <process.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <errno.h>
+
+/*
+ * To build Tcl interface libraries, the include path must be configured to
+ * use the directory containing <tcl.h>, usually the include directory in
+ * the Tcl distribution.
+ */
+#ifdef DB_TCL_SUPPORT
+#include <tcl.h>
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+/*
+ * All of the necessary includes have been included, ignore the #includes
+ * in the Berkeley DB source files.
+ */
+#define NO_SYSTEM_INCLUDES
+
+/*
+ * Win32 has getcwd, snprintf and vsnprintf, but under different names.
+ */
+#define getcwd(buf, size) _getcwd(buf, size)
+#define snprintf _snprintf
+#define vsnprintf _vsnprintf
+
+/*
+ * Win32 does not define getopt and friends in any header file, so we must.
+ */
+#if defined(__cplusplus)
+extern "C" {
+#endif
+extern int optind;
+extern char *optarg;
+extern int getopt(int, char * const *, const char *);
+#if defined(__cplusplus)
+}
+#endif
+
+/*
+ * We use DB_WIN32 much as one would use _WIN32, to determine that we're
+ * using an operating system environment that supports Win32 calls
+ * and semantics. We don't use _WIN32 because cygwin/gcc also defines
+ * that, even though it closely emulates the Unix environment.
+ */
+#define DB_WIN32 1
+
+/*
+ * This is a grievous hack -- once we've included windows.h, we have no choice
+ * but to use ANSI-style varargs (because it pulls in stdarg.h for us). DB's
+ * code decides which type of varargs to use based on the state of __STDC__.
+ * Sensible. Unfortunately, Microsoft's compiler _doesn't_ define __STDC__
+ * unless you invoke it with arguments turning OFF all vendor extensions. Even
+ * more unfortunately, if we do that, it fails to parse windows.h!!!!! So, we
+ * define __STDC__ here, after windows.h comes in. Note: the compiler knows
+ * we've defined it, and starts enforcing strict ANSI compliance from this point
+ * on.
+ */
+#define __STDC__ 1
diff --git a/storage/bdb/dist/win_exports.in b/storage/bdb/dist/win_exports.in
new file mode 100644
index 00000000000..52df529d028
--- /dev/null
+++ b/storage/bdb/dist/win_exports.in
@@ -0,0 +1,134 @@
+# $Id: win_exports.in,v 1.25 2002/08/29 14:22:21 margo Exp $
+
+# Standard interfaces.
+ db_create
+ db_env_create
+ db_strerror
+ db_version
+ db_xa_switch
+ log_compare
+ txn_abort
+ txn_begin
+ txn_commit
+
+# Library configuration interfaces.
+ db_env_set_func_close
+ db_env_set_func_dirfree
+ db_env_set_func_dirlist
+ db_env_set_func_exists
+ db_env_set_func_free
+ db_env_set_func_fsync
+ db_env_set_func_ioinfo
+ db_env_set_func_malloc
+ db_env_set_func_map
+ db_env_set_func_open
+ db_env_set_func_read
+ db_env_set_func_realloc
+ db_env_set_func_rename
+ db_env_set_func_seek
+ db_env_set_func_sleep
+ db_env_set_func_unlink
+ db_env_set_func_unmap
+ db_env_set_func_write
+ db_env_set_func_yield
+
+# Needed for application-specific logging and recovery routines.
+ __db_add_recovery
+
+# These are needed to link the tcl library.
+ __db_dbm_close
+ __db_dbm_delete
+ __db_dbm_fetch
+ __db_dbm_firstkey
+ __db_dbm_init
+ __db_dbm_nextkey
+ __db_dbm_store
+ __db_hcreate
+ __db_hdestroy
+ __db_hsearch
+ __db_loadme
+ __db_ndbm_clearerr
+ __db_ndbm_close
+ __db_ndbm_delete
+ __db_ndbm_dirfno
+ __db_ndbm_error
+ __db_ndbm_fetch
+ __db_ndbm_firstkey
+ __db_ndbm_nextkey
+ __db_ndbm_open
+ __db_ndbm_pagfno
+ __db_ndbm_rdonly
+ __db_ndbm_store
+ __db_panic
+ __db_r_attach
+ __db_r_detach
+ __db_win32_mutex_init
+ __db_win32_mutex_lock
+ __db_win32_mutex_unlock
+ __ham_func2
+ __ham_func3
+ __ham_func4
+ __ham_func5
+ __ham_test
+ __lock_dump_region
+ __memp_dump_region
+ __os_calloc
+ __os_closehandle
+ __os_free
+ __os_ioinfo
+ __os_malloc
+ __os_open
+ __os_openhandle
+ __os_read
+ __os_realloc
+ __os_strdup
+ __os_umalloc
+ __os_write
+
+# These are needed for linking tools or Java.
+ __bam_init_print
+ __bam_pgin
+ __bam_pgout
+ __crdel_init_print
+ __db_dispatch
+ __db_dump
+ __db_e_stat
+ __db_err
+ __db_getlong
+ __db_getulong
+ __db_global_values
+ __db_init_print
+ __db_inmemdbflags
+ __db_isbigendian
+ __db_omode
+ __db_overwrite
+ __db_pgin
+ __db_pgout
+ __db_prdbt
+ __db_prfooter
+ __db_prheader
+ __db_rpath
+ __db_util_cache
+ __db_util_interrupted
+ __db_util_logset
+ __db_util_siginit
+ __db_util_sigresend
+ __db_verify_callback
+ __db_verify_internal
+ __dbreg_init_print
+ __fop_init_print
+ __ham_get_meta
+ __ham_init_print
+ __ham_pgin
+ __ham_pgout
+ __ham_release_meta
+ __os_clock
+ __os_get_errno
+ __os_id
+ __os_set_errno
+ __os_sleep
+ __os_ufree
+ __os_yield
+ __qam_init_print
+ __qam_pgin_out
+ __txn_init_print
diff --git a/storage/bdb/env/db_salloc.c b/storage/bdb/env/db_salloc.c
new file mode 100644
index 00000000000..1ef768d4114
--- /dev/null
+++ b/storage/bdb/env/db_salloc.c
@@ -0,0 +1,338 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_salloc.c,v 11.16 2002/08/24 20:27:25 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * Implement shared memory region allocation, using simple first-fit algorithm.
+ * The model is that we take a "chunk" of shared memory store and begin carving
+ * it up into areas, similarly to how malloc works. We do coalescing on free.
+ *
+ * The "len" field in the __data struct contains the length of the free region
+ * (less the size_t bytes that holds the length). We use the address provided
+ * by the caller to find this length, which allows us to free a chunk without
+ * requiring that the caller pass in the length of the chunk they're freeing.
+ */
+SH_LIST_HEAD(__head);
+struct __data {
+ size_t len;
+ SH_LIST_ENTRY links;
+};
+
+/*
+ * __db_shalloc_init --
+ * Initialize the area as one large chunk.
+ *
+ * PUBLIC: void __db_shalloc_init __P((void *, size_t));
+ */
+void
+__db_shalloc_init(area, size)
+ void *area;
+ size_t size;
+{
+ struct __data *elp;
+ struct __head *hp;
+
+ hp = area;
+ SH_LIST_INIT(hp);
+
+ elp = (struct __data *)(hp + 1);
+ elp->len = size - sizeof(struct __head) - sizeof(elp->len);
+ SH_LIST_INSERT_HEAD(hp, elp, links, __data);
+}
+
+/*
+ * __db_shalloc_size --
+ * Return the space needed for an allocation, including alignment.
+ *
+ * PUBLIC: int __db_shalloc_size __P((size_t, size_t));
+ */
+int
+__db_shalloc_size(len, align)
+ size_t len, align;
+{
+ /* Never allocate less than the size of a struct __data. */
+ if (len < sizeof(struct __data))
+ len = sizeof(struct __data);
+
+#ifdef DIAGNOSTIC
+ /* Add room for a guard byte. */
+ ++len;
+#endif
+
+ /* Never align to less than a db_align_t boundary. */
+ if (align <= sizeof(db_align_t))
+ align = sizeof(db_align_t);
+
+ return ((int)(ALIGN(len, align) + sizeof (struct __data)));
+}
+
+/*
+ * __db_shalloc --
+ * Allocate some space from the shared region.
+ *
+ * PUBLIC: int __db_shalloc __P((void *, size_t, size_t, void *));
+ */
+int
+__db_shalloc(p, len, align, retp)
+ void *p, *retp;
+ size_t len, align;
+{
+ struct __data *elp;
+ size_t *sp;
+ void *rp;
+
+ /* Never allocate less than the size of a struct __data. */
+ if (len < sizeof(struct __data))
+ len = sizeof(struct __data);
+
+#ifdef DIAGNOSTIC
+ /* Add room for a guard byte. */
+ ++len;
+#endif
+
+ /* Never align to less than a db_align_t boundary. */
+ if (align <= sizeof(db_align_t))
+ align = sizeof(db_align_t);
+
+ /* Walk the list, looking for a slot. */
+ for (elp = SH_LIST_FIRST((struct __head *)p, __data);
+ elp != NULL;
+ elp = SH_LIST_NEXT(elp, links, __data)) {
+ /*
+ * Calculate the value of the returned pointer if we were to
+ * use this chunk.
+ * + Find the end of the chunk.
+ * + Subtract the memory the user wants.
+ * + Find the closest previous correctly-aligned address.
+ */
+ rp = (u_int8_t *)elp + sizeof(size_t) + elp->len;
+ rp = (u_int8_t *)rp - len;
+ rp = (u_int8_t *)((db_alignp_t)rp & ~(align - 1));
+
+ /*
+ * Rp may now point before elp->links, in which case the chunk
+ * was too small, and we have to try again.
+ */
+ if ((u_int8_t *)rp < (u_int8_t *)&elp->links)
+ continue;
+
+ *(void **)retp = rp;
+#ifdef DIAGNOSTIC
+ /*
+ * At this point, whether or not we still need to split up a
+ * chunk, retp is the address of the region we are returning,
+ * and (u_int8_t *)elp + sizeof(size_t) + elp->len gives us
+ * the address of the first byte after the end of the chunk.
+ * Make the byte immediately before that the guard byte.
+ */
+ *((u_int8_t *)elp + sizeof(size_t) + elp->len - 1) = GUARD_BYTE;
+#endif
+
+#define SHALLOC_FRAGMENT 32
+ /*
+ * If there are at least SHALLOC_FRAGMENT additional bytes of
+ * memory, divide the chunk into two chunks.
+ */
+ if ((u_int8_t *)rp >=
+ (u_int8_t *)&elp->links + SHALLOC_FRAGMENT) {
+ sp = rp;
+ *--sp = elp->len -
+ ((u_int8_t *)rp - (u_int8_t *)&elp->links);
+ elp->len -= *sp + sizeof(size_t);
+ return (0);
+ }
+
+ /*
+ * Otherwise, we return the entire chunk, wasting some amount
+ * of space to keep the list compact. However, because the
+ * address we're returning to the user may not be the address
+ * of the start of the region for alignment reasons, set the
+ * size_t length fields back to the "real" length field to a
+ * flag value, so that we can find the real length during free.
+ */
+#define ILLEGAL_SIZE 1
+ SH_LIST_REMOVE(elp, links, __data);
+ for (sp = rp; (u_int8_t *)--sp >= (u_int8_t *)&elp->links;)
+ *sp = ILLEGAL_SIZE;
+ return (0);
+ }
+
+ return (ENOMEM);
+}
+
+/*
+ * __db_shalloc_free --
+ * Free a shared memory allocation.
+ *
+ * PUBLIC: void __db_shalloc_free __P((void *, void *));
+ */
+void
+__db_shalloc_free(regionp, ptr)
+ void *regionp, *ptr;
+{
+ struct __data *elp, *lastp, *newp;
+ struct __head *hp;
+ size_t free_size, *sp;
+ int merged;
+
+ /*
+ * Step back over flagged length fields to find the beginning of
+ * the object and its real size.
+ */
+ for (sp = (size_t *)ptr; sp[-1] == ILLEGAL_SIZE; --sp)
+ ;
+ ptr = sp;
+
+ newp = (struct __data *)((u_int8_t *)ptr - sizeof(size_t));
+ free_size = newp->len;
+
+#ifdef DIAGNOSTIC
+ /*
+ * The "real size" includes the guard byte; it's just the last
+ * byte in the chunk, and the caller never knew it existed.
+ *
+ * Check it to make sure it hasn't been stomped.
+ */
+ if (*((u_int8_t *)ptr + free_size - 1) != GUARD_BYTE) {
+ /*
+ * Eventually, once we push a DB_ENV handle down to these
+ * routines, we should use the standard output channels.
+ */
+ fprintf(stderr,
+ "Guard byte incorrect during shared memory free.\n");
+ abort();
+ /* NOTREACHED */
+ }
+
+ /* Trash the returned memory (including guard byte). */
+ memset(ptr, CLEAR_BYTE, free_size);
+#endif
+
+ /*
+ * Walk the list, looking for where this entry goes.
+ *
+ * We keep the free list sorted by address so that coalescing is
+ * trivial.
+ *
+ * XXX
+ * Probably worth profiling this to see how expensive it is.
+ */
+ hp = (struct __head *)regionp;
+ for (elp = SH_LIST_FIRST(hp, __data), lastp = NULL;
+ elp != NULL && (void *)elp < (void *)ptr;
+ lastp = elp, elp = SH_LIST_NEXT(elp, links, __data))
+ ;
+
+ /*
+ * Elp is either NULL (we reached the end of the list), or the slot
+ * after the one that's being returned. Lastp is either NULL (we're
+ * returning the first element of the list) or the element before the
+ * one being returned.
+ *
+ * Check for coalescing with the next element.
+ */
+ merged = 0;
+ if ((u_int8_t *)ptr + free_size == (u_int8_t *)elp) {
+ newp->len += elp->len + sizeof(size_t);
+ SH_LIST_REMOVE(elp, links, __data);
+ if (lastp != NULL)
+ SH_LIST_INSERT_AFTER(lastp, newp, links, __data);
+ else
+ SH_LIST_INSERT_HEAD(hp, newp, links, __data);
+ merged = 1;
+ }
+
+ /* Check for coalescing with the previous element. */
+ if (lastp != NULL && (u_int8_t *)lastp +
+ lastp->len + sizeof(size_t) == (u_int8_t *)newp) {
+ lastp->len += newp->len + sizeof(size_t);
+
+ /*
+ * If we have already put the new element into the list take
+ * it back off again because it's just been merged with the
+ * previous element.
+ */
+ if (merged)
+ SH_LIST_REMOVE(newp, links, __data);
+ merged = 1;
+ }
+
+ if (!merged) {
+ if (lastp == NULL)
+ SH_LIST_INSERT_HEAD(hp, newp, links, __data);
+ else
+ SH_LIST_INSERT_AFTER(lastp, newp, links, __data);
+ }
+}
+
+/*
+ * __db_shsizeof --
+ * Return the size of a shalloc'd piece of memory.
+ *
+ * !!!
+ * Note that this is from an internal standpoint -- it includes not only
+ * the size of the memory being used, but also the extra alignment bytes
+ * in front and, #ifdef DIAGNOSTIC, the guard byte at the end.
+ *
+ * PUBLIC: size_t __db_shsizeof __P((void *));
+ */
+size_t
+__db_shsizeof(ptr)
+ void *ptr;
+{
+ struct __data *elp;
+ size_t *sp;
+
+ /*
+ * Step back over flagged length fields to find the beginning of
+ * the object and its real size.
+ */
+ for (sp = (size_t *)ptr; sp[-1] == ILLEGAL_SIZE; --sp)
+ ;
+
+ elp = (struct __data *)((u_int8_t *)sp - sizeof(size_t));
+ return (elp->len);
+}
+
+/*
+ * __db_shalloc_dump --
+ *
+ * PUBLIC: void __db_shalloc_dump __P((void *, FILE *));
+ */
+void
+__db_shalloc_dump(addr, fp)
+ void *addr;
+ FILE *fp;
+{
+ struct __data *elp;
+
+ /* Make it easy to call from the debugger. */
+ if (fp == NULL)
+ fp = stderr;
+
+ fprintf(fp, "%s\nMemory free list\n", DB_LINE);
+
+ for (elp = SH_LIST_FIRST((struct __head *)addr, __data);
+ elp != NULL;
+ elp = SH_LIST_NEXT(elp, links, __data))
+ fprintf(fp, "%#lx: %lu\t", P_TO_ULONG(elp), (u_long)elp->len);
+ fprintf(fp, "\n");
+}
diff --git a/storage/bdb/env/db_shash.c b/storage/bdb/env/db_shash.c
new file mode 100644
index 00000000000..743a126307d
--- /dev/null
+++ b/storage/bdb/env/db_shash.c
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_shash.c,v 11.6 2002/03/01 17:22:16 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * Table of good hash values. Up to ~250,000 buckets, we use powers of 2.
+ * After that, we slow the rate of increase by half. For each choice, we
+ * then use a nearby prime number as the hash value.
+ *
+ * If a terabyte is the maximum cache we'll see, and we assume there are
+ * 10 1K buckets on each hash chain, then 107374182 is the maximum number
+ * of buckets we'll ever need.
+ */
+static const struct {
+ u_int32_t power;
+ u_int32_t prime;
+} list[] = {
+ { 32, 37}, /* 2^5 */
+ { 64, 67}, /* 2^6 */
+ { 128, 131}, /* 2^7 */
+ { 256, 257}, /* 2^8 */
+ { 512, 521}, /* 2^9 */
+ { 1024, 1031}, /* 2^10 */
+ { 2048, 2053}, /* 2^11 */
+ { 4096, 4099}, /* 2^12 */
+ { 8192, 8191}, /* 2^13 */
+ { 16384, 16381}, /* 2^14 */
+ { 32768, 32771}, /* 2^15 */
+ { 65536, 65537}, /* 2^16 */
+ { 131072, 131071}, /* 2^17 */
+ { 262144, 262147}, /* 2^18 */
+ { 393216, 393209}, /* 2^18 + 2^18/2 */
+ { 524288, 524287}, /* 2^19 */
+ { 786432, 786431}, /* 2^19 + 2^19/2 */
+ { 1048576, 1048573}, /* 2^20 */
+ { 1572864, 1572869}, /* 2^20 + 2^20/2 */
+ { 2097152, 2097169}, /* 2^21 */
+ { 3145728, 3145721}, /* 2^21 + 2^21/2 */
+ { 4194304, 4194301}, /* 2^22 */
+ { 6291456, 6291449}, /* 2^22 + 2^22/2 */
+ { 8388608, 8388617}, /* 2^23 */
+ { 12582912, 12582917}, /* 2^23 + 2^23/2 */
+ { 16777216, 16777213}, /* 2^24 */
+ { 25165824, 25165813}, /* 2^24 + 2^24/2 */
+ { 33554432, 33554393}, /* 2^25 */
+ { 50331648, 50331653}, /* 2^25 + 2^25/2 */
+ { 67108864, 67108859}, /* 2^26 */
+ { 100663296, 100663291}, /* 2^26 + 2^26/2 */
+ { 134217728, 134217757}, /* 2^27 */
+ { 201326592, 201326611}, /* 2^27 + 2^27/2 */
+ { 268435456, 268435459}, /* 2^28 */
+ { 402653184, 402653189}, /* 2^28 + 2^28/2 */
+ { 536870912, 536870909}, /* 2^29 */
+ { 805306368, 805306357}, /* 2^29 + 2^29/2 */
+ {1073741824, 1073741827}, /* 2^30 */
+ {0, 0}
+};
+
+/*
+ * __db_tablesize --
+ * Choose a size for the hash table.
+ *
+ * PUBLIC: int __db_tablesize __P((u_int32_t));
+ */
+int
+__db_tablesize(n_buckets)
+ u_int32_t n_buckets;
+{
+ int i;
+
+ /*
+ * We try to be clever about how big we make the hash tables. Use a
+ * prime number close to the "suggested" number of elements that will
+ * be in the hash table. Use 64 as the minimum hash table size.
+ *
+ * Ref: Sedgewick, Algorithms in C, "Hash Functions"
+ */
+ if (n_buckets < 32)
+ n_buckets = 32;
+
+ for (i = 0;; ++i) {
+ if (list[i].power == 0) {
+ --i;
+ break;
+ }
+ if (list[i].power >= n_buckets)
+ break;
+ }
+ return (list[i].prime);
+}
+
+/*
+ * __db_hashinit --
+ * Initialize a hash table that resides in shared memory.
+ *
+ * PUBLIC: void __db_hashinit __P((void *, u_int32_t));
+ */
+void
+__db_hashinit(begin, nelements)
+ void *begin;
+ u_int32_t nelements;
+{
+ u_int32_t i;
+ SH_TAILQ_HEAD(hash_head) *headp;
+
+ headp = (struct hash_head *)begin;
+
+ for (i = 0; i < nelements; i++, headp++)
+ SH_TAILQ_INIT(headp);
+}
diff --git a/storage/bdb/env/env_file.c b/storage/bdb/env/env_file.c
new file mode 100644
index 00000000000..f221fd8d701
--- /dev/null
+++ b/storage/bdb/env/env_file.c
@@ -0,0 +1,166 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_file.c,v 1.5 2002/03/08 17:47:18 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+static int __db_overwrite_pass __P((DB_ENV *,
+ const char *, DB_FH *, u_int32_t, u_int32_t, u_int32_t));
+
+/*
+ * __db_fileinit --
+ * Initialize a regular file, optionally zero-filling it as well.
+ *
+ * PUBLIC: int __db_fileinit __P((DB_ENV *, DB_FH *, size_t, int));
+ */
+int
+__db_fileinit(dbenv, fhp, size, zerofill)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t size;
+ int zerofill;
+{
+ db_pgno_t pages;
+ size_t i;
+ size_t nw;
+ u_int32_t relative;
+ int ret;
+ char buf[OS_VMPAGESIZE];
+
+ /* Write nuls to the new bytes. */
+ memset(buf, 0, sizeof(buf));
+
+ /*
+ * Extend the region by writing the last page. If the region is >4Gb,
+ * increment may be larger than the maximum possible seek "relative"
+ * argument, as it's an unsigned 32-bit value. Break the offset into
+ * pages of 1MB each so that we don't overflow (2^20 + 2^32 is bigger
+ * than any memory I expect to see for awhile).
+ */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_END)) != 0)
+ return (ret);
+ pages = (db_pgno_t)((size - OS_VMPAGESIZE) / MEGABYTE);
+ relative = (u_int32_t)((size - OS_VMPAGESIZE) % MEGABYTE);
+ if ((ret = __os_seek(dbenv,
+ fhp, MEGABYTE, pages, relative, 0, DB_OS_SEEK_CUR)) != 0)
+ return (ret);
+ if ((ret = __os_write(dbenv, fhp, buf, sizeof(buf), &nw)) != 0)
+ return (ret);
+
+ /*
+ * We may want to guarantee that there is enough disk space for the
+ * file, so we also write a byte to each page. We write the byte
+ * because reading it is insufficient on systems smart enough not to
+ * instantiate disk pages to satisfy a read (e.g., Solaris).
+ */
+ if (zerofill) {
+ pages = (db_pgno_t)(size / MEGABYTE);
+ relative = (u_int32_t)(size % MEGABYTE);
+ if ((ret = __os_seek(dbenv, fhp,
+ MEGABYTE, pages, relative, 1, DB_OS_SEEK_END)) != 0)
+ return (ret);
+
+ /* Write a byte to each page. */
+ for (i = 0; i < size; i += OS_VMPAGESIZE) {
+ if ((ret = __os_write(dbenv, fhp, buf, 1, &nw)) != 0)
+ return (ret);
+ if ((ret = __os_seek(dbenv, fhp,
+ 0, 0, OS_VMPAGESIZE - 1, 0, DB_OS_SEEK_CUR)) != 0)
+ return (ret);
+ }
+ }
+ return (0);
+}
+
+/*
+ * __db_overwrite --
+ * Overwrite a file.
+ *
+ * PUBLIC: int __db_overwrite __P((DB_ENV *, const char *));
+ */
+int
+__db_overwrite(dbenv, path)
+ DB_ENV *dbenv;
+ const char *path;
+{
+ DB_FH fh, *fhp;
+ u_int32_t mbytes, bytes;
+ int ret;
+
+ fhp = &fh;
+ if ((ret = __os_open(dbenv, path, DB_OSO_REGION, 0, fhp)) == 0 &&
+ (ret = __os_ioinfo(dbenv, path, fhp, &mbytes, &bytes, NULL)) == 0) {
+ /*
+ * !!!
+ * Overwrite a regular file with alternating 0xff, 0x00 and 0xff
+ * byte patterns. Implies a fixed-block filesystem, journaling
+ * or logging filesystems will require operating system support.
+ */
+ if ((ret = __db_overwrite_pass(
+ dbenv, path, fhp, mbytes, bytes, 0xff)) != 0)
+ goto err;
+ if ((ret = __db_overwrite_pass(
+ dbenv, path, fhp, mbytes, bytes, 0x00)) != 0)
+ goto err;
+ if ((ret = __db_overwrite_pass(
+ dbenv, path, fhp, mbytes, bytes, 0xff)) != 0)
+ goto err;
+ } else
+ __db_err(dbenv, "%s: %s", path, db_strerror(ret));
+
+err: if (F_ISSET(fhp, DB_FH_VALID))
+ __os_closehandle(dbenv, fhp);
+ return (ret);
+}
+
+/*
+ * __db_overwrite_pass --
+ * A single pass over the file, writing the specified byte pattern.
+ */
+static int
+__db_overwrite_pass(dbenv, path, fhp, mbytes, bytes, pattern)
+ DB_ENV *dbenv;
+ const char *path;
+ DB_FH *fhp;
+ u_int32_t mbytes, bytes, pattern;
+{
+ size_t len, nw;
+ int i, ret;
+ char buf[8 * 1024];
+
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ memset(buf, pattern, sizeof(buf));
+
+ for (; mbytes > 0; --mbytes)
+ for (i = MEGABYTE / sizeof(buf); i > 0; --i)
+ if ((ret =
+ __os_write(dbenv, fhp, buf, sizeof(buf), &nw)) != 0)
+ goto err;
+ for (; bytes > 0; bytes -= (u_int32_t)len) {
+ len = bytes < sizeof(buf) ? bytes : sizeof(buf);
+ if ((ret = __os_write(dbenv, fhp, buf, len, &nw)) != 0)
+ goto err;
+ }
+
+ if ((ret = __os_fsync(dbenv, fhp)) != 0)
+err: __db_err(dbenv, "%s: %s", path, db_strerror(ret));
+
+ return (ret);
+}
diff --git a/storage/bdb/env/env_method.c b/storage/bdb/env/env_method.c
new file mode 100644
index 00000000000..b51237ec44a
--- /dev/null
+++ b/storage/bdb/env/env_method.c
@@ -0,0 +1,643 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_method.c,v 11.87 2002/08/29 14:22:21 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+/*
+ * This is the file that initializes the global array. Do it this way because
+ * people keep changing one without changing the other. Having declaration and
+ * initialization in one file will hopefully fix that.
+ */
+#define DB_INITIALIZE_DB_GLOBALS 1
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static void __dbenv_err __P((const DB_ENV *, int, const char *, ...));
+static void __dbenv_errx __P((const DB_ENV *, const char *, ...));
+static int __dbenv_init __P((DB_ENV *));
+static int __dbenv_set_alloc __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+static int __dbenv_set_app_dispatch __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+static int __dbenv_set_data_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_encrypt __P((DB_ENV *, const char *, u_int32_t));
+static void __dbenv_set_errcall __P((DB_ENV *, void (*)(const char *, char *)));
+static void __dbenv_set_errfile __P((DB_ENV *, FILE *));
+static void __dbenv_set_errpfx __P((DB_ENV *, const char *));
+static int __dbenv_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+static int __dbenv_set_flags __P((DB_ENV *, u_int32_t, int));
+static void __dbenv_set_noticecall __P((DB_ENV *, void (*)(DB_ENV *, db_notices)));
+static int __dbenv_set_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+static int __dbenv_set_rpc_server_noclnt
+ __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
+static int __dbenv_set_shm_key __P((DB_ENV *, long));
+static int __dbenv_set_tas_spins __P((DB_ENV *, u_int32_t));
+static int __dbenv_set_tmp_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_verbose __P((DB_ENV *, u_int32_t, int));
+
+/*
+ * db_env_create --
+ * DB_ENV constructor.
+ *
+ * EXTERN: int db_env_create __P((DB_ENV **, u_int32_t));
+ */
+int
+db_env_create(dbenvpp, flags)
+ DB_ENV **dbenvpp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ *
+ * !!!
+ * We can't call the flags-checking routines, we don't have an
+ * environment yet.
+ */
+ if (flags != 0 && flags != DB_CLIENT)
+ return (EINVAL);
+
+ if ((ret = __os_calloc(NULL, 1, sizeof(*dbenv), &dbenv)) != 0)
+ return (ret);
+
+#ifdef HAVE_RPC
+ if (LF_ISSET(DB_CLIENT))
+ F_SET(dbenv, DB_ENV_RPCCLIENT);
+#endif
+ ret = __dbenv_init(dbenv);
+
+ if (ret != 0) {
+ __os_free(NULL, dbenv);
+ return (ret);
+ }
+
+ *dbenvpp = dbenv;
+ return (0);
+}
+
+/*
+ * __dbenv_init --
+ * Initialize a DB_ENV structure.
+ */
+static int
+__dbenv_init(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ *
+ * Set up methods that are the same in both normal and RPC
+ */
+ dbenv->err = __dbenv_err;
+ dbenv->errx = __dbenv_errx;
+ dbenv->set_errcall = __dbenv_set_errcall;
+ dbenv->set_errfile = __dbenv_set_errfile;
+ dbenv->set_errpfx = __dbenv_set_errpfx;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->close = __dbcl_env_close;
+ dbenv->dbremove = __dbcl_env_dbremove;
+ dbenv->dbrename = __dbcl_env_dbrename;
+ dbenv->open = __dbcl_env_open_wrap;
+ dbenv->remove = __dbcl_env_remove;
+ dbenv->set_alloc = __dbcl_env_alloc;
+ dbenv->set_app_dispatch = __dbcl_set_app_dispatch;
+ dbenv->set_data_dir = __dbcl_set_data_dir;
+ dbenv->set_encrypt = __dbcl_env_encrypt;
+ dbenv->set_feedback = __dbcl_env_set_feedback;
+ dbenv->set_flags = __dbcl_env_flags;
+ dbenv->set_noticecall = __dbcl_env_noticecall;
+ dbenv->set_paniccall = __dbcl_env_paniccall;
+ dbenv->set_rpc_server = __dbcl_envrpcserver;
+ dbenv->set_shm_key = __dbcl_set_shm_key;
+ dbenv->set_tas_spins = __dbcl_set_tas_spins;
+ dbenv->set_timeout = __dbcl_set_timeout;
+ dbenv->set_tmp_dir = __dbcl_set_tmp_dir;
+ dbenv->set_verbose = __dbcl_set_verbose;
+ } else {
+#endif
+ dbenv->close = __dbenv_close;
+ dbenv->dbremove = __dbenv_dbremove;
+ dbenv->dbrename = __dbenv_dbrename;
+ dbenv->open = __dbenv_open;
+ dbenv->remove = __dbenv_remove;
+ dbenv->set_alloc = __dbenv_set_alloc;
+ dbenv->set_app_dispatch = __dbenv_set_app_dispatch;
+ dbenv->set_data_dir = __dbenv_set_data_dir;
+ dbenv->set_encrypt = __dbenv_set_encrypt;
+ dbenv->set_feedback = __dbenv_set_feedback;
+ dbenv->set_flags = __dbenv_set_flags;
+ dbenv->set_noticecall = __dbenv_set_noticecall;
+ dbenv->set_paniccall = __dbenv_set_paniccall;
+ dbenv->set_rpc_server = __dbenv_set_rpc_server_noclnt;
+ dbenv->set_shm_key = __dbenv_set_shm_key;
+ dbenv->set_tas_spins = __dbenv_set_tas_spins;
+ dbenv->set_tmp_dir = __dbenv_set_tmp_dir;
+ dbenv->set_verbose = __dbenv_set_verbose;
+#ifdef HAVE_RPC
+ }
+#endif
+ dbenv->shm_key = INVALID_REGION_SEGID;
+ dbenv->db_ref = 0;
+
+ __log_dbenv_create(dbenv); /* Subsystem specific. */
+ __lock_dbenv_create(dbenv);
+ __memp_dbenv_create(dbenv);
+ __rep_dbenv_create(dbenv);
+ __txn_dbenv_create(dbenv);
+
+ return (0);
+}
+
+/*
+ * __dbenv_err --
+ * Error message, including the standard error string.
+ */
+static void
+#ifdef __STDC__
+__dbenv_err(const DB_ENV *dbenv, int error, const char *fmt, ...)
+#else
+__dbenv_err(dbenv, error, fmt, va_alist)
+ const DB_ENV *dbenv;
+ int error;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, error, 1, 1, fmt);
+}
+
+/*
+ * __dbenv_errx --
+ * Error message.
+ */
+static void
+#ifdef __STDC__
+__dbenv_errx(const DB_ENV *dbenv, const char *fmt, ...)
+#else
+__dbenv_errx(dbenv, fmt, va_alist)
+ const DB_ENV *dbenv;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, 0, 0, 1, fmt);
+}
+
+static int
+__dbenv_set_alloc(dbenv, mal_func, real_func, free_func)
+ DB_ENV *dbenv;
+ void *(*mal_func) __P((size_t));
+ void *(*real_func) __P((void *, size_t));
+ void (*free_func) __P((void *));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_alloc");
+
+ dbenv->db_malloc = mal_func;
+ dbenv->db_realloc = real_func;
+ dbenv->db_free = free_func;
+ return (0);
+}
+
+/*
+ * __dbenv_set_app_dispatch --
+ * Set the transaction abort recover function.
+ */
+static int
+__dbenv_set_app_dispatch(dbenv, app_dispatch)
+ DB_ENV *dbenv;
+ int (*app_dispatch) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_app_dispatch");
+
+ dbenv->app_dispatch = app_dispatch;
+ return (0);
+}
+
+static int
+__dbenv_set_encrypt(dbenv, passwd, flags)
+ DB_ENV *dbenv;
+ const char *passwd;
+ u_int32_t flags;
+{
+#ifdef HAVE_CRYPTO
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_encrypt");
+#define OK_CRYPTO_FLAGS (DB_ENCRYPT_AES)
+
+ if (flags != 0 && LF_ISSET(~OK_CRYPTO_FLAGS))
+ return (__db_ferr(dbenv, "DB_ENV->set_encrypt", 0));
+
+ if (passwd == NULL || strlen(passwd) == 0) {
+ __db_err(dbenv, "Empty password specified to set_encrypt");
+ return (EINVAL);
+ }
+ if (!CRYPTO_ON(dbenv)) {
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_CIPHER), &db_cipher))
+ != 0)
+ goto err;
+ dbenv->crypto_handle = db_cipher;
+ } else
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+
+ if (dbenv->passwd != NULL)
+ __os_free(dbenv, dbenv->passwd);
+ if ((ret = __os_strdup(dbenv, passwd, &dbenv->passwd)) != 0) {
+ __os_free(dbenv, db_cipher);
+ goto err;
+ }
+ /*
+ * We're going to need this often enough to keep around
+ */
+ dbenv->passwd_len = strlen(dbenv->passwd) + 1;
+ /*
+ * The MAC key is for checksumming, and is separate from
+ * the algorithm. So initialize it here, even if they
+ * are using CIPHER_ANY.
+ */
+ __db_derive_mac((u_int8_t *)dbenv->passwd,
+ dbenv->passwd_len, db_cipher->mac_key);
+ switch (flags) {
+ case 0:
+ F_SET(db_cipher, CIPHER_ANY);
+ break;
+ case DB_ENCRYPT_AES:
+ if ((ret = __crypto_algsetup(dbenv, db_cipher, CIPHER_AES, 0))
+ != 0)
+ goto err1;
+ break;
+ }
+ return (0);
+
+err1:
+ __os_free(dbenv, dbenv->passwd);
+ __os_free(dbenv, db_cipher);
+ dbenv->crypto_handle = NULL;
+err:
+ return (ret);
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(passwd, NULL);
+ COMPQUIET(flags, 0);
+
+ return (__db_eopnotsup(dbenv));
+#endif
+}
+
+static int
+__dbenv_set_flags(dbenv, flags, onoff)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int onoff;
+{
+#define OK_FLAGS \
+ (DB_AUTO_COMMIT | DB_CDB_ALLDB | DB_DIRECT_DB | DB_DIRECT_LOG | \
+ DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC | DB_OVERWRITE | \
+ DB_PANIC_ENVIRONMENT | DB_REGION_INIT | DB_TXN_NOSYNC | \
+ DB_TXN_WRITE_NOSYNC | DB_YIELDCPU)
+
+ if (LF_ISSET(~OK_FLAGS))
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 0));
+ if (onoff && LF_ISSET(DB_TXN_WRITE_NOSYNC) && LF_ISSET(DB_TXN_NOSYNC))
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 1));
+
+ if (LF_ISSET(DB_AUTO_COMMIT)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_AUTO_COMMIT);
+ else
+ F_CLR(dbenv, DB_ENV_AUTO_COMMIT);
+ }
+ if (LF_ISSET(DB_CDB_ALLDB)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_CDB_ALLDB");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_CDB_ALLDB);
+ else
+ F_CLR(dbenv, DB_ENV_CDB_ALLDB);
+ }
+ if (LF_ISSET(DB_DIRECT_DB)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_DB);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_DB);
+ }
+ if (LF_ISSET(DB_DIRECT_LOG)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_LOG);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_LOG);
+ }
+ if (LF_ISSET(DB_NOLOCKING)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOLOCKING);
+ else
+ F_CLR(dbenv, DB_ENV_NOLOCKING);
+ }
+ if (LF_ISSET(DB_NOMMAP)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOMMAP);
+ else
+ F_CLR(dbenv, DB_ENV_NOMMAP);
+ }
+ if (LF_ISSET(DB_NOPANIC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOPANIC);
+ else
+ F_CLR(dbenv, DB_ENV_NOPANIC);
+ }
+ if (LF_ISSET(DB_OVERWRITE)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_OVERWRITE);
+ else
+ F_CLR(dbenv, DB_ENV_OVERWRITE);
+ }
+ if (LF_ISSET(DB_PANIC_ENVIRONMENT)) {
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv,
+ "set_flags: DB_PANIC_ENVIRONMENT");
+ PANIC_SET(dbenv, onoff);
+ }
+ if (LF_ISSET(DB_REGION_INIT)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_REGION_INIT");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_REGION_INIT);
+ else
+ F_CLR(dbenv, DB_ENV_REGION_INIT);
+ }
+ if (LF_ISSET(DB_TXN_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_NOSYNC);
+ }
+ if (LF_ISSET(DB_TXN_WRITE_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ }
+ if (LF_ISSET(DB_YIELDCPU)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_YIELDCPU);
+ else
+ F_CLR(dbenv, DB_ENV_YIELDCPU);
+ }
+ return (0);
+}
+
+static int
+__dbenv_set_data_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ int ret;
+
+#define DATA_INIT_CNT 20 /* Start with 20 data slots. */
+ if (dbenv->db_data_dir == NULL) {
+ if ((ret = __os_calloc(dbenv, DATA_INIT_CNT,
+ sizeof(char **), &dbenv->db_data_dir)) != 0)
+ return (ret);
+ dbenv->data_cnt = DATA_INIT_CNT;
+ } else if (dbenv->data_next == dbenv->data_cnt - 1) {
+ dbenv->data_cnt *= 2;
+ if ((ret = __os_realloc(dbenv,
+ dbenv->data_cnt * sizeof(char **),
+ &dbenv->db_data_dir)) != 0)
+ return (ret);
+ }
+ return (__os_strdup(dbenv,
+ dir, &dbenv->db_data_dir[dbenv->data_next++]));
+}
+
+static void
+__dbenv_set_errcall(dbenv, errcall)
+ DB_ENV *dbenv;
+ void (*errcall) __P((const char *, char *));
+{
+ dbenv->db_errcall = errcall;
+}
+
+static void
+__dbenv_set_errfile(dbenv, errfile)
+ DB_ENV *dbenv;
+ FILE *errfile;
+{
+ dbenv->db_errfile = errfile;
+}
+
+static void
+__dbenv_set_errpfx(dbenv, errpfx)
+ DB_ENV *dbenv;
+ const char *errpfx;
+{
+ dbenv->db_errpfx = errpfx;
+}
+
+static int
+__dbenv_set_feedback(dbenv, feedback)
+ DB_ENV *dbenv;
+ void (*feedback) __P((DB_ENV *, int, int));
+{
+ dbenv->db_feedback = feedback;
+ return (0);
+}
+
+static void
+__dbenv_set_noticecall(dbenv, noticecall)
+ DB_ENV *dbenv;
+ void (*noticecall) __P((DB_ENV *, db_notices));
+{
+ dbenv->db_noticecall = noticecall;
+}
+
+static int
+__dbenv_set_paniccall(dbenv, paniccall)
+ DB_ENV *dbenv;
+ void (*paniccall) __P((DB_ENV *, int));
+{
+ dbenv->db_paniccall = paniccall;
+ return (0);
+}
+
+static int
+__dbenv_set_shm_key(dbenv, shm_key)
+ DB_ENV *dbenv;
+ long shm_key; /* !!!: really a key_t. */
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_shm_key");
+
+ dbenv->shm_key = shm_key;
+ return (0);
+}
+
+static int
+__dbenv_set_tas_spins(dbenv, tas_spins)
+ DB_ENV *dbenv;
+ u_int32_t tas_spins;
+{
+ dbenv->tas_spins = tas_spins;
+ return (0);
+}
+
+static int
+__dbenv_set_tmp_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ if (dbenv->db_tmp_dir != NULL)
+ __os_free(dbenv, dbenv->db_tmp_dir);
+ return (__os_strdup(dbenv, dir, &dbenv->db_tmp_dir));
+}
+
+static int
+__dbenv_set_verbose(dbenv, which, onoff)
+ DB_ENV *dbenv;
+ u_int32_t which;
+ int onoff;
+{
+ switch (which) {
+ case DB_VERB_CHKPOINT:
+ case DB_VERB_DEADLOCK:
+ case DB_VERB_RECOVERY:
+ case DB_VERB_REPLICATION:
+ case DB_VERB_WAITSFOR:
+ if (onoff)
+ FLD_SET(dbenv->verbose, which);
+ else
+ FLD_CLR(dbenv->verbose, which);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/*
+ * __db_mi_env --
+ * Method illegally called with public environment.
+ *
+ * PUBLIC: int __db_mi_env __P((DB_ENV *, const char *));
+ */
+int
+__db_mi_env(dbenv, name)
+ DB_ENV *dbenv;
+ const char *name;
+{
+ __db_err(dbenv, "%s: method not permitted in shared environment", name);
+ return (EINVAL);
+}
+
+/*
+ * __db_mi_open --
+ * Method illegally called after open.
+ *
+ * PUBLIC: int __db_mi_open __P((DB_ENV *, const char *, int));
+ */
+int
+__db_mi_open(dbenv, name, after)
+ DB_ENV *dbenv;
+ const char *name;
+ int after;
+{
+ __db_err(dbenv, "%s: method not permitted %s open",
+ name, after ? "after" : "before");
+ return (EINVAL);
+}
+
+/*
+ * __db_env_config --
+ * Method or function called without required configuration.
+ *
+ * PUBLIC: int __db_env_config __P((DB_ENV *, char *, u_int32_t));
+ */
+int
+__db_env_config(dbenv, i, flags)
+ DB_ENV *dbenv;
+ char *i;
+ u_int32_t flags;
+{
+ char *sub;
+
+ switch (flags) {
+ case DB_INIT_LOCK:
+ sub = "locking";
+ break;
+ case DB_INIT_LOG:
+ sub = "logging";
+ break;
+ case DB_INIT_MPOOL:
+ sub = "memory pool";
+ break;
+ case DB_INIT_TXN:
+ sub = "transaction";
+ break;
+ default:
+ sub = "<unspecified>";
+ break;
+ }
+ __db_err(dbenv,
+ "%s interface requires an environment configured for the %s subsystem",
+ i, sub);
+ return (EINVAL);
+}
+
+static int
+__dbenv_set_rpc_server_noclnt(dbenv, cl, host, tsec, ssec, flags)
+ DB_ENV *dbenv;
+ void *cl;
+ const char *host;
+ long tsec, ssec;
+ u_int32_t flags;
+{
+ COMPQUIET(host, NULL);
+ COMPQUIET(cl, NULL);
+ COMPQUIET(tsec, 0);
+ COMPQUIET(ssec, 0);
+ COMPQUIET(flags, 0);
+
+ __db_err(dbenv,
+ "set_rpc_server method not permitted in non-RPC environment");
+ return (__db_eopnotsup(dbenv));
+}
diff --git a/storage/bdb/env/env_method.c.b b/storage/bdb/env/env_method.c.b
new file mode 100644
index 00000000000..b6802b8a77c
--- /dev/null
+++ b/storage/bdb/env/env_method.c.b
@@ -0,0 +1,643 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_method.c,v 11.87 2002/08/29 14:22:21 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+/*
+ * This is the file that initializes the global array. Do it this way because
+ * people keep changing one without changing the other. Having declaration and
+ * initialization in one file will hopefully fix that.
+ */
+#define DB_INITIALIZE_DB_GLOBALS 1
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static void __dbenv_err __P((const DB_ENV *, int, const char *, ...));
+static void __dbenv_errx __P((const DB_ENV *, const char *, ...));
+static int __dbenv_init __P((DB_ENV *));
+static int __dbenv_set_alloc __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+static int __dbenv_set_app_dispatch __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+static int __dbenv_set_data_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_encrypt __P((DB_ENV *, const char *, u_int32_t));
+static void __dbenv_set_errcall __P((DB_ENV *, void (*)(const char *, char *)));
+static void __dbenv_set_errfile __P((DB_ENV *, FILE *));
+static void __dbenv_set_errpfx __P((DB_ENV *, const char *));
+static int __dbenv_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+static int __dbenv_set_flags __P((DB_ENV *, u_int32_t, int));
+static void __dbenv_set_noticecall __P((DB_ENV *, void (*)(DB_ENV *, db_notices)));
+static int __dbenv_set_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+static int __dbenv_set_rpc_server_noclnt
+ __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
+static int __dbenv_set_shm_key __P((DB_ENV *, long));
+static int __dbenv_set_tas_spins __P((DB_ENV *, u_int32_t));
+static int __dbenv_set_tmp_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_verbose __P((DB_ENV *, u_int32_t, int));
+
+/*
+ * db_env_create --
+ * DB_ENV constructor.
+ *
+ * EXTERN: int db_env_create __P((DB_ENV **, u_int32_t));
+ */
+int
+db_env_create(dbenvpp, flags)
+ DB_ENV **dbenvpp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ *
+ * !!!
+ * We can't call the flags-checking routines, we don't have an
+ * environment yet.
+ */
+ if (flags != 0 && flags != DB_CLIENT)
+ return (EINVAL);
+
+ if ((ret = __os_calloc(NULL, 1, sizeof(*dbenv), &dbenv)) != 0)
+ return (ret);
+
+#ifdef HAVE_RPC
+ if (LF_ISSET(DB_CLIENT))
+ F_SET(dbenv, DB_ENV_RPCCLIENT);
+#endif
+ ret = __dbenv_init(dbenv);
+
+ if (ret != 0) {
+ __os_free(NULL, dbenv);
+ return (ret);
+ }
+
+ *dbenvpp = dbenv;
+ return (0);
+}
+
+/*
+ * __dbenv_init --
+ * Initialize a DB_ENV structure.
+ */
+static int
+__dbenv_init(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ *
+ * Set up methods that are the same in both normal and RPC
+ */
+ dbenv->err = __dbenv_err;
+ dbenv->errx = __dbenv_errx;
+ dbenv->set_errcall = __dbenv_set_errcall;
+ dbenv->set_errfile = __dbenv_set_errfile;
+ dbenv->set_errpfx = __dbenv_set_errpfx;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->close = __dbcl_env_close;
+ dbenv->dbremove = __dbcl_env_dbremove;
+ dbenv->dbrename = __dbcl_env_dbrename;
+ dbenv->open = __dbcl_env_open_wrap;
+ dbenv->remove = __dbcl_env_remove;
+ dbenv->set_alloc = __dbcl_env_alloc;
+ dbenv->set_app_dispatch = __dbcl_set_app_dispatch;
+ dbenv->set_data_dir = __dbcl_set_data_dir;
+ dbenv->set_encrypt = __dbcl_env_encrypt;
+ dbenv->set_feedback = __dbcl_env_set_feedback;
+ dbenv->set_flags = __dbcl_env_flags;
+ dbenv->set_noticecall = __dbcl_env_noticecall;
+ dbenv->set_paniccall = __dbcl_env_paniccall;
+ dbenv->set_rpc_server = __dbcl_envrpcserver;
+ dbenv->set_shm_key = __dbcl_set_shm_key;
+ dbenv->set_tas_spins = __dbcl_set_tas_spins;
+ dbenv->set_timeout = __dbcl_set_timeout;
+ dbenv->set_tmp_dir = __dbcl_set_tmp_dir;
+ dbenv->set_verbose = __dbcl_set_verbose;
+ } else {
+#endif
+ dbenv->close = __dbenv_close;
+ dbenv->dbremove = __dbenv_dbremove;
+ dbenv->dbrename = __dbenv_dbrename;
+ dbenv->open = __dbenv_open;
+ dbenv->remove = __dbenv_remove;
+ dbenv->set_alloc = __dbenv_set_alloc;
+ dbenv->set_app_dispatch = __dbenv_set_app_dispatch;
+ dbenv->set_data_dir = __dbenv_set_data_dir;
+ dbenv->set_encrypt = __dbenv_set_encrypt;
+ dbenv->set_feedback = __dbenv_set_feedback;
+ dbenv->set_flags = __dbenv_set_flags;
+ dbenv->set_noticecall = __dbcl_env_noticecall;
+ dbenv->set_paniccall = __dbenv_set_paniccall;
+ dbenv->set_rpc_server = __dbenv_set_rpc_server_noclnt;
+ dbenv->set_shm_key = __dbenv_set_shm_key;
+ dbenv->set_tas_spins = __dbenv_set_tas_spins;
+ dbenv->set_tmp_dir = __dbenv_set_tmp_dir;
+ dbenv->set_verbose = __dbenv_set_verbose;
+#ifdef HAVE_RPC
+ }
+#endif
+ dbenv->shm_key = INVALID_REGION_SEGID;
+ dbenv->db_ref = 0;
+
+ __log_dbenv_create(dbenv); /* Subsystem specific. */
+ __lock_dbenv_create(dbenv);
+ __memp_dbenv_create(dbenv);
+ __rep_dbenv_create(dbenv);
+ __txn_dbenv_create(dbenv);
+
+ return (0);
+}
+
+/*
+ * __dbenv_err --
+ * Error message, including the standard error string.
+ */
+static void
+#ifdef __STDC__
+__dbenv_err(const DB_ENV *dbenv, int error, const char *fmt, ...)
+#else
+__dbenv_err(dbenv, error, fmt, va_alist)
+ const DB_ENV *dbenv;
+ int error;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, error, 1, 1, fmt);
+}
+
+/*
+ * __dbenv_errx --
+ * Error message.
+ */
+static void
+#ifdef __STDC__
+__dbenv_errx(const DB_ENV *dbenv, const char *fmt, ...)
+#else
+__dbenv_errx(dbenv, fmt, va_alist)
+ const DB_ENV *dbenv;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, 0, 0, 1, fmt);
+}
+
+static int
+__dbenv_set_alloc(dbenv, mal_func, real_func, free_func)
+ DB_ENV *dbenv;
+ void *(*mal_func) __P((size_t));
+ void *(*real_func) __P((void *, size_t));
+ void (*free_func) __P((void *));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_alloc");
+
+ dbenv->db_malloc = mal_func;
+ dbenv->db_realloc = real_func;
+ dbenv->db_free = free_func;
+ return (0);
+}
+
+/*
+ * __dbenv_set_app_dispatch --
+ * Set the transaction abort recover function.
+ */
+static int
+__dbenv_set_app_dispatch(dbenv, app_dispatch)
+ DB_ENV *dbenv;
+ int (*app_dispatch) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_app_dispatch");
+
+ dbenv->app_dispatch = app_dispatch;
+ return (0);
+}
+
+/*
+ * __dbenv_set_encrypt --
+ * Configure the environment's encryption password and algorithm.
+ * "flags" is 0 (CIPHER_ANY: accept whatever the database used) or
+ * DB_ENCRYPT_AES.  Without crypto support compiled in, returns
+ * "operation not supported".  Not permitted after open.
+ */
+static int
+__dbenv_set_encrypt(dbenv, passwd, flags)
+ DB_ENV *dbenv;
+ const char *passwd;
+ u_int32_t flags;
+{
+#ifdef HAVE_CRYPTO
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_encrypt");
+#define OK_CRYPTO_FLAGS (DB_ENCRYPT_AES)
+
+ if (flags != 0 && LF_ISSET(~OK_CRYPTO_FLAGS))
+ return (__db_ferr(dbenv, "DB_ENV->set_encrypt", 0));
+
+ if (passwd == NULL || strlen(passwd) == 0) {
+ __db_err(dbenv, "Empty password specified to set_encrypt");
+ return (EINVAL);
+ }
+ if (!CRYPTO_ON(dbenv)) {
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_CIPHER), &db_cipher))
+ != 0)
+ goto err;
+ dbenv->crypto_handle = db_cipher;
+ } else
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+
+ /* Replacing an earlier password: release the old copy first. */
+ if (dbenv->passwd != NULL)
+ __os_free(dbenv, dbenv->passwd);
+ /*
+ * NOTE(review): on this failure path db_cipher is freed but
+ * dbenv->crypto_handle still points at it (and a pre-existing
+ * cipher from an earlier call would be freed too) -- looks like
+ * a dangling pointer; confirm against upstream BDB.
+ */
+ if ((ret = __os_strdup(dbenv, passwd, &dbenv->passwd)) != 0) {
+ __os_free(dbenv, db_cipher);
+ goto err;
+ }
+ /*
+ * We're going to need this often enough to keep around
+ */
+ dbenv->passwd_len = strlen(dbenv->passwd) + 1;
+ /*
+ * The MAC key is for checksumming, and is separate from
+ * the algorithm. So initialize it here, even if they
+ * are using CIPHER_ANY.
+ */
+ __db_derive_mac((u_int8_t *)dbenv->passwd,
+ dbenv->passwd_len, db_cipher->mac_key);
+ switch (flags) {
+ case 0:
+ F_SET(db_cipher, CIPHER_ANY);
+ break;
+ case DB_ENCRYPT_AES:
+ if ((ret = __crypto_algsetup(dbenv, db_cipher, CIPHER_AES, 0))
+ != 0)
+ goto err1;
+ break;
+ }
+ return (0);
+
+err1:
+ __os_free(dbenv, dbenv->passwd);
+ __os_free(dbenv, db_cipher);
+ dbenv->crypto_handle = NULL;
+err:
+ return (ret);
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(passwd, NULL);
+ COMPQUIET(flags, 0);
+
+ return (__db_eopnotsup(dbenv));
+#endif
+}
+
+/*
+ * __dbenv_set_flags --
+ * Turn environment flags on (onoff != 0) or off (onoff == 0).
+ * Multiple flags may be or'd into "flags" and are handled
+ * independently below.  DB_CDB_ALLDB and DB_REGION_INIT may only
+ * be changed before open; DB_PANIC_ENVIRONMENT only after open.
+ * DB_TXN_NOSYNC and DB_TXN_WRITE_NOSYNC are mutually exclusive.
+ */
+static int
+__dbenv_set_flags(dbenv, flags, onoff)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int onoff;
+{
+#define OK_FLAGS \
+ (DB_AUTO_COMMIT | DB_CDB_ALLDB | DB_DIRECT_DB | DB_DIRECT_LOG | \
+ DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC | DB_OVERWRITE | \
+ DB_PANIC_ENVIRONMENT | DB_REGION_INIT | DB_TXN_NOSYNC | \
+ DB_TXN_WRITE_NOSYNC | DB_YIELDCPU)
+
+ if (LF_ISSET(~OK_FLAGS))
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 0));
+ if (onoff && LF_ISSET(DB_TXN_WRITE_NOSYNC) && LF_ISSET(DB_TXN_NOSYNC))
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 1));
+
+ if (LF_ISSET(DB_AUTO_COMMIT)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_AUTO_COMMIT);
+ else
+ F_CLR(dbenv, DB_ENV_AUTO_COMMIT);
+ }
+ if (LF_ISSET(DB_CDB_ALLDB)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_CDB_ALLDB");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_CDB_ALLDB);
+ else
+ F_CLR(dbenv, DB_ENV_CDB_ALLDB);
+ }
+ if (LF_ISSET(DB_DIRECT_DB)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_DB);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_DB);
+ }
+ if (LF_ISSET(DB_DIRECT_LOG)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_LOG);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_LOG);
+ }
+ if (LF_ISSET(DB_NOLOCKING)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOLOCKING);
+ else
+ F_CLR(dbenv, DB_ENV_NOLOCKING);
+ }
+ if (LF_ISSET(DB_NOMMAP)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOMMAP);
+ else
+ F_CLR(dbenv, DB_ENV_NOMMAP);
+ }
+ if (LF_ISSET(DB_NOPANIC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOPANIC);
+ else
+ F_CLR(dbenv, DB_ENV_NOPANIC);
+ }
+ if (LF_ISSET(DB_OVERWRITE)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_OVERWRITE);
+ else
+ F_CLR(dbenv, DB_ENV_OVERWRITE);
+ }
+ if (LF_ISSET(DB_PANIC_ENVIRONMENT)) {
+ /* Sets/clears the panic state itself, not a flag bit. */
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv,
+ "set_flags: DB_PANIC_ENVIRONMENT");
+ PANIC_SET(dbenv, onoff);
+ }
+ if (LF_ISSET(DB_REGION_INIT)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_REGION_INIT");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_REGION_INIT);
+ else
+ F_CLR(dbenv, DB_ENV_REGION_INIT);
+ }
+ if (LF_ISSET(DB_TXN_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_NOSYNC);
+ }
+ if (LF_ISSET(DB_TXN_WRITE_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ }
+ if (LF_ISSET(DB_YIELDCPU)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_YIELDCPU);
+ else
+ F_CLR(dbenv, DB_ENV_YIELDCPU);
+ }
+ return (0);
+}
+
+/*
+ * __dbenv_set_data_dir --
+ * Append "dir" to the NULL-terminated array of data directories,
+ * growing the array (doubling) when it fills.  The string is
+ * copied; callers keep ownership of "dir".
+ */
+static int
+__dbenv_set_data_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ int ret;
+
+#define DATA_INIT_CNT 20 /* Start with 20 data slots. */
+ if (dbenv->db_data_dir == NULL) {
+ /*
+ * NOTE(review): sizeof(char **) is used where sizeof(char *)
+ * is meant; harmless in practice (same size) but worth
+ * confirming against upstream.
+ */
+ if ((ret = __os_calloc(dbenv, DATA_INIT_CNT,
+ sizeof(char **), &dbenv->db_data_dir)) != 0)
+ return (ret);
+ dbenv->data_cnt = DATA_INIT_CNT;
+ } else if (dbenv->data_next == dbenv->data_cnt - 1) {
+ /* Keep one slot free for the terminating NULL. */
+ dbenv->data_cnt *= 2;
+ if ((ret = __os_realloc(dbenv,
+ dbenv->data_cnt * sizeof(char **),
+ &dbenv->db_data_dir)) != 0)
+ return (ret);
+ }
+ return (__os_strdup(dbenv,
+ dir, &dbenv->db_data_dir[dbenv->data_next++]));
+}
+
+/* Install the application's error-reporting callback. */
+static void
+__dbenv_set_errcall(dbenv, errcall)
+ DB_ENV *dbenv;
+ void (*errcall) __P((const char *, char *));
+{
+ dbenv->db_errcall = errcall;
+}
+
+/* Direct error messages to the given stdio stream. */
+static void
+__dbenv_set_errfile(dbenv, errfile)
+ DB_ENV *dbenv;
+ FILE *errfile;
+{
+ dbenv->db_errfile = errfile;
+}
+
+/*
+ * Set the prefix prepended to error messages.  The pointer is stored,
+ * not copied: the caller's string must outlive the handle's use of it.
+ */
+static void
+__dbenv_set_errpfx(dbenv, errpfx)
+ DB_ENV *dbenv;
+ const char *errpfx;
+{
+ dbenv->db_errpfx = errpfx;
+}
+
+/* Install the progress-feedback callback. */
+static int
+__dbenv_set_feedback(dbenv, feedback)
+ DB_ENV *dbenv;
+ void (*feedback) __P((DB_ENV *, int, int));
+{
+ dbenv->db_feedback = feedback;
+ return (0);
+}
+
+/* Install the notification callback. */
+static void
+__dbenv_set_noticecall(dbenv, noticecall)
+ DB_ENV *dbenv;
+ void (*noticecall) __P((DB_ENV *, db_notices));
+{
+ dbenv->db_noticecall = noticecall;
+}
+
+/* Install the callback invoked when the environment panics. */
+static int
+__dbenv_set_paniccall(dbenv, paniccall)
+ DB_ENV *dbenv;
+ void (*paniccall) __P((DB_ENV *, int));
+{
+ dbenv->db_paniccall = paniccall;
+ return (0);
+}
+
+/*
+ * Set the System V shared-memory segment base key.
+ * Not permitted once the environment has been opened.
+ */
+static int
+__dbenv_set_shm_key(dbenv, shm_key)
+ DB_ENV *dbenv;
+ long shm_key; /* !!!: really a key_t. */
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_shm_key");
+
+ dbenv->shm_key = shm_key;
+ return (0);
+}
+
+/* Set the test-and-set mutex spin count. */
+static int
+__dbenv_set_tas_spins(dbenv, tas_spins)
+ DB_ENV *dbenv;
+ u_int32_t tas_spins;
+{
+ dbenv->tas_spins = tas_spins;
+ return (0);
+}
+
+/* Set the temporary-file directory, replacing any earlier setting. */
+static int
+__dbenv_set_tmp_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ if (dbenv->db_tmp_dir != NULL)
+ __os_free(dbenv, dbenv->db_tmp_dir);
+ return (__os_strdup(dbenv, dir, &dbenv->db_tmp_dir));
+}
+
+/*
+ * Enable/disable one verbose-message category; unknown categories
+ * are rejected with EINVAL.
+ */
+static int
+__dbenv_set_verbose(dbenv, which, onoff)
+ DB_ENV *dbenv;
+ u_int32_t which;
+ int onoff;
+{
+ switch (which) {
+ case DB_VERB_CHKPOINT:
+ case DB_VERB_DEADLOCK:
+ case DB_VERB_RECOVERY:
+ case DB_VERB_REPLICATION:
+ case DB_VERB_WAITSFOR:
+ if (onoff)
+ FLD_SET(dbenv->verbose, which);
+ else
+ FLD_CLR(dbenv->verbose, which);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/*
+ * __db_mi_env --
+ * Method illegally called with public environment.
+ *
+ * PUBLIC: int __db_mi_env __P((DB_ENV *, const char *));
+ */
+int
+__db_mi_env(dbenv, name)
+ DB_ENV *dbenv;
+ const char *name;
+{
+ __db_err(dbenv, "%s: method not permitted in shared environment", name);
+ return (EINVAL);
+}
+
+/*
+ * __db_mi_open --
+ * Method illegally called after open.
+ *
+ * PUBLIC: int __db_mi_open __P((DB_ENV *, const char *, int));
+ */
+int
+__db_mi_open(dbenv, name, after)
+ DB_ENV *dbenv;
+ const char *name;
+ int after;
+{
+ __db_err(dbenv, "%s: method not permitted %s open",
+ name, after ? "after" : "before");
+ return (EINVAL);
+}
+
+/*
+ * __db_env_config --
+ * Method or function called without required configuration.
+ *
+ * PUBLIC: int __db_env_config __P((DB_ENV *, char *, u_int32_t));
+ */
+int
+__db_env_config(dbenv, i, flags)
+ DB_ENV *dbenv;
+ char *i;
+ u_int32_t flags;
+{
+ char *sub;
+
+ switch (flags) {
+ case DB_INIT_LOCK:
+ sub = "locking";
+ break;
+ case DB_INIT_LOG:
+ sub = "logging";
+ break;
+ case DB_INIT_MPOOL:
+ sub = "memory pool";
+ break;
+ case DB_INIT_TXN:
+ sub = "transaction";
+ break;
+ default:
+ sub = "<unspecified>";
+ break;
+ }
+ __db_err(dbenv,
+ "%s interface requires an environment configured for the %s subsystem",
+ i, sub);
+ return (EINVAL);
+}
+
+static int
+__dbenv_set_rpc_server_noclnt(dbenv, cl, host, tsec, ssec, flags)
+ DB_ENV *dbenv;
+ void *cl;
+ const char *host;
+ long tsec, ssec;
+ u_int32_t flags;
+{
+ COMPQUIET(host, NULL);
+ COMPQUIET(cl, NULL);
+ COMPQUIET(tsec, 0);
+ COMPQUIET(ssec, 0);
+ COMPQUIET(flags, 0);
+
+ __db_err(dbenv,
+ "set_rpc_server method not permitted in non-RPC environment");
+ return (__db_eopnotsup(dbenv));
+}
diff --git a/storage/bdb/env/env_open.c b/storage/bdb/env/env_open.c
new file mode 100644
index 00000000000..ae8399f61cd
--- /dev/null
+++ b/storage/bdb/env/env_open.c
@@ -0,0 +1,1191 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_open.c,v 11.111 2002/09/03 01:20:51 mjc Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+#include "dbinc/fop.h"
+
+static int __db_parse __P((DB_ENV *, char *));
+static int __db_tmp_open __P((DB_ENV *, u_int32_t, char *, DB_FH *));
+static int __dbenv_config __P((DB_ENV *, const char *, u_int32_t));
+static int __dbenv_iremove __P((DB_ENV *, const char *, u_int32_t));
+static int __dbenv_refresh __P((DB_ENV *, u_int32_t));
+
+/*
+ * db_version --
+ * Return version information.
+ *
+ * EXTERN: char *db_version __P((int *, int *, int *));
+ */
+char *
+db_version(majverp, minverp, patchp)
+ int *majverp, *minverp, *patchp;
+{
+ if (majverp != NULL)
+ *majverp = DB_VERSION_MAJOR;
+ if (minverp != NULL)
+ *minverp = DB_VERSION_MINOR;
+ if (patchp != NULL)
+ *patchp = DB_VERSION_PATCH;
+ return ((char *)DB_VERSION_STRING);
+}
+
+/*
+ * __dbenv_open --
+ * Initialize an environment.
+ *
+ * PUBLIC: int __dbenv_open __P((DB_ENV *, const char *, u_int32_t, int));
+ */
+int
+__dbenv_open(dbenv, db_home, flags, mode)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+ int mode;
+{
+ DB_MPOOL *dbmp;
+ int ret;
+ u_int32_t init_flags, orig_flags;
+
+ orig_flags = dbenv->flags;
+
+#undef OKFLAGS
+#define OKFLAGS \
+ DB_CREATE | DB_INIT_CDB | DB_INIT_LOCK | DB_INIT_LOG | \
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_JOINENV | DB_LOCKDOWN | \
+ DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL | DB_SYSTEM_MEM | \
+ DB_THREAD | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
+#undef OKFLAGS_CDB
+#define OKFLAGS_CDB \
+ DB_CREATE | DB_INIT_CDB | DB_INIT_MPOOL | DB_LOCKDOWN | \
+ DB_PRIVATE | DB_SYSTEM_MEM | DB_THREAD | \
+ DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
+
+ /*
+ * Flags saved in the init_flags field of the environment, representing
+ * flags to DB_ENV->set_flags and DB_ENV->open that need to be set.
+ */
+#define DB_INITENV_CDB 0x0001 /* DB_INIT_CDB */
+#define DB_INITENV_CDB_ALLDB 0x0002 /* DB_INIT_CDB_ALLDB */
+#define DB_INITENV_LOCK 0x0004 /* DB_INIT_LOCK */
+#define DB_INITENV_LOG 0x0008 /* DB_INIT_LOG */
+#define DB_INITENV_MPOOL 0x0010 /* DB_INIT_MPOOL */
+#define DB_INITENV_TXN 0x0020 /* DB_INIT_TXN */
+
+ if ((ret = __db_fchk(dbenv, "DB_ENV->open", flags, OKFLAGS)) != 0)
+ return (ret);
+ if (LF_ISSET(DB_INIT_CDB) &&
+ (ret = __db_fchk(dbenv, "DB_ENV->open", flags, OKFLAGS_CDB)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->open", flags, DB_PRIVATE, DB_SYSTEM_MEM)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->open", flags, DB_RECOVER, DB_RECOVER_FATAL)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "DB_ENV->open", flags, DB_JOINENV,
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ DB_INIT_TXN | DB_PRIVATE)) != 0)
+ return (ret);
+
+ /*
+ * Currently we support one kind of mutex that is intra-process only,
+ * POSIX 1003.1 pthreads, because a variety of systems don't support
+ * the full pthreads API, and our only alternative is test-and-set.
+ */
+#ifdef HAVE_MUTEX_THREAD_ONLY
+ if (!LF_ISSET(DB_PRIVATE)) {
+ __db_err(dbenv,
+ "Berkeley DB library configured to support only DB_PRIVATE environments");
+ return (EINVAL);
+ }
+#endif
+
+ /*
+ * If we're doing recovery, destroy the environment so that we create
+ * all the regions from scratch. I'd like to reuse already created
+ * regions, but that's hard. We would have to create the environment
+ * region from scratch, at least, as we have no way of knowing if its
+ * linked lists are corrupted.
+ *
+ * I suppose we could set flags while modifying those links, but that
+ * is going to be difficult to get right. The major concern I have
+ * is if the application stomps the environment with a rogue pointer.
+ * We have no way of detecting that, and we could be forced into a
+ * situation where we start up and then crash, repeatedly.
+ *
+ * Note that we do not check any flags like DB_PRIVATE before calling
+ * remove. We don't care if the current environment was private or
+ * not, we just want to nail any files that are left-over for whatever
+ * reason, from whatever session.
+ */
+ if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))
+ if ((ret = __dbenv_iremove(dbenv, db_home, DB_FORCE)) != 0 ||
+ (ret = __dbenv_refresh(dbenv, orig_flags)) != 0)
+ return (ret);
+
+ /* Initialize the DB_ENV structure. */
+ if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
+ goto err;
+
+ /* Convert the DB_ENV->open flags to internal flags. */
+ if (LF_ISSET(DB_CREATE))
+ F_SET(dbenv, DB_ENV_CREATE);
+ if (LF_ISSET(DB_LOCKDOWN))
+ F_SET(dbenv, DB_ENV_LOCKDOWN);
+ if (LF_ISSET(DB_PRIVATE))
+ F_SET(dbenv, DB_ENV_PRIVATE);
+ if (LF_ISSET(DB_RECOVER_FATAL))
+ F_SET(dbenv, DB_ENV_FATAL);
+ if (LF_ISSET(DB_SYSTEM_MEM))
+ F_SET(dbenv, DB_ENV_SYSTEM_MEM);
+ if (LF_ISSET(DB_THREAD))
+ F_SET(dbenv, DB_ENV_THREAD);
+
+ /* Default permissions are read-write for both owner and group. */
+ dbenv->db_mode = mode == 0 ? __db_omode("rwrw--") : mode;
+
+ /*
+ * Create/join the environment. We pass in the flags that
+ * will be of interest to an environment joining later; if
+ * we're not the ones to do the create, we
+ * pull out whatever has been stored, if we don't do a create.
+ */
+ init_flags = 0;
+ init_flags |= (LF_ISSET(DB_INIT_CDB) ? DB_INITENV_CDB : 0);
+ init_flags |= (LF_ISSET(DB_INIT_LOCK) ? DB_INITENV_LOCK : 0);
+ init_flags |= (LF_ISSET(DB_INIT_LOG) ? DB_INITENV_LOG : 0);
+ init_flags |= (LF_ISSET(DB_INIT_MPOOL) ? DB_INITENV_MPOOL : 0);
+ init_flags |= (LF_ISSET(DB_INIT_TXN) ? DB_INITENV_TXN : 0);
+ init_flags |=
+ (F_ISSET(dbenv, DB_ENV_CDB_ALLDB) ? DB_INITENV_CDB_ALLDB : 0);
+
+ if ((ret = __db_e_attach(dbenv, &init_flags)) != 0)
+ goto err;
+
+ /*
+ * __db_e_attach will return the saved init_flags field, which
+ * contains the DB_INIT_* flags used when we were created.
+ */
+ if (LF_ISSET(DB_JOINENV)) {
+ LF_CLR(DB_JOINENV);
+
+ LF_SET((init_flags & DB_INITENV_CDB) ? DB_INIT_CDB : 0);
+ LF_SET((init_flags & DB_INITENV_LOCK) ? DB_INIT_LOCK : 0);
+ LF_SET((init_flags & DB_INITENV_LOG) ? DB_INIT_LOG : 0);
+ LF_SET((init_flags & DB_INITENV_MPOOL) ? DB_INIT_MPOOL : 0);
+ LF_SET((init_flags & DB_INITENV_TXN) ? DB_INIT_TXN : 0);
+
+ if (LF_ISSET(DB_INITENV_CDB_ALLDB) &&
+ (ret = dbenv->set_flags(dbenv, DB_CDB_ALLDB, 1)) != 0)
+ goto err;
+ }
+
+ /* Initialize for CDB product. */
+ if (LF_ISSET(DB_INIT_CDB)) {
+ LF_SET(DB_INIT_LOCK);
+ F_SET(dbenv, DB_ENV_CDB);
+ }
+
+ /*
+ * Initialize the subsystems. Transactions imply logging but do not
+ * imply locking. While almost all applications want both locking
+ * and logging, it would not be unreasonable for a single threaded
+ * process to want transactions for atomicity guarantees, but not
+ * necessarily need concurrency.
+ */
+
+ if (LF_ISSET(DB_INIT_MPOOL))
+ if ((ret = __memp_open(dbenv)) != 0)
+ goto err;
+
+#ifdef HAVE_CRYPTO
+ /*
+ * Initialize the ciphering area prior to any running of recovery so
+ * that we can initialize the keys, etc. before recovery.
+ *
+ * !!!
+ * This must be after the mpool init, but before the log initialization
+ * because log_open may attempt to run log_recover during its open.
+ */
+ if ((ret = __crypto_region_init(dbenv)) != 0)
+ goto err;
+#endif
+
+ if (LF_ISSET(DB_INIT_LOG | DB_INIT_TXN))
+ if ((ret = __log_open(dbenv)) != 0)
+ goto err;
+ if (LF_ISSET(DB_INIT_LOCK))
+ if ((ret = __lock_open(dbenv)) != 0)
+ goto err;
+ if (LF_ISSET(DB_INIT_TXN)) {
+ if ((ret = __txn_open(dbenv)) != 0)
+ goto err;
+
+ /*
+ * If the application is running with transactions, initialize
+ * the function tables.
+ */
+ if ((ret = __bam_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __crdel_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __db_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __dbreg_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __fop_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __ham_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __qam_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+ if ((ret = __txn_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
+ goto err;
+
+ /* Perform recovery for any previous run. */
+ if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) &&
+ (ret = __db_apprec(dbenv, NULL,
+ LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))) != 0)
+ goto err;
+ }
+
+ /* Initialize the replication area just in case. */
+ if ((ret = __rep_region_init(dbenv)) != 0)
+ goto err;
+
+ /*
+ * Initialize the DB list, and its mutex as necessary. If the env
+ * handle isn't free-threaded we don't need a mutex because there
+ * will never be more than a single DB handle on the list. If the
+ * mpool wasn't initialized, then we can't ever open a DB handle.
+ *
+ * We also need to initialize the MT mutex as necessary, so do them
+ * both. If we error, __dbenv_refresh() will clean up.
+ *
+ * !!!
+ * This must come after the __memp_open call above because if we are
+ * recording mutexes for system resources, we will do it in the mpool
+ * region for environments and db handles. So, the mpool region must
+ * already be initialized.
+ */
+ LIST_INIT(&dbenv->dblist);
+ if (F_ISSET(dbenv, DB_ENV_THREAD) && LF_ISSET(DB_INIT_MPOOL)) {
+ dbmp = dbenv->mp_handle;
+ if ((ret = __db_mutex_setup(
+ dbenv, dbmp->reginfo, &dbenv->dblist_mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+ if ((ret = __db_mutex_setup(
+ dbenv, dbmp->reginfo, &dbenv->mt_mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+ }
+
+ /*
+ * If we've created the regions, are running with transactions, and did
+ * not just run recovery, we need to log the fact that the transaction
+ * IDs got reset.
+ *
+ * If we ran recovery, there may be prepared-but-not-yet-committed
+ * transactions that need to be resolved. Recovery resets the minimum
+ * transaction ID and logs the reset if that's appropriate, so we
+ * don't need to do anything here in the recover case.
+ */
+ if (TXN_ON(dbenv) &&
+ F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE) &&
+ !LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) &&
+ (ret = __txn_reset(dbenv)) != 0)
+ goto err;
+
+ return (0);
+
+err: /* If we fail after creating the regions, remove them. */
+ if (dbenv->reginfo != NULL &&
+ F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE)) {
+ ret = __db_panic(dbenv, ret);
+
+ (void)__dbenv_refresh(dbenv, orig_flags);
+ (void)__dbenv_iremove(dbenv, db_home, DB_FORCE);
+ }
+ (void)__dbenv_refresh(dbenv, orig_flags);
+
+ return (ret);
+}
+
+/*
+ * __dbenv_remove --
+ * Discard an environment.
+ *
+ * PUBLIC: int __dbenv_remove __P((DB_ENV *, const char *, u_int32_t));
+ */
+int
+__dbenv_remove(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ int ret, t_ret;
+
+ ret = __dbenv_iremove(dbenv, db_home, flags);
+
+ if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __dbenv_iremove --
+ * Discard an environment, internal version.
+ */
+static int
+__dbenv_iremove(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ int ret;
+
+#undef OKFLAGS
+#define OKFLAGS \
+ DB_FORCE | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->remove", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->remove");
+
+ /* Initialize the DB_ENV structure. */
+ if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
+ return (ret);
+
+ /* Remove the environment. */
+ return (__db_e_remove(dbenv, flags));
+}
+
+/*
+ * __dbenv_config --
+ * Initialize the DB_ENV structure.
+ */
+static int
+__dbenv_config(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ FILE *fp;
+ int ret;
+ char *p, buf[256];
+
+ /*
+ * Set the database home. Do this before calling __db_appname,
+ * it uses the home directory.
+ */
+ if ((ret = __db_home(dbenv, db_home, flags)) != 0)
+ return (ret);
+
+ /* Parse the config file. */
+ if ((ret =
+ __db_appname(dbenv, DB_APP_NONE, "DB_CONFIG", 0, NULL, &p)) != 0)
+ return (ret);
+
+ fp = fopen(p, "r");
+ __os_free(dbenv, p);
+
+ if (fp != NULL) {
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if ((p = strchr(buf, '\n')) != NULL)
+ *p = '\0';
+ else if (strlen(buf) + 1 == sizeof(buf)) {
+ __db_err(dbenv, "DB_CONFIG: line too long");
+ (void)fclose(fp);
+ return (EINVAL);
+ }
+ if (buf[0] == '\0' ||
+ buf[0] == '#' || isspace((int)buf[0]))
+ continue;
+
+ if ((ret = __db_parse(dbenv, buf)) != 0) {
+ (void)fclose(fp);
+ return (ret);
+ }
+ }
+ (void)fclose(fp);
+ }
+
+ /*
+ * If no temporary directory path was specified in the config file,
+ * choose one.
+ */
+ if (dbenv->db_tmp_dir == NULL && (ret = __os_tmpdir(dbenv, flags)) != 0)
+ return (ret);
+
+ /*
+ * The locking file descriptor is rarely on. Set the fd to -1, not
+ * because it's ever tested, but to make sure we catch mistakes.
+ */
+ if ((ret = __os_calloc(
+ dbenv, 1, sizeof(*dbenv->lockfhp), &dbenv->lockfhp)) != 0)
+ return (ret);
+ dbenv->lockfhp->fd = -1;
+
+ /* Flag that the DB_ENV structure has been initialized. */
+ F_SET(dbenv, DB_ENV_OPEN_CALLED);
+
+ return (0);
+}
+
+/*
+ * __dbenv_close --
+ * DB_ENV destructor.
+ *
+ * PUBLIC: int __dbenv_close __P((DB_ENV *, u_int32_t));
+ */
+int
+__dbenv_close(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ char **p;
+ int ret, t_ret;
+
+ COMPQUIET(flags, 0);
+
+ PANIC_CHECK(dbenv);
+ ret = 0;
+
+ /*
+ * Before checking the reference count, we have to see if we
+ * were in the middle of restoring transactions and need to
+ * close the open files.
+ */
+ if (TXN_ON(dbenv) && (t_ret = __txn_preclose(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (dbenv->rep_handle != NULL &&
+ (t_ret = __rep_preclose(dbenv, 1)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (dbenv->db_ref != 0) {
+ __db_err(dbenv,
+ "Database handles open during environment close");
+ if (ret == 0)
+ ret = EINVAL;
+ }
+
+ /*
+ * Detach from the regions and undo the allocations done by
+ * DB_ENV->open.
+ */
+ if ((t_ret = __dbenv_refresh(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Do per-subsystem destruction. */
+ __lock_dbenv_close(dbenv); /* void */
+ if ((t_ret = __rep_dbenv_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+#ifdef HAVE_CRYPTO
+ if ((t_ret = __crypto_dbenv_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+#endif
+
+ /* Release any string-based configuration parameters we've copied. */
+ if (dbenv->db_log_dir != NULL)
+ __os_free(dbenv, dbenv->db_log_dir);
+ if (dbenv->db_tmp_dir != NULL)
+ __os_free(dbenv, dbenv->db_tmp_dir);
+ if (dbenv->db_data_dir != NULL) {
+ for (p = dbenv->db_data_dir; *p != NULL; ++p)
+ __os_free(dbenv, *p);
+ __os_free(dbenv, dbenv->db_data_dir);
+ }
+
+ /* Discard the structure. */
+ memset(dbenv, CLEAR_BYTE, sizeof(DB_ENV));
+ __os_free(NULL, dbenv);
+
+ return (ret);
+}
+
+/*
+ * __dbenv_refresh --
+ * Refresh the DB_ENV structure, releasing resources allocated by
+ * DB_ENV->open, and returning it to the state it was in just before
+ * open was called. (Note that this means that any state set by
+ * pre-open configuration functions must be preserved.)
+ *
+ * dbenv: the environment handle being torn down/reset.
+ * orig_flags: the dbenv->flags value saved before DB_ENV->open ran;
+ * restored at the end so pre-open configuration survives.
+ *
+ * Returns 0, or the FIRST subsystem error encountered; later errors
+ * are dropped but teardown always runs to completion.
+ */
+static int
+__dbenv_refresh(dbenv, orig_flags)
+ DB_ENV *dbenv;
+ u_int32_t orig_flags;
+{
+ DB_MPOOL *dbmp;
+ int ret, t_ret;
+
+ ret = 0;
+
+ /*
+ * Close subsystems, in the reverse order they were opened (txn
+ * must be first, it may want to discard locks and flush the log).
+ *
+ * !!!
+ * Note that these functions, like all of __dbenv_refresh, only undo
+ * the effects of __dbenv_open. Functions that undo work done by
+ * db_env_create or by a configurator function should go in
+ * __dbenv_close.
+ */
+ if (TXN_ON(dbenv) &&
+ (t_ret = __txn_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (LOGGING_ON(dbenv) &&
+ (t_ret = __log_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Locking should come after logging, because closing log results
+ * in files closing which may require locks being released.
+ */
+ if (LOCKING_ON(dbenv) &&
+ (t_ret = __lock_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Discard DB list and its mutex.
+ * Discard the MT mutex.
+ *
+ * !!!
+ * This must be done before we close the mpool region because we
+ * may have allocated the DB handle mutex in the mpool region.
+ * It must be done *after* we close the log region, though, because
+ * we close databases and try to acquire the mutex when we close
+ * log file handles. Ick.
+ */
+ LIST_INIT(&dbenv->dblist);
+ if (dbenv->dblist_mutexp != NULL) {
+ dbmp = dbenv->mp_handle;
+ __db_mutex_free(dbenv, dbmp->reginfo, dbenv->dblist_mutexp);
+ }
+ if (dbenv->mt_mutexp != NULL) {
+ dbmp = dbenv->mp_handle;
+ __db_mutex_free(dbenv, dbmp->reginfo, dbenv->mt_mutexp);
+ }
+ if (dbenv->mt != NULL) {
+ __os_free(dbenv, dbenv->mt);
+ dbenv->mt = NULL;
+ }
+
+ if (MPOOL_ON(dbenv)) {
+ /*
+ * If it's a private environment, flush the contents to disk.
+ * Recovery would have put everything back together, but it's
+ * faster and cleaner to flush instead.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE) &&
+ (t_ret = dbenv->memp_sync(dbenv, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __memp_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /* Detach from the region. */
+ if (dbenv->reginfo != NULL) {
+ if ((t_ret = __db_e_detach(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * !!!
+ * Don't free dbenv->reginfo or set the reference to NULL,
+ * that was done by __db_e_detach().
+ */
+ }
+
+ /* Undo changes and allocations done by __dbenv_open. */
+ if (dbenv->db_home != NULL) {
+ __os_free(dbenv, dbenv->db_home);
+ dbenv->db_home = NULL;
+ }
+
+ dbenv->db_mode = 0;
+
+ if (dbenv->lockfhp != NULL) {
+ __os_free(dbenv, dbenv->lockfhp);
+ dbenv->lockfhp = NULL;
+ }
+
+ if (dbenv->recover_dtab != NULL) {
+ __os_free(dbenv, dbenv->recover_dtab);
+ dbenv->recover_dtab = NULL;
+ dbenv->recover_dtab_size = 0;
+ }
+
+ dbenv->flags = orig_flags;
+
+ return (ret);
+}
+
+/*
+ * DB_ADDSTR --
+ * Append path component "add" (when non-NULL) to the path buffer the
+ * caller is building, inserting a path separator if the previous
+ * component did not already end in one. An absolute component
+ * restarts the buffer from the beginning. Relies on the caller's
+ * locals: p (write cursor), str (buffer start), slash, and len.
+ * The buffer is NOT NUL-terminated here; the caller does that.
+ */
+#define DB_ADDSTR(add) { \
+ if ((add) != NULL) { \
+ /* If leading slash, start over. */ \
+ if (__os_abspath(add)) { \
+ p = str; \
+ slash = 0; \
+ } \
+ /* Append to the current string. */ \
+ len = strlen(add); \
+ if (slash) \
+ *p++ = PATH_SEPARATOR[0]; \
+ memcpy(p, add, len); \
+ p += len; \
+ slash = strchr(PATH_SEPARATOR, p[-1]) == NULL; \
+ } \
+}
+
+/*
+ * __db_appname --
+ * Given an optional DB environment, directory and file name and type
+ * of call, build a path based on the DB_ENV->open rules, and return
+ * it in allocated space.
+ *
+ * PUBLIC: int __db_appname __P((DB_ENV *, APPNAME,
+ * PUBLIC: const char *, u_int32_t, DB_FH *, char **));
+ */
+int
+__db_appname(dbenv, appname, file, tmp_oflags, fhp, namep)
+ DB_ENV *dbenv;
+ APPNAME appname;
+ const char *file;
+ u_int32_t tmp_oflags;
+ DB_FH *fhp;
+ char **namep;
+{
+ size_t len, str_len;
+ int data_entry, ret, slash, tmp_create;
+ const char *a, *b;
+ char *p, *str;
+
+ a = b = NULL;
+ /*
+ * data_entry indexes dbenv->db_data_dir[] across "retry" passes for
+ * DB_APP_DATA; -1 means we are not iterating data directories.
+ */
+ data_entry = -1;
+ tmp_create = 0;
+
+ /*
+ * We don't return a name when creating temporary files, just a file
+ * handle. Default to an error now.
+ */
+ if (fhp != NULL)
+ F_CLR(fhp, DB_FH_VALID);
+ if (namep != NULL)
+ *namep = NULL;
+
+ /*
+ * Absolute path names are never modified. If the file is an absolute
+ * path, we're done.
+ */
+ if (file != NULL && __os_abspath(file))
+ return (__os_strdup(dbenv, file, namep));
+
+ /* Everything else is relative to the environment home. */
+ if (dbenv != NULL)
+ a = dbenv->db_home;
+
+retry: /*
+ * DB_APP_NONE:
+ * DB_HOME/file
+ * DB_APP_DATA:
+ * DB_HOME/DB_DATA_DIR/file
+ * DB_APP_LOG:
+ * DB_HOME/DB_LOG_DIR/file
+ * DB_APP_TMP:
+ * DB_HOME/DB_TMP_DIR/<create>
+ */
+ switch (appname) {
+ case DB_APP_NONE:
+ break;
+ case DB_APP_DATA:
+ if (dbenv != NULL && dbenv->db_data_dir != NULL &&
+ (b = dbenv->db_data_dir[++data_entry]) == NULL) {
+ data_entry = -1;
+ b = dbenv->db_data_dir[0];
+ }
+ break;
+ case DB_APP_LOG:
+ if (dbenv != NULL)
+ b = dbenv->db_log_dir;
+ break;
+ case DB_APP_TMP:
+ if (dbenv != NULL)
+ b = dbenv->db_tmp_dir;
+ tmp_create = 1;
+ break;
+ }
+
+ len =
+ (a == NULL ? 0 : strlen(a) + 1) +
+ (b == NULL ? 0 : strlen(b) + 1) +
+ (file == NULL ? 0 : strlen(file) + 1);
+
+ /*
+ * Allocate space to hold the current path information, as well as any
+ * temporary space that we're going to need to create a temporary file
+ * name.
+ */
+#define DB_TRAIL "BDBXXXXXX"
+ /*
+ * The sizeof(DB_TRAIL) + 10 slack leaves room for the separator and
+ * template that __db_tmp_open appends to this buffer in place.
+ */
+ str_len = len + sizeof(DB_TRAIL) + 10;
+ if ((ret = __os_malloc(dbenv, str_len, &str)) != 0)
+ return (ret);
+
+ slash = 0;
+ p = str;
+ DB_ADDSTR(a);
+ DB_ADDSTR(b);
+ DB_ADDSTR(file);
+ *p = '\0';
+
+ /*
+ * If we're opening a data file, see if it exists. If it does,
+ * return it, otherwise, try and find another one to open.
+ */
+ if (__os_exists(str, NULL) != 0 && data_entry != -1) {
+ __os_free(dbenv, str);
+ b = NULL;
+ goto retry;
+ }
+
+ /* Create the file if so requested. */
+ if (tmp_create &&
+ (ret = __db_tmp_open(dbenv, tmp_oflags, str, fhp)) != 0) {
+ __os_free(dbenv, str);
+ return (ret);
+ }
+
+ /* Ownership of str passes to the caller only when namep is given. */
+ if (namep == NULL)
+ __os_free(dbenv, str);
+ else
+ *namep = str;
+ return (0);
+}
+
+/*
+ * __db_home --
+ * Find the database home.
+ *
+ * PUBLIC: int __db_home __P((DB_ENV *, const char *, u_int32_t));
+ */
+int
+__db_home(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ const char *p;
+
+ /*
+ * Use db_home by default, this allows utilities to reasonably
+ * override the environment either explicitly or by using a -h
+ * option. Otherwise, use the environment if it's permitted
+ * and initialized.
+ */
+ if ((p = db_home) == NULL &&
+ (LF_ISSET(DB_USE_ENVIRON) ||
+ (LF_ISSET(DB_USE_ENVIRON_ROOT) && __os_isroot())) &&
+ (p = getenv("DB_HOME")) != NULL && p[0] == '\0') {
+ __db_err(dbenv, "illegal DB_HOME environment variable");
+ return (EINVAL);
+ }
+
+ /*
+ * NOTE(review): only a DB_HOME that is set but empty is rejected
+ * above; an unset DB_HOME (or flags forbidding its use) leaves p
+ * NULL and the function succeeds without setting dbenv->db_home.
+ */
+ return (p == NULL ? 0 : __os_strdup(dbenv, p, &dbenv->db_home));
+}
+
+/*
+ * __DB_OVFL --
+ * Range-check helper for __db_parse: if v exceeds max, stash the
+ * offending value and its limit in the caller's __v/__max locals
+ * and jump to the caller's "toobig" label to report the error.
+ */
+#define __DB_OVFL(v, max) \
+ if (v > max) { \
+ __v = v; \
+ __max = max; \
+ goto toobig; \
+ }
+
+/*
+ * __db_parse --
+ * Parse a single NAME VALUE pair.
+ *
+ * Mutates s in place (NUL-terminating the name and trimming the
+ * value), then dispatches to the matching DB_ENV configuration
+ * method. Returns 0 on success, or EINVAL for a malformed,
+ * out-of-range or unrecognized pair.
+ */
+static int
+__db_parse(dbenv, s)
+ DB_ENV *dbenv;
+ char *s;
+{
+ u_long __max, __v, v1, v2, v3;
+ u_int32_t flags;
+ char *name, *p, *value, v4;
+
+ /*
+ * !!!
+ * The value of 40 is hard-coded into format arguments to sscanf
+ * below, it can't be changed here without changing it there, too.
+ */
+ char arg[40];
+
+ /*
+ * Name/value pairs are parsed as two white-space separated strings.
+ * Leading and trailing white-space is trimmed from the value, but
+ * it may contain embedded white-space. Note: we use the isspace(3)
+ * macro because it's more portable, but that means that you can use
+ * characters like form-feed to separate the strings.
+ */
+ name = s;
+ for (p = name; *p != '\0' && !isspace((int)*p); ++p)
+ ;
+ if (*p == '\0' || p == name)
+ goto illegal;
+ *p = '\0';
+ for (++p; isspace((int)*p); ++p)
+ ;
+ if (*p == '\0')
+ goto illegal;
+ value = p;
+ for (++p; *p != '\0'; ++p)
+ ;
+ for (--p; isspace((int)*p); --p)
+ ;
+ ++p;
+ if (p == value) {
+illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
+ return (EINVAL);
+ }
+ *p = '\0';
+
+ /*
+ * Each numeric scan below carries a trailing %c conversion: if that
+ * extra character matches, there was unexpected text after the
+ * value(s) and the pair is rejected via badarg.
+ */
+ if (!strcasecmp(name, "set_cachesize")) {
+ if (sscanf(value, "%lu %lu %lu %c", &v1, &v2, &v3, &v4) != 3)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ __DB_OVFL(v2, UINT32_T_MAX);
+ __DB_OVFL(v3, 10000);
+ return (dbenv->set_cachesize(
+ dbenv, (u_int32_t)v1, (u_int32_t)v2, (int)v3));
+ }
+
+ if (!strcasecmp(name, "set_data_dir") ||
+ !strcasecmp(name, "db_data_dir")) /* Compatibility. */
+ return (dbenv->set_data_dir(dbenv, value));
+
+ if (!strcasecmp(name, "set_flags")) {
+ if (sscanf(value, "%40s %c", arg, &v4) != 1)
+ goto badarg;
+
+ if (!strcasecmp(value, "db_cdb_alldb"))
+ return (dbenv->set_flags(dbenv, DB_CDB_ALLDB, 1));
+ if (!strcasecmp(value, "db_direct_db"))
+ return (dbenv->set_flags(dbenv, DB_DIRECT_DB, 1));
+ if (!strcasecmp(value, "db_direct_log"))
+ return (dbenv->set_flags(dbenv, DB_DIRECT_LOG, 1));
+ if (!strcasecmp(value, "db_nolocking"))
+ return (dbenv->set_flags(dbenv, DB_NOLOCKING, 1));
+ if (!strcasecmp(value, "db_nommap"))
+ return (dbenv->set_flags(dbenv, DB_NOMMAP, 1));
+ if (!strcasecmp(value, "db_overwrite"))
+ return (dbenv->set_flags(dbenv, DB_OVERWRITE, 1));
+ if (!strcasecmp(value, "db_nopanic"))
+ return (dbenv->set_flags(dbenv, DB_NOPANIC, 1));
+ if (!strcasecmp(value, "db_region_init"))
+ return (dbenv->set_flags(dbenv, DB_REGION_INIT, 1));
+ if (!strcasecmp(value, "db_txn_nosync"))
+ return (dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1));
+ if (!strcasecmp(value, "db_txn_write_nosync"))
+ return (
+ dbenv->set_flags(dbenv, DB_TXN_WRITE_NOSYNC, 1));
+ if (!strcasecmp(value, "db_yieldcpu"))
+ return (dbenv->set_flags(dbenv, DB_YIELDCPU, 1));
+ goto badarg;
+ }
+
+ if (!strcasecmp(name, "set_lg_bsize")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lg_bsize(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lg_max")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lg_max(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lg_regionmax")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lg_regionmax(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lg_dir") ||
+ !strcasecmp(name, "db_log_dir")) /* Compatibility. */
+ return (dbenv->set_lg_dir(dbenv, value));
+
+ if (!strcasecmp(name, "set_lk_detect")) {
+ if (sscanf(value, "%40s %c", arg, &v4) != 1)
+ goto badarg;
+ if (!strcasecmp(value, "db_lock_default"))
+ flags = DB_LOCK_DEFAULT;
+ else if (!strcasecmp(value, "db_lock_expire"))
+ flags = DB_LOCK_EXPIRE;
+ else if (!strcasecmp(value, "db_lock_maxlocks"))
+ flags = DB_LOCK_MAXLOCKS;
+ else if (!strcasecmp(value, "db_lock_minlocks"))
+ flags = DB_LOCK_MINLOCKS;
+ else if (!strcasecmp(value, "db_lock_minwrite"))
+ flags = DB_LOCK_MINWRITE;
+ else if (!strcasecmp(value, "db_lock_oldest"))
+ flags = DB_LOCK_OLDEST;
+ else if (!strcasecmp(value, "db_lock_random"))
+ flags = DB_LOCK_RANDOM;
+ else if (!strcasecmp(value, "db_lock_youngest"))
+ flags = DB_LOCK_YOUNGEST;
+ else
+ goto badarg;
+ return (dbenv->set_lk_detect(dbenv, flags));
+ }
+
+ if (!strcasecmp(name, "set_lk_max")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lk_max_locks")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max_locks(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lk_max_lockers")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max_lockers(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lk_max_objects")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max_objects(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lock_timeout")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_timeout(
+ dbenv, (u_int32_t)v1, DB_SET_LOCK_TIMEOUT));
+ }
+
+ if (!strcasecmp(name, "set_mp_mmapsize")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_mp_mmapsize(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_region_init")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1 || v1 != 1)
+ goto badarg;
+ return (dbenv->set_flags(
+ dbenv, DB_REGION_INIT, v1 == 0 ? 0 : 1));
+ }
+
+ if (!strcasecmp(name, "set_shm_key")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ return (dbenv->set_shm_key(dbenv, (long)v1));
+ }
+
+ if (!strcasecmp(name, "set_tas_spins")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_tas_spins(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_tmp_dir") ||
+ !strcasecmp(name, "db_tmp_dir")) /* Compatibility.*/
+ return (dbenv->set_tmp_dir(dbenv, value));
+
+ if (!strcasecmp(name, "set_tx_max")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_tx_max(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_txn_timeout")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_timeout(
+ dbenv, (u_int32_t)v1, DB_SET_TXN_TIMEOUT));
+ }
+
+ if (!strcasecmp(name, "set_verbose")) {
+ if (sscanf(value, "%40s %c", arg, &v4) != 1)
+ goto badarg;
+
+ if (!strcasecmp(value, "db_verb_chkpoint"))
+ flags = DB_VERB_CHKPOINT;
+ else if (!strcasecmp(value, "db_verb_deadlock"))
+ flags = DB_VERB_DEADLOCK;
+ else if (!strcasecmp(value, "db_verb_recovery"))
+ flags = DB_VERB_RECOVERY;
+ else if (!strcasecmp(value, "db_verb_waitsfor"))
+ flags = DB_VERB_WAITSFOR;
+ else
+ goto badarg;
+ return (dbenv->set_verbose(dbenv, flags, 1));
+ }
+
+ __db_err(dbenv, "unrecognized name-value pair: %s", s);
+ return (EINVAL);
+
+badarg: __db_err(dbenv, "incorrect arguments for name-value pair: %s", s);
+ return (EINVAL);
+
+toobig: __db_err(dbenv,
+ "%s: %lu larger than maximum value %lu", s, __v, __max);
+ return (EINVAL);
+}
+
+/*
+ * __db_tmp_open --
+ * Create a temporary file.
+ *
+ * path holds the target directory on entry and is extended IN PLACE
+ * with a separator and the DB_TRAIL template, so the caller must
+ * have allocated slack for that (see __db_appname's str_len).
+ * On success the open handle is returned via fhp.
+ */
+static int
+__db_tmp_open(dbenv, tmp_oflags, path, fhp)
+ DB_ENV *dbenv;
+ u_int32_t tmp_oflags;
+ char *path;
+ DB_FH *fhp;
+{
+ u_int32_t id;
+ int mode, isdir, ret;
+ const char *p;
+ char *trv;
+
+ /*
+ * Check the target directory; if you have six X's and it doesn't
+ * exist, this runs for a *very* long time.
+ */
+ if ((ret = __os_exists(path, &isdir)) != 0) {
+ __db_err(dbenv, "%s: %s", path, db_strerror(ret));
+ return (ret);
+ }
+ if (!isdir) {
+ __db_err(dbenv, "%s: %s", path, db_strerror(EINVAL));
+ return (EINVAL);
+ }
+
+ /* Build the path. */
+ for (trv = path; *trv != '\0'; ++trv)
+ ;
+ *trv = PATH_SEPARATOR[0];
+ for (p = DB_TRAIL; (*++trv = *p) != '\0'; ++p)
+ ;
+
+ /*
+ * Replace the X's with the process ID, walking backward from the
+ * end of the template; low-order digits land in the last X.
+ */
+ for (__os_id(&id); *--trv == 'X'; id /= 10)
+ switch (id % 10) {
+ case 0: *trv = '0'; break;
+ case 1: *trv = '1'; break;
+ case 2: *trv = '2'; break;
+ case 3: *trv = '3'; break;
+ case 4: *trv = '4'; break;
+ case 5: *trv = '5'; break;
+ case 6: *trv = '6'; break;
+ case 7: *trv = '7'; break;
+ case 8: *trv = '8'; break;
+ case 9: *trv = '9'; break;
+ }
+ ++trv;
+
+ /* Set up open flags and mode. */
+ mode = __db_omode("rw----");
+
+ /* Loop, trying to open a file. */
+ for (;;) {
+ if ((ret = __os_open(dbenv, path,
+ tmp_oflags | DB_OSO_CREATE | DB_OSO_EXCL | DB_OSO_TEMP,
+ mode, fhp)) == 0)
+ return (0);
+
+ /*
+ * !!!:
+ * If we don't get an EEXIST error, then there's something
+ * seriously wrong. Unfortunately, if the implementation
+ * doesn't return EEXIST for O_CREAT and O_EXCL regardless
+ * of other possible errors, we've lost.
+ */
+ if (ret != EEXIST) {
+ __db_err(dbenv,
+ "tmp_open: %s: %s", path, db_strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Tricky little algorithm for backward compatibility.
+ * Assumes sequential ordering of lower-case characters.
+ */
+ for (;;) {
+ if (*trv == '\0')
+ return (EINVAL);
+ if (*trv == 'z')
+ *trv++ = 'a';
+ else {
+ if (isdigit((int)*trv))
+ *trv = 'a';
+ else
+ ++*trv;
+ break;
+ }
+ }
+ }
+ /* NOTREACHED */
+}
diff --git a/storage/bdb/env/env_recover.c b/storage/bdb/env/env_recover.c
new file mode 100644
index 00000000000..fbe3b345b0d
--- /dev/null
+++ b/storage/bdb/env/env_recover.c
@@ -0,0 +1,790 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: env_recover.c,v 11.97 2002/08/22 17:43:22 margo Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_am.h"
+
+static int __log_backup __P((DB_ENV *, DB_LOGC *, DB_LSN *, DB_LSN *));
+static int __log_earliest __P((DB_ENV *, DB_LOGC *, int32_t *, DB_LSN *));
+static double __lsn_diff __P((DB_LSN *, DB_LSN *, DB_LSN *, u_int32_t, int));
+
+/*
+ * __db_apprec --
+ * Perform recovery. If max_lsn is non-NULL, then we are trying
+ * to synchronize this system up with another system that has a max
+ * LSN of max_lsn, so we need to roll back sufficiently far for that
+ * to work. See __log_backup for details.
+ *
+ * PUBLIC: int __db_apprec __P((DB_ENV *, DB_LSN *, u_int32_t));
+ */
+int
+__db_apprec(dbenv, max_lsn, flags)
+ DB_ENV *dbenv;
+ DB_LSN *max_lsn;
+ u_int32_t flags;
+{
+ DBT data;
+ DB_LOGC *logc;
+ DB_LSN ckp_lsn, first_lsn, last_lsn, lowlsn, lsn, stop_lsn;
+ DB_TXNREGION *region;
+ __txn_ckp_args *ckp_args;
+ time_t now, tlow;
+ int32_t log_size, low;
+ double nfiles;
+ int have_rec, is_thread, progress, ret, t_ret;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ u_int32_t hi_txn, lockid, txnid;
+ char *p, *pass, t1[60], t2[60];
+ void *txninfo;
+
+ COMPQUIET(nfiles, (double)0);
+
+ logc = NULL;
+ ckp_args = NULL;
+ dtab = NULL;
+ hi_txn = TXN_MAXIMUM;
+ lockid = DB_LOCK_INVALIDID;
+ txninfo = NULL;
+ pass = "initial";
+
+ /*
+ * XXX
+ * Get the log size. No locking required because we're single-threaded
+ * during recovery.
+ */
+ log_size =
+ ((LOG *)(((DB_LOG *)dbenv->lg_handle)->reginfo.primary))->log_size;
+
+ /*
+ * Save the state of the thread flag -- we don't need it on at the
+ * moment because we're single-threaded until recovery is complete.
+ */
+ is_thread = F_ISSET(dbenv, DB_ENV_THREAD) ? 1 : 0;
+ F_CLR(dbenv, DB_ENV_THREAD);
+
+ /* Set in-recovery flags. */
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ F_SET(region, TXN_IN_RECOVERY);
+
+ /* Allocate a cursor for the log. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+
+ /*
+ * If the user is specifying recovery to a particular point in time
+ * or to a particular LSN, find the point to start recovery from.
+ */
+ ZERO_LSN(lowlsn);
+ if (max_lsn != NULL) {
+ if ((ret = __log_backup(dbenv, logc, max_lsn, &lowlsn)) != 0)
+ goto err;
+ } else if (dbenv->tx_timestamp != 0) {
+ if ((ret = __log_earliest(dbenv, logc, &low, &lowlsn)) != 0)
+ goto err;
+ if ((int32_t)dbenv->tx_timestamp < low) {
+ (void)snprintf(t1, sizeof(t1),
+ "%s", ctime(&dbenv->tx_timestamp));
+ if ((p = strchr(t1, '\n')) != NULL)
+ *p = '\0';
+ tlow = (time_t)low;
+ (void)snprintf(t2, sizeof(t2), "%s", ctime(&tlow));
+ if ((p = strchr(t2, '\n')) != NULL)
+ *p = '\0';
+ __db_err(dbenv,
+ "Invalid recovery timestamp %s; earliest time is %s",
+ t1, t2);
+ ret = EINVAL;
+ goto err;
+ }
+ }
+
+ /*
+ * Recovery is done in three passes:
+ * Pass #0:
+ * We need to find the position from which we will open files.
+ * We need to open files beginning with the earlier of the
+ * most recent checkpoint LSN and a checkpoint LSN before the
+ * recovery timestamp, if specified. We need to be before the
+ * most recent checkpoint LSN because we are going to collect
+ * information about which transactions were begun before we
+ * start rolling forward. Those that were should never be undone
+ * because queue cannot use LSNs to determine what operations can
+ * safely be aborted and it cannot rollback operations in
+ * transactions for which there may be records not processed
+ * during recovery. We need to consider earlier points in time
+ * in case we are recovering to a particular timestamp.
+ *
+ * Pass #1:
+ * Read forward through the log from the position found in pass 0
+ * opening and closing files, and recording transactions for which
+ * we've seen their first record (the transaction's prev_lsn is
+ * 0,0). At the end of this pass, we know all transactions for
+ * which we've seen begins and we have the "current" set of files
+ * open.
+ *
+ * Pass #2:
+ * Read backward through the log undoing any uncompleted TXNs.
+ * There are four cases:
+ * 1. If doing catastrophic recovery, we read to the
+ * beginning of the log
+ * 2. If we are doing normal recovery, then we have to roll
+ * back to the most recent checkpoint LSN.
+ * 3. If we are recovering to a point in time, then we have
+ * to roll back to the checkpoint whose ckp_lsn is earlier
+ * than the specified time. __log_earliest will figure
+ * this out for us.
+ * 4. If we are recovering back to a particular LSN, then
+ * we have to roll back to the checkpoint whose ckp_lsn
+ * is earlier than the max_lsn. __log_backup will figure
+ * that out for us.
+ * In case 2, "uncompleted TXNs" include all those who committed
+ * after the user's specified timestamp.
+ *
+ * Pass #3:
+ * Read forward through the log from the LSN found in pass #2,
+ * redoing any committed TXNs (which committed after any user-
+ * specified rollback point). During this pass, checkpoint
+ * file information is ignored, and file openings and closings
+ * are redone.
+ *
+ * ckp_lsn -- lsn of the last checkpoint or the first in the log.
+ * first_lsn -- the lsn where the forward passes begin.
+ * last_lsn -- the last lsn in the log, used for feedback
+ * lowlsn -- the lsn we are rolling back to, if we are recovering
+ * to a point in time.
+ * lsn -- temporary use lsn.
+ * stop_lsn -- the point at which forward roll should stop
+ */
+
+ /*
+ * Find out the last lsn, so that we can estimate how far along we
+ * are in recovery. This will help us determine how much log there
+ * is between the first LSN that we're going to be working with and
+ * the last one. We assume that each of the three phases takes the
+ * same amount of time (a false assumption) and then use the %-age
+ * of the amount of log traversed to figure out how much of the
+ * pass we've accomplished.
+ *
+ * If we can't find any log records, we're kind of done.
+ */
+#ifdef UMRW
+ ZERO_LSN(last_lsn);
+#endif
+ memset(&data, 0, sizeof(data));
+ if ((ret = logc->get(logc, &last_lsn, &data, DB_LAST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "Last log record not found");
+ goto err;
+ }
+
+ /*
+ * Walk backward to the most recent record with a non-zero txnid;
+ * that transaction ID becomes hi_txn below.
+ */
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&txnid,
+ (u_int8_t *)data.data + sizeof(u_int32_t), sizeof(txnid));
+
+ if (txnid != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &data, DB_PREV)) == 0);
+
+ /*
+ * There are no transactions, so there is nothing to do unless
+ * we're recovering to an LSN. If we are, we need to proceed since
+ * we'll still need to do a vtruncate based on information we haven't
+ * yet collected.
+ */
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ if (max_lsn == NULL)
+ goto done;
+ }
+ if (ret != 0)
+ goto err;
+
+ hi_txn = txnid;
+
+ /*
+ * Pass #0
+ * Find the LSN from which we begin OPENFILES.
+ *
+ * If this is a catastrophic recovery, or if no checkpoint exists
+ * in the log, the LSN is the first LSN in the log.
+ *
+ * Otherwise, it is the minimum of (1) the LSN in the last checkpoint
+ * and (2) the LSN in the checkpoint before any specified recovery
+ * timestamp or max_lsn.
+ */
+ /*
+ * Get the first LSN in the log; it's an initial default
+ * even if this is not a catastrophic recovery.
+ */
+ if ((ret = logc->get(logc, &ckp_lsn, &data, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "First log record not found");
+ goto err;
+ }
+ first_lsn = ckp_lsn;
+ have_rec = 1;
+
+ if (!LF_ISSET(DB_RECOVER_FATAL)) {
+ if ((ret = __txn_getckp(dbenv, &ckp_lsn)) == 0 &&
+ (ret = logc->get(logc, &ckp_lsn, &data, DB_SET)) == 0) {
+ /* We have a recent checkpoint. This is LSN (1). */
+ if ((ret = __txn_ckp_read(dbenv,
+ data.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%ld][%ld]",
+ (u_long)ckp_lsn.file,
+ (u_long)ckp_lsn.offset);
+ goto err;
+ }
+ first_lsn = ckp_args->ckp_lsn;
+ have_rec = 0;
+ }
+
+ /*
+ * If LSN (2) exists, use it if it's before LSN (1).
+ * (If LSN (1) doesn't exist, first_lsn is the
+ * beginning of the log, so will "win" this check.)
+ *
+ * XXX
+ * In the recovery-to-a-timestamp case, lowlsn is chosen by
+ * __log_earliest, and is the checkpoint LSN of the
+ * *earliest* checkpoint in the unreclaimed log. I
+ * (krinsky) believe that we could optimize this by looking
+ * instead for the LSN of the *latest* checkpoint before
+ * the timestamp of interest, but I'm not sure that this
+ * is worth doing right now. (We have to look for lowlsn
+ * and low anyway, to make sure the requested timestamp is
+ * somewhere in the logs we have, and all that's required
+ * is that we pick *some* checkpoint after the beginning of
+ * the logs and before the timestamp.
+ */
+ if ((dbenv->tx_timestamp != 0 || max_lsn != NULL) &&
+ log_compare(&lowlsn, &first_lsn) < 0) {
+ DB_ASSERT(have_rec == 0);
+ first_lsn = lowlsn;
+ }
+ }
+
+ /* Get the record at first_lsn if we don't have it already. */
+ if (!have_rec &&
+ (ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0) {
+ __db_err(dbenv, "Checkpoint LSN record [%ld][%ld] not found",
+ (u_long)first_lsn.file, (u_long)first_lsn.offset);
+ goto err;
+ }
+
+ if (dbenv->db_feedback != NULL) {
+ if (last_lsn.file == first_lsn.file)
+ nfiles = (double)
+ (last_lsn.offset - first_lsn.offset) / log_size;
+ else
+ nfiles = (double)(last_lsn.file - first_lsn.file) +
+ (double)(log_size - first_lsn.offset +
+ last_lsn.offset) / log_size;
+ /* We are going to divide by nfiles; make sure it isn't 0. */
+ if (nfiles == 0)
+ nfiles = (double)0.001;
+ }
+
+ /* Find a low txnid. */
+ ret = 0;
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&txnid,
+ (u_int8_t *)data.data + sizeof(u_int32_t), sizeof(txnid));
+
+ if (txnid != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0);
+
+ /*
+ * There are no transactions and we're not recovering to an LSN (see
+ * above), so there is nothing to do.
+ */
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ if (max_lsn == NULL)
+ goto done;
+ }
+
+ /* Reset to the first lsn. */
+ if (ret != 0 || (ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0)
+ goto err;
+
+ /* Initialize the transaction list. */
+ if ((ret =
+ __db_txnlist_init(dbenv, txnid, hi_txn, max_lsn, &txninfo)) != 0)
+ goto err;
+
+ /*
+ * Pass #1
+ * Run forward through the log starting at the first relevant lsn.
+ */
+ if ((ret = __env_openfiles(dbenv, logc,
+ txninfo, &data, &first_lsn, &last_lsn, nfiles, 1)) != 0)
+ goto err;
+
+ /*
+ * Pass #2.
+ *
+ * We used first_lsn to tell us how far back we need to recover,
+ * use it here.
+ */
+
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY))
+ __db_err(dbenv, "Recovery starting from [%lu][%lu]",
+ (u_long)first_lsn.file, (u_long)first_lsn.offset);
+
+ /*
+ * If we are doing client recovery, then we need to allocate
+ * the page-info lock table.
+ */
+ if (max_lsn != NULL) {
+ if ((ret = __rep_lockpgno_init(dbenv, &dtab, &dtabsize)) != 0)
+ goto err;
+ if ((ret = dbenv->lock_id(dbenv, &lockid)) != 0)
+ goto err;
+ }
+
+ pass = "backward";
+ for (ret = logc->get(logc, &lsn, &data, DB_LAST);
+ ret == 0 && log_compare(&lsn, &first_lsn) >= 0;
+ ret = logc->get(logc, &lsn, &data, DB_PREV)) {
+ if (dbenv->db_feedback != NULL) {
+ progress = 34 + (int)(33 * (__lsn_diff(&first_lsn,
+ &last_lsn, &lsn, log_size, 0) / nfiles));
+ dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+ }
+ if (max_lsn != NULL && (ret = __rep_lockpages(dbenv,
+ dtab, dtabsize, &lsn, NULL, NULL, lockid)) != 0)
+ continue;
+
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &data, &lsn,
+ DB_TXN_BACKWARD_ROLL, txninfo);
+ if (ret != 0) {
+ /* DB_TXN_CKP is informational, not an error. */
+ if (ret != DB_TXN_CKP)
+ goto msgerr;
+ else
+ ret = 0;
+ }
+ }
+ if (ret != 0 && ret != DB_NOTFOUND)
+ goto err;
+
+ /*
+ * Pass #3. If we are recovering to a timestamp or to an LSN,
+ * we need to make sure that we don't roll-forward beyond that
+ * point because there may be non-transactional operations (e.g.,
+ * closes that would fail). The last_lsn variable is used for
+ * feedback calculations, but use it to set an initial stopping
+ * point for the forward pass, and then reset appropriately to
+ * derive a real stop_lsn that tells how far the forward pass
+ * should go.
+ */
+ pass = "forward";
+ stop_lsn = last_lsn;
+ if (max_lsn != NULL || dbenv->tx_timestamp != 0)
+ stop_lsn = ((DB_TXNHEAD *)txninfo)->maxlsn;
+
+ for (ret = logc->get(logc, &lsn, &data, DB_NEXT);
+ ret == 0; ret = logc->get(logc, &lsn, &data, DB_NEXT)) {
+ /*
+ * If we are recovering to a timestamp or an LSN,
+ * we need to make sure that we don't try to roll
+ * forward beyond the soon-to-be end of log.
+ */
+ if (log_compare(&lsn, &stop_lsn) > 0)
+ break;
+
+ if (dbenv->db_feedback != NULL) {
+ progress = 67 + (int)(33 * (__lsn_diff(&first_lsn,
+ &last_lsn, &lsn, log_size, 1) / nfiles));
+ dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+ }
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &data, &lsn,
+ DB_TXN_FORWARD_ROLL, txninfo);
+ if (ret != 0) {
+ if (ret != DB_TXN_CKP)
+ goto msgerr;
+ else
+ ret = 0;
+ }
+
+ }
+ if (ret != 0 && ret != DB_NOTFOUND)
+ goto err;
+
+ /*
+ * Process any pages that were on the limbo list and move them to
+ * the free list. Do this before checkpointing the database.
+ */
+ if ((ret = __db_do_the_limbo(dbenv, NULL, NULL, txninfo)) != 0)
+ goto err;
+
+ if (max_lsn == NULL)
+ region->last_txnid = ((DB_TXNHEAD *)txninfo)->maxid;
+
+ /* Take a checkpoint here to force any dirty data pages to disk. */
+ if (dbenv->tx_timestamp != 0) {
+ region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn;
+ __log_vtruncate(dbenv, &((DB_TXNHEAD *)txninfo)->maxlsn,
+ &((DB_TXNHEAD *)txninfo)->ckplsn);
+ }
+
+ if ((ret = dbenv->txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0)
+ goto err;
+
+ /* Close all the db files that are open. */
+ if ((ret = __dbreg_close_files(dbenv)) != 0)
+ goto err;
+
+ if (max_lsn != NULL) {
+ region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn;
+
+ /* We are going to truncate, so we'd best close the cursor. */
+ if (logc != NULL && (ret = logc->close(logc, 0)) != 0)
+ goto err;
+ __log_vtruncate(dbenv,
+ max_lsn, &((DB_TXNHEAD *)txninfo)->ckplsn);
+
+ /*
+ * Now we need to open files that should be open in order for
+ * client processing to continue. However, since we've
+ * truncated the log, we need to recompute from where the
+ * openfiles pass should begin.
+ */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ if ((ret = logc->get(logc, &first_lsn, &data, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "First log record not found");
+ goto err;
+ }
+ if ((ret = __txn_getckp(dbenv, &first_lsn)) == 0 &&
+ (ret = logc->get(logc, &first_lsn, &data, DB_SET)) == 0) {
+ /* We have a recent checkpoint. This is LSN (1). */
+ if ((ret = __txn_ckp_read(dbenv,
+ data.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%ld][%ld]",
+ (u_long)first_lsn.file,
+ (u_long)first_lsn.offset);
+ goto err;
+ }
+ first_lsn = ckp_args->ckp_lsn;
+ }
+ if ((ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0)
+ goto err;
+ if ((ret = __env_openfiles(dbenv, logc,
+ txninfo, &data, &first_lsn, NULL, nfiles, 1)) != 0)
+ goto err;
+ } else if (region->stat.st_nrestores == 0)
+ /*
+ * If there are no prepared transactions that need resolution,
+ * we need to reset the transaction ID space and log this fact.
+ */
+ if ((ret = __txn_reset(dbenv)) != 0)
+ goto err;
+
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY)) {
+ (void)time(&now);
+ __db_err(dbenv, "Recovery complete at %.24s", ctime(&now));
+ __db_err(dbenv, "%s %lx %s [%lu][%lu]",
+ "Maximum transaction ID",
+ ((DB_TXNHEAD *)txninfo)->maxid,
+ "Recovery checkpoint",
+ (u_long)region->last_ckp.file,
+ (u_long)region->last_ckp.offset);
+ }
+
+ if (0) {
+msgerr: __db_err(dbenv,
+ "Recovery function for LSN %lu %lu failed on %s pass",
+ (u_long)lsn.file, (u_long)lsn.offset, pass);
+ }
+
+ /* The success path falls through done: into the shared cleanup. */
+done:
+err: if (lockid != DB_LOCK_INVALIDID) {
+ if ((t_ret = __rep_unlockpages(dbenv, lockid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret =
+ dbenv->lock_id_free(dbenv, lockid)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+
+ if (ckp_args != NULL)
+ __os_free(dbenv, ckp_args);
+
+ dbenv->tx_timestamp = 0;
+
+ /* Restore the state of the thread flag, clear in-recovery flags. */
+ if (is_thread)
+ F_SET(dbenv, DB_ENV_THREAD);
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ F_CLR(region, TXN_IN_RECOVERY);
+
+ return (ret);
+}
+
+/*
+ * __lsn_diff --
+ *	Return, as a double, how many logfiles separate two LSNs.
+ *
+ *	When is_forward is non-zero the result is (current - low); otherwise
+ *	it is (high - current).  "max" is the number of bytes per logfile, so
+ *	offsets within a file contribute a fractional part.
+ */
+static double
+__lsn_diff(low, high, current, max, is_forward)
+	DB_LSN *low, *high, *current;
+	u_int32_t max;
+	int is_forward;
+{
+	/*
+	 * Three cases in each direction: same file (only the offset
+	 * difference matters), or different files where the offsets put us
+	 * either below or above the integral difference in file numbers.
+	 */
+	if (is_forward) {
+		if (current->file == low->file)
+			return ((double)(current->offset - low->offset) / max);
+		if (current->offset < low->offset)
+			return ((double)(current->file - low->file - 1) +
+			    (double)(max - low->offset + current->offset) /
+			    max);
+		return ((double)(current->file - low->file) +
+		    (double)(current->offset - low->offset) / max);
+	}
+
+	if (current->file == high->file)
+		return ((double)(high->offset - current->offset) / max);
+	if (current->offset > high->offset)
+		return ((double)(high->file - current->file - 1) +
+		    (double)(max - current->offset + high->offset) / max);
+	return ((double)(high->file - current->file) +
+	    (double)(high->offset - current->offset) / max);
+}
+
+/*
+ * __log_backup --
+ *
+ * This is used to find the earliest log record to process when a client
+ * is trying to sync up with a master whose max LSN is less than this
+ * client's max lsn; we want to roll back everything after that
+ *
+ * Find the latest checkpoint whose ckp_lsn is less than the max lsn.
+ *
+ * start_lsn is an out parameter.  NOTE(review): the error path below uses
+ * IS_ZERO_LSN(*start_lsn) to decide whether to fall back to the first log
+ * record, so it assumes the caller zeroed *start_lsn -- confirm callers.
+ */
+static int
+__log_backup(dbenv, logc, max_lsn, start_lsn)
+	DB_ENV *dbenv;
+	DB_LOGC *logc;
+	DB_LSN *max_lsn, *start_lsn;
+{
+	DB_LSN lsn;
+	DBT data;
+	__txn_ckp_args *ckp_args;
+	int ret;
+
+	memset(&data, 0, sizeof(data));
+	ckp_args = NULL;
+
+	/*
+	 * Follow checkpoints through the log until we find one with
+	 * a ckp_lsn less than max_lsn.
+	 */
+	if ((ret = __txn_getckp(dbenv, &lsn)) != 0)
+		goto err;
+	while ((ret = logc->get(logc, &lsn, &data, DB_SET)) == 0) {
+		/* __txn_ckp_read allocates a fresh ckp_args each pass. */
+		if ((ret = __txn_ckp_read(dbenv, data.data, &ckp_args)) != 0)
+			return (ret);
+		if (log_compare(&ckp_args->ckp_lsn, max_lsn) <= 0) {
+			*start_lsn = ckp_args->ckp_lsn;
+			break;
+		}
+
+		/* Walk backward through the chain of checkpoints. */
+		lsn = ckp_args->prev_lsn;
+		if (IS_ZERO_LSN(lsn))
+			break;
+		/*
+		 * Clear the pointer after freeing: if the next DB_SET get
+		 * fails we fall out of the loop, and the cleanup below
+		 * would otherwise free this chunk a second time.
+		 */
+		__os_free(dbenv, ckp_args);
+		ckp_args = NULL;
+	}
+
+	if (ckp_args != NULL)
+		__os_free(dbenv, ckp_args);
+err:	if (IS_ZERO_LSN(*start_lsn) && (ret == 0 || ret == DB_NOTFOUND))
+		ret = logc->get(logc, start_lsn, &data, DB_FIRST);
+	return (ret);
+}
+
+/*
+ * __log_earliest --
+ *
+ * Return the earliest recovery point for the log files present.  The
+ * earliest recovery time is the time stamp of the first checkpoint record
+ * whose checkpoint LSN is greater than the first LSN we process.
+ *
+ * lowtime/lowlsn are out parameters; they are written only when at least
+ * one checkpoint record is successfully read, and hold the values from
+ * the last checkpoint examined when the loop ends.
+ */
+static int
+__log_earliest(dbenv, logc, lowtime, lowlsn)
+	DB_ENV *dbenv;
+	DB_LOGC *logc;
+	int32_t *lowtime;
+	DB_LSN *lowlsn;
+{
+	DB_LSN first_lsn, lsn;
+	DBT data;
+	__txn_ckp_args *ckpargs;
+	u_int32_t rectype;
+	int cmp, ret;
+
+	memset(&data, 0, sizeof(data));
+	/*
+	 * Read forward through the log looking for the first checkpoint
+	 * record whose ckp_lsn is greater than first_lsn.
+	 */
+
+	for (ret = logc->get(logc, &first_lsn, &data, DB_FIRST);
+	    ret == 0; ret = logc->get(logc, &lsn, &data, DB_NEXT)) {
+		/* The record type is the leading field of each log record. */
+		memcpy(&rectype, data.data, sizeof(rectype));
+		if (rectype != DB___txn_ckp)
+			continue;
+		/*
+		 * NOTE(review): if __txn_ckp_read fails, its error code is
+		 * overwritten by the next logc->get in the loop header, so
+		 * an unreadable checkpoint record is silently skipped --
+		 * confirm this best-effort behavior is intentional.
+		 */
+		if ((ret = __txn_ckp_read(dbenv, data.data, &ckpargs)) == 0) {
+			cmp = log_compare(&ckpargs->ckp_lsn, &first_lsn);
+			*lowlsn = ckpargs->ckp_lsn;
+			*lowtime = ckpargs->timestamp;
+
+			__os_free(dbenv, ckpargs);
+			if (cmp >= 0)
+				break;
+		}
+	}
+
+	return (ret);
+}
+
+/*
+ * __env_openfiles --
+ *	Perform the pass of recovery that opens files. This is used
+ * both during regular recovery and an initial call to txn_recover (since
+ * we need files open in order to abort prepared, but not yet committed
+ * transactions).
+ *
+ * See the comments in db_apprec for a detailed description of the
+ * various recovery passes.
+ *
+ * If we are not doing feedback processing (i.e., we are doing txn_recover
+ * processing and in_recovery is zero), then last_lsn can be NULL.
+ *
+ * open_lsn is the LSN of the first record to dispatch; data must already
+ * hold that record.  nfiles is the (possibly fractional) number of log
+ * files the pass covers, used only to scale feedback percentages.
+ * Returns 0 on success (including reaching the end of the log) or the
+ * first dispatch/cursor error.
+ *
+ * PUBLIC: int __env_openfiles __P((DB_ENV *, DB_LOGC *,
+ * PUBLIC:    void *, DBT *, DB_LSN *, DB_LSN *, double, int));
+ */
+int
+__env_openfiles(dbenv, logc, txninfo,
+    data, open_lsn, last_lsn, nfiles, in_recovery)
+	DB_ENV *dbenv;
+	DB_LOGC *logc;
+	void *txninfo;
+	DBT *data;
+	DB_LSN *open_lsn, *last_lsn;
+	int in_recovery;
+	double nfiles;
+{
+	DB_LSN lsn;
+	u_int32_t log_size;
+	int progress, ret;
+
+	/*
+	 * XXX
+	 * Get the log size. No locking required because we're single-threaded
+	 * during recovery.
+	 */
+	log_size =
+	   ((LOG *)(((DB_LOG *)dbenv->lg_handle)->reginfo.primary))->log_size;
+
+	lsn = *open_lsn;
+	for (;;) {
+		if (in_recovery && dbenv->db_feedback != NULL) {
+			DB_ASSERT(last_lsn != NULL);
+			progress = (int)(33 * (__lsn_diff(open_lsn,
+			   last_lsn, &lsn, log_size, 1) / nfiles));
+			dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+		}
+		/* Dispatch to the file-open flavor of the recovery table. */
+		ret = __db_dispatch(dbenv,
+		    dbenv->recover_dtab, dbenv->recover_dtab_size, data, &lsn,
+		    in_recovery ? DB_TXN_OPENFILES : DB_TXN_POPENFILES,
+		    txninfo);
+		if (ret != 0 && ret != DB_TXN_CKP) {
+			__db_err(dbenv,
+			    "Recovery function for LSN %lu %lu failed",
+			    (u_long)lsn.file, (u_long)lsn.offset);
+			break;
+		}
+		/* Hitting the end of the log is the normal exit. */
+		if ((ret = logc->get(logc, &lsn, data, DB_NEXT)) != 0) {
+			if (ret == DB_NOTFOUND)
+				ret = 0;
+			break;
+		}
+	}
+
+	return (ret);
+}
diff --git a/storage/bdb/env/env_region.c b/storage/bdb/env/env_region.c
new file mode 100644
index 00000000000..a919cf328b4
--- /dev/null
+++ b/storage/bdb/env/env_region.c
@@ -0,0 +1,1256 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_region.c,v 11.64 2002/07/17 15:09:19 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __db_des_destroy __P((DB_ENV *, REGION *));
+static int __db_des_get __P((DB_ENV *, REGINFO *, REGINFO *, REGION **));
+static int __db_e_remfile __P((DB_ENV *));
+static int __db_faultmem __P((DB_ENV *, void *, size_t, int));
+static void __db_region_destroy __P((DB_ENV *, REGINFO *));
+
+/*
+ * __db_e_attach
+ *	Join/create the environment
+ *
+ *	On success, dbenv->reginfo is set and the process is counted in the
+ *	environment's reference count.  init_flagsp, if non-NULL, supplies
+ *	the init flags at creation time and receives the environment's init
+ *	flags when joining.  Transient inconsistencies (region still being
+ *	built) are retried a few times before giving up with EAGAIN.
+ *
+ * PUBLIC: int __db_e_attach __P((DB_ENV *, u_int32_t *));
+ */
+int
+__db_e_attach(dbenv, init_flagsp)
+	DB_ENV *dbenv;
+	u_int32_t *init_flagsp;
+{
+	REGENV *renv;
+	REGENV_REF ref;
+	REGINFO *infop;
+	REGION *rp, tregion;
+	size_t size;
+	size_t nrw;
+	u_int32_t mbytes, bytes;
+	int retry_cnt, ret, segid;
+	char buf[sizeof(DB_REGION_FMT) + 20];
+
+#if !defined(HAVE_MUTEX_THREADS)
+	/*
+	 * !!!
+	 * If we don't have spinlocks, we need a file descriptor for fcntl(2)
+	 * locking. We use the file handle from the REGENV file for this
+	 * purpose.
+	 *
+	 * Since we may be using shared memory regions, e.g., shmget(2), and
+	 * not a mapped-in regular file, the backing file may be only a few
+	 * bytes in length. So, this depends on the ability to call fcntl to
+	 * lock file offsets much larger than the actual physical file. I
+	 * think that's safe -- besides, very few systems actually need this
+	 * kind of support, SunOS is the only one still in wide use of which
+	 * I'm aware.
+	 *
+	 * The error case is if an application lacks spinlocks and wants to be
+	 * threaded. That doesn't work because fcntl may lock the underlying
+	 * process, including all its threads.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+		__db_err(dbenv,
+"architecture lacks fast mutexes: applications cannot be threaded");
+		return (EINVAL);
+	}
+#endif
+
+	/* Initialization */
+	retry_cnt = 0;
+
+	/* Repeated initialization. */
+loop:	renv = NULL;
+
+	/* Set up the DB_ENV's REG_INFO structure. */
+	if ((ret = __os_calloc(dbenv, 1, sizeof(REGINFO), &infop)) != 0)
+		return (ret);
+	infop->type = REGION_TYPE_ENV;
+	infop->id = REGION_ID_ENV;
+	infop->mode = dbenv->db_mode;
+	infop->flags = REGION_JOIN_OK;
+	if (F_ISSET(dbenv, DB_ENV_CREATE))
+		F_SET(infop, REGION_CREATE_OK);
+
+	/*
+	 * We have to single-thread the creation of the REGENV region. Once
+	 * it exists, we can do locking using locks in the region, but until
+	 * then we have to be the only player in the game.
+	 *
+	 * If this is a private environment, we are only called once and there
+	 * are no possible race conditions.
+	 *
+	 * If this is a public environment, we use the filesystem to ensure
+	 * the creation of the environment file is single-threaded.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+		if ((ret = __os_strdup(dbenv,
+		    "process-private", &infop->name)) != 0)
+			goto err;
+		goto creation;
+	}
+
+	/* Build the region name. */
+	(void)snprintf(buf, sizeof(buf), "%s", DB_REGION_ENV);
+	if ((ret = __db_appname(dbenv,
+	    DB_APP_NONE, buf, 0, NULL, &infop->name)) != 0)
+		goto err;
+
+	/*
+	 * Try to create the file, if we have the authority. We have to ensure
+	 * that multiple threads/processes attempting to simultaneously create
+	 * the file are properly ordered. Open using the O_CREAT and O_EXCL
+	 * flags so that multiple attempts to create the region will return
+	 * failure in all but one. POSIX 1003.1 requires that EEXIST be the
+	 * errno return value -- I sure hope they're right.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_CREATE)) {
+		if ((ret = __os_open(dbenv, infop->name,
+		    DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_REGION,
+		    dbenv->db_mode, dbenv->lockfhp)) == 0)
+			goto creation;
+		if (ret != EEXIST) {
+			__db_err(dbenv,
+			    "%s: %s", infop->name, db_strerror(ret));
+			goto err;
+		}
+	}
+
+	/*
+	 * If we couldn't create the file, try and open it. (If that fails,
+	 * we're done.)
+	 */
+	if ((ret = __os_open(dbenv, infop->name, DB_OSO_REGION | DB_OSO_DIRECT,
+	    dbenv->db_mode, dbenv->lockfhp)) != 0)
+		goto err;
+
+	/*
+	 * !!!
+	 * The region may be in system memory not backed by the filesystem
+	 * (more specifically, not backed by this file), and we're joining
+	 * it. In that case, the process that created it will have written
+	 * out a REGENV_REF structure as its only contents. We read that
+	 * structure before we do anything further, e.g., we can't just map
+	 * that file in and then figure out what's going on.
+	 *
+	 * All of this noise is because some systems don't have a coherent VM
+	 * and buffer cache, and what's worse, when you mix operations on the
+	 * VM and buffer cache, half the time you hang the system.
+	 *
+	 * If the file is the size of an REGENV_REF structure, then we know
+	 * the real region is in some other memory. (The only way you get a
+	 * file that size is to deliberately write it, as it's smaller than
+	 * any possible disk sector created by writing a file or mapping the
+	 * file into memory.) In which case, retrieve the structure from the
+	 * file and use it to acquire the referenced memory.
+	 *
+	 * If the structure is larger than a REGENV_REF structure, then this
+	 * file is backing the shared memory region, and we just map it into
+	 * memory.
+	 *
+	 * And yes, this makes me want to take somebody and kill them. (I
+	 * digress -- but you have no freakin' idea. This is unbelievably
+	 * stupid and gross, and I've probably spent six months of my life,
+	 * now, trying to make different versions of it work.)
+	 */
+	if ((ret = __os_ioinfo(dbenv, infop->name,
+	    dbenv->lockfhp, &mbytes, &bytes, NULL)) != 0) {
+		__db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
+		goto err;
+	}
+
+	/*
+	 * !!!
+	 * A size_t is OK -- regions get mapped into memory, and so can't
+	 * be larger than a size_t.
+	 */
+	size = mbytes * MEGABYTE + bytes;
+
+	/*
+	 * If the size is less than the size of a REGENV_REF structure, the
+	 * region (or, possibly, the REGENV_REF structure) has not yet been
+	 * completely written. Wait awhile and try again.
+	 *
+	 * Otherwise, if the size is the size of a REGENV_REF structure,
+	 * read it into memory and use it as a reference to the real region.
+	 */
+	if (size <= sizeof(ref)) {
+		if (size != sizeof(ref))
+			goto retry;
+
+		if ((ret = __os_read(dbenv, dbenv->lockfhp, &ref,
+		    sizeof(ref), &nrw)) != 0 || nrw < (size_t)sizeof(ref)) {
+			if (ret == 0)
+				ret = EIO;
+			__db_err(dbenv,
+		    "%s: unable to read system-memory information from: %s",
+			    infop->name, db_strerror(ret));
+			goto err;
+		}
+		size = ref.size;
+		segid = ref.segid;
+
+		F_SET(dbenv, DB_ENV_SYSTEM_MEM);
+	} else if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM)) {
+		ret = EINVAL;
+		__db_err(dbenv,
+		    "%s: existing environment not created in system memory: %s",
+		    infop->name, db_strerror(ret));
+		goto err;
+	} else
+		segid = INVALID_REGION_SEGID;
+
+	/*
+	 * If not doing thread locking, we need to save the file handle for
+	 * fcntl(2) locking. Otherwise, discard the handle, we no longer
+	 * need it, and the less contact between the buffer cache and the VM,
+	 * the better.
+	 */
+#ifdef HAVE_MUTEX_THREADS
+	/*
+	 * NOTE(review): unlike the creation path below, this close is not
+	 * guarded by a DB_FH_VALID check -- presumably the open above
+	 * guarantees a valid handle here; confirm.
+	 */
+	__os_closehandle(dbenv, dbenv->lockfhp);
+#endif
+
+	/* Call the region join routine to acquire the region. */
+	memset(&tregion, 0, sizeof(tregion));
+	tregion.size = (roff_t)size;
+	tregion.segid = segid;
+	if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0)
+		goto err;
+
+	/*
+	 * The environment's REGENV structure has to live at offset 0 instead
+	 * of the usual shalloc information. Set the primary reference and
+	 * correct the "addr" value to reference the shalloc region. Note,
+	 * this means that all of our offsets (R_ADDR/R_OFFSET) get shifted
+	 * as well, but that should be fine.
+	 */
+	infop->primary = R_ADDR(infop, 0);
+	infop->addr = (u_int8_t *)infop->addr + sizeof(REGENV);
+
+	/*
+	 * Check if the environment has had a catastrophic failure.
+	 *
+	 * Check the magic number to ensure the region is initialized. If the
+	 * magic number isn't set, the lock may not have been initialized, and
+	 * an attempt to use it could lead to random behavior.
+	 *
+	 * The panic and magic values aren't protected by any lock, so we never
+	 * use them in any check that's more complex than set/not-set.
+	 *
+	 * !!!
+	 * I'd rather play permissions games using the underlying file, but I
+	 * can't because Windows/NT filesystems won't open files mode 0.
+	 */
+	renv = infop->primary;
+	if (renv->envpanic && !F_ISSET(dbenv, DB_ENV_NOPANIC)) {
+		ret = __db_panic_msg(dbenv);
+		goto err;
+	}
+	if (renv->magic != DB_REGION_MAGIC)
+		goto retry;
+
+	/* Make sure the region matches our build. */
+	if (renv->majver != DB_VERSION_MAJOR ||
+	    renv->minver != DB_VERSION_MINOR ||
+	    renv->patch != DB_VERSION_PATCH) {
+		__db_err(dbenv,
+	"Program version %d.%d.%d doesn't match environment version %d.%d.%d",
+		    DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+		    renv->majver, renv->minver, renv->patch);
+#ifndef DIAGNOSTIC
+		ret = EINVAL;
+		goto err;
+#endif
+	}
+
+	/* Lock the environment. */
+	MUTEX_LOCK(dbenv, &renv->mutex);
+
+	/*
+	 * Finally! We own the environment now. Repeat the panic check, it's
+	 * possible that it was set while we waited for the lock.
+	 */
+	if (renv->envpanic && !F_ISSET(dbenv, DB_ENV_NOPANIC)) {
+		ret = __db_panic_msg(dbenv);
+		goto err_unlock;
+	}
+
+	/*
+	 * Get a reference to the underlying REGION information for this
+	 * environment.
+	 */
+	if ((ret = __db_des_get(dbenv, infop, infop, &rp)) != 0 || rp == NULL) {
+		MUTEX_UNLOCK(dbenv, &renv->mutex);
+		goto find_err;
+	}
+	infop->rp = rp;
+
+	/*
+	 * There's still a possibility for inconsistent data. When we acquired
+	 * the size of the region and attached to it, it might have still been
+	 * growing as part of its creation. We can detect this by checking the
+	 * size we originally found against the region's current size. (The
+	 * region's current size has to be final, the creator finished growing
+	 * it before releasing the environment for us to lock.)
+	 */
+	if (rp->size != size) {
+err_unlock:	MUTEX_UNLOCK(dbenv, &renv->mutex);
+		goto retry;
+	}
+
+	/* Increment the reference count. */
+	++renv->refcnt;
+
+	/*
+	 * If our caller wants them, return the flags this environment was
+	 * initialized with.
+	 */
+	if (init_flagsp != NULL)
+		*init_flagsp = renv->init_flags;
+
+	/* Discard our lock. */
+	MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+	/*
+	 * Fault the pages into memory. Note, do this AFTER releasing the
+	 * lock, because we're only reading the pages, not writing them.
+	 */
+	(void)__db_faultmem(dbenv, infop->primary, rp->size, 0);
+
+	/* Everything looks good, we're done. */
+	dbenv->reginfo = infop;
+	return (0);
+
+creation:
+	/* Create the environment region. */
+	F_SET(infop, REGION_CREATE);
+
+	/*
+	 * Allocate room for 50 REGION structures plus overhead (we're going
+	 * to use this space for last-ditch allocation requests), although we
+	 * should never need anything close to that.
+	 *
+	 * Encryption passwds are stored in the env region. Add that in too.
+	 */
+	memset(&tregion, 0, sizeof(tregion));
+	tregion.size = (roff_t)(50 * sizeof(REGION) +
+	    dbenv->passwd_len + 2048);
+	tregion.segid = INVALID_REGION_SEGID;
+	if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0)
+		goto err;
+
+	/*
+	 * Fault the pages into memory. Note, do this BEFORE we initialize
+	 * anything, because we're writing the pages, not just reading them.
+	 */
+	(void)__db_faultmem(dbenv, infop->addr, tregion.size, 1);
+
+	/*
+	 * The first object in the region is the REGENV structure. This is
+	 * different from the other regions, and, from everything else in
+	 * this region, where all objects are allocated from the pool, i.e.,
+	 * there aren't any fixed locations. The remaining space is made
+	 * available for later allocation.
+	 *
+	 * The allocation space must be size_t aligned, because that's what
+	 * the initialization routine is going to store there. To make sure
+	 * that happens, the REGENV structure was padded with a final size_t.
+	 * No other region needs to worry about it because all of them treat
+	 * the entire region as allocation space.
+	 *
+	 * Set the primary reference and correct the "addr" value to reference
+	 * the shalloc region. Note, this requires that we "uncorrect" it at
+	 * region detach, and that all of our offsets (R_ADDR/R_OFFSET) will be
+	 * shifted as well, but that should be fine.
+	 */
+	infop->primary = R_ADDR(infop, 0);
+	infop->addr = (u_int8_t *)infop->addr + sizeof(REGENV);
+	__db_shalloc_init(infop->addr, tregion.size - sizeof(REGENV));
+
+	/*
+	 * Initialize the rest of the REGENV structure, except for the magic
+	 * number which validates the file/environment.
+	 */
+	renv = infop->primary;
+	renv->envpanic = 0;
+	db_version(&renv->majver, &renv->minver, &renv->patch);
+	SH_LIST_INIT(&renv->regionq);
+	renv->refcnt = 1;
+	renv->cipher_off = INVALID_ROFF;
+	renv->rep_off = INVALID_ROFF;
+
+	/*
+	 * Initialize init_flags to store the flags that any other environment
+	 * handle that uses DB_JOINENV to join this environment will need.
+	 */
+	renv->init_flags = (init_flagsp == NULL) ? 0 : *init_flagsp;
+
+	/*
+	 * Lock the environment.
+	 *
+	 * Check the lock call return. This is the first lock we initialize
+	 * and acquire, and we have to know if it fails. (It CAN fail, e.g.,
+	 * SunOS, when using fcntl(2) for locking and using an in-memory
+	 * filesystem as the database home. But you knew that, I'm sure -- it
+	 * probably wasn't even worth mentioning.)
+	 */
+	if ((ret = __db_mutex_setup(dbenv, infop, &renv->mutex,
+	    MUTEX_NO_RECORD | MUTEX_NO_RLOCK)) != 0) {
+		__db_err(dbenv, "%s: unable to initialize environment lock: %s",
+		    infop->name, db_strerror(ret));
+		goto err;
+	}
+
+	if (!F_ISSET(&renv->mutex, MUTEX_IGNORE) &&
+	    (ret = __db_mutex_lock(dbenv, &renv->mutex)) != 0) {
+		__db_err(dbenv, "%s: unable to acquire environment lock: %s",
+		    infop->name, db_strerror(ret));
+		goto err;
+	}
+
+	/*
+	 * Get the underlying REGION structure for this environment. Note,
+	 * we created the underlying OS region before we acquired the REGION
+	 * structure, which is backwards from the normal procedure. Update
+	 * the REGION structure.
+	 */
+	if ((ret = __db_des_get(dbenv, infop, infop, &rp)) != 0) {
+find_err:	__db_err(dbenv,
+		    "%s: unable to find environment", infop->name);
+		if (ret == 0)
+			ret = EINVAL;
+		goto err;
+	}
+	infop->rp = rp;
+	rp->size = tregion.size;
+	rp->segid = tregion.segid;
+
+	/*
+	 * !!!
+	 * If we create an environment where regions are public and in system
+	 * memory, we have to inform processes joining the environment how to
+	 * attach to the shared memory segment. So, we write the shared memory
+	 * identifier into the file, to be read by those other processes.
+	 *
+	 * XXX
+	 * This is really OS-layer information, but I can't see any easy way
+	 * to move it down there without passing down information that it has
+	 * no right to know, e.g., that this is the one-and-only REGENV region
+	 * and not some other random region.
+	 */
+	if (tregion.segid != INVALID_REGION_SEGID) {
+		ref.size = tregion.size;
+		ref.segid = tregion.segid;
+		if ((ret = __os_write(
+		    dbenv, dbenv->lockfhp, &ref, sizeof(ref), &nrw)) != 0) {
+			__db_err(dbenv,
+			    "%s: unable to write out public environment ID: %s",
+			    infop->name, db_strerror(ret));
+			goto err;
+		}
+	}
+
+	/*
+	 * If not doing thread locking, we need to save the file handle for
+	 * fcntl(2) locking. Otherwise, discard the handle, we no longer
+	 * need it, and the less contact between the buffer cache and the VM,
+	 * the better.
+	 */
+#if defined(HAVE_MUTEX_THREADS)
+	if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
+		__os_closehandle(dbenv, dbenv->lockfhp);
+#endif
+
+	/* Validate the file. */
+	renv->magic = DB_REGION_MAGIC;
+
+	/* Discard our lock. */
+	MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+	/* Everything looks good, we're done. */
+	dbenv->reginfo = infop;
+	return (0);
+
+err:
+retry:	/* Close any open file handle. */
+	if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
+		(void)__os_closehandle(dbenv, dbenv->lockfhp);
+
+	/*
+	 * If we joined or created the region, detach from it. If we created
+	 * it, destroy it. Note, there's a path in the above code where we're
+	 * using a temporary REGION structure because we haven't yet allocated
+	 * the real one. In that case the region address (addr) will be filled
+	 * in, but the REGION pointer (rp) won't. Fix it.
+	 */
+	if (infop->addr != NULL) {
+		if (infop->rp == NULL)
+			infop->rp = &tregion;
+
+		/* Reset the addr value that we "corrected" above. */
+		infop->addr = infop->primary;
+		(void)__os_r_detach(dbenv,
+		    infop, F_ISSET(infop, REGION_CREATE));
+	}
+
+	/* Free the allocated name and/or REGINFO structure. */
+	if (infop->name != NULL)
+		__os_free(dbenv, infop->name);
+	__os_free(dbenv, infop);
+
+	/* If we had a temporary error, wait awhile and try again. */
+	if (ret == 0) {
+		if (++retry_cnt > 3) {
+			__db_err(dbenv, "unable to join the environment");
+			ret = EAGAIN;
+		} else {
+			__os_sleep(dbenv, retry_cnt * 3, 0);
+			goto loop;
+		}
+	}
+
+	return (ret);
+}
+
+/*
+ * __db_e_detach --
+ *	Detach from the environment.
+ *
+ *	Drops this handle's reference on the environment region; if destroy
+ *	is non-zero (forced for private environments), also destroys the
+ *	region's system resources.  Always clears dbenv->reginfo.
+ *
+ * PUBLIC: int __db_e_detach __P((DB_ENV *, int));
+ */
+int
+__db_e_detach(dbenv, destroy)
+	DB_ENV *dbenv;
+	int destroy;
+{
+	REGENV *renv;
+	REGINFO *infop;
+
+	infop = dbenv->reginfo;
+	renv = infop->primary;
+
+	/* Private regions always go away with their only user. */
+	if (F_ISSET(dbenv, DB_ENV_PRIVATE))
+		destroy = 1;
+	/* Lock the environment. */
+	MUTEX_LOCK(dbenv, &renv->mutex);
+
+	/* Decrement the reference count. */
+	if (renv->refcnt == 0) {
+		/* Cast for %lu: id is not an unsigned long on all builds. */
+		__db_err(dbenv,
+		    "region %lu (environment): reference count went negative",
+		    (u_long)infop->rp->id);
+	} else
+		--renv->refcnt;
+
+	/* Release the lock. */
+	MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+	/* Close the locking file handle. */
+	if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
+		(void)__os_closehandle(dbenv, dbenv->lockfhp);
+
+	/* Reset the addr value that we "corrected" above. */
+	infop->addr = infop->primary;
+
+	/*
+	 * If we are destroying the environment, we need to
+	 * destroy any system resources backing the mutex, as well
+	 * as any system resources that the replication system may have
+	 * acquired and put in the main region.
+	 *
+	 * Do these now before we free the memory in __os_r_detach.
+	 */
+	if (destroy) {
+		__rep_region_destroy(dbenv);
+		__db_mutex_destroy(&renv->mutex);
+		__db_mutex_destroy(&infop->rp->mutex);
+	}
+
+	/*
+	 * Release the region, and kill our reference.
+	 *
+	 * We set the DB_ENV->reginfo field to NULL here and discard its memory.
+	 * DB_ENV->remove calls __dbenv_remove to do the region remove, and
+	 * __dbenv_remove attached and then detaches from the region. We don't
+	 * want to return to DB_ENV->remove with a non-NULL DB_ENV->reginfo
+	 * field because it will attempt to detach again as part of its cleanup.
+	 */
+	(void)__os_r_detach(dbenv, infop, destroy);
+
+	if (infop->name != NULL)
+		__os_free(dbenv, infop->name);
+	__os_free(dbenv, dbenv->reginfo);
+	dbenv->reginfo = NULL;
+
+	return (0);
+}
+
+/*
+ * __db_e_remove --
+ *	Discard an environment if it's not in use.
+ *
+ *	Returns 0 if the environment was removed (or did not exist), EBUSY
+ *	if it is in use and DB_FORCE was not specified.
+ *
+ * PUBLIC: int __db_e_remove __P((DB_ENV *, u_int32_t));
+ */
+int
+__db_e_remove(dbenv, flags)
+	DB_ENV *dbenv;
+	u_int32_t flags;
+{
+	REGENV *renv;
+	REGINFO *infop, reginfo;
+	REGION *rp;
+	u_int32_t db_env_reset;
+	int force, ret;
+
+	force = LF_ISSET(DB_FORCE) ? 1 : 0;
+	/*
+	 * This routine has to walk a nasty line between not looking into
+	 * the environment (which may be corrupted after an app or system
+	 * crash), and removing everything that needs removing. What we
+	 * do is:
+	 *	1. Connect to the environment (so it better be OK).
+	 *	2. If the environment is in use (reference count is non-zero),
+	 *	   return EBUSY.
+	 *	3. Overwrite the magic number so that any threads of control
+	 *	   attempting to connect will backoff and retry.
+	 *	4. Walk the list of regions. Connect to each region and then
+	 *	   disconnect with the destroy flag set. This shouldn't cause
+	 *	   any problems, even if the region is corrupted, because we
+	 *	   should never be looking inside the region.
+	 *	5. Walk the list of files in the directory, unlinking any
+	 *	   files that match a region name. Unlink the environment
+	 *	   file last.
+	 *
+	 * If the force flag is set, we do not acquire any locks during this
+	 * process.
+	 */
+	/* Save the flag state so it can be restored at "done". */
+	db_env_reset = F_ISSET(dbenv, DB_ENV_NOLOCKING | DB_ENV_NOPANIC);
+	if (force)
+		F_SET(dbenv, DB_ENV_NOLOCKING);
+	F_SET(dbenv, DB_ENV_NOPANIC);
+
+	/* Join the environment. */
+	if ((ret = __db_e_attach(dbenv, NULL)) != 0) {
+		/*
+		 * If we can't join it, we assume that's because it doesn't
+		 * exist. It would be better to know why we failed, but it
+		 * probably isn't important.
+		 */
+		ret = 0;
+		if (force)
+			goto remfiles;
+		goto done;
+	}
+
+	infop = dbenv->reginfo;
+	renv = infop->primary;
+
+	/* Lock the environment. */
+	MUTEX_LOCK(dbenv, &renv->mutex);
+
+	/*
+	 * If it's in use, we're done unless we're forcing the issue or the
+	 * environment has panic'd. (Presumably, if the environment panic'd,
+	 * the thread holding the reference count may not have cleaned up.)
+	 */
+	if (renv->refcnt == 1 || renv->envpanic == 1 || force) {
+		/*
+		 * Set the panic flag and overwrite the magic number.
+		 *
+		 * !!!
+		 * From this point on, there's no going back, we pretty
+		 * much ignore errors, and just whack on whatever we can.
+		 */
+		renv->envpanic = 1;
+		renv->magic = 0;
+
+		/*
+		 * Unlock the environment. We should no longer need the lock
+		 * because we've poisoned the pool, but we can't continue to
+		 * hold it either, because other routines may want it.
+		 */
+		MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+		/*
+		 * Attach to each sub-region and destroy it.
+		 *
+		 * !!!
+		 * The REGION_CREATE_OK flag is set for Windows/95 -- regions
+		 * are zero'd out when the last reference to the region goes
+		 * away, in which case the underlying OS region code requires
+		 * callers be prepared to create the region in order to join it.
+		 */
+		memset(&reginfo, 0, sizeof(reginfo));
+restart:	for (rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+		    rp != NULL; rp = SH_LIST_NEXT(rp, q, __db_region)) {
+			if (rp->type == REGION_TYPE_ENV)
+				continue;
+
+			reginfo.id = rp->id;
+			reginfo.flags = REGION_CREATE_OK;
+			/*
+			 * The original message had two %s conversions but
+			 * only one argument; pass the region id to fill the
+			 * first conversion.
+			 */
+			if ((ret = __db_r_attach(dbenv, &reginfo, 0)) != 0) {
+				__db_err(dbenv, "region %lu attach: %s",
+				    (u_long)rp->id, db_strerror(ret));
+				continue;
+			}
+			R_UNLOCK(dbenv, &reginfo);
+			if ((ret = __db_r_detach(dbenv, &reginfo, 1)) != 0) {
+				__db_err(dbenv,
+				    "region detach: %s", db_strerror(ret));
+				continue;
+			}
+			/*
+			 * If we have an error, we continue so we eventually
+			 * reach the end of the list. If we succeed, restart
+			 * the list because it was relinked when we destroyed
+			 * the entry.
+			 */
+			goto restart;
+		}
+
+		/* Destroy the environment's region. */
+		(void)__db_e_detach(dbenv, 1);
+
+		/* Discard any remaining physical files. */
+remfiles:	(void)__db_e_remfile(dbenv);
+	} else {
+		/* Unlock the environment. */
+		MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+		/* Discard the environment. */
+		(void)__db_e_detach(dbenv, 0);
+
+		ret = EBUSY;
+	}
+
+done:	F_CLR(dbenv, DB_ENV_NOLOCKING | DB_ENV_NOPANIC);
+	F_SET(dbenv, db_env_reset);
+
+	return (ret);
+}
+
+/*
+ * __db_e_remfile --
+ *	Discard any region files in the filesystem.
+ *
+ *	Best-effort: unlink failures are ignored; only errors building the
+ *	environment path or listing the directory are returned.  The main
+ *	environment file is removed last because it anchors the environment.
+ */
+static int
+__db_e_remfile(dbenv)
+	DB_ENV *dbenv;
+{
+	static char *old_region_names[] = {
+		"__db_lock.share",
+		"__db_log.share",
+		"__db_mpool.share",
+		"__db_txn.share",
+		NULL
+	};
+	int cnt, fcnt, lastrm, ret;
+	u_int8_t saved_byte;
+	const char *dir;
+	char *p, **names, *path, buf[sizeof(DB_REGION_FMT) + 20];
+
+	/* Get the full path of a file in the environment. */
+	(void)snprintf(buf, sizeof(buf), "%s", DB_REGION_ENV);
+	if ((ret = __db_appname(dbenv, DB_APP_NONE, buf, 0, NULL, &path)) != 0)
+		return (ret);
+
+	/*
+	 * Get the parent directory for the environment.  If there is no
+	 * path separator, list the current directory; saving/restoring
+	 * *p in that branch is a harmless no-op (nothing is overwritten).
+	 */
+	if ((p = __db_rpath(path)) == NULL) {
+		p = path;
+		saved_byte = *p;
+
+		dir = PATH_DOT;
+	} else {
+		/* Truncate at the separator to isolate the directory. */
+		saved_byte = *p;
+		*p = '\0';
+
+		dir = path;
+	}
+
+	/* Get the list of file names. */
+	if ((ret = __os_dirlist(dbenv, dir, &names, &fcnt)) != 0)
+		__db_err(dbenv, "%s: %s", dir, db_strerror(ret));
+
+	/* Restore the path, and free it. */
+	*p = saved_byte;
+	__os_free(dbenv, path);
+
+	if (ret != 0)
+		return (ret);
+
+	/*
+	 * Search for valid region names, and remove them. We remove the
+	 * environment region last, because it's the key to this whole mess.
+	 */
+	for (lastrm = -1, cnt = fcnt; --cnt >= 0;) {
+		if (strlen(names[cnt]) != DB_REGION_NAME_LENGTH ||
+		    memcmp(names[cnt], DB_REGION_FMT, DB_REGION_NAME_NUM) != 0)
+			continue;
+		if (strcmp(names[cnt], DB_REGION_ENV) == 0) {
+			/* Remember the environment file; remove it last. */
+			lastrm = cnt;
+			continue;
+		}
+		/* Require the suffix after the prefix to be all digits. */
+		for (p = names[cnt] + DB_REGION_NAME_NUM;
+		    *p != '\0' && isdigit((int)*p); ++p)
+			;
+		if (*p != '\0')
+			continue;
+
+		if (__db_appname(dbenv,
+		    DB_APP_NONE, names[cnt], 0, NULL, &path) == 0) {
+			if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+				(void)__db_overwrite(dbenv, path);
+			(void)__os_unlink(dbenv, path);
+			__os_free(dbenv, path);
+		}
+	}
+
+	/* Now remove the environment file itself, if we found one. */
+	if (lastrm != -1)
+		if (__db_appname(dbenv,
+		    DB_APP_NONE, names[lastrm], 0, NULL, &path) == 0) {
+			if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+				(void)__db_overwrite(dbenv, path);
+			(void)__os_unlink(dbenv, path);
+			__os_free(dbenv, path);
+		}
+	__os_dirfree(dbenv, names, fcnt);
+
+	/*
+	 * !!!
+	 * Backward compatibility -- remove region files from releases
+	 * before 2.8.XX.
+	 */
+	for (names = (char **)old_region_names; *names != NULL; ++names)
+		if (__db_appname(dbenv,
+		    DB_APP_NONE, *names, 0, NULL, &path) == 0) {
+			(void)__os_unlink(dbenv, path);
+			__os_free(dbenv, path);
+		}
+
+	return (0);
+}
+
+/*
+ * __db_e_stat
+ * Statistics for the environment.
+ *
+ * PUBLIC: int __db_e_stat __P((DB_ENV *,
+ * PUBLIC: REGENV *, REGION *, int *, u_int32_t));
+ */
+int
+__db_e_stat(dbenv, arg_renv, arg_regions, arg_regions_cnt, flags)
+ DB_ENV *dbenv;
+ REGENV *arg_renv;
+ REGION *arg_regions;
+ int *arg_regions_cnt;
+ u_int32_t flags;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ REGION *rp;
+ int n, ret;
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ rp = infop->rp;
+ /* DB_STAT_CLEAR is the only flag accepted by DB_ENV->stat. */
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &rp->mutex);
+
+ /* Copy out the REGENV; optionally reset its mutex counters. */
+ *arg_renv = *renv;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ renv->mutex.mutex_set_nowait = 0;
+ renv->mutex.mutex_set_wait = 0;
+ }
+
+ /*
+ * Copy out at most *arg_regions_cnt REGION entries from the
+ * environment's region list (rp is reused as the list cursor).
+ */
+ for (n = 0, rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+ n < *arg_regions_cnt && rp != NULL;
+ ++n, rp = SH_LIST_NEXT(rp, q, __db_region)) {
+ arg_regions[n] = *rp;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ rp->mutex.mutex_set_nowait = 0;
+ rp->mutex.mutex_set_wait = 0;
+ }
+ }
+
+ /* Release the lock. */
+ rp = infop->rp;
+ MUTEX_UNLOCK(dbenv, &rp->mutex);
+
+ /*
+ * NOTE(review): returning n - 1 looks intended to exclude the
+ * environment region itself from the count -- confirm against
+ * the callers before relying on it.
+ */
+ *arg_regions_cnt = n == 0 ? n : n - 1;
+
+ return (0);
+}
+
+/*
+ * __db_r_attach
+ * Join/create a region.
+ *
+ * PUBLIC: int __db_r_attach __P((DB_ENV *, REGINFO *, size_t));
+ */
+int
+__db_r_attach(dbenv, infop, size)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ size_t size;
+{
+ REGENV *renv;
+ REGION *rp;
+ int ret;
+ char buf[sizeof(DB_REGION_FMT) + 20];
+
+ renv = ((REGINFO *)dbenv->reginfo)->primary;
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex);
+
+ /*
+ * Find or create a REGION structure for this region. If we create
+ * it, the REGION_CREATE flag will be set in the infop structure.
+ */
+ F_CLR(infop, REGION_CREATE);
+ if ((ret = __db_des_get(dbenv, dbenv->reginfo, infop, &rp)) != 0) {
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+ }
+ /* Cache the shared REGION's identity in the caller's REGINFO. */
+ infop->rp = rp;
+ infop->type = rp->type;
+ infop->id = rp->id;
+
+ /* If we're creating the region, set the desired size. */
+ if (F_ISSET(infop, REGION_CREATE))
+ rp->size = (roff_t)size;
+
+ /* Join/create the underlying region. */
+ (void)snprintf(buf, sizeof(buf), DB_REGION_FMT, infop->id);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_NONE, buf, 0, NULL, &infop->name)) != 0)
+ goto err;
+ if ((ret = __os_r_attach(dbenv, infop, rp)) != 0)
+ goto err;
+
+ /*
+ * Fault the pages into memory. Note, do this BEFORE we initialize
+ * anything because we're writing pages in created regions, not just
+ * reading them.
+ */
+ (void)__db_faultmem(dbenv,
+ infop->addr, rp->size, F_ISSET(infop, REGION_CREATE));
+
+ /*
+ * !!!
+ * The underlying layer may have just decided that we are going
+ * to create the region. There are various system issues that
+ * can result in a useless region that requires re-initialization.
+ *
+ * If we created the region, initialize it for allocation.
+ */
+ if (F_ISSET(infop, REGION_CREATE)) {
+ ((REGION *)(infop->addr))->magic = DB_REGION_MAGIC;
+
+ (void)__db_shalloc_init(infop->addr, rp->size);
+ }
+
+ /*
+ * If the underlying REGION isn't the environment, acquire a lock
+ * for it and release our lock on the environment.
+ */
+ if (infop->type != REGION_TYPE_ENV) {
+ MUTEX_LOCK(dbenv, &rp->mutex);
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ }
+
+ return (0);
+
+ /* Discard the underlying region. */
+err: if (infop->addr != NULL)
+ (void)__os_r_detach(dbenv,
+ infop, F_ISSET(infop, REGION_CREATE));
+ /* Invalidate the caller's REGINFO so it can't be reused by mistake. */
+ infop->rp = NULL;
+ infop->id = INVALID_REGION_ID;
+
+ /* Discard the REGION structure if we created it. */
+ if (F_ISSET(infop, REGION_CREATE)) {
+ (void)__db_des_destroy(dbenv, rp);
+ F_CLR(infop, REGION_CREATE);
+ }
+
+ /* Release the environment lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ return (ret);
+}
+
+/*
+ * __db_r_detach --
+ * Detach from a region.
+ *
+ * PUBLIC: int __db_r_detach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__db_r_detach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ REGENV *renv;
+ REGION *rp;
+ int ret, t_ret;
+
+ renv = ((REGINFO *)dbenv->reginfo)->primary;
+ rp = infop->rp;
+ /* Private environments always destroy the region on detach. */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE))
+ destroy = 1;
+
+ /* Lock the environment. */
+ MUTEX_LOCK(dbenv, &renv->mutex);
+
+ /* Acquire the lock for the REGION. */
+ MUTEX_LOCK(dbenv, &rp->mutex);
+
+ /*
+ * We need to call destroy on per-subsystem info before
+ * we free the memory associated with the region.
+ */
+ if (destroy)
+ __db_region_destroy(dbenv, infop);
+
+ /* Detach from the underlying OS region. */
+ ret = __os_r_detach(dbenv, infop, destroy);
+
+ /* Release the REGION lock. */
+ MUTEX_UNLOCK(dbenv, &rp->mutex);
+
+ /* If we destroyed the region, discard the REGION structure. */
+ /* First error wins: __db_des_destroy only overrides a zero ret. */
+ if (destroy &&
+ ((t_ret = __db_des_destroy(dbenv, rp)) != 0) && ret == 0)
+ ret = t_ret;
+
+ /* Release the environment lock. */
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ /* Destroy the structure. */
+ if (infop->name != NULL)
+ __os_free(dbenv, infop->name);
+
+ return (ret);
+}
+
+/*
+ * __db_des_get --
+ * Return a reference to the shared information for a REGION,
+ * optionally creating a new entry.
+ */
+static int
+__db_des_get(dbenv, env_infop, infop, rpp)
+ DB_ENV *dbenv;
+ REGINFO *env_infop, *infop;
+ REGION **rpp;
+{
+ REGENV *renv;
+ REGION *rp, *first_type;
+ u_int32_t maxid;
+ int ret;
+
+ /*
+ * !!!
+ * Called with the environment already locked.
+ */
+ *rpp = NULL;
+ renv = env_infop->primary;
+
+ /*
+ * If the caller wants to join a region, walk through the existing
+ * regions looking for a matching ID (if ID specified) or matching
+ * type (if type specified). If we return based on a matching type
+ * return the "primary" region, that is, the first region that was
+ * created of this type.
+ *
+ * Track the maximum region ID so we can allocate a new region,
+ * note that we have to start at 1 because the primary environment
+ * uses ID == 1.
+ */
+ maxid = REGION_ID_ENV;
+ for (first_type = NULL,
+ rp = SH_LIST_FIRST(&renv->regionq, __db_region);
+ rp != NULL; rp = SH_LIST_NEXT(rp, q, __db_region)) {
+ if (infop->id != INVALID_REGION_ID) {
+ if (infop->id == rp->id)
+ break;
+ continue;
+ }
+ /* Lowest-numbered region of the type is the "primary" one. */
+ if (infop->type == rp->type &&
+ F_ISSET(infop, REGION_JOIN_OK) &&
+ (first_type == NULL || first_type->id > rp->id))
+ first_type = rp;
+
+ if (rp->id > maxid)
+ maxid = rp->id;
+ }
+ if (rp == NULL)
+ rp = first_type;
+
+ /*
+ * If we didn't find a region and we can't create the region, fail.
+ * The caller generates any error message.
+ */
+ if (rp == NULL && !F_ISSET(infop, REGION_CREATE_OK))
+ return (ENOENT);
+
+ /*
+ * If we didn't find a region, create and initialize a REGION structure
+ * for the caller. If id was set, use that value, otherwise we use the
+ * next available ID.
+ */
+ if (rp == NULL) {
+ /* Allocate from the environment region, mutex-aligned. */
+ if ((ret = __db_shalloc(env_infop->addr,
+ sizeof(REGION), MUTEX_ALIGN, &rp)) != 0)
+ return (ret);
+
+ /* Initialize the region. */
+ memset(rp, 0, sizeof(*rp));
+ if ((ret = __db_mutex_setup(dbenv, env_infop, &rp->mutex,
+ MUTEX_NO_RECORD | MUTEX_NO_RLOCK)) != 0) {
+ __db_shalloc_free(env_infop->addr, rp);
+ return (ret);
+ }
+ rp->segid = INVALID_REGION_SEGID;
+
+ /*
+ * Set the type and ID; if no region ID was specified,
+ * allocate one.
+ */
+ rp->type = infop->type;
+ rp->id = infop->id == INVALID_REGION_ID ? maxid + 1 : infop->id;
+
+ SH_LIST_INSERT_HEAD(&renv->regionq, rp, q, __db_region);
+ /* Tell the caller the REGION was newly created. */
+ F_SET(infop, REGION_CREATE);
+ }
+
+ *rpp = rp;
+ return (0);
+}
+
+/*
+ * __db_des_destroy --
+ * Destroy a reference to a REGION.
+ */
+static int
+__db_des_destroy(dbenv, rp)
+ DB_ENV *dbenv;
+ REGION *rp;
+{
+ REGINFO *infop;
+
+ /*
+ * !!!
+ * Called with the environment already locked.
+ */
+ infop = dbenv->reginfo;
+
+ /* Unlink, tear down the mutex, return the shared memory. */
+ SH_LIST_REMOVE(rp, q, __db_region);
+ __db_mutex_destroy(&rp->mutex);
+ __db_shalloc_free(infop->addr, rp);
+
+ return (0);
+}
+
+/*
+ * __db_faultmem --
+ * Fault the region into memory.
+ */
+static int
+__db_faultmem(dbenv, addr, size, created)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t size;
+ int created;
+{
+ int ret;
+ u_int8_t *p, *t;
+
+ /*
+ * It's sometimes significantly faster to page-fault in all of the
+ * region's pages before we run the application, as we see nasty
+ * side-effects when we page-fault while holding various locks, i.e.,
+ * the lock takes a long time to acquire because of the underlying
+ * page fault, and the other threads convoy behind the lock holder.
+ *
+ * If we created the region, we write a non-zero value so that the
+ * system can't cheat. If we're just joining the region, we can
+ * only read the value and try to confuse the compiler sufficiently
+ * that it doesn't figure out that we're never really using it.
+ */
+ ret = 0;
+ if (F_ISSET(dbenv, DB_ENV_REGION_INIT)) {
+ /* Touch one byte per VM page to force it resident. */
+ if (created)
+ for (p = addr, t = (u_int8_t *)addr + size;
+ p < t; p += OS_VMPAGESIZE)
+ p[0] = 0xdb;
+ else
+ /* OR the bytes into ret so the reads can't be elided. */
+ for (p = addr, t = (u_int8_t *)addr + size;
+ p < t; p += OS_VMPAGESIZE)
+ ret |= p[0];
+ }
+
+ /* The return value is noise; the visible caller discards it. */
+ return (ret);
+}
+
+/*
+ * __db_region_destroy --
+ * Destroy per-subsystem region information.
+ * Called with the region already locked.
+ */
+static void
+__db_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ /* Dispatch on region type to the owning subsystem's teardown. */
+ switch (infop->type) {
+ case REGION_TYPE_LOCK:
+ __lock_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_LOG:
+ __log_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_MPOOL:
+ __mpool_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_TXN:
+ __txn_region_destroy(dbenv, infop);
+ break;
+ case REGION_TYPE_ENV:
+ case REGION_TYPE_MUTEX:
+ /* No per-subsystem state to tear down for these types. */
+ break;
+ default:
+ /* Unknown region type indicates a caller bug. */
+ DB_ASSERT(0);
+ break;
+ }
+}
diff --git a/storage/bdb/fileops/fileops.src b/storage/bdb/fileops/fileops.src
new file mode 100644
index 00000000000..1fd39dc3c45
--- /dev/null
+++ b/storage/bdb/fileops/fileops.src
@@ -0,0 +1,111 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: fileops.src,v 1.8 2002/04/06 18:25:55 bostic Exp $
+ */
+
+PREFIX __fop
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE #include "dbinc/fop.h"
+INCLUDE
+
+/*
+ * create -- create a file system object.
+ *
+ * name: name in the file system
+ * appname: indicates if the name needs to go through __db_appname
+ * mode: file system mode
+ */
+BEGIN create 143
+DBT name DBT s
+ARG appname u_int32_t lu
+ARG mode u_int32_t o
+END
+
+/*
+ * remove -- remove a file system object.
+ *
+ * name: name in the file system
+ * appname: indicates if the name needs to go through __db_appname
+ */
+BEGIN remove 144
+DBT name DBT s
+DBT fid DBT s
+ARG appname u_int32_t lu
+END
+
+/*
+ * write: log the writing of data into an object.
+ *
+ * name: file containing the page.
+ * appname: indicates if the name needs to go through __db_appname
+ * offset: offset in the file.
+ * page: the actual meta-data page.
+ * flag: non-0 indicates that this is a tempfile, so we needn't undo
+ * these modifications (we'll toss the file).
+ */
+BEGIN write 145
+DBT name DBT s
+ARG appname u_int32_t lu
+ARG offset u_int32_t lu
+PGDBT page DBT s
+ARG flag u_int32_t lu
+END
+
+/*
+ * rename: move a file from one name to another.
+ * The appname value indicates if this is a path name that should be used
+ * directly (i.e., no interpretation) or if it is a pathname that should
+ * be interpreted via calls to __db_appname. The fileid is the 20-byte
+ * DB fileid of the file being renamed. We need to check it on recovery
+ * so that we don't inadvertently overwrite good files.
+ */
+BEGIN rename 146
+DBT oldname DBT s
+DBT newname DBT s
+DBT fileid DBT s
+ARG appname u_int32_t lu
+END
+
+/*
+ * File removal record. This is a DB-level log record that indicates
+ * we've just completed some form of file removal. The purpose of this
+ * log record is to logically identify the particular instance of the
+ * named file so that during recovery, in deciding if we should roll-forward
+ * a remove or a rename, we can make sure that we don't roll one forward and
+ * delete or overwrite the wrong file.
+ * real_fid: The 20-byte unique file identifier of the original file being
+ * removed.
+ * tmp_fid: The unique fid of the tmp file that is removed.
+ * name: The pre- __db_appname name of the file
+ * child: The transaction that removed or renamed the file.
+ */
+BEGIN file_remove 141
+DBT real_fid DBT s
+DBT tmp_fid DBT s
+DBT name DBT s
+ARG appname u_int32_t lu
+ARG child u_int32_t lx
+END
diff --git a/storage/bdb/fileops/fop_basic.c b/storage/bdb/fileops/fop_basic.c
new file mode 100644
index 00000000000..08160ab2e1a
--- /dev/null
+++ b/storage/bdb/fileops/fop_basic.c
@@ -0,0 +1,275 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: fop_basic.c,v 1.23 2002/08/11 02:11:23 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_am.h"
+
+/*
+ * This file implements the basic file-level operations. This code
+ * ought to be fairly independent of DB, other than through its
+ * error-reporting mechanism.
+ */
+
+/*
+ * __fop_create --
+ * Create a (transactionally protected) file system object. This is used
+ * to create DB files now, potentially blobs, queue extents and anything
+ * else you wish to store in a file system object.
+ *
+ * PUBLIC: int __fop_create __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB_FH *, const char *, APPNAME, int));
+ */
+int
+__fop_create(dbenv, txn, fhp, name, appname, mode)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+ APPNAME appname;
+ int mode;
+{
+ DB_FH fh;
+ DB_LSN lsn;
+ DBT data;
+ char *real_name;
+ int do_close, ret;
+
+ ret = 0;
+ real_name = NULL;
+
+ /* If the caller didn't supply a handle, use (and close) a local one. */
+ if (fhp != NULL)
+ do_close = 0;
+ else {
+ fhp = &fh;
+ memset(fhp, 0, sizeof(fh));
+ do_close = 1;
+ }
+
+ /* Default mode: read/write for the owner only. */
+ if (mode == 0)
+ mode = __db_omode("rw----");
+
+ if ((ret =
+ __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /* Log the create first; DB_FLUSH forces the record to disk. */
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&data, 0, sizeof(data));
+ data.data = (void *)name;
+ data.size = (u_int32_t)strlen(name) + 1;
+ if ((ret = __fop_create_log(dbenv,
+ txn, &lsn, DB_FLUSH, &data, (u_int32_t)appname, mode)) != 0)
+ goto err;
+ }
+
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_POSTLOG, ret, name);
+
+ /* DB_OSO_EXCL: the create fails if the file already exists. */
+ ret =
+ __os_open(dbenv, real_name, DB_OSO_CREATE | DB_OSO_EXCL, mode, fhp);
+
+err:
+DB_TEST_RECOVERY_LABEL
+ if (do_close && F_ISSET(fhp, DB_FH_VALID))
+ __os_closehandle(dbenv, fhp);
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
+
+/*
+ * __fop_remove --
+ * Remove a file system object.
+ *
+ * PUBLIC: int __fop_remove __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, u_int8_t *, const char *, APPNAME));
+ */
+int
+__fop_remove(dbenv, txn, fileid, name, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ u_int8_t *fileid;
+ const char *name;
+ APPNAME appname;
+{
+ DB_LSN lsn;
+ DBT fdbt, ndbt;
+ char *real_name;
+ int ret;
+
+ real_name = NULL;
+
+ if ((ret =
+ __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if (txn == NULL) {
+ /* No transaction: remove the file immediately. */
+ if (fileid != NULL && (ret = dbenv->memp_nameop(
+ dbenv, fileid, NULL, real_name, NULL)) != 0)
+ goto err;
+ } else {
+ if (DBENV_LOGGING(dbenv)) {
+ /* Bug fix: was memset(&fdbt, 0, sizeof(ndbt)). */
+ memset(&fdbt, 0, sizeof(fdbt));
+ fdbt.data = fileid;
+ fdbt.size = fileid == NULL ? 0 : DB_FILE_ID_LEN;
+ memset(&ndbt, 0, sizeof(ndbt));
+ ndbt.data = (void *)name;
+ ndbt.size = (u_int32_t)strlen(name) + 1;
+ if ((ret = __fop_remove_log(dbenv,
+ txn, &lsn, 0, &ndbt, &fdbt, appname)) != 0)
+ goto err;
+ }
+ /* Transactional: defer the actual remove to commit time. */
+ ret = __txn_remevent(dbenv, txn, real_name, fileid);
+ }
+
+err: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
+
+/*
+ * __fop_write
+ *
+ * Write "size" bytes from "buf" to file "name" beginning at offset "off."
+ * If the file is open, supply a handle in fhp. Istmp indicate if this is
+ * an operation that needs to be undone in the face of failure (i.e., if
+ * this is a write to a temporary file, we're simply going to remove the
+ * file, so don't worry about undoing the write).
+ *
+ * Currently, we *only* use this with istmp true. If we need more general
+ * handling, then we'll have to zero out regions on abort (and possibly
+ * log the before image of the data in the log record).
+ *
+ * PUBLIC: int __fop_write __P((DB_ENV *, DB_TXN *, const char *, APPNAME,
+ * PUBLIC: DB_FH *, u_int32_t, u_int8_t *, u_int32_t, u_int32_t));
+ */
+int
+__fop_write(dbenv, txn, name, appname, fhp, off, buf, size, istmp)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name;
+ APPNAME appname;
+ DB_FH *fhp;
+ u_int32_t off;
+ u_int8_t *buf;
+ u_int32_t size, istmp;
+{
+ DB_FH fh;
+ DB_LSN lsn;
+ DBT data, namedbt;
+ char *real_name;
+ int ret, t_ret, we_opened;
+ size_t nbytes;
+
+ ret = 0;
+ we_opened = 0;
+ real_name = NULL;
+
+ if ((ret =
+ __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /* Log the write (name, offset, data image) before performing it. */
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&data, 0, sizeof(data));
+ data.data = buf;
+ data.size = size;
+ memset(&namedbt, 0, sizeof(namedbt));
+ namedbt.data = (void *)name;
+ namedbt.size = (u_int32_t)strlen(name) + 1;
+ if ((ret = __fop_write_log(dbenv,
+ txn, &lsn, 0, &namedbt, appname, off, &data, istmp)) != 0)
+ goto err;
+ }
+
+ if (fhp == NULL) {
+ /* File isn't open; we need to reopen it. */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0)
+ goto err;
+ fhp = &fh;
+ we_opened = 1;
+ } else
+ /* Redundant (we_opened is already 0) but harmless. */
+ we_opened = 0;
+
+ /* Seek to offset. */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, off, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ /* Now do the write. */
+ /*
+ * NOTE(review): nbytes (bytes actually written) is never compared
+ * with size, so a short write would be silently ignored -- confirm
+ * whether __os_write guarantees full writes or an error return.
+ */
+ if ((ret = __os_write(dbenv, fhp, buf, size, &nbytes)) != 0)
+ goto err;
+
+err: if (we_opened)
+ if ((t_ret = __os_closehandle(dbenv, fhp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
+
+/*
+ * __fop_rename --
+ * Change a file's name.
+ *
+ * PUBLIC: int __fop_rename __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, const char *, u_int8_t *, APPNAME));
+ */
+int
+__fop_rename(dbenv, txn, oldname, newname, fid, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *oldname;
+ const char *newname;
+ u_int8_t *fid;
+ APPNAME appname;
+{
+ DB_LSN lsn;
+ DBT fiddbt, new, old;
+ int ret;
+ char *n, *o;
+
+ /*
+ * Bug fix: initialize both resolved paths so the error path can
+ * never free an uninitialized pointer (previously, if the first
+ * __db_appname failed, n was freed while indeterminate).
+ */
+ o = n = NULL;
+ if ((ret = __db_appname(dbenv, appname, oldname, 0, NULL, &o)) != 0)
+ goto err;
+ if ((ret = __db_appname(dbenv, appname, newname, 0, NULL, &n)) != 0)
+ goto err;
+
+ /* Log the rename first; DB_FLUSH forces the record to disk. */
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&old, 0, sizeof(old));
+ memset(&new, 0, sizeof(new));
+ memset(&fiddbt, 0, sizeof(fiddbt));
+ old.data = (void *)oldname;
+ old.size = (u_int32_t)strlen(oldname) + 1;
+ new.data = (void *)newname;
+ new.size = (u_int32_t)strlen(newname) + 1;
+ fiddbt.data = fid;
+ fiddbt.size = DB_FILE_ID_LEN;
+ if ((ret = __fop_rename_log(dbenv, txn, &lsn,
+ DB_FLUSH, &old, &new, &fiddbt, (u_int32_t)appname)) != 0)
+ goto err;
+ }
+
+ ret = dbenv->memp_nameop(dbenv, fid, newname, o, n);
+
+err: if (o != NULL && o != oldname)
+ __os_free(dbenv, o);
+ if (n != NULL && n != newname)
+ __os_free(dbenv, n);
+ return (ret);
+}
diff --git a/storage/bdb/fileops/fop_rec.c b/storage/bdb/fileops/fop_rec.c
new file mode 100644
index 00000000000..67720e01d13
--- /dev/null
+++ b/storage/bdb/fileops/fop_rec.c
@@ -0,0 +1,308 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: fop_rec.c,v 1.18 2002/08/14 20:27:01 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/db_am.h"
+#include "dbinc/txn.h"
+
+/*
+ * __fop_create_recover --
+ * Recovery function for create.
+ *
+ * PUBLIC: int __fop_create_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_create_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB_FH fh;
+ __fop_create_args *argp;
+ char *real_name;
+ int ret;
+
+ real_name = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_create_print);
+ REC_NOOP_INTRO(__fop_create_read);
+
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+
+ /* Undo a create by unlinking; redo it by re-creating the file. */
+ if (DB_UNDO(op))
+ (void)__os_unlink(dbenv, real_name)_;
+ else if (DB_REDO(op))
+ if ((ret = __os_open(dbenv, real_name,
+ DB_OSO_CREATE | DB_OSO_EXCL, argp->mode, &fh)) == 0)
+ __os_closehandle(dbenv, &fh);
+
+ *lsnp = argp->prev_lsn;
+
+out: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_remove_recover --
+ * Recovery function for remove.
+ *
+ * PUBLIC: int __fop_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_remove_args *argp;
+ char *real_name;
+ int ret;
+
+ real_name = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_remove_print);
+ REC_NOOP_INTRO(__fop_remove_read);
+
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+
+ /* A remove is redo-only; there is no undo action taken here. */
+ if (DB_REDO(op) && (ret = dbenv->memp_nameop(dbenv,
+ (u_int8_t *)argp->fid.data, NULL, real_name, NULL)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+out: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_write_recover --
+ * Recovery function for writechunk.
+ *
+ * PUBLIC: int __fop_write_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_write_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_write_args *argp;
+ int ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_write_print);
+ REC_NOOP_INTRO(__fop_write_read);
+
+ /*
+ * Undo asserts flag != 0 (temp-file writes are undone by discarding
+ * the whole file elsewhere); redo simply re-applies the write.
+ */
+ if (DB_UNDO(op))
+ DB_ASSERT(argp->flag != 0);
+ else if (DB_REDO(op))
+ ret = __fop_write(dbenv,
+ argp->txnid, argp->name.data, argp->appname, NULL,
+ argp->offset, argp->page.data, argp->page.size, argp->flag);
+
+ *lsnp = argp->prev_lsn;
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_rename_recover --
+ * Recovery function for rename.
+ *
+ * PUBLIC: int __fop_rename_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_rename_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_rename_args *argp;
+ DBMETA *meta;
+ char *real_new, *real_old, *src;
+ int ret;
+ u_int8_t *fileid, mbuf[DBMETASIZE];
+
+ real_new = NULL;
+ real_old = NULL;
+ ret = 0;
+ meta = (DBMETA *)&mbuf[0];
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_rename_print);
+ REC_NOOP_INTRO(__fop_rename_read);
+ fileid = argp->fileid.data;
+
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->newname.data, 0, NULL, &real_new)) != 0)
+ goto out;
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->oldname.data, 0, NULL, &real_old)) != 0)
+ goto out;
+
+ /*
+ * Verify that we are manipulating the correct file. We should always
+ * be OK on an ABORT or an APPLY, but during recovery, we have to
+ * check.
+ */
+ if (op != DB_TXN_ABORT && op != DB_TXN_APPLY) {
+ src = DB_UNDO(op) ? real_new : real_old;
+ /*
+ * Interpret any error as meaning that the file either doesn't
+ * exist, doesn't have a meta-data page, or is in some other
+ * way, shape or form, incorrect, so that we should not restore
+ * it.
+ */
+ if (__fop_read_meta(
+ dbenv, src, mbuf, DBMETASIZE, NULL, 1, 0) != 0)
+ goto done;
+ if (__db_chk_meta(dbenv, NULL, meta, 1) != 0)
+ goto done;
+ /* The on-disk fileid must match the fileid that was logged. */
+ if (memcmp(argp->fileid.data, meta->uid, DB_FILE_ID_LEN) != 0)
+ goto done;
+ }
+
+ /* Undo: move new back to old; redo: move old to new. */
+ if (DB_UNDO(op))
+ (void)dbenv->memp_nameop(dbenv, fileid,
+ (const char *)argp->oldname.data, real_new, real_old);
+ if (DB_REDO(op))
+ (void)dbenv->memp_nameop(dbenv, fileid,
+ (const char *)argp->newname.data, real_old, real_new);
+
+done: *lsnp = argp->prev_lsn;
+out: if (real_new != NULL)
+ __os_free(dbenv, real_new);
+ if (real_old != NULL)
+ __os_free(dbenv, real_old);
+
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_file_remove_recover --
+ * Recovery function for file_remove. On the REDO pass, we need to
+ * make sure no one recreated the file while we weren't looking. On an
+ * undo pass must check if the file we are interested in is the one that
+ * exists and then set the status of the child transaction depending on
+ * what we find out.
+ *
+ * PUBLIC: int __fop_file_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_file_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_file_remove_args *argp;
+ DBMETA *meta;
+ char *real_name;
+ int is_real, is_tmp, ret;
+ u_int8_t mbuf[DBMETASIZE];
+ u_int32_t cstat;
+
+ real_name = NULL;
+ is_real = is_tmp = 0;
+ meta = (DBMETA *)&mbuf[0];
+ REC_PRINT(__fop_file_remove_print);
+ REC_NOOP_INTRO(__fop_file_remove_read);
+
+ /*
+ * This record is only interesting on the backward, forward, and
+ * apply phases.
+ */
+ if (op != DB_TXN_BACKWARD_ROLL &&
+ op != DB_TXN_FORWARD_ROLL && op != DB_TXN_APPLY)
+ goto done;
+
+ if ((ret = __db_appname(dbenv,
+ (APPNAME)argp->appname, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+
+ /* Verify that we are manipulating the correct file. */
+ if ((ret = __fop_read_meta(dbenv,
+ real_name, mbuf, DBMETASIZE, NULL, 1, 0)) != 0) {
+ /* File does not exist. */
+ cstat = TXN_EXPECTED;
+ } else {
+ /*
+ * We can ignore errors here since we'll simply fail the
+ * checks below and assume this is the wrong file.
+ */
+ (void)__db_chk_meta(dbenv, NULL, meta, 1);
+ /* Compare the on-disk uid with both logged fileids. */
+ is_real =
+ memcmp(argp->real_fid.data, meta->uid, DB_FILE_ID_LEN) == 0;
+ is_tmp =
+ memcmp(argp->tmp_fid.data, meta->uid, DB_FILE_ID_LEN) == 0;
+
+ if (!is_real && !is_tmp)
+ /* File exists, but isn't what we were removing. */
+ cstat = TXN_IGNORE;
+ else
+ /* File exists and is the one that we were removing. */
+ cstat = TXN_COMMIT;
+ }
+
+ if (DB_UNDO(op)) {
+ /* On the backward pass, we leave a note for the child txn. */
+ if ((ret = __db_txnlist_update(dbenv,
+ info, argp->child, cstat, NULL)) == DB_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, argp->child, cstat, NULL);
+ } else if (DB_REDO(op)) {
+ /*
+ * On the forward pass, check if someone recreated the
+ * file while we weren't looking.
+ */
+ if (cstat == TXN_COMMIT)
+ (void)dbenv->memp_nameop(dbenv,
+ is_real ? argp->real_fid.data : argp->tmp_fid.data,
+ NULL, real_name, NULL);
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ REC_NOOP_CLOSE;
+}
diff --git a/storage/bdb/fileops/fop_util.c b/storage/bdb/fileops/fop_util.c
new file mode 100644
index 00000000000..ea6d86ab08d
--- /dev/null
+++ b/storage/bdb/fileops/fop_util.c
@@ -0,0 +1,928 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: fop_util.c,v 1.52 2002/09/10 02:41:42 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/fop.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __fop_set_pgsize __P((DB *, DB_FH *, const char *));
+
+/*
+ * Acquire the environment meta-data lock. The parameters are the
+ * environment (ENV), the locker id to use in acquiring the lock (ID)
+ * and a pointer to a DB_LOCK.
+ *
+ * NB: the expansion assigns to a variable `ret' and does `goto err'
+ * on failure, so the calling function must declare both.
+ */
+#define GET_ENVLOCK(ENV, ID, L) do { \
+ DBT __dbt; \
+ u_int32_t __lockval; \
+ \
+ if (LOCKING_ON((ENV))) { \
+ __lockval = 0; \
+ __dbt.data = &__lockval; \
+ __dbt.size = sizeof(__lockval); \
+ if ((ret = (ENV)->lock_get((ENV), (ID), \
+ 0, &__dbt, DB_LOCK_WRITE, (L))) != 0) \
+ goto err; \
+ } \
+} while (0)
+
+/* Release the environment lock; a no-op if it was never acquired. */
+#define REL_ENVLOCK(ENV, L) \
+ (!LOCK_ISSET(*(L)) ? 0 : (ENV)->lock_put((ENV), (L)))
+
+/*
+ * If our caller is doing fcntl(2) locking, then we can't close it
+ * because that would discard the caller's lock. Otherwise, close
+ * the handle.
+ *
+ * NB: uses LF_ISSET, so it depends on a `flags' variable in the
+ * calling function, as well as on `ret' and an `err' label.
+ */
+#define CLOSE_HANDLE(D, F) { \
+ if (F_ISSET((F), DB_FH_VALID)) { \
+ if (LF_ISSET(DB_FCNTL_LOCKING)) \
+ (D)->saved_open_fhp = (F); \
+ else if ((ret = __os_closehandle((D)->dbenv,(F))) != 0) \
+ goto err; \
+ } \
+}
+
+/*
+ * __fop_lock_handle --
+ *
+ * Get the handle lock for a database. If the envlock is specified,
+ * do this as a lock_vec call that releases the environment lock before
+ * acquiring the handle lock.
+ *
+ * PUBLIC: int __fop_lock_handle __P((DB_ENV *,
+ * PUBLIC: DB *, u_int32_t, db_lockmode_t, DB_LOCK *, u_int32_t));
+ *
+ */
+int
+__fop_lock_handle(dbenv, dbp, locker, mode, elock, flags)
+ DB_ENV *dbenv;
+ DB *dbp;
+ u_int32_t locker;
+ db_lockmode_t mode;
+ DB_LOCK *elock;
+ u_int32_t flags;
+{
+ DBT fileobj;
+ DB_LOCKREQ reqs[2], *ereq;
+ DB_LOCK_ILOCK lock_desc;
+ int ret;
+
+ /* Locking disabled, or a compensating handle: nothing to acquire. */
+ if (!LOCKING_ON(dbenv) || F_ISSET(dbp, DB_AM_COMPENSATE))
+ return (0);
+
+ /*
+ * If we are in recovery, the only locking we should be
+ * doing is on the global environment.
+ */
+ if (IS_RECOVERING(dbenv)) {
+ if (elock != NULL)
+ REL_ENVLOCK(dbenv, elock);
+ return (0);
+ }
+
+ /* Lock object is fileid + meta page number + handle-lock type. */
+ memcpy(&lock_desc.fileid, &dbp->fileid, DB_FILE_ID_LEN);
+ lock_desc.pgno = dbp->meta_pgno;
+ lock_desc.type = DB_HANDLE_LOCK;
+
+ memset(&fileobj, 0, sizeof(fileobj));
+ fileobj.data = &lock_desc;
+ fileobj.size = sizeof(lock_desc);
+ DB_TEST_SUBLOCKS(dbenv, flags);
+ if (elock == NULL)
+ /* Simple case: acquire the handle lock directly. */
+ ret = dbenv->lock_get(dbenv, locker,
+ flags, &fileobj, mode, &dbp->handle_lock);
+ else {
+ /*
+ * Release the environment lock and acquire the handle
+ * lock in a single lock_vec call: PUT then GET.
+ */
+ reqs[0].op = DB_LOCK_PUT;
+ reqs[0].lock = *elock;
+ reqs[1].op = DB_LOCK_GET;
+ reqs[1].mode = mode;
+ reqs[1].obj = &fileobj;
+ reqs[1].timeout = 0;
+ if ((ret = __lock_vec(dbenv,
+ locker, flags, reqs, 2, &ereq)) == 0) {
+ /* Both requests succeeded. */
+ dbp->handle_lock = reqs[1].lock;
+ LOCK_INIT(*elock);
+ } else if (ereq != reqs)
+ /* The PUT succeeded but the GET did not. */
+ LOCK_INIT(*elock);
+ }
+
+ dbp->cur_lid = locker;
+ return (ret);
+}
+
+/*
+ * __fop_file_setup --
+ *
+ * Perform all the needed checking and locking to open up or create a
+ * file.
+ *
+ * There's a reason we don't push this code down into the buffer cache.
+ * The problem is that there's no information external to the file that
+ * we can use as a unique ID. UNIX has dev/inode pairs, but they are
+ * not necessarily unique after reboot, if the file was mounted via NFS.
+ * Windows has similar problems, as the FAT filesystem doesn't maintain
+ * dev/inode numbers across reboot. So, we must get something from the
+ * file we can use to ensure that, even after a reboot, the file we're
+ * joining in the cache is the right file for us to join. The solution
+ * we use is to maintain a file ID that's stored in the database, and
+ * that's why we have to open and read the file before calling into the
+ * buffer cache or obtaining a lock (we use this unique fileid to lock
+ * as well as to identify like files in the cache).
+ *
+ * PUBLIC: int __fop_file_setup __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, int, u_int32_t, u_int32_t *));
+ */
+int
+__fop_file_setup(dbp, txn, name, mode, flags, retidp)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ int mode;
+ u_int32_t flags, *retidp;
+{
+ DB_ENV *dbenv;
+ DB_FH fh, *fhp;
+ DB_LOCK elock, tmp_lock;
+ DB_TXN *stxn;
+ db_lockmode_t lmode;
+ u_int32_t locker, oflags;
+ u_int8_t mbuf[DBMETASIZE];
+ int created_fhp, created_locker, ret, tmp_created, truncating;
+ char *real_name, *real_tmpname, *tmpname;
+
+ DB_ASSERT(name != NULL);
+
+ *retidp = TXN_INVALID;
+
+ dbenv = dbp->dbenv;
+ LOCK_INIT(elock);
+ LOCK_INIT(tmp_lock);
+ stxn = NULL;
+ created_fhp = created_locker = 0;
+ real_name = real_tmpname = tmpname = NULL;
+ tmp_created = truncating = 0;
+
+ /*
+ * If we open a file handle and our caller is doing fcntl(2) locking,
+ * we can't close it because that would discard the caller's lock.
+ * Save it until we close the DB handle.
+ */
+ if (LF_ISSET(DB_FCNTL_LOCKING)) {
+ if ((ret = __os_malloc(dbenv, sizeof(*fhp), &fhp)) != 0)
+ return (ret);
+ created_fhp = 1;
+ } else
+ fhp = &fh;
+ memset(fhp, 0, sizeof(*fhp));
+
+ /*
+ * Get a lockerid for this handle. There are paths through queue
+ * rename and remove where this dbp already has a locker, so make
+ * sure we don't clobber it and conflict.
+ */
+ if (LOCKING_ON(dbenv) &&
+ !F_ISSET(dbp, DB_AM_COMPENSATE) && dbp->lid == DB_LOCK_INVALIDID) {
+ if ((ret = __lock_id(dbenv, &dbp->lid)) != 0)
+ goto err;
+ created_locker = 1;
+ }
+
+ /* Lock as the transaction when there is one, else as the handle. */
+ locker = txn == NULL ? dbp->lid : txn->txnid;
+
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /* Fill in the default file mode. */
+ if (mode == 0)
+ mode = __db_omode("rwrw--");
+
+ oflags = 0;
+ if (LF_ISSET(DB_RDONLY))
+ oflags |= DB_OSO_RDONLY;
+
+retry: if (!F_ISSET(dbp, DB_AM_COMPENSATE))
+ GET_ENVLOCK(dbenv, locker, &elock);
+ if ((ret = __os_exists(real_name, NULL)) == 0) {
+ /* File exists; DB_EXCL demanded exclusive creation. */
+ if (LF_ISSET(DB_EXCL)) {
+ ret = EEXIST;
+ goto err;
+ }
+reopen: if ((ret = __fop_read_meta(dbenv, real_name,
+ mbuf, sizeof(mbuf), fhp, 0, oflags)) != 0)
+ goto err;
+
+ if ((ret = __db_meta_setup(dbenv,
+ dbp, real_name, (DBMETA *)mbuf, flags, 1)) != 0)
+ goto err;
+
+ /* Now, get our handle lock. */
+ lmode = LF_ISSET(DB_TRUNCATE) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, locker, lmode, NULL, DB_LOCK_NOWAIT)) == 0) {
+ if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0)
+ goto err;
+ } else {
+ /* Someone else has file locked; need to wait. */
+ if ((ret = __os_closehandle(dbenv, fhp)) != 0)
+ goto err;
+ ret = __fop_lock_handle(dbenv,
+ dbp, locker, lmode, &elock, 0);
+ if (ret == DB_LOCK_NOTEXIST)
+ goto retry;
+ if (ret != 0)
+ goto err;
+ /*
+ * XXX I need to convince myself that I don't need
+ * to re-read the metadata page here.
+ * XXX If you do need to re-read it you'd better
+ * decrypt it too...
+ */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, fhp)) != 0)
+ goto err;
+ }
+
+ /*
+ * Check for a truncate which needs to leap over to the
+ * create case.
+ */
+ if (LF_ISSET(DB_TRUNCATE)) {
+ /*
+ * Sadly, we need to close and reopen the handle
+ * in order to do the actual truncate. We couldn't
+ * do the truncate on the initial open because we
+ * needed to read the old file-id in order to lock.
+ */
+ if ((ret = __os_closehandle(dbenv, fhp)) != 0)
+ goto err;
+ if ((ret = __os_open(dbenv,
+ real_name, DB_OSO_TRUNC, 0, fhp)) != 0)
+ goto err;
+ /*
+ * This is not-transactional, so we'll do the
+ * open/create in-place.
+ */
+ tmp_lock = dbp->handle_lock;
+ truncating = 1;
+ tmpname = (char *)name;
+ goto creat2;
+ }
+
+ /*
+ * Check for a file in the midst of a rename
+ */
+ if (F_ISSET(dbp, DB_AM_IN_RENAME)) {
+ if (LF_ISSET(DB_CREATE)) {
+ F_CLR(dbp, DB_AM_IN_RENAME);
+ goto create;
+ } else {
+ ret = ENOENT;
+ goto err;
+ }
+ }
+
+ CLOSE_HANDLE(dbp, fhp);
+ goto done;
+ }
+
+ /* File does not exist. */
+ if (!LF_ISSET(DB_CREATE))
+ goto err;
+ ret = 0;
+
+ /*
+ * Need to create file; we need to set up the file,
+ * the fileid and the locks. Then we need to call
+ * the appropriate routines to create meta-data pages.
+ */
+ if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0)
+ goto err;
+
+create: if ((ret = __db_backup_name(dbenv, name, txn, &tmpname)) != 0)
+ goto err;
+ if (TXN_ON(dbenv) && txn != NULL &&
+ (ret = dbenv->txn_begin(dbenv, txn, &stxn, 0)) != 0)
+ goto err;
+ if ((ret = __fop_create(dbenv,
+ stxn, fhp, tmpname, DB_APP_DATA, mode)) != 0)
+ goto err;
+ tmp_created = 1;
+creat2: if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, tmpname, 0, NULL, &real_tmpname)) != 0)
+ goto err;
+
+ /* Set the pagesize if it isn't yet set. */
+ if (dbp->pgsize == 0 &&
+ (ret = __fop_set_pgsize(dbp, fhp, real_tmpname)) != 0)
+ goto errmsg;
+
+ /* Construct a file_id. */
+ if ((ret = __os_fileid(dbenv, real_tmpname, 1, dbp->fileid)) != 0)
+ goto errmsg;
+
+ if ((ret = __db_new_file(dbp, stxn, fhp, tmpname)) != 0)
+ goto err;
+ CLOSE_HANDLE(dbp, fhp);
+
+ /* Now move the file into place. */
+ if (!F_ISSET(dbp, DB_AM_COMPENSATE))
+ GET_ENVLOCK(dbenv, locker, &elock);
+ if (!truncating && __os_exists(real_name, NULL) == 0) {
+ /*
+ * Someone managed to create the file; remove our temp
+ * and try to open the file that now exists.
+ */
+ (void)__fop_remove(dbenv,
+ NULL, dbp->fileid, tmpname, DB_APP_DATA);
+ if (LOCKING_ON(dbenv))
+ dbenv->lock_put(dbenv, &dbp->handle_lock);
+ LOCK_INIT(dbp->handle_lock);
+
+ /* If we have a saved handle; close it. */
+ if (LF_ISSET(DB_FCNTL_LOCKING))
+ (void)__os_closehandle(dbenv, fhp);
+ if (stxn != NULL) {
+ ret = stxn->abort(stxn);
+ stxn = NULL;
+ }
+ if (ret != 0)
+ goto err;
+ goto reopen;
+ }
+
+ /* We've successfully created, move the file into place. */
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, locker, DB_LOCK_WRITE, &elock, 0)) != 0)
+ goto err;
+ if (!truncating && (ret = __fop_rename(dbenv,
+ stxn, tmpname, name, dbp->fileid, DB_APP_DATA)) != 0)
+ goto err;
+
+ /* If this was a truncate; release lock on the old file. */
+ if (LOCK_ISSET(tmp_lock) && (ret = __lock_put(dbenv, &tmp_lock)) != 0)
+ goto err;
+
+ if (stxn != NULL) {
+ *retidp = stxn->txnid;
+ ret = stxn->commit(stxn, 0);
+ stxn = NULL;
+ } else
+ *retidp = TXN_INVALID;
+
+ if (ret != 0)
+ goto err;
+
+ F_SET(dbp, DB_AM_CREATED);
+
+ /*
+ * The `if (0)' ensures the error labels below are reached only
+ * via goto, never by falling through from the success path.
+ */
+ if (0) {
+errmsg: __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+
+err: if (stxn != NULL)
+ (void)stxn->abort(stxn);
+ if (tmp_created && txn == NULL)
+ (void)__fop_remove(dbenv,
+ NULL, NULL, tmpname, DB_APP_DATA);
+ if (F_ISSET(fhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, fhp);
+ if (LOCK_ISSET(tmp_lock))
+ __lock_put(dbenv, &tmp_lock);
+ if (LOCK_ISSET(dbp->handle_lock) && txn == NULL)
+ __lock_put(dbenv, &dbp->handle_lock);
+ if (LOCK_ISSET(elock))
+ (void)REL_ENVLOCK(dbenv, &elock);
+ if (created_locker) {
+ (void)__lock_id_free(dbenv, dbp->lid);
+ dbp->lid = DB_LOCK_INVALIDID;
+ }
+ if (created_fhp)
+ __os_free(dbenv, fhp);
+ }
+
+done: if (!truncating && tmpname != NULL)
+ __os_free(dbenv, tmpname);
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ if (real_tmpname != NULL)
+ __os_free(dbenv, real_tmpname);
+
+ return (ret);
+}
+
+/*
+ * __fop_set_pgsize --
+ * Set the page size based on file information.
+ */
+static int
+__fop_set_pgsize(dbp, fhp, name)
+ DB *dbp;
+ DB_FH *fhp;
+ const char *name;
+{
+ DB_ENV *dbenv;
+ u_int32_t iopsize;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Use the filesystem's optimum I/O size as the pagesize if a pagesize
+ * not specified. Some filesystems have 64K as their optimum I/O size,
+ * but as that results in fairly large default caches, we limit the
+ * default pagesize to 16K.
+ */
+ if ((ret = __os_ioinfo(dbenv, name, fhp, NULL, NULL, &iopsize)) != 0) {
+ __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ return (ret);
+ }
+ /* Clamp to the [512, 16K] range. */
+ if (iopsize < 512)
+ iopsize = 512;
+ if (iopsize > 16 * 1024)
+ iopsize = 16 * 1024;
+
+ /*
+ * Sheer paranoia, but we don't want anything that's not a power-of-2
+ * (we rely on that for alignment of various types on the pages), and
+ * we want a multiple of the sector size as well. If the value
+ * we got out of __os_ioinfo looks bad, use a default instead.
+ */
+ if (!IS_VALID_PAGESIZE(iopsize))
+ iopsize = DB_DEF_IOSIZE;
+
+ /* Record the chosen size and mark it as a default, not user-set. */
+ dbp->pgsize = iopsize;
+ F_SET(dbp, DB_AM_PGDEF);
+
+ return (0);
+}
+
+/*
+ * __fop_subdb_setup --
+ *
+ * Subdb setup is significantly simpler than file setup. In terms of
+ * locking, for the duration of the operation/transaction, the locks on
+ * the meta-data page will suffice to protect us from simultaneous operations
+ * on the sub-database. Before we complete the operation though, we'll get a
+ * handle lock on the subdatabase so that no one else can try to remove it
+ * while we've got it open. We use an object that looks like the meta-data
+ * page lock with a different type (DB_HANDLE_LOCK) for the long-term handle
+ * locks.
+ *
+ * PUBLIC: int __fop_subdb_setup __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, int, u_int32_t));
+ */
+int
+__fop_subdb_setup(dbp, txn, mname, name, mode, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *mname, *name;
+ int mode;
+ u_int32_t flags;
+{
+ DB *mdbp;
+ DB_ENV *dbenv;
+ int do_remove, ret;
+
+ mdbp = NULL;
+ dbenv = dbp->dbenv;
+
+ /* Open the master database containing the subdatabases. */
+ if ((ret = __db_master_open(dbp, txn, mname, flags, mode, &mdbp)) != 0)
+ return (ret);
+
+ /*
+ * We are going to close this instance of the master, so we can
+ * steal its handle instead of reopening a handle on the database.
+ */
+ if (LF_ISSET(DB_FCNTL_LOCKING)) {
+ dbp->saved_open_fhp = mdbp->saved_open_fhp;
+ mdbp->saved_open_fhp = NULL;
+ }
+
+ /* Now copy the pagesize. */
+ dbp->pgsize = mdbp->pgsize;
+ F_SET(dbp, DB_AM_SUBDB);
+
+ /* Create or locate the subdb entry in the master. */
+ if (name != NULL && (ret = __db_master_update(mdbp, dbp, txn,
+ name, dbp->type, MU_OPEN, NULL, flags)) != 0)
+ goto err;
+
+ /*
+ * Hijack the master's locker ID as well, so that our locks don't
+ * conflict with the master's. Since we're closing the master,
+ * that lid would just have been freed anyway. Once we've gotten
+ * the locker id, we need to acquire the handle lock for this
+ * subdatabase.
+ */
+ dbp->lid = mdbp->lid;
+ mdbp->lid = DB_LOCK_INVALIDID;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOG, ret, mname);
+
+ /*
+ * We copy our fileid from our master so that we all open
+ * the same file in mpool. We'll use the meta-pgno to lock
+ * so that we end up with different handle locks.
+ */
+
+ memcpy(dbp->fileid, mdbp->fileid, DB_FILE_ID_LEN);
+ if ((ret = __fop_lock_handle(dbenv, dbp,
+ txn == NULL ? dbp->lid : txn->txnid,
+ F_ISSET(dbp, DB_AM_CREATED) || LF_ISSET(DB_WRITEOPEN) ?
+ DB_LOCK_WRITE : DB_LOCK_READ, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret = __db_init_subdb(mdbp, dbp, name, txn)) != 0)
+ goto err;
+
+ /*
+ * In the file create case, these happen in separate places so we have
+ * two different tests. They end up in the same place for subdbs, but
+ * for compatibility with file testing, we put them both here anyway.
+ */
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOGMETA, ret, mname);
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, mname);
+
+ /*
+ * File exists and we have the appropriate locks; we should now
+ * process a normal open.
+ */
+ if (F_ISSET(mdbp, DB_AM_CREATED)) {
+ F_SET(dbp, DB_AM_CREATED_MSTR);
+ F_CLR(mdbp, DB_AM_DISCARD);
+ }
+
+ /*
+ * The master's handle lock is under the control of the
+ * subdb (it acquired the master's locker). We want to
+ * keep the master's handle lock so that no one can remove
+ * the file while the subdb is open. If we register the
+ * trade event and then invalidate the copy of the lock
+ * in the master's handle, that will accomplish this. However,
+ * before we register this event, we'd better remove any
+ * events that we've already registered for the master.
+ */
+
+ if (!F_ISSET(dbp, DB_AM_RECOVER) && txn != NULL) {
+ /* Unregister old master events. */
+ __txn_remlock(dbenv,
+ txn, &mdbp->handle_lock, DB_LOCK_INVALIDID);
+
+ /* Now register the new event. */
+ if ((ret = __txn_lockevent(dbenv,
+ txn, dbp, &mdbp->handle_lock, dbp->lid)) != 0)
+ goto err;
+ }
+ LOCK_INIT(mdbp->handle_lock);
+ return (__db_close_i(mdbp, txn, 0));
+
+err:
+DB_TEST_RECOVERY_LABEL
+ if (LOCK_ISSET(dbp->handle_lock) && txn == NULL)
+ __lock_put(dbenv, &dbp->handle_lock);
+
+ /* If we created the master file then we need to remove it. */
+ if (mdbp != NULL) {
+ do_remove = F_ISSET(mdbp, DB_AM_CREATED) ? 1 : 0;
+ if (do_remove)
+ F_SET(mdbp, DB_AM_DISCARD);
+ (void)__db_close_i(mdbp, txn, 0);
+ if (do_remove) {
+ (void)db_create(&mdbp, dbp->dbenv, 0);
+ (void)__db_remove_i(mdbp, txn, mname, NULL);
+ }
+ }
+ return (ret);
+}
+
+/*
+ * __fop_remove_setup --
+ * Open handle appropriately and lock for removal of a database file.
+ *
+ * PUBLIC: int __fop_remove_setup __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t));
+ */
+int
+__fop_remove_setup(dbp, txn, name, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_LOCK elock;
+ u_int8_t mbuf[DBMETASIZE];
+ int ret;
+
+ COMPQUIET(flags, 0);
+ dbenv = dbp->dbenv;
+ PANIC_CHECK(dbenv);
+ LOCK_INIT(elock);
+
+ /* Create locker if necessary. */
+ if (LOCKING_ON(dbenv)) {
+ if (txn != NULL)
+ dbp->lid = txn->txnid;
+ else if (dbp->lid == DB_LOCK_INVALIDID) {
+ if ((ret = __lock_id(dbenv, &dbp->lid)) != 0)
+ goto err;
+ }
+ }
+
+ /*
+ * Lock environment to protect file open. That will enable us to
+ * read the meta-data page and get the fileid so that we can lock
+ * the handle.
+ */
+ GET_ENVLOCK(dbenv, dbp->lid, &elock);
+ if ((ret = __fop_read_meta(dbenv,
+ name, mbuf, sizeof(mbuf), NULL, 0, 0)) != 0)
+ goto err;
+
+ /* Fill in dbp->fileid and friends from the meta page. */
+ if ((ret =
+ __db_meta_setup(dbenv, dbp, name, (DBMETA *)mbuf, flags, 1)) != 0)
+ goto err;
+
+ /* Now, release the environment and get the handle lock. */
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, dbp->lid, DB_LOCK_WRITE, &elock, 0)) != 0)
+ goto err;
+
+ return (0);
+
+err: (void)REL_ENVLOCK(dbenv, &elock);
+ return (ret);
+}
+
+/*
+ * __fop_read_meta --
+ * Read the meta-data page from a file and return it in buf. The
+ * open file handle is returned in fhp.
+ *
+ * PUBLIC: int __fop_read_meta __P((DB_ENV *,
+ * PUBLIC: const char *, u_int8_t *, size_t, DB_FH *, int, u_int32_t));
+ */
+int
+__fop_read_meta(dbenv, name, buf, size, fhp, errok, flags)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int8_t *buf;
+ size_t size;
+ DB_FH *fhp;
+ int errok;
+ u_int32_t flags;
+{
+ DB_FH fh, *lfhp;
+ size_t nr;
+ int ret;
+
+ /* Use the caller's handle when supplied, else a local one. */
+ lfhp = fhp == NULL ? &fh : fhp;
+ /*
+ * NOTE(review): sizeof(*fhp) is unevaluated so this is safe even
+ * when fhp == NULL (same size as *lfhp), but sizeof(*lfhp) would
+ * read more clearly.
+ */
+ memset(lfhp, 0, sizeof(*fhp));
+ if ((ret = __os_open(dbenv, name, flags, 0, lfhp)) != 0)
+ goto err;
+ if ((ret = __os_read(dbenv, lfhp, buf, size, &nr)) != 0) {
+ if (!errok)
+ __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ goto err;
+ }
+
+ /* A short read means this isn't a database file at all. */
+ if (nr != size) {
+ if (!errok)
+ __db_err(dbenv,
+ "%s: unexpected file type or format", name);
+ ret = EINVAL;
+ goto err;
+ }
+
+err: /*
+ * On error, we always close the handle. If there is no error,
+ * then we only return the handle if the user didn't pass us
+ * a handle into which to return it. If fhp is valid, then
+ * lfhp is the same as fhp.
+ */
+ if (F_ISSET(lfhp, DB_FH_VALID) && (ret != 0 || fhp == NULL))
+ __os_closehandle(dbenv, lfhp);
+ return (ret);
+}
+
+/*
+ * __fop_dummy --
+ * This implements the creation and name swapping of dummy files that
+ * we use for remove and rename (remove is simply a rename with a delayed
+ * remove).
+ *
+ * PUBLIC: int __fop_dummy __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, const char *, u_int32_t));
+ */
+int
+__fop_dummy(dbp, txn, old, new, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *old, *new;
+ u_int32_t flags;
+{
+ DB *tmpdbp;
+ DB_ENV *dbenv;
+ DB_LOCK elock;
+ DB_LSN lsn;
+ DBT fiddbt, namedbt, tmpdbt;
+ DB_TXN *stxn;
+ char *back;
+ char *realback, *realnew, *realold;
+ int ret, t_ret;
+ u_int8_t mbuf[DBMETASIZE];
+ u_int32_t locker, stxnid;
+
+ dbenv = dbp->dbenv;
+ LOCK_INIT(elock);
+ realback = NULL;
+ realnew = NULL;
+ realold = NULL;
+ back = NULL;
+ stxn = NULL;
+ tmpdbp = NULL;
+
+ DB_ASSERT(txn != NULL);
+ locker = txn->txnid;
+
+ /* Begin sub transaction to encapsulate the rename. */
+ if (TXN_ON(dbenv) &&
+ (ret = dbenv->txn_begin(dbenv, txn, &stxn, 0)) != 0)
+ goto err;
+
+ /* We need to create a dummy file as a place holder. */
+ if ((ret = __db_backup_name(dbenv, new, stxn, &back)) != 0)
+ goto err;
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, back, flags, NULL, &realback)) != 0)
+ goto err;
+ if ((ret = __fop_create(dbenv, stxn, NULL, back, DB_APP_DATA, 0)) != 0)
+ goto err;
+
+ /* Stamp the dummy with a fresh fileid and the rename magic. */
+ memset(mbuf, 0, sizeof(mbuf));
+ if ((ret =
+ __os_fileid(dbenv, realback, 1, ((DBMETA *)mbuf)->uid)) != 0)
+ goto err;
+ ((DBMETA *)mbuf)->magic = DB_RENAMEMAGIC;
+ if ((ret = __fop_write(dbenv,
+ stxn, back, DB_APP_DATA, NULL, 0, mbuf, DBMETASIZE, 1)) != 0)
+ goto err;
+
+ /* Create a dummy dbp handle. */
+ if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
+ goto err;
+ memcpy(&tmpdbp->fileid, ((DBMETA *)mbuf)->uid, DB_FILE_ID_LEN);
+
+ /* Now, lock the name space while we initialize this file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, new, 0, NULL, &realnew)) != 0)
+ goto err;
+ GET_ENVLOCK(dbenv, locker, &elock);
+ if (__os_exists(realnew, NULL) == 0) {
+ ret = EEXIST;
+ goto err;
+ }
+
+ /*
+ * While we have the namespace locked, do the renames and then
+ * swap for the handle lock.
+ */
+ if ((ret = __fop_rename(dbenv,
+ stxn, old, new, dbp->fileid, DB_APP_DATA)) != 0)
+ goto err;
+ if ((ret = __fop_rename(dbenv,
+ stxn, back, old, tmpdbp->fileid, DB_APP_DATA)) != 0)
+ goto err;
+ if ((ret = __fop_lock_handle(dbenv,
+ tmpdbp, locker, DB_LOCK_WRITE, &elock, 0)) != 0)
+ goto err;
+
+ /*
+ * We just acquired a transactional lock on the tmp handle.
+ * We need to null out the tmp handle's lock so that it
+ * doesn't create problems for us in the close path.
+ */
+ LOCK_INIT(tmpdbp->handle_lock);
+
+ if (stxn != NULL) {
+ /* Commit the child. */
+ stxnid = stxn->txnid;
+ ret = stxn->commit(stxn, 0);
+ stxn = NULL;
+
+ /* Now log the child information in the parent. */
+ memset(&fiddbt, 0, sizeof(fiddbt));
+ /*
+ * NOTE(review): sizeof(fiddbt) below was probably meant to
+ * be sizeof(tmpdbt); harmless since both are DBTs.
+ */
+ memset(&tmpdbt, 0, sizeof(fiddbt));
+ memset(&namedbt, 0, sizeof(namedbt));
+ fiddbt.data = dbp->fileid;
+ fiddbt.size = DB_FILE_ID_LEN;
+ tmpdbt.data = tmpdbp->fileid;
+ tmpdbt.size = DB_FILE_ID_LEN;
+ namedbt.data = (void *)old;
+ namedbt.size = (u_int32_t)strlen(old) + 1;
+ if ((t_ret =
+ __fop_file_remove_log(dbenv, txn, &lsn, 0, &fiddbt,
+ &tmpdbt, &namedbt, DB_APP_DATA, stxnid)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /* This is a delayed delete of the dummy file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, old, flags, NULL, &realold)) != 0)
+ goto err;
+ if ((ret = __txn_remevent(dbenv, txn, realold, NULL)) != 0)
+ goto err;
+
+err: (void)REL_ENVLOCK(dbenv, &elock);
+ if (stxn != NULL)
+ (void)stxn->abort(stxn);
+ if (tmpdbp != NULL &&
+ (t_ret = __db_close_i(tmpdbp, NULL, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (realold != NULL)
+ __os_free(dbenv, realold);
+ if (realnew != NULL)
+ __os_free(dbenv, realnew);
+ if (realback != NULL)
+ __os_free(dbenv, realback);
+ if (back != NULL)
+ __os_free(dbenv, back);
+ return (ret);
+}
+
+/*
+ * __fop_dbrename --
+ * Do the appropriate file locking and file system operations
+ * to effect a dbrename in the absence of transactions (__fop_dummy
+ * and the subsequent calls in __db_rename do the work for the
+ * transactional case).
+ *
+ * PUBLIC: int __fop_dbrename __P((DB *, const char *, const char *));
+ */
+int
+__fop_dbrename(dbp, old, new)
+ DB *dbp;
+ const char *old, *new;
+{
+ DB_ENV *dbenv;
+ DB_LOCK elock;
+ char *real_new, *real_old;
+ int ret, tret;
+
+ dbenv = dbp->dbenv;
+ real_new = NULL;
+ real_old = NULL;
+ LOCK_INIT(elock);
+
+ /* Find the real newname of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, new, 0, NULL, &real_new)) != 0)
+ goto err;
+
+ /*
+ * It is an error to rename a file over one that already exists,
+ * as that wouldn't be transaction-safe.
+ */
+ GET_ENVLOCK(dbenv, dbp->lid, &elock);
+ if (__os_exists(real_new, NULL) == 0) {
+ ret = EEXIST;
+ __db_err(dbenv, "rename: file %s exists", real_new);
+ goto err;
+ }
+
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, old, 0, NULL, &real_old)) != 0)
+ goto err;
+
+ /* Do the rename in mpool and the filesystem together. */
+ ret = dbenv->memp_nameop(dbenv, dbp->fileid, new, real_old, real_new);
+
+err: if ((tret = REL_ENVLOCK(dbenv, &elock)) != 0 && ret == 0)
+ ret = tret;
+ if (real_old != NULL)
+ __os_free(dbenv, real_old);
+ if (real_new != NULL)
+ __os_free(dbenv, real_new);
+ return (ret);
+}
diff --git a/storage/bdb/hash/hash.c b/storage/bdb/hash/hash.c
new file mode 100644
index 00000000000..2f972a3238d
--- /dev/null
+++ b/storage/bdb/hash/hash.c
@@ -0,0 +1,2062 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash.c,v 11.166 2002/08/06 06:11:25 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+
+static int __ham_bulk __P((DBC *, DBT *, u_int32_t));
+static int __ham_c_close __P((DBC *, db_pgno_t, int *));
+static int __ham_c_del __P((DBC *));
+static int __ham_c_destroy __P((DBC *));
+static int __ham_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __ham_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __ham_c_writelock __P((DBC *));
+static int __ham_dup_return __P((DBC *, DBT *, u_int32_t));
+static int __ham_expand_table __P((DBC *));
+static int __ham_lookup __P((DBC *,
+ const DBT *, u_int32_t, db_lockmode_t, db_pgno_t *));
+static int __ham_overwrite __P((DBC *, DBT *, u_int32_t));
+
+/*
+ * __ham_quick_delete --
+ * When performing a DB->del operation that does not involve secondary
+ * indices and is not removing an off-page duplicate tree, we can
+ * speed things up substantially by removing the entire duplicate
+ * set, if any is present, in one operation, rather than by conjuring
+ * up and deleting each of the items individually. (All are stored
+ * in one big HKEYDATA structure.) We don't bother to distinguish
+ * on-page duplicate sets from single, non-dup items; they're deleted
+ * in exactly the same way.
+ *
+ * This function is called by __db_delete when the appropriate
+ * conditions are met, and it performs the delete in the optimized way.
+ *
+ * The cursor should be set to the first item in the duplicate
+ * set, or to the sole key/data pair when the key does not have a
+ * duplicate set, before the function is called.
+ *
+ * PUBLIC: int __ham_quick_delete __P((DBC *));
+ */
+int
+__ham_quick_delete(dbc)
+ DBC *dbc;
+{
+ int ret, t_ret;
+
+ /* Pin the hash metadata page for the duration of the delete. */
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+
+ /* Assert that we're not using secondary indices. */
+ DB_ASSERT(!F_ISSET(dbc->dbp, DB_AM_SECONDARY));
+ /*
+ * We should assert that we're not a primary either, but that
+ * would require grabbing the dbp's mutex, so we don't bother.
+ */
+
+ /* Assert that we're set, but not to an off-page duplicate. */
+ DB_ASSERT(IS_INITIALIZED(dbc));
+ DB_ASSERT(((HASH_CURSOR *)dbc->internal)->opd == NULL);
+
+ /* Remove the whole pair (key plus any on-page dup set) in one shot. */
+ ret = __ham_del_pair(dbc, 1);
+
+ /* Release the metadata page, preserving the first error seen. */
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/* ****************** CURSORS ********************************** */
+/*
+ * __ham_c_init --
+ * Initialize the hash-specific portion of a cursor.
+ *
+ * PUBLIC: int __ham_c_init __P((DBC *));
+ */
+int
+__ham_c_init(dbc)
+ DBC *dbc;
+{
+ DB_ENV *dbenv;
+ HASH_CURSOR *new_curs;
+ int ret;
+
+ dbenv = dbc->dbp->dbenv;
+ /*
+ * NOTE(review): "struct cursor_t" is presumably the struct tag for
+ * HASH_CURSOR -- confirm against dbinc/hash.h.
+ */
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(struct cursor_t), &new_curs)) != 0)
+ return (ret);
+ /* Pre-allocate a page-sized buffer used when splitting buckets. */
+ if ((ret = __os_malloc(dbenv,
+ dbc->dbp->pgsize, &new_curs->split_buf)) != 0) {
+ __os_free(dbenv, new_curs);
+ return (ret);
+ }
+
+ /* Wire up the generic cursor entry points and the hash AM methods. */
+ dbc->internal = (DBC_INTERNAL *) new_curs;
+ dbc->c_close = __db_c_close;
+ dbc->c_count = __db_c_count;
+ dbc->c_del = __db_c_del;
+ dbc->c_dup = __db_c_dup;
+ dbc->c_get = dbc->c_real_get = __db_c_get;
+ dbc->c_pget = __db_c_pget;
+ dbc->c_put = __db_c_put;
+ dbc->c_am_bulk = __ham_bulk;
+ dbc->c_am_close = __ham_c_close;
+ dbc->c_am_del = __ham_c_del;
+ dbc->c_am_destroy = __ham_c_destroy;
+ dbc->c_am_get = __ham_c_get;
+ dbc->c_am_put = __ham_c_put;
+ dbc->c_am_writelock = __ham_c_writelock;
+
+ /* Reset the hash-specific cursor state to "uninitialized". */
+ __ham_item_init(dbc);
+
+ return (0);
+}
+
+/*
+ * __ham_c_close --
+ * Close down the cursor from a single use.
+ */
+static int
+__ham_c_close(dbc, root_pgno, rmroot)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ int *rmroot;
+{
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HKEYDATA *dp;
+ int doroot, gotmeta, ret, t_ret;
+ u_int32_t dirty;
+
+ COMPQUIET(rmroot, 0);
+ mpf = dbc->dbp->mpf;
+ dirty = 0;
+ doroot = gotmeta = ret = 0;
+ hcp = (HASH_CURSOR *) dbc->internal;
+
+ /* Check for off page dups. */
+ if (dbc->internal->opd != NULL) {
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto done;
+ gotmeta = 1;
+ if ((ret = __ham_get_cpage(dbc, DB_LOCK_READ)) != 0)
+ goto out;
+ dp = (HKEYDATA *)H_PAIRDATA(dbc->dbp, hcp->page, hcp->indx);
+
+ /* If it's not a dup, we aborted before we changed it. */
+ if (HPAGE_PTYPE(dp) == H_OFFDUP)
+ memcpy(&root_pgno,
+ HOFFPAGE_PGNO(dp), sizeof(db_pgno_t));
+ else
+ root_pgno = PGNO_INVALID;
+
+ /* Close the off-page dup cursor; it reports whether the
+ * now-empty dup tree root (and thus this pair) should go. */
+ if ((ret =
+ hcp->opd->c_am_close(hcp->opd, root_pgno, &doroot)) != 0)
+ goto out;
+ if (doroot != 0) {
+ if ((ret = __ham_del_pair(dbc, 1)) != 0)
+ goto out;
+ dirty = DB_MPOOL_DIRTY;
+ }
+ }
+
+ /* Return the pinned page and metadata, keeping the first error. */
+out: if (hcp->page != NULL && (t_ret =
+ mpf->put(mpf, hcp->page, dirty)) != 0 && ret == 0)
+ ret = t_ret;
+ if (gotmeta != 0 && (t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+done:
+ /* Reset hash-specific cursor state for possible reuse. */
+ __ham_item_init(dbc);
+ return (ret);
+}
+
+/*
+ * __ham_c_destroy --
+ * Cleanup the access method private part of a cursor.
+ */
+static int
+__ham_c_destroy(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ /* Free the bucket-split buffer allocated in __ham_c_init, then
+ * the hash cursor structure itself. */
+ if (hcp->split_buf != NULL)
+ __os_free(dbc->dbp->dbenv, hcp->split_buf);
+ __os_free(dbc->dbp->dbenv, hcp);
+
+ return (0);
+}
+
+/*
+ * __ham_c_count --
+ * Return a count of on-page duplicates.
+ *
+ * PUBLIC: int __ham_c_count __P((DBC *, db_recno_t *));
+ */
+int
+__ham_c_count(dbc, recnop)
+ DBC *dbc;
+ db_recno_t *recnop;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ db_indx_t len;
+ db_recno_t recno;
+ int ret, t_ret;
+ u_int8_t *p, *pend;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ recno = 0;
+
+ if ((ret = __ham_get_cpage(dbc, DB_LOCK_READ)) != 0)
+ return (ret);
+
+ switch (HPAGE_PTYPE(H_PAIRDATA(dbp, hcp->page, hcp->indx))) {
+ case H_KEYDATA:
+ case H_OFFPAGE:
+ /* A plain or overflow item is a single record. */
+ recno = 1;
+ break;
+ case H_DUPLICATE:
+ /* Walk the packed on-page dup set, counting entries. */
+ p = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ pend = p +
+ LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
+ for (; p < pend; recno++) {
+ /* p may be odd, so copy rather than just dereffing */
+ memcpy(&len, p, sizeof(db_indx_t));
+ /* Skip leading length, data bytes, trailing length. */
+ p += 2 * sizeof(db_indx_t) + len;
+ }
+
+ break;
+ default:
+ ret = __db_pgfmt(dbp->dbenv, hcp->pgno);
+ goto err;
+ }
+
+ *recnop = recno;
+
+ /* Unpin the page in all cases, keeping the first error. */
+err: if ((t_ret = mpf->put(mpf, hcp->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ hcp->page = NULL;
+ return (ret);
+}
+
+/*
+ * __ham_c_del --
+ * Delete the item the cursor refers to: the whole key/data pair,
+ * or just the current entry of an on-page duplicate set.
+ */
+static int
+__ham_c_del(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBT repldbt;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (F_ISSET(hcp, H_DELETED))
+ return (DB_NOTFOUND);
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+
+ if ((ret = __ham_get_cpage(dbc, DB_LOCK_WRITE)) != 0)
+ goto out;
+
+ /* Off-page duplicates: presumably handled through the off-page
+ * duplicate cursor, so there is nothing to do here. */
+ if (HPAGE_TYPE(dbp, hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP)
+ goto out;
+
+ if (F_ISSET(hcp, H_ISDUP)) { /* On-page duplicate. */
+ /* If this is the only entry in the dup set, delete the
+ * whole pair; otherwise splice out just this entry via a
+ * partial-put replace of zero new bytes. */
+ if (hcp->dup_off == 0 &&
+ DUP_SIZE(hcp->dup_len) == LEN_HDATA(dbp, hcp->page,
+ hcp->hdr->dbmeta.pagesize, hcp->indx))
+ ret = __ham_del_pair(dbc, 1);
+ else {
+ repldbt.flags = 0;
+ F_SET(&repldbt, DB_DBT_PARTIAL);
+ repldbt.doff = hcp->dup_off;
+ repldbt.dlen = DUP_SIZE(hcp->dup_len);
+ repldbt.size = 0;
+ repldbt.data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page,
+ hcp->indx));
+ if ((ret = __ham_replpair(dbc, &repldbt, 0)) == 0) {
+ hcp->dup_tlen -= DUP_SIZE(hcp->dup_len);
+ F_SET(hcp, H_DELETED);
+ /* Fix up other cursors on this dup set. */
+ ret = __ham_c_update(dbc,
+ DUP_SIZE(hcp->dup_len), 0, 1);
+ }
+ }
+
+ } else /* Not a duplicate */
+ ret = __ham_del_pair(dbc, 1);
+
+ /* Unpin the page (dirty only on success) and release metadata. */
+out: if (hcp->page != NULL) {
+ if ((t_ret = mpf->put(mpf,
+ hcp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) && ret == 0)
+ ret = t_ret;
+ hcp->page = NULL;
+ }
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __ham_c_dup --
+ * Duplicate a hash cursor, such that the new one holds appropriate
+ * locks for the position of the original.
+ *
+ * PUBLIC: int __ham_c_dup __P((DBC *, DBC *));
+ */
+int
+__ham_c_dup(orig_dbc, new_dbc)
+ DBC *orig_dbc, *new_dbc;
+{
+ HASH_CURSOR *orig, *new;
+
+ orig = (HASH_CURSOR *)orig_dbc->internal;
+ new = (HASH_CURSOR *)new_dbc->internal;
+
+ /* Copy the position: bucket, last bucket, and dup-set offsets. */
+ new->bucket = orig->bucket;
+ new->lbucket = orig->lbucket;
+ new->dup_off = orig->dup_off;
+ new->dup_len = orig->dup_len;
+ new->dup_tlen = orig->dup_tlen;
+
+ /* Carry over the deleted/in-dup-set state flags. */
+ if (F_ISSET(orig, H_DELETED))
+ F_SET(new, H_DELETED);
+ if (F_ISSET(orig, H_ISDUP))
+ F_SET(new, H_ISDUP);
+
+ /*
+ * If the old cursor held a lock and we're not in transactions, get one
+ * for the new one. The reason that we don't need a new lock if we're
+ * in a transaction is because we already hold a lock and will continue
+ * to do so until commit, so there is no point in reacquiring it. We
+ * don't know if the old lock was a read or write lock, but it doesn't
+ * matter. We'll get a read lock. We know that this locker already
+ * holds a lock of the correct type, so if we need a write lock and
+ * request it, we know that we'll get it.
+ */
+ if (!LOCK_ISSET(orig->lock) || orig_dbc->txn != NULL)
+ return (0);
+
+ return (__ham_lock_bucket(new_dbc, DB_LOCK_READ));
+}
+
+/*
+ * __ham_c_get --
+ * Hash cursor get: position the cursor according to "flags" and
+ * return the item found, moving across buckets when the current
+ * bucket is exhausted. *pgnop is filled in by the __ham_item*
+ * calls when the item lives in an off-page duplicate tree.
+ */
+static int
+__ham_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key;
+ DBT *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ db_lockmode_t lock_type;
+ int get_key, ret, t_ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ /* Clear OR'd in additional bits so we can check for flag equality. */
+ if (F_ISSET(dbc, DBC_RMW))
+ lock_type = DB_LOCK_WRITE;
+ else
+ lock_type = DB_LOCK_READ;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+ hcp->seek_size = 0;
+
+ ret = 0;
+ get_key = 1;
+ /* Initial positioning, depending on the requested operation. */
+ switch (flags) {
+ case DB_PREV_NODUP:
+ F_SET(hcp, H_NEXT_NODUP);
+ /* FALLTHROUGH */
+ case DB_PREV:
+ if (IS_INITIALIZED(dbc)) {
+ ret = __ham_item_prev(dbc, lock_type, pgnop);
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST:
+ ret = __ham_item_last(dbc, lock_type, pgnop);
+ break;
+ case DB_NEXT_NODUP:
+ F_SET(hcp, H_NEXT_NODUP);
+ /* FALLTHROUGH */
+ case DB_NEXT:
+ if (IS_INITIALIZED(dbc)) {
+ ret = __ham_item_next(dbc, lock_type, pgnop);
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST:
+ ret = __ham_item_first(dbc, lock_type, pgnop);
+ break;
+ case DB_NEXT_DUP:
+ /* cgetchk has already determined that the cursor is set. */
+ F_SET(hcp, H_DUPONLY);
+ ret = __ham_item_next(dbc, lock_type, pgnop);
+ break;
+ case DB_SET:
+ case DB_SET_RANGE:
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ /* Lookups supply the key themselves; don't return it. */
+ ret = __ham_lookup(dbc, key, 0, lock_type, pgnop);
+ get_key = 0;
+ break;
+ case DB_GET_BOTHC:
+ F_SET(hcp, H_DUPONLY);
+
+ ret = __ham_item_next(dbc, lock_type, pgnop);
+ get_key = 0;
+ break;
+ case DB_CURRENT:
+ /* cgetchk has already determined that the cursor is set. */
+ if (F_ISSET(hcp, H_DELETED)) {
+ ret = DB_KEYEMPTY;
+ goto err;
+ }
+
+ ret = __ham_item(dbc, lock_type, pgnop);
+ break;
+ }
+
+ /*
+ * Must always enter this loop to do error handling and
+ * check for big key/data pair.
+ */
+ for (;;) {
+ if (ret != 0 && ret != DB_NOTFOUND)
+ goto err;
+ else if (F_ISSET(hcp, H_OK)) {
+ /* Found an item; only return data directly when it
+ * is not in an off-page duplicate tree. */
+ if (*pgnop == PGNO_INVALID)
+ ret = __ham_dup_return(dbc, data, flags);
+ break;
+ } else if (!F_ISSET(hcp, H_NOMORE)) {
+ __db_err(dbp->dbenv,
+ "H_NOMORE returned to __ham_c_get");
+ ret = EINVAL;
+ break;
+ }
+
+ /*
+ * Ran out of entries in a bucket; change buckets.
+ */
+ switch (flags) {
+ case DB_LAST:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ ret = mpf->put(mpf, hcp->page, 0);
+ hcp->page = NULL;
+ if (hcp->bucket == 0) {
+ ret = DB_NOTFOUND;
+ hcp->pgno = PGNO_INVALID;
+ goto err;
+ }
+ F_CLR(hcp, H_ISDUP);
+ hcp->bucket--;
+ hcp->indx = NDX_INVALID;
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if (ret == 0)
+ ret = __ham_item_prev(dbc,
+ lock_type, pgnop);
+ break;
+ case DB_FIRST:
+ case DB_NEXT:
+ case DB_NEXT_NODUP:
+ ret = mpf->put(mpf, hcp->page, 0);
+ hcp->page = NULL;
+ hcp->indx = NDX_INVALID;
+ hcp->bucket++;
+ F_CLR(hcp, H_ISDUP);
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if (hcp->bucket > hcp->hdr->max_bucket) {
+ ret = DB_NOTFOUND;
+ hcp->pgno = PGNO_INVALID;
+ goto err;
+ }
+ if (ret == 0)
+ ret = __ham_item_next(dbc,
+ lock_type, pgnop);
+ break;
+ case DB_GET_BOTH:
+ case DB_GET_BOTHC:
+ case DB_GET_BOTH_RANGE:
+ case DB_NEXT_DUP:
+ case DB_SET:
+ case DB_SET_RANGE:
+ /* Key not found. */
+ ret = DB_NOTFOUND;
+ goto err;
+ case DB_CURRENT:
+ /*
+ * This should only happen if you are doing
+ * deletes and reading with concurrent threads
+ * and not doing proper locking. We return
+ * the same error code as we would if the
+ * cursor were deleted.
+ */
+ ret = DB_KEYEMPTY;
+ goto err;
+ default:
+ DB_ASSERT(0);
+ }
+ }
+
+ /* Lookup flags already had the key; mark it as filled in. */
+ if (get_key == 0)
+ F_SET(key, DB_DBT_ISSET);
+
+err: if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* These are per-call flags; always clear them on the way out. */
+ F_CLR(hcp, H_DUPONLY);
+ F_CLR(hcp, H_NEXT_NODUP);
+
+ return (ret);
+}
+
+/*
+ * __ham_bulk -- Return bulk data from a hash table.
+ */
+static int
+__ham_bulk(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *cp;
+ PAGE *pg;
+ db_indx_t dup_len, dup_off, dup_tlen, indx, *inp;
+ db_lockmode_t lock_mode;
+ db_pgno_t pgno;
+ int32_t *endp, key_off, *offp, *saveoff;
+ u_int32_t key_size, size, space;
+ u_int8_t *dbuf, *dp, *hk, *np, *tmp;
+ int is_dup, is_key;
+ int need_pg, next_key, no_dup, pagesize, ret, t_ret;
+
+ ret = 0;
+ key_off = 0;
+ dup_len = dup_off = dup_tlen = 0;
+ size = 0;
+ dbp = dbc->dbp;
+ pagesize = dbp->pgsize;
+ mpf = dbp->mpf;
+ cp = (HASH_CURSOR *)dbc->internal;
+ is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1 : 0;
+ next_key = is_key && LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP;
+ no_dup = LF_ISSET(DB_OPFLAGS_MASK) == DB_NEXT_NODUP;
+ dbuf = data->data;
+ np = dp = dbuf;
+
+ /* Keep track of space that is left. There is a termination entry */
+ space = data->ulen;
+ space -= sizeof(*offp);
+
+ /* Build the offset/size table from the end up. */
+ endp = (int32_t *) ((u_int8_t *)dbuf + data->ulen);
+ endp--;
+ offp = endp;
+
+ key_size = 0;
+ lock_mode = F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE: DB_LOCK_READ;
+
+next_pg:
+ need_pg = 1;
+ indx = cp->indx;
+ pg = cp->page;
+ inp = P_INP(dbp, pg);
+
+ do {
+ if (is_key) {
+ hk = H_PAIRKEY(dbp, pg, indx);
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
+ /* Overflow key: copy it in from its pages. */
+ memcpy(&key_size,
+ HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ size = key_size;
+ if (key_size > space)
+ goto get_key_space;
+ if ((ret = __bam_bulk_overflow(
+ dbc, key_size, pgno, np)) != 0)
+ return (ret);
+ space -= key_size;
+ key_off = (int32_t)(np - dbuf);
+ np += key_size;
+ } else {
+ /* On-page key: copy the whole data area of
+ * the page once, then point into it. */
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+get_key_space:
+ if (offp == endp) {
+ data->size =
+ ALIGN(size +
+ pagesize,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ goto back_up;
+ }
+ memcpy(dp,
+ (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ key_size = LEN_HKEY(dbp, pg, pagesize, indx);
+ key_off = (int32_t)(inp[indx] - HOFFSET(pg)
+ + dp - dbuf + SSZA(HKEYDATA, data));
+ }
+ }
+
+ hk = H_PAIRDATA(dbp, pg, indx);
+ switch (HPAGE_PTYPE(hk)) {
+ case H_DUPLICATE:
+ case H_KEYDATA:
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+back_up:
+ if (indx != 0) {
+ indx -= 2;
+ /* XXX
+ * It's not clear that this is
+ * the right way to fix this,
+ * but here goes.
+ * If we are backing up onto a
+ * duplicate, then we need to
+ * position ourselves at the
+ * end of the duplicate set.
+ * We probably need to make
+ * this work for H_OFFDUP too.
+ * It might be worth making a
+ * dummy cursor and calling
+ * __ham_item_prev.
+ */
+ tmp = H_PAIRDATA(dbp, pg, indx);
+ if (HPAGE_PTYPE(tmp) ==
+ H_DUPLICATE) {
+ dup_off = dup_tlen =
+ LEN_HDATA(dbp, pg,
+ pagesize, indx + 1);
+ memcpy(&dup_len,
+ HKEYDATA_DATA(tmp),
+ sizeof(db_indx_t));
+ }
+ goto get_space;
+ }
+ /* indx == 0 */
+ if ((ret = __ham_item_prev(dbc,
+ lock_mode, &pgno)) != 0) {
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ if ((ret = mpf->put(mpf,
+ cp->page, 0)) != 0)
+ return (ret);
+ cp->page = NULL;
+ if (cp->bucket == 0) {
+ cp->indx = indx =
+ NDX_INVALID;
+ goto get_space;
+ }
+ if ((ret =
+ __ham_get_meta(dbc)) != 0)
+ return (ret);
+
+ cp->bucket--;
+ cp->pgno = BUCKET_TO_PAGE(cp,
+ cp->bucket);
+ cp->indx = NDX_INVALID;
+ if ((ret = __ham_release_meta(
+ dbc)) != 0)
+ return (ret);
+ if ((ret = __ham_item_prev(dbc,
+ lock_mode, &pgno)) != 0)
+ return (ret);
+ }
+ indx = cp->indx;
+get_space:
+ /*
+ * See if we put any data in the buffer.
+ */
+ if (offp >= endp ||
+ F_ISSET(dbc, DBC_TRANSIENT)) {
+ data->size = ALIGN(size +
+ data->ulen - space,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ /*
+ * Don't continue; we're all out
+ * of space, even though we're
+ * returning success.
+ */
+ next_key = 0;
+ break;
+ }
+ memcpy(dp, (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+
+ /*
+ * We're about to crack the offset(s) and length(s)
+ * out of an H_KEYDATA or H_DUPLICATE item.
+ * There are three cases:
+ * 1. We were moved into a duplicate set by
+ * the standard hash cursor code. Respect
+ * the dup_off and dup_tlen we were given.
+ * 2. We stumbled upon a duplicate set while
+ * walking the page on our own. We need to
+ * recognize it as a dup and set dup_off and
+ * dup_tlen.
+ * 3. The current item is not a dup.
+ */
+ if (F_ISSET(cp, H_ISDUP)) {
+ /* Case 1 */
+ is_dup = 1;
+ dup_len = cp->dup_len;
+ dup_off = cp->dup_off;
+ dup_tlen = cp->dup_tlen;
+ } else if (HPAGE_PTYPE(hk) == H_DUPLICATE) {
+ /* Case 2 */
+ is_dup = 1;
+ /*
+ * If we run out of memory and bail,
+ * make sure the fact we're in a dup set
+ * isn't ignored later.
+ */
+ F_SET(cp, H_ISDUP);
+ dup_off = 0;
+ memcpy(&dup_len,
+ HKEYDATA_DATA(hk), sizeof(db_indx_t));
+ dup_tlen = LEN_HDATA(dbp, pg, pagesize, indx);
+ } else
+ /* Case 3 */
+ is_dup = dup_len = dup_off = dup_tlen = 0;
+
+ do {
+ space -= (is_key ? 4 : 2) * sizeof(*offp);
+ size += (is_key ? 4 : 2) * sizeof(*offp);
+ /*
+ * Since space is an unsigned, if we happen
+ * to wrap, then this comparison will turn out
+ * to be true. XXX Wouldn't it be better to
+ * simply check above that space is greater than
+ * the value we're about to subtract???
+ */
+ if (space > data->ulen) {
+ if (!is_dup || dup_off == 0)
+ goto back_up;
+ dup_off -= (db_indx_t)DUP_SIZE(offp[1]);
+ goto get_space;
+ }
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+ if (is_dup) {
+ *offp-- = (int32_t)(
+ inp[indx + 1] - HOFFSET(pg) +
+ dp - dbuf + SSZA(HKEYDATA, data) +
+ dup_off + sizeof(db_indx_t));
+ memcpy(&dup_len,
+ HKEYDATA_DATA(hk) + dup_off,
+ sizeof(db_indx_t));
+ dup_off += DUP_SIZE(dup_len);
+ *offp-- = dup_len;
+ } else {
+ *offp-- = (int32_t)(
+ inp[indx + 1] - HOFFSET(pg) +
+ dp - dbuf + SSZA(HKEYDATA, data));
+ *offp-- = LEN_HDATA(dbp, pg,
+ pagesize, indx);
+ }
+ } while (is_dup && dup_off < dup_tlen && no_dup == 0);
+ F_CLR(cp, H_ISDUP);
+ break;
+ case H_OFFDUP:
+ /* Off-page duplicate tree: delegate to the btree
+ * bulk-duplicate walker. */
+ memcpy(&pgno, HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ space -= 2 * sizeof(*offp);
+ if (space > data->ulen)
+ goto back_up;
+
+ if (is_key) {
+ space -= 2 * sizeof(*offp);
+ if (space > data->ulen)
+ goto back_up;
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+ saveoff = offp;
+ if ((ret = __bam_bulk_duplicates(dbc,
+ pgno, dbuf, is_key ? offp + 2 : NULL,
+ &offp, &np, &space, no_dup)) != 0) {
+ if (ret == ENOMEM) {
+ size = space;
+ if (is_key && saveoff == offp) {
+ offp += 2;
+ goto back_up;
+ }
+ goto get_space;
+ }
+ return (ret);
+ }
+ break;
+ case H_OFFPAGE:
+ /* Overflow data item: copy it in from its pages. */
+ space -= (is_key ? 4 : 2) * sizeof(*offp);
+ if (space > data->ulen)
+ goto back_up;
+
+ memcpy(&size, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ memcpy(&pgno, HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ if (size > space)
+ goto back_up;
+
+ if ((ret =
+ __bam_bulk_overflow(dbc, size, pgno, np)) != 0)
+ return (ret);
+
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+
+ *offp-- = (int32_t)(np - dbuf);
+ *offp-- = size;
+
+ np += size;
+ space -= size;
+ break;
+ }
+ } while (next_key && (indx += 2) < NUM_ENT(pg));
+
+ cp->indx = indx;
+ cp->dup_len = dup_len;
+ cp->dup_off = dup_off;
+ cp->dup_tlen = dup_tlen;
+
+ /* If we are off the page then try the next page. */
+ if (ret == 0 && next_key && indx >= NUM_ENT(pg)) {
+ if ((ret = __ham_item_next(dbc, lock_mode, &pgno)) == 0)
+ goto next_pg;
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ if ((ret = mpf->put(dbc->dbp->mpf, cp->page, 0)) != 0)
+ return (ret);
+ cp->page = NULL;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+
+ cp->bucket++;
+ if (cp->bucket > cp->hdr->max_bucket) {
+ /*
+ * Restore cursor to its previous state. We're past
+ * the last item in the last bucket, so the next
+ * DBC->c_get(DB_NEXT) will return DB_NOTFOUND.
+ */
+ cp->bucket--;
+ ret = DB_NOTFOUND;
+ } else {
+ /*
+ * Start on the next bucket.
+ *
+ * Note that if this new bucket happens to be empty,
+ * but there's another non-empty bucket after it,
+ * we'll return early. This is a rare case, and we
+ * don't guarantee any particular number of keys
+ * returned on each call, so just let the next call
+ * to bulk get move forward by yet another bucket.
+ */
+ cp->pgno = BUCKET_TO_PAGE(cp, cp->bucket);
+ cp->indx = NDX_INVALID;
+ F_CLR(cp, H_ISDUP);
+ ret = __ham_item_next(dbc, lock_mode, &pgno);
+ }
+
+ if ((t_ret = __ham_release_meta(dbc)) != 0)
+ return (t_ret);
+ if (ret == 0)
+ goto next_pg;
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ }
+ /* Write the terminating entry of the offset table. */
+ *offp = (u_int32_t) -1;
+ return (0);
+}
+
+/*
+ * __ham_c_put --
+ * Hash cursor put: add or overwrite a key/data pair, growing the
+ * table afterward if the insert flagged a needed expansion.
+ */
+static int
+__ham_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key;
+ DBT *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ DBT tmp_val, *myval;
+ HASH_CURSOR *hcp;
+ u_int32_t nbytes;
+ int ret, t_ret;
+
+ /*
+ * The compiler doesn't realize that we only use this when ret is
+ * equal to 0 and that if ret is equal to 0, that we must have set
+ * myval. So, we initialize it here to shut the compiler up.
+ */
+ COMPQUIET(myval, NULL);
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (F_ISSET(hcp, H_DELETED) &&
+ flags != DB_KEYFIRST && flags != DB_KEYLAST)
+ return (DB_NOTFOUND);
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err1;
+
+ switch (flags) {
+ case DB_KEYLAST:
+ case DB_KEYFIRST:
+ case DB_NODUPDATA:
+ /* Space the new pair will need on a page: either an
+ * overflow reference or the in-line representation. */
+ nbytes = (ISBIG(hcp, key->size) ? HOFFPAGE_PSIZE :
+ HKEYDATA_PSIZE(key->size)) +
+ (ISBIG(hcp, data->size) ? HOFFPAGE_PSIZE :
+ HKEYDATA_PSIZE(data->size));
+ if ((ret = __ham_lookup(dbc,
+ key, nbytes, DB_LOCK_WRITE, pgnop)) == DB_NOTFOUND) {
+ /* Key not present: this is an add. */
+ ret = 0;
+ if (hcp->seek_found_page != PGNO_INVALID &&
+ hcp->seek_found_page != hcp->pgno) {
+ /* Move to the page with room found
+ * during the lookup. */
+ if ((ret = mpf->put(mpf, hcp->page, 0)) != 0)
+ goto err2;
+ hcp->page = NULL;
+ hcp->pgno = hcp->seek_found_page;
+ hcp->indx = NDX_INVALID;
+ }
+
+ if (F_ISSET(data, DB_DBT_PARTIAL) && data->doff != 0) {
+ /*
+ * A partial put, but the key does not exist
+ * and we are not beginning the write at 0.
+ * We must create a data item padded up to doff
+ * and then write the new bytes represented by
+ * val.
+ */
+ if ((ret = __ham_init_dbt(dbp->dbenv, &tmp_val,
+ data->size + data->doff,
+ &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen)) == 0) {
+ memset(tmp_val.data, 0, data->doff);
+ memcpy((u_int8_t *)tmp_val.data +
+ data->doff, data->data, data->size);
+ myval = &tmp_val;
+ }
+ } else
+ myval = (DBT *)data;
+
+ if (ret == 0)
+ ret = __ham_add_el(dbc, key, myval, H_KEYDATA);
+ goto done;
+ }
+ break;
+ case DB_BEFORE:
+ case DB_AFTER:
+ case DB_CURRENT:
+ ret = __ham_item(dbc, DB_LOCK_WRITE, pgnop);
+ break;
+ }
+
+ /* Key exists (or cursor positioned): overwrite or add a dup,
+ * unless the item is in an off-page dup tree (*pgnop set). */
+ if (*pgnop == PGNO_INVALID && ret == 0) {
+ if (flags == DB_CURRENT ||
+ ((flags == DB_KEYFIRST ||
+ flags == DB_KEYLAST || flags == DB_NODUPDATA) &&
+ !(F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))))
+ ret = __ham_overwrite(dbc, data, flags);
+ else
+ ret = __ham_add_dup(dbc, data, flags, pgnop);
+ }
+
+done: if (ret == 0 && F_ISSET(hcp, H_EXPAND)) {
+ /* The insert filled a bucket; grow the table now. */
+ ret = __ham_expand_table(dbc);
+ F_CLR(hcp, H_EXPAND);
+ }
+
+ if (hcp->page != NULL &&
+ (t_ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+err2: if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+err1: return (ret);
+}
+
+/********************************* UTILITIES ************************/
+
+/*
+ * __ham_expand_table --
+ */
+static int
+__ham_expand_table(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_LOCK metalock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DBMETA *mmeta;
+ HASH_CURSOR *hcp;
+ PAGE *h;
+ db_pgno_t pgno, mpgno;
+ u_int32_t newalloc, new_bucket, old_bucket;
+ int dirty_meta, got_meta, logn, new_double, ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ return (ret);
+
+ LOCK_INIT(metalock);
+ mmeta = (DBMETA *) hcp->hdr;
+ mpgno = mmeta->pgno;
+ h = NULL;
+ dirty_meta = 0;
+ got_meta = 0;
+ newalloc = 0;
+
+ /*
+ * If the split point is about to increase, make sure that we
+ * have enough extra pages. The calculation here is weird.
+ * We'd like to do this after we've upped max_bucket, but it's
+ * too late then because we've logged the meta-data split. What
+ * we'll do between then and now is increment max bucket and then
+ * see what the log of one greater than that is; here we have to
+ * look at the log of max + 2. VERY NASTY STUFF.
+ *
+ * We figure out what we need to do, then we log it, then request
+ * the pages from mpool. We don't want to fail after extending
+ * the file.
+ *
+ * If the page we are about to split into has already been allocated,
+ * then we simply need to get it to get its LSN. If it hasn't yet
+ * been allocated, then we know it's LSN (0,0).
+ */
+
+ new_bucket = hcp->hdr->max_bucket + 1;
+ /* The bucket whose records will be redistributed into new_bucket. */
+ old_bucket = new_bucket & hcp->hdr->low_mask;
+
+ new_double = hcp->hdr->max_bucket == hcp->hdr->high_mask;
+ logn = __db_log2(new_bucket);
+
+ if (!new_double || hcp->hdr->spares[logn + 1] != PGNO_INVALID) {
+ /* Page exists; get it so we can get its LSN */
+ pgno = BUCKET_TO_PAGE(hcp, new_bucket);
+ if ((ret =
+ mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
+ goto err;
+ lsn = h->lsn;
+ } else {
+ /* Get the master meta-data page to do allocation. */
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ mpgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc,
+ 0, mpgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret =
+ mpf->get(mpf, &mpgno, 0, (PAGE **)&mmeta)) != 0)
+ goto err;
+ got_meta = 1;
+ }
+ pgno = mmeta->last_pgno + 1;
+ ZERO_LSN(lsn);
+ newalloc = 1;
+ }
+
+ /* Log the meta-data split first. */
+ if (DBC_LOGGING(dbc)) {
+ /*
+ * We always log the page number of the first page of
+ * the allocation group. However, the LSN that we log
+ * is either the LSN on the first page (if we did not
+ * do the actual allocation here) or the LSN on the last
+ * page of the unit (if we did do the allocation here).
+ */
+ if ((ret = __ham_metagroup_log(dbp, dbc->txn,
+ &lsn, 0, hcp->hdr->max_bucket, mpgno, &mmeta->lsn,
+ hcp->hdr->dbmeta.pgno, &hcp->hdr->dbmeta.lsn,
+ pgno, &lsn, newalloc)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(lsn);
+
+ hcp->hdr->dbmeta.lsn = lsn;
+
+ if (new_double && hcp->hdr->spares[logn + 1] == PGNO_INVALID) {
+ /*
+ * We need to begin a new doubling and we have not allocated
+ * any pages yet. Read the last page in and initialize it to
+ * make the allocation contiguous. The pgno we calculated
+ * above is the first page allocated. The entry in spares is
+ * that page number minus any buckets already allocated (it
+ * simplifies bucket to page transaction). After we've set
+ * that, we calculate the last pgno.
+ */
+
+ hcp->hdr->spares[logn + 1] = pgno - new_bucket;
+ pgno += hcp->hdr->max_bucket;
+ mmeta->last_pgno = pgno;
+ mmeta->lsn = lsn;
+ dirty_meta = DB_MPOOL_DIRTY;
+
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
+ goto err;
+
+ P_INIT(h, dbp->pgsize,
+ pgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ }
+
+ /*
+ * Write out whatever page we ended up modifying. By here exactly
+ * one of the two branches above has read a page into h: the two
+ * conditions are complements of each other.
+ */
+ h->lsn = lsn;
+ if ((ret = mpf->put(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ h = NULL;
+
+ /*
+ * Update the meta-data page of this hash database.
+ */
+ hcp->hdr->max_bucket = new_bucket;
+ if (new_double) {
+ hcp->hdr->low_mask = hcp->hdr->high_mask;
+ hcp->hdr->high_mask = new_bucket | hcp->hdr->low_mask;
+ }
+
+ /* Relocate records to the new bucket */
+ ret = __ham_split_page(dbc, old_bucket, new_bucket);
+
+err: if (got_meta)
+ (void)mpf->put(mpf, mmeta, dirty_meta);
+
+ if (LOCK_ISSET(metalock))
+ (void)__TLPUT(dbc, metalock);
+
+ if (h != NULL)
+ (void)mpf->put(mpf, h, 0);
+
+ return (ret);
+}
+
+/*
+ * PUBLIC: u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, int32_t));
+ */
+u_int32_t
+__ham_call_hash(dbc, k, len)
+ DBC *dbc;
+ u_int8_t *k;
+ int32_t len;
+{
+ DB *dbp;
+ u_int32_t n, bucket;
+ HASH_CURSOR *hcp;
+ HASH *hashp;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hashp = dbp->h_internal;
+
+ /* Run the user-supplied (or default) hash function over the key. */
+ n = (u_int32_t)(hashp->h_hash(dbp, k, len));
+
+ /* Linear-hashing bucket selection: mask with the larger mask, and
+ * fall back to the smaller one for buckets not yet split. */
+ bucket = n & hcp->hdr->high_mask;
+ if (bucket > hcp->hdr->max_bucket)
+ bucket = bucket & hcp->hdr->low_mask;
+ return (bucket);
+}
+
+/*
+ * __ham_dup_return --
+ *	Check for duplicates, and call __db_ret appropriately. Release
+ * everything held by the cursor. On success, val receives the data
+ * item (or the requested duplicate within an on-page duplicate set)
+ * that the cursor currently refers to.
+ */
+static int
+__ham_dup_return(dbc, val, flags)
+ DBC *dbc;
+ DBT *val;
+ u_int32_t flags;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ PAGE *pp;
+ DBT *myval, tmp_val;
+ db_indx_t ndx;
+ db_pgno_t pgno;
+ u_int32_t off, tlen;
+ u_int8_t *hk, type;
+ int cmp, ret;
+ db_indx_t len;
+
+ /* Check for duplicate and return the first one. */
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ ndx = H_DATAINDEX(hcp->indx);
+ type = HPAGE_TYPE(dbp, hcp->page, ndx);
+ pp = hcp->page;
+ myval = val;
+
+ /*
+ * There are 4 cases:
+ * 1. We are not in duplicate, simply return; the upper layer
+ * will do the right thing.
+ * 2. We are looking at keys and stumbled onto a duplicate.
+ * 3. We are in the middle of a duplicate set. (ISDUP set)
+ * 4. We need to check for particular data match.
+ */
+
+ /* We should never get here with off-page dups. */
+ DB_ASSERT(type != H_OFFDUP);
+
+ /* Case 1 */
+ if (type != H_DUPLICATE && flags != DB_GET_BOTH &&
+ flags != DB_GET_BOTHC && flags != DB_GET_BOTH_RANGE)
+ return (0);
+
+ /*
+ * Here we check for the case where we just stumbled onto a
+ * duplicate. In this case, we do initialization and then
+ * let the normal duplicate code handle it. (Case 2)
+ */
+ if (!F_ISSET(hcp, H_ISDUP) && type == H_DUPLICATE) {
+ F_SET(hcp, H_ISDUP);
+ hcp->dup_tlen = LEN_HDATA(dbp, hcp->page,
+ hcp->hdr->dbmeta.pagesize, hcp->indx);
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ if (flags == DB_LAST ||
+ flags == DB_PREV || flags == DB_PREV_NODUP) {
+ /*
+ * Backward traversal: walk the length-prefixed
+ * duplicate entries to find the offset of the
+ * last one in the set.
+ */
+ hcp->dup_off = 0;
+ do {
+ memcpy(&len,
+ HKEYDATA_DATA(hk) + hcp->dup_off,
+ sizeof(db_indx_t));
+ hcp->dup_off += DUP_SIZE(len);
+ } while (hcp->dup_off < hcp->dup_tlen);
+ hcp->dup_off -= DUP_SIZE(len);
+ } else {
+ /* Forward traversal: start at the first duplicate. */
+ memcpy(&len,
+ HKEYDATA_DATA(hk), sizeof(db_indx_t));
+ hcp->dup_off = 0;
+ }
+ hcp->dup_len = len;
+ }
+
+ /*
+ * If we are retrieving a specific key/data pair, then we
+ * may need to adjust the cursor before returning data.
+ * Case 4
+ */
+ if (flags == DB_GET_BOTH ||
+ flags == DB_GET_BOTHC || flags == DB_GET_BOTH_RANGE) {
+ if (F_ISSET(hcp, H_ISDUP)) {
+ /*
+ * If we're doing a join, search forward from the
+ * current position, not the beginning of the dup set.
+ */
+ if (flags == DB_GET_BOTHC)
+ F_SET(hcp, H_CONTINUE);
+
+ __ham_dsearch(dbc, val, &off, &cmp, flags);
+
+ /*
+ * This flag is set nowhere else and is safe to
+ * clear unconditionally.
+ */
+ F_CLR(hcp, H_CONTINUE);
+ hcp->dup_off = off;
+ } else {
+ /* Single data item: compare it directly to val. */
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ if (((HKEYDATA *)hk)->type == H_OFFPAGE) {
+ memcpy(&tlen,
+ HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ if ((ret = __db_moff(dbp, val,
+ pgno, tlen, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ } else {
+ /*
+ * We do not zero tmp_val since the comparison
+ * routines may only look at data and size.
+ */
+ tmp_val.data = HKEYDATA_DATA(hk);
+ tmp_val.size = LEN_HDATA(dbp, hcp->page,
+ dbp->pgsize, hcp->indx);
+ cmp = dbp->dup_compare == NULL ?
+ __bam_defcmp(dbp, &tmp_val, val) :
+ dbp->dup_compare(dbp, &tmp_val, val);
+ }
+ }
+
+ if (cmp != 0)
+ return (DB_NOTFOUND);
+ }
+
+ /*
+ * If we're doing a bulk get, we don't want to actually return
+ * the data: __ham_bulk will take care of cracking out the
+ * duplicates appropriately.
+ *
+ * The rest of this function calculates partial offsets and
+ * handles the actual __db_ret, so just return if
+ * DB_MULTIPLE(_KEY) is set.
+ */
+ if (F_ISSET(dbc, DBC_MULTIPLE | DBC_MULTIPLE_KEY))
+ return (0);
+
+ /*
+ * Now, everything is initialized, grab a duplicate if
+ * necessary.
+ */
+ if (F_ISSET(hcp, H_ISDUP)) { /* Case 3 */
+ /*
+ * Copy the DBT in case we are retrieving into user
+ * memory and we need the parameters for it. If the
+ * user requested a partial, then we need to adjust
+ * the user's parameters to get the partial of the
+ * duplicate which is itself a partial.
+ */
+ memcpy(&tmp_val, val, sizeof(*val));
+ if (F_ISSET(&tmp_val, DB_DBT_PARTIAL)) {
+ /*
+ * Take the user's length unless it would go
+ * beyond the end of the duplicate.
+ */
+ if (tmp_val.doff + hcp->dup_off > hcp->dup_len)
+ tmp_val.dlen = 0;
+ else if (tmp_val.dlen + tmp_val.doff >
+ hcp->dup_len)
+ tmp_val.dlen =
+ hcp->dup_len - tmp_val.doff;
+
+ /*
+ * Calculate the new offset.
+ */
+ tmp_val.doff += hcp->dup_off;
+ } else {
+ /*
+ * Not a user partial: synthesize a partial get of
+ * exactly this duplicate, skipping its leading
+ * length prefix.
+ */
+ F_SET(&tmp_val, DB_DBT_PARTIAL);
+ tmp_val.dlen = hcp->dup_len;
+ tmp_val.doff = hcp->dup_off + sizeof(db_indx_t);
+ }
+ myval = &tmp_val;
+ }
+
+ /*
+ * Finally, if we had a duplicate, pp, ndx, and myval should be
+ * set appropriately.
+ */
+ if ((ret = __db_ret(dbp, pp, ndx, myval, &dbc->rdata->data,
+ &dbc->rdata->ulen)) != 0)
+ return (ret);
+
+ /*
+ * In case we sent a temporary off to db_ret, set the real
+ * return values.
+ */
+ val->data = myval->data;
+ val->size = myval->size;
+
+ F_SET(val, DB_DBT_ISSET);
+
+ return (0);
+}
+
+/*
+ * __ham_overwrite --
+ *	Replace the data item the cursor refers to with nval. Handles
+ * overwrites within an on-page duplicate set (including partial puts,
+ * which may force conversion to off-page duplicates when the result
+ * grows too big), whole-item overwrites, and regular partial puts.
+ * The actual page modification is delegated to __ham_replpair.
+ */
+static int
+__ham_overwrite(dbc, nval, flags)
+ DBC *dbc;
+ DBT *nval;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ HASH_CURSOR *hcp;
+ DBT *myval, tmp_val, tmp_val2;
+ void *newrec;
+ u_int8_t *hk, *p;
+ u_int32_t len, nondup_size;
+ db_indx_t newsize;
+ int ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if (F_ISSET(hcp, H_ISDUP)) {
+ /*
+ * This is an overwrite of a duplicate. We should never
+ * be off-page at this point.
+ */
+ DB_ASSERT(hcp->opd == NULL);
+ /* On page dups */
+ if (F_ISSET(nval, DB_DBT_PARTIAL)) {
+ /*
+ * We're going to have to get the current item, then
+ * construct the record, do any padding and do a
+ * replace.
+ */
+ memset(&tmp_val, 0, sizeof(tmp_val));
+ if ((ret =
+ __ham_dup_return(dbc, &tmp_val, DB_CURRENT)) != 0)
+ return (ret);
+
+ /* Figure out new size. */
+ nondup_size = tmp_val.size;
+ newsize = nondup_size;
+
+ /*
+ * Three cases:
+ * 1. strictly append (may need to allocate space
+ * for pad bytes; really gross).
+ * 2. overwrite some and append.
+ * 3. strictly overwrite.
+ */
+ if (nval->doff > nondup_size)
+ newsize +=
+ (nval->doff - nondup_size + nval->size);
+ else if (nval->doff + nval->dlen > nondup_size)
+ newsize += nval->size -
+ (nondup_size - nval->doff);
+ else
+ newsize += nval->size - nval->dlen;
+
+ /*
+ * Make sure that the new size doesn't put us over
+ * the onpage duplicate size in which case we need
+ * to convert to off-page duplicates.
+ */
+ if (ISBIG(hcp, hcp->dup_tlen - nondup_size + newsize)) {
+ if ((ret = __ham_dup_convert(dbc)) != 0)
+ return (ret);
+ return (hcp->opd->c_am_put(hcp->opd,
+ NULL, nval, flags, NULL));
+ }
+
+ if ((ret = __os_malloc(dbp->dbenv,
+ DUP_SIZE(newsize), &newrec)) != 0)
+ return (ret);
+ memset(&tmp_val2, 0, sizeof(tmp_val2));
+ F_SET(&tmp_val2, DB_DBT_PARTIAL);
+
+ /*
+ * Construct the record. On-page duplicates carry
+ * their length both before and after the data bytes.
+ */
+ p = newrec;
+ /* Initial size. */
+ memcpy(p, &newsize, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+
+ /* First part of original record. */
+ len = nval->doff > tmp_val.size
+ ? tmp_val.size : nval->doff;
+ memcpy(p, tmp_val.data, len);
+ p += len;
+
+ if (nval->doff > tmp_val.size) {
+ /* Padding */
+ memset(p, 0, nval->doff - tmp_val.size);
+ p += nval->doff - tmp_val.size;
+ }
+
+ /* New bytes */
+ memcpy(p, nval->data, nval->size);
+ p += nval->size;
+
+ /* End of original record (if there is any) */
+ if (nval->doff + nval->dlen < tmp_val.size) {
+ len = tmp_val.size - nval->doff - nval->dlen;
+ memcpy(p, (u_int8_t *)tmp_val.data +
+ nval->doff + nval->dlen, len);
+ p += len;
+ }
+
+ /* Final size. */
+ memcpy(p, &newsize, sizeof(db_indx_t));
+
+ /*
+ * Make sure that the caller isn't corrupting
+ * the sort order.
+ */
+ if (dbp->dup_compare != NULL) {
+ tmp_val2.data =
+ (u_int8_t *)newrec + sizeof(db_indx_t);
+ tmp_val2.size = newsize;
+ if (dbp->dup_compare(
+ dbp, &tmp_val, &tmp_val2) != 0) {
+ (void)__os_free(dbenv, newrec);
+ return (__db_duperr(dbp, flags));
+ }
+ }
+
+ tmp_val2.data = newrec;
+ tmp_val2.size = DUP_SIZE(newsize);
+ tmp_val2.doff = hcp->dup_off;
+ tmp_val2.dlen = DUP_SIZE(hcp->dup_len);
+
+ ret = __ham_replpair(dbc, &tmp_val2, 0);
+ (void)__os_free(dbenv, newrec);
+
+ /* Update cursor */
+ if (ret != 0)
+ return (ret);
+
+ if (newsize > nondup_size)
+ hcp->dup_tlen += (newsize - nondup_size);
+ else
+ hcp->dup_tlen -= (nondup_size - newsize);
+ hcp->dup_len = DUP_SIZE(newsize);
+ return (0);
+ } else {
+ /* Check whether we need to convert to off page. */
+ if (ISBIG(hcp,
+ hcp->dup_tlen - hcp->dup_len + nval->size)) {
+ if ((ret = __ham_dup_convert(dbc)) != 0)
+ return (ret);
+ return (hcp->opd->c_am_put(hcp->opd,
+ NULL, nval, flags, NULL));
+ }
+
+ /* Make sure we maintain sort order. */
+ if (dbp->dup_compare != NULL) {
+ tmp_val2.data =
+ HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page,
+ hcp->indx)) + hcp->dup_off +
+ sizeof(db_indx_t);
+ tmp_val2.size = hcp->dup_len;
+ if (dbp->dup_compare(dbp, nval, &tmp_val2) != 0)
+ return (EINVAL);
+ }
+ /* Overwriting a complete duplicate. */
+ if ((ret =
+ __ham_make_dup(dbp->dbenv, nval, &tmp_val,
+ &dbc->my_rdata.data, &dbc->my_rdata.ulen)) != 0)
+ return (ret);
+ /* Now fix what we are replacing. */
+ tmp_val.doff = hcp->dup_off;
+ tmp_val.dlen = DUP_SIZE(hcp->dup_len);
+
+ /* Update cursor */
+ if (nval->size > hcp->dup_len)
+ hcp->dup_tlen += (nval->size - hcp->dup_len);
+ else
+ hcp->dup_tlen -= (hcp->dup_len - nval->size);
+ hcp->dup_len = (db_indx_t)DUP_SIZE(nval->size);
+ }
+ myval = &tmp_val;
+ } else if (!F_ISSET(nval, DB_DBT_PARTIAL)) {
+ /* Put/overwrite */
+ memcpy(&tmp_val, nval, sizeof(*nval));
+ F_SET(&tmp_val, DB_DBT_PARTIAL);
+ tmp_val.doff = 0;
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE)
+ memcpy(&tmp_val.dlen,
+ HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ else
+ tmp_val.dlen = LEN_HDATA(dbp, hcp->page,
+ hcp->hdr->dbmeta.pagesize, hcp->indx);
+ myval = &tmp_val;
+ } else
+ /* Regular partial put */
+ myval = nval;
+
+ return (__ham_replpair(dbc, myval, 0));
+}
+
+/*
+ * __ham_lookup --
+ *	Given a key and a cursor, sets the cursor to the page/ndx on which
+ * the key resides. If the key is found, the cursor H_OK flag is set
+ * and the pagep, bndx, pgno (dpagep, dndx, dpgno) fields are set.
+ * If the key is not found, the H_OK flag is not set. If the sought
+ * field is non-0, the pagep, bndx, pgno (dpagep, dndx, dpgno) fields
+ * are set indicating where an add might take place. If it is 0,
+ * none of the cursor pointer fields are valid.
+ */
+static int
+__ham_lookup(dbc, key, sought, mode, pgnop)
+ DBC *dbc;
+ const DBT *key;
+ u_int32_t sought; /* Size of the item to add; 0 if not adding. */
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_pgno_t pgno;
+ u_int32_t tlen;
+ int match, ret;
+ u_int8_t *hk, *dk;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ /*
+ * Set up cursor so that we're looking for space to add an item
+ * as we cycle through the pages looking for the key.
+ */
+ if ((ret = __ham_item_reset(dbc)) != 0)
+ return (ret);
+ hcp->seek_size = sought;
+
+ hcp->bucket = __ham_call_hash(dbc, (u_int8_t *)key->data, key->size);
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+
+ for (;;) {
+ *pgnop = PGNO_INVALID;
+ if ((ret = __ham_item_next(dbc, mode, pgnop)) != 0)
+ return (ret);
+
+ if (F_ISSET(hcp, H_NOMORE))
+ break;
+
+ hk = H_PAIRKEY(dbp, hcp->page, hcp->indx);
+ switch (HPAGE_PTYPE(hk)) {
+ case H_OFFPAGE:
+ /* Off-page key: compare lengths before bytes. */
+ memcpy(&tlen, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ if (tlen == key->size) {
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ if ((ret = __db_moff(dbp,
+ key, pgno, tlen, NULL, &match)) != 0)
+ return (ret);
+ if (match == 0)
+ goto found_key;
+ }
+ break;
+ case H_KEYDATA:
+ if (key->size ==
+ LEN_HKEY(dbp, hcp->page, dbp->pgsize, hcp->indx) &&
+ memcmp(key->data,
+ HKEYDATA_DATA(hk), key->size) == 0) {
+ /* Found the key, check for data type. */
+found_key: F_SET(hcp, H_OK);
+ dk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ if (HPAGE_PTYPE(dk) == H_OFFDUP)
+ memcpy(pgnop, HOFFDUP_PGNO(dk),
+ sizeof(db_pgno_t));
+ return (0);
+ }
+ break;
+ case H_DUPLICATE:
+ case H_OFFDUP:
+ /*
+ * These are errors because keys are never
+ * duplicated, only data items are.
+ */
+ return (__db_pgfmt(dbp->dbenv, PGNO(hcp->page)));
+ }
+ }
+
+ /*
+ * Item was not found. Both the sought and not-sought cases
+ * propagate the same status, so the old two-way branch on
+ * sought was redundant and has been collapsed.
+ */
+ return (ret);
+}
+
+/*
+ * __ham_init_dbt --
+ *	Initialize a dbt using some possibly already allocated storage
+ *	for items. The caller-owned buffer (*bufp, capacity *sizep) is
+ *	grown only when it is too small for the requested size.
+ *
+ * PUBLIC: int __ham_init_dbt __P((DB_ENV *,
+ * PUBLIC: DBT *, u_int32_t, void **, u_int32_t *));
+ */
+int
+__ham_init_dbt(dbenv, dbt, size, bufp, sizep)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ u_int32_t size;
+ void **bufp;
+ u_int32_t *sizep;
+{
+ int ret;
+
+ memset(dbt, 0, sizeof(*dbt));
+ if (*sizep < size) {
+ if ((ret = __os_realloc(dbenv, size, bufp)) != 0) {
+ /* Capacity unknown after a failed realloc; reset. */
+ *sizep = 0;
+ return (ret);
+ }
+ *sizep = size;
+ }
+ dbt->data = *bufp;
+ dbt->size = size;
+ return (0);
+}
+
+/*
+ * Adjust the cursor after an insert or delete. The cursor passed is
+ * the one that was operated upon; we just need to check any of the
+ * others.
+ *
+ * len indicates the length of the item added/deleted
+ * add indicates if the item indicated by the cursor has just been
+ * added (add == 1) or deleted (add == 0).
+ * dup indicates if the addition occurred into a duplicate set.
+ *
+ * PUBLIC: int __ham_c_update
+ * PUBLIC: __P((DBC *, u_int32_t, int, int));
+ */
+int
+__ham_c_update(dbc, len, add, is_dup)
+ DBC *dbc;
+ u_int32_t len;
+ int add, is_dup;
+{
+ DB *dbp, *ldbp;
+ DBC *cp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ HASH_CURSOR *hcp, *lcp;
+ int found, ret;
+ u_int32_t order;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /*
+ * Adjustment will only be logged if this is a subtransaction.
+ * Only subtransactions can abort and affect their parent
+ * transaction's cursors.
+ */
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+
+ /*
+ * Calculate the order of this deleted record.
+ * This will be one greater than any cursor that is pointing
+ * at this record and already marked as deleted.
+ */
+ order = 0;
+ if (!add) {
+ order = 1;
+ /* Scan every open handle on the same underlying file. */
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+ lcp = (HASH_CURSOR *)cp->internal;
+ if (F_ISSET(lcp, H_DELETED) &&
+ hcp->pgno == lcp->pgno &&
+ hcp->indx == lcp->indx &&
+ order <= lcp->order &&
+ (!is_dup || hcp->dup_off == lcp->dup_off))
+ order = lcp->order + 1;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ hcp->order = order;
+ }
+
+ /* Now adjust all other cursors open on this file. */
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ lcp = (HASH_CURSOR *)cp->internal;
+
+ if (lcp->pgno != hcp->pgno || lcp->indx == NDX_INVALID)
+ continue;
+
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+
+ if (!is_dup) {
+ if (add) {
+ /*
+ * This routine is not called to add
+ * non-dup records which are always put
+ * at the end. It is only called from
+ * recovery in this case and the
+ * cursor will be marked deleted.
+ * We are "undeleting" so unmark all
+ * cursors with the same order.
+ */
+ if (lcp->indx == hcp->indx &&
+ F_ISSET(lcp, H_DELETED)) {
+ if (lcp->order == hcp->order)
+ F_CLR(lcp, H_DELETED);
+ else if (lcp->order >
+ hcp->order) {
+
+ /*
+ * If we've moved this cursor's
+ * index, split its order
+ * number--i.e., decrement it by
+ * enough so that the lowest
+ * cursor moved has order 1.
+ * cp_arg->order is the split
+ * point, so decrement by one
+ * less than that.
+ */
+ lcp->order -=
+ (hcp->order - 1);
+ lcp->indx += 2;
+ }
+ } else if (lcp->indx >= hcp->indx)
+ lcp->indx += 2;
+
+ } else {
+ /* Deleting a non-dup item. */
+ if (lcp->indx > hcp->indx) {
+ lcp->indx -= 2;
+ if (lcp->indx == hcp->indx &&
+ F_ISSET(lcp, H_DELETED))
+ lcp->order += order;
+ } else if (lcp->indx == hcp->indx &&
+ !F_ISSET(lcp, H_DELETED)) {
+ F_SET(lcp, H_DELETED);
+ F_CLR(lcp, H_ISDUP);
+ lcp->order = order;
+ }
+ }
+ } else if (lcp->indx == hcp->indx) {
+ /*
+ * Handle duplicates. This routine is
+ * only called for on page dups.
+ * Off page dups are handled by btree/rtree
+ * code.
+ */
+ if (add) {
+ lcp->dup_tlen += len;
+ if (lcp->dup_off == hcp->dup_off &&
+ F_ISSET(hcp, H_DELETED) &&
+ F_ISSET(lcp, H_DELETED)) {
+ /* Abort of a delete. */
+ if (lcp->order == hcp->order)
+ F_CLR(lcp, H_DELETED);
+ else if (lcp->order >
+ hcp->order) {
+ lcp->order -=
+ (hcp->order -1);
+ lcp->dup_off += len;
+ }
+ } else if (lcp->dup_off >= hcp->dup_off)
+ lcp->dup_off += len;
+ } else {
+ lcp->dup_tlen -= len;
+ if (lcp->dup_off > hcp->dup_off) {
+ lcp->dup_off -= len;
+ if (lcp->dup_off ==
+ hcp->dup_off &&
+ F_ISSET(lcp, H_DELETED))
+ lcp->order += order;
+ } else if (lcp->dup_off ==
+ hcp->dup_off &&
+ !F_ISSET(lcp, H_DELETED)) {
+ F_SET(lcp, H_DELETED);
+ lcp->order = order;
+ }
+ }
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ /* Log the adjustment so it can be undone if my_txn aborts. */
+ if (found != 0 && DBC_LOGGING(dbc)) {
+ if ((ret = __ham_curadj_log(dbp, my_txn, &lsn, 0, hcp->pgno,
+ hcp->indx, len, hcp->dup_off, add, is_dup, order)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __ham_get_clist --
+ *
+ * Get a list of cursors either on a particular bucket or on a particular
+ * page and index combination. The former is so that we can update
+ * cursors on a split. The latter is so we can update cursors when we
+ * move items off page. The returned list is NULL-terminated and
+ * allocated with __os_realloc; the caller owns and frees it.
+ *
+ * PUBLIC: int __ham_get_clist __P((DB *, db_pgno_t, u_int32_t, DBC ***));
+ */
+int
+__ham_get_clist(dbp, pgno, indx, listp)
+ DB *dbp;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ DBC ***listp;
+{
+ DB *ldbp;
+ DBC *cp;
+ DB_ENV *dbenv;
+ int nalloc, nused, ret;
+
+ /*
+ * Assume that finding anything is the exception, so optimize for
+ * the case where there aren't any.
+ */
+ nalloc = nused = 0;
+ *listp = NULL;
+ dbenv = dbp->dbenv;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links))
+ /*
+ * We match if cp->pgno matches the specified
+ * pgno, and if either the cp->indx matches
+ * or we weren't given an index.
+ */
+ if (cp->internal->pgno == pgno &&
+ (indx == NDX_INVALID ||
+ cp->internal->indx == indx)) {
+ /* Grow the result array in chunks of 10. */
+ if (nused >= nalloc) {
+ nalloc += 10;
+ /*
+ * NOTE(review): element size is
+ * sizeof(HASH_CURSOR *) but the array
+ * stores DBC * entries; sizes agree on
+ * supported platforms, yet
+ * sizeof(DBC *) would be clearer --
+ * confirm before changing.
+ */
+ if ((ret = __os_realloc(dbp->dbenv,
+ nalloc * sizeof(HASH_CURSOR *),
+ listp)) != 0)
+ goto err;
+ }
+ (*listp)[nused++] = cp;
+ }
+
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ /*
+ * NOTE(review): listp is the caller's out-parameter and is never
+ * NULL here (it was dereferenced above); this always allocates at
+ * least a one-entry NULL-terminated list -- verify that callers
+ * rely on that rather than on a NULL *listp.
+ */
+ if (listp != NULL) {
+ if (nused >= nalloc) {
+ nalloc++;
+ if ((ret = __os_realloc(dbp->dbenv,
+ nalloc * sizeof(HASH_CURSOR *), listp)) != 0)
+ return (ret);
+ }
+ (*listp)[nused] = NULL;
+ }
+ return (0);
+err:
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+ return (ret);
+}
+
+/*
+ * __ham_c_writelock --
+ *	Upgrade the cursor's bucket lock to a write lock when standard
+ * locking is in effect and no write lock is already held. The old
+ * lock (if any) is released only after the new one is acquired.
+ */
+static int
+__ham_c_writelock(dbc)
+ DBC *dbc;
+{
+ DB_ENV *dbenv;
+ DB_LOCK tmp_lock;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ /*
+ * All we need do is acquire the lock and let the off-page
+ * dup tree do its thing.
+ */
+ if (!STD_LOCKING(dbc))
+ return (0);
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((!LOCK_ISSET(hcp->lock) || hcp->lock_mode == DB_LOCK_READ)) {
+ tmp_lock = hcp->lock;
+ if ((ret = __ham_lock_bucket(dbc, DB_LOCK_WRITE)) != 0)
+ return (ret);
+ dbenv = dbc->dbp->dbenv;
+ /* Drop the old lock now that the write lock is held. */
+ if (LOCK_ISSET(tmp_lock) &&
+ (ret = dbenv->lock_put(dbenv, &tmp_lock)) != 0)
+ return (ret);
+ }
+ return (0);
+}
diff --git a/storage/bdb/hash/hash.src b/storage/bdb/hash/hash.src
new file mode 100644
index 00000000000..b4b633c56e6
--- /dev/null
+++ b/storage/bdb/hash/hash.src
@@ -0,0 +1,266 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: hash.src,v 10.38 2002/04/17 19:03:10 krinsky Exp $
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+PREFIX __ham
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/hash.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * HASH-insdel: used for hash to insert/delete a pair of entries onto a master
+ * page. The pair might be regular key/data pairs or they might be the
+ * structures that refer to off page items, duplicates or offpage duplicates.
+ * opcode - PUTPAIR/DELPAIR + big masks
+ * fileid - identifies the file referenced
+ * pgno - page within file
+ * ndx - index on the page of the item being added (item index)
+ * pagelsn - lsn on the page before the update
+ * key - the key being inserted
+ * data - the data being inserted
+ */
+BEGIN insdel 21
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+ARG ndx u_int32_t lu
+POINTER pagelsn DB_LSN * lu
+DBT key DBT s
+DBT data DBT s
+END
+
+/*
+ * Used to add and remove overflow pages.
+ * prev_pgno is the previous page that is going to get modified to
+ * point to this one. If this is the first page in a chain
+ * then prev_pgno should be PGNO_INVALID.
+ * new_pgno is the page being allocated.
+ * next_pgno is the page that follows this one. On allocation,
+ * this should be PGNO_INVALID. For deletes, it may exist.
+ * pagelsn is the old lsn on the page.
+ */
+BEGIN newpage 22
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+WRLOCKNZ prev_pgno db_pgno_t lu
+POINTER prevlsn DB_LSN * lu
+WRLOCKNZ new_pgno db_pgno_t lu
+POINTER pagelsn DB_LSN * lu
+WRLOCKNZ next_pgno db_pgno_t lu
+POINTER nextlsn DB_LSN * lu
+END
+
+/*
+ * Splitting requires two types of log messages. The second logs the
+ * data on the original page. To redo the split, we have to visit the
+ * new page (pages) and add the items back on the page if they are not
+ * yet there.
+ */
+BEGIN splitdata 24
+DB fileid int32_t ld
+ARG opcode u_int32_t lu
+WRLOCK pgno db_pgno_t lu
+PGDBT pageimage DBT s
+POINTER pagelsn DB_LSN * lu
+END
+
+/*
+ * HASH-replace: is used for hash to handle partial puts that only
+ * affect a single master page.
+ * fileid - identifies the file referenced
+ * pgno - page within file
+ * ndx - index on the page of the item being modified (item index)
+ * pagelsn - lsn on the page before the update
+ * off - offset in the old item where the new item is going.
+ * olditem - DBT that describes the part of the item being replaced.
+ * newitem - DBT of the new item.
+ * makedup - this was a replacement that made an item a duplicate.
+ */
+BEGIN replace 25
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+ARG ndx u_int32_t lu
+POINTER pagelsn DB_LSN * lu
+ARG off int32_t ld
+DBT olditem DBT s
+DBT newitem DBT s
+ARG makedup u_int32_t lu
+END
+
+/*
+ * Used when we empty the first page in a bucket and there are pages after
+ * it. The page after it gets copied into the bucket page (since bucket
+ * pages have to be in fixed locations).
+ * pgno: the bucket page
+ * pagelsn: the old LSN on the bucket page
+ * next_pgno: the page number of the next page
+ * nnext_pgno: page after next_pgno (may need to change its prev)
+ * nnextlsn: the LSN of nnext_pgno.
+ */
+BEGIN copypage 28
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER pagelsn DB_LSN * lu
+WRLOCK next_pgno db_pgno_t lu
+POINTER nextlsn DB_LSN * lu
+WRLOCKNZ nnext_pgno db_pgno_t lu
+POINTER nnextlsn DB_LSN * lu
+PGDBT page DBT s
+END
+
+/*
+ * This record logs the meta-data aspects of a split operation. It has enough
+ * information so that we can record both an individual page allocation as well
+ * as a group allocation which we do because in sub databases, the pages in
+ * a hash doubling, must be contiguous. If we do a group allocation, the
+ * number of pages allocated is bucket + 1, pgno is the page number of the
+ * first newly allocated bucket.
+ *
+ * bucket: Old maximum bucket number.
+ * mmpgno: Master meta-data page number (0 if same as mpgno).
+ * mmetalsn: Lsn of the master meta-data page.
+ * mpgno: Meta-data page number.
+ * metalsn: Lsn of the meta-data page.
+ * pgno: Page allocated to bucket + 1 (first newly allocated page)
+ * pagelsn: Lsn of either the first page allocated (if newalloc == 0) or
+ * the last page allocated (if newalloc == 1).
+ * newalloc: 1 indicates that this record did the actual allocation;
+ * 0 indicates that the pages were already allocated from a
+ * previous (failed) allocation.
+ */
+BEGIN metagroup 29
+DB fileid int32_t ld
+ARG bucket u_int32_t lu
+WRLOCK mmpgno db_pgno_t lu
+POINTER mmetalsn DB_LSN * lu
+WRLOCKNZ mpgno db_pgno_t lu
+POINTER metalsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
+POINTER pagelsn DB_LSN * lu
+ARG newalloc u_int32_t lu
+END
+
+/*
+ * groupalloc
+ *
+ * This is used in conjunction with MPOOL_NEW_GROUP when we are creating
+ * a new database to make sure that we recreate or reclaim free pages
+ * when we allocate a chunk of contiguous ones during database creation.
+ *
+ * meta_lsn: meta-data lsn
+ * start_pgno: starting page number
+ * num: number of allocated pages
+ * free: free page list head (presumably; verify against recovery code)
+ */
+BEGIN groupalloc 32
+DB fileid int32_t ld
+POINTER meta_lsn DB_LSN * lu
+WRLOCK start_pgno db_pgno_t lu
+ARG num u_int32_t lu
+ARG free db_pgno_t lu
+END
+
+/*
+ * Records for backing out cursor adjustment.
+ * curadj - added or deleted a record or a dup
+ * within a record.
+ * pgno - page that was affected
+ * indx - index of the record affected.
+ * len - if a dup its length.
+ * dup_off - if a dup its offset
+ * add - 1 if add 0 if delete
+ * is_dup - 1 if dup 0 otherwise.
+ * order - order assigned to this deleted record or dup.
+ *
+ * chgpg - removed a page, move the records to a new page
+ * mode - CHGPG page was deleted or records move to new page.
+ * - SPLIT we split a bucket
+ * - DUP we converted to off page duplicates.
+ * old_pgno, new_pgno - old and new page numbers.
+ * old_index, new_index - old and new index numbers, NDX_INVALID if
+ * it effects all records on the page.
+ * For three opcodes new in 3.3 (DB_HAM_DELFIRSTPG, DELMIDPG,
+ * and DELLASTPG), we overload old_indx and new_indx to avoid
+ * needing a new log record type: old_indx stores the only
+ * indx of interest to these records, and new_indx stores the
+ * order that's assigned to the lowest deleted record we're
+ * moving.
+ */
+BEGIN curadj 33
+DB fileid int32_t ld
+ARG pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG len u_int32_t lu
+ARG dup_off u_int32_t lu
+ARG add int ld
+ARG is_dup int ld
+ARG order u_int32_t lu
+END
+
+BEGIN chgpg 34
+DB fileid int32_t ld
+ARG mode db_ham_mode ld
+ARG old_pgno db_pgno_t lu
+ARG new_pgno db_pgno_t lu
+ARG old_indx u_int32_t lu
+ARG new_indx u_int32_t lu
+END
+
diff --git a/storage/bdb/hash/hash_conv.c b/storage/bdb/hash/hash_conv.c
new file mode 100644
index 00000000000..a93e56a2ee4
--- /dev/null
+++ b/storage/bdb/hash/hash_conv.c
@@ -0,0 +1,116 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_conv.c,v 11.13 2002/08/06 05:34:35 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/hash.h"
+
+/*
+ * __ham_pgin --
+ *	Convert host-specific page layout from the host-independent format
+ *	stored on disk. Also lazily initializes pages created by the hash
+ *	access method's blind reads.
+ *
+ * PUBLIC: int __ham_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+ */
+int
+__ham_pgin(dbenv, dummydbp, pg, pp, cookie)
+ DB_ENV *dbenv;
+ DB *dummydbp;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie; /* Carries the DB_PGINFO (page size, swap flag). */
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ h = pp;
+ pginfo = (DB_PGINFO *)cookie->data;
+
+ /*
+ * The hash access method does blind reads of pages, causing them
+ * to be created. If the type field isn't set it's one of them,
+ * initialize the rest of the page and return.
+ */
+ if (h->type != P_HASHMETA && h->pgno == PGNO_INVALID) {
+ P_INIT(pp, (db_indx_t)pginfo->db_pagesize,
+ pg, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ return (0);
+ }
+
+ /* Byte-swap only when the on-disk byte order differs from ours. */
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
+ return (0);
+
+ /* Meta pages have their own layout; all others swap generically. */
+ return (h->type == P_HASHMETA ? __ham_mswap(pp) :
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 1));
+}
+
+/*
+ * __ham_pgout --
+ *	Convert host-specific page layout to the host-independent format
+ *	stored on disk. No-op unless the swap flag is set in the cookie.
+ *
+ * PUBLIC: int __ham_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+ */
+int
+__ham_pgout(dbenv, dummydbp, pg, pp, cookie)
+ DB_ENV *dbenv;
+ DB *dummydbp;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie; /* Carries the DB_PGINFO (page size, swap flag). */
+{
+ DB_PGINFO *pginfo;
+ PAGE *h;
+
+ pginfo = (DB_PGINFO *)cookie->data;
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
+ return (0);
+
+ h = pp;
+ /* Meta pages have their own layout; all others swap generically. */
+ return (h->type == P_HASHMETA ? __ham_mswap(pp) :
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 0));
+}
+
+/*
+ * __ham_mswap --
+ *	Swap the bytes on the hash metadata page. The swap is its own
+ *	inverse, so the same routine serves both pgin and pgout. Fields
+ *	are swapped in their on-page declaration order.
+ *
+ * PUBLIC: int __ham_mswap __P((void *));
+ */
+int
+__ham_mswap(pg)
+ void *pg;
+{
+ u_int8_t *p;
+ int i;
+
+ /* Swap the generic DBMETA header first. */
+ __db_metaswap(pg);
+
+ /* Then the hash-specific fields that follow it. */
+ p = (u_int8_t *)pg + sizeof(DBMETA);
+
+ SWAP32(p); /* max_bucket */
+ SWAP32(p); /* high_mask */
+ SWAP32(p); /* low_mask */
+ SWAP32(p); /* ffactor */
+ SWAP32(p); /* nelem */
+ SWAP32(p); /* h_charkey */
+ for (i = 0; i < NCACHED; ++i)
+ SWAP32(p); /* spares */
+ p += 59 * sizeof(u_int32_t); /* unused */
+ SWAP32(p); /* crypto_magic */
+ return (0);
+}
diff --git a/storage/bdb/hash/hash_dup.c b/storage/bdb/hash/hash_dup.c
new file mode 100644
index 00000000000..ec70e519d54
--- /dev/null
+++ b/storage/bdb/hash/hash_dup.c
@@ -0,0 +1,891 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_dup.c,v 11.76 2002/08/06 05:34:40 bostic Exp $";
+#endif /* not lint */
+
+/*
+ * PACKAGE: hashing
+ *
+ * DESCRIPTION:
+ * Manipulation of duplicates for the hash package.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/btree.h"
+
+static int __ham_c_chgpg __P((DBC *,
+ db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+static int __ham_check_move __P((DBC *, u_int32_t));
+static int __ham_dcursor __P((DBC *, db_pgno_t, u_int32_t));
+static int __ham_move_offpage __P((DBC *, PAGE *, u_int32_t, db_pgno_t));
+
+/*
+ * Called from hash_access to add a duplicate key. nval is the new
+ * value that we want to add. The flags correspond to the flag values
+ * to cursor_put indicating where to add the new element.
+ * There are 4 cases.
+ * Case 1: The existing duplicate set already resides on a separate page.
+ * We return and let the common code handle this.
+ * Case 2: The element is small enough to just be added to the existing set.
+ * Case 3: The element is large enough to be a big item, so we're going to
+ * have to push the set onto a new page.
+ * Case 4: The element is large enough to push the duplicate set onto a
+ * separate page.
+ *
+ * PUBLIC: int __ham_add_dup __P((DBC *, DBT *, u_int32_t, db_pgno_t *));
+ */
+int
+__ham_add_dup(dbc, nval, flags, pgnop)
+ DBC *dbc;
+ DBT *nval;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DBT pval, tmp_val;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ u_int32_t add_bytes, new_size;
+ int cmp, ret;
+ u_int8_t *hk;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /* DB_CURRENT is an overwrite of the current item, handled elsewhere. */
+ DB_ASSERT(flags != DB_CURRENT);
+
+ /*
+ * Partial puts pad the front of the new item with doff bytes
+ * (see __ham_make_dup), so count those toward the space we need.
+ */
+ add_bytes = nval->size +
+ (F_ISSET(nval, DB_DBT_PARTIAL) ? nval->doff : 0);
+ add_bytes = DUP_SIZE(add_bytes);
+
+ /* May move the pair to another page if this one is too full. */
+ if ((ret = __ham_check_move(dbc, add_bytes)) != 0)
+ return (ret);
+
+ /*
+ * Check if resulting duplicate set is going to need to go
+ * onto a separate duplicate page. If so, convert the
+ * duplicate set and add the new one. After conversion,
+ * hcp->dndx is the first free ndx or the index of the
+ * current pointer into the duplicate set.
+ */
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ /* Add the len bytes to the current singleton. */
+ if (HPAGE_PTYPE(hk) != H_DUPLICATE)
+ add_bytes += DUP_SIZE(0);
+ new_size =
+ LEN_HKEYDATA(dbp, hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx)) +
+ add_bytes;
+
+ /*
+ * We convert to off-page duplicates if the item is a big item,
+ * the addition of the new item will make the set large, or
+ * if there isn't enough room on this page to add the next item.
+ */
+ if (HPAGE_PTYPE(hk) != H_OFFDUP &&
+ (HPAGE_PTYPE(hk) == H_OFFPAGE || ISBIG(hcp, new_size) ||
+ add_bytes > P_FREESPACE(dbp, hcp->page))) {
+
+ if ((ret = __ham_dup_convert(dbc)) != 0)
+ return (ret);
+ /* Delegate the insert to the new off-page duplicate cursor. */
+ return (hcp->opd->c_am_put(hcp->opd,
+ NULL, nval, flags, NULL));
+ }
+
+ /* There are two separate cases here: on page and off page. */
+ if (HPAGE_PTYPE(hk) != H_OFFDUP) {
+ if (HPAGE_PTYPE(hk) != H_DUPLICATE) {
+ /*
+ * Convert the existing singleton into a one-element
+ * duplicate set in place before appending the new one.
+ */
+ pval.flags = 0;
+ pval.data = HKEYDATA_DATA(hk);
+ pval.size = LEN_HDATA(dbp, hcp->page, dbp->pgsize,
+ hcp->indx);
+ if ((ret = __ham_make_dup(dbp->dbenv,
+ &pval, &tmp_val, &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen)) != 0 || (ret =
+ __ham_replpair(dbc, &tmp_val, 1)) != 0)
+ return (ret);
+ /* __ham_replpair may have shuffled the page; refetch. */
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ HPAGE_PTYPE(hk) = H_DUPLICATE;
+
+ /*
+ * Update the cursor position since we now are in
+ * duplicates.
+ */
+ F_SET(hcp, H_ISDUP);
+ hcp->dup_off = 0;
+ hcp->dup_len = pval.size;
+ hcp->dup_tlen = DUP_SIZE(hcp->dup_len);
+ }
+
+ /* Now make the new entry a duplicate. */
+ if ((ret = __ham_make_dup(dbp->dbenv, nval,
+ &tmp_val, &dbc->my_rdata.data, &dbc->my_rdata.ulen)) != 0)
+ return (ret);
+
+ /* tmp_val.doff below selects where in the set to insert. */
+ tmp_val.dlen = 0;
+ switch (flags) { /* On page. */
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ case DB_NODUPDATA:
+ if (dbp->dup_compare != NULL) {
+ /* Sorted duplicates: find the insertion point. */
+ __ham_dsearch(dbc,
+ nval, &tmp_val.doff, &cmp, flags);
+
+ /* dup dups are not supported w/ sorted dups */
+ if (cmp == 0)
+ return (__db_duperr(dbp, flags));
+ } else {
+ hcp->dup_tlen = LEN_HDATA(dbp, hcp->page,
+ dbp->pgsize, hcp->indx);
+ hcp->dup_len = nval->size;
+ F_SET(hcp, H_ISDUP);
+ if (flags == DB_KEYFIRST)
+ hcp->dup_off = tmp_val.doff = 0;
+ else
+ hcp->dup_off =
+ tmp_val.doff = hcp->dup_tlen;
+ }
+ break;
+ case DB_BEFORE:
+ tmp_val.doff = hcp->dup_off;
+ break;
+ case DB_AFTER:
+ tmp_val.doff = hcp->dup_off + DUP_SIZE(hcp->dup_len);
+ break;
+ }
+ /* Add the duplicate. */
+ ret = __ham_replpair(dbc, &tmp_val, 0);
+ if (ret == 0)
+ ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY);
+ if (ret != 0)
+ return (ret);
+
+ /* Now, update the cursor if necessary. */
+ switch (flags) {
+ case DB_AFTER:
+ hcp->dup_off += DUP_SIZE(hcp->dup_len);
+ hcp->dup_len = nval->size;
+ hcp->dup_tlen += (db_indx_t)DUP_SIZE(nval->size);
+ break;
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ case DB_BEFORE:
+ hcp->dup_tlen += (db_indx_t)DUP_SIZE(nval->size);
+ hcp->dup_len = nval->size;
+ break;
+ }
+ /* Adjust any other cursors pointing at this duplicate set. */
+ ret = __ham_c_update(dbc, tmp_val.size, 1, 1);
+ return (ret);
+ }
+
+ /*
+ * If we get here, then we're on duplicate pages; set pgnop and
+ * return so the common code can handle it. (ret is 0 here: the
+ * early __ham_check_move call returned on any nonzero value.)
+ */
+ memcpy(pgnop, HOFFDUP_PGNO(H_PAIRDATA(dbp, hcp->page, hcp->indx)),
+ sizeof(db_pgno_t));
+
+ return (ret);
+}
+
+/*
+ * Convert an on-page set of duplicates to an offpage set of duplicates.
+ *
+ * PUBLIC: int __ham_dup_convert __P((DBC *));
+ */
+int
+__ham_dup_convert(dbc)
+ DBC *dbc;
+{
+ BOVERFLOW bo;
+ DB *dbp;
+ DBC **hcs;
+ DBT dbt;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HOFFPAGE ho;
+ PAGE *dp;
+ db_indx_t i, len, off;
+ int c, ret, t_ret;
+ u_int8_t *p, *pend;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /*
+ * Create a new page for the duplicates.
+ * Sorted sets become a btree-duplicate leaf (P_LDUP); unsorted
+ * sets become a recno leaf (P_LRECNO).
+ */
+ if ((ret = __db_new(dbc,
+ dbp->dup_compare == NULL ? P_LRECNO : P_LDUP, &dp)) != 0)
+ return (ret);
+ P_INIT(dp, dbp->pgsize,
+ dp->pgno, PGNO_INVALID, PGNO_INVALID, LEAFLEVEL, TYPE(dp));
+
+ /*
+ * Get the list of cursors that may need to be updated.
+ */
+ if ((ret = __ham_get_clist(dbp,
+ PGNO(hcp->page), (u_int32_t)hcp->indx, &hcs)) != 0)
+ goto err;
+
+ /*
+ * Now put the duplicates onto the new page.
+ */
+ dbt.flags = 0;
+ switch (HPAGE_PTYPE(H_PAIRDATA(dbp, hcp->page, hcp->indx))) {
+ case H_KEYDATA:
+ /* Simple case, one key on page; move it to dup page. */
+ dbt.size = LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
+ dbt.data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ ret = __db_pitem(dbc,
+ dp, 0, BKEYDATA_SIZE(dbt.size), NULL, &dbt);
+ goto finish;
+ case H_OFFPAGE:
+ /* Simple case, one key on page; move it to dup page. */
+ /* Translate the hash HOFFPAGE entry into a btree BOVERFLOW. */
+ memcpy(&ho, P_ENTRY(dbp, hcp->page, H_DATAINDEX(hcp->indx)),
+ HOFFPAGE_SIZE);
+ UMRW_SET(bo.unused1);
+ B_TSET(bo.type, ho.type, 0);
+ UMRW_SET(bo.unused2);
+ bo.pgno = ho.pgno;
+ bo.tlen = ho.tlen;
+ dbt.size = BOVERFLOW_SIZE;
+ dbt.data = &bo;
+
+ ret = __db_pitem(dbc, dp, 0, dbt.size, &dbt, NULL);
+finish: if (ret == 0) {
+ if ((ret = mpf->set(mpf, dp, DB_MPOOL_DIRTY)) != 0)
+ break;
+
+ /* Update any other cursors. */
+ if (hcs != NULL && DBC_LOGGING(dbc) &&
+ IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret = __ham_chgpg_log(dbp, dbc->txn,
+ &lsn, 0, DB_HAM_DUP, PGNO(hcp->page),
+ PGNO(dp), hcp->indx, 0)) != 0)
+ break;
+ }
+ for (c = 0; hcs != NULL && hcs[c] != NULL; c++)
+ if ((ret = __ham_dcursor(hcs[c],
+ PGNO(dp), 0)) != 0)
+ break;
+ }
+ break;
+ case H_DUPLICATE:
+ /* Walk the on-page set, copying each element to the new page. */
+ p = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ pend = p +
+ LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
+
+ /*
+ * We need to maintain the duplicate cursor position.
+ * Keep track of where we are in the duplicate set via
+ * the offset, and when it matches the one in the cursor,
+ * set the off-page duplicate cursor index to the current
+ * index.
+ */
+ for (off = 0, i = 0; p < pend; i++) {
+ /* Each element is [len][data][len]; see __ham_make_dup. */
+ memcpy(&len, p, sizeof(db_indx_t));
+ dbt.size = len;
+ p += sizeof(db_indx_t);
+ dbt.data = p;
+ p += len + sizeof(db_indx_t);
+ if ((ret = __db_pitem(dbc, dp,
+ i, BKEYDATA_SIZE(dbt.size), NULL, &dbt)) != 0)
+ break;
+
+ /* Update any other cursors */
+ if (hcs != NULL && DBC_LOGGING(dbc) &&
+ IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret = __ham_chgpg_log(dbp, dbc->txn,
+ &lsn, 0, DB_HAM_DUP, PGNO(hcp->page),
+ PGNO(dp), hcp->indx, i)) != 0)
+ break;
+ }
+ for (c = 0; hcs != NULL && hcs[c] != NULL; c++)
+ if (((HASH_CURSOR *)(hcs[c]->internal))->dup_off
+ == off && (ret = __ham_dcursor(hcs[c],
+ PGNO(dp), i)) != 0)
+ goto err;
+ off += len + 2 * sizeof(db_indx_t);
+ }
+ break;
+ default:
+ ret = __db_pgfmt(dbp->dbenv, (u_long)hcp->pgno);
+ break;
+ }
+
+ /*
+ * Now attach this to the source page in place of the old duplicate
+ * item.
+ */
+ if (ret == 0)
+ ret = __ham_move_offpage(dbc, hcp->page,
+ (u_int32_t)H_DATAINDEX(hcp->indx), PGNO(dp));
+
+err: if (ret == 0)
+ ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY);
+
+ /* Release the duplicate page; keep the first error we saw. */
+ if ((t_ret =
+ mpf->put(mpf, dp, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* On-page duplicate bookkeeping no longer applies. */
+ if (ret == 0)
+ hcp->dup_tlen = hcp->dup_off = hcp->dup_len = 0;
+
+ if (hcs != NULL)
+ __os_free(dbp->dbenv, hcs);
+
+ return (ret);
+}
+
+/*
+ * __ham_make_dup
+ *
+ * Take a regular dbt and make it into a duplicate item with all the partial
+ * information set appropriately. If the incoming dbt is a partial, assume
+ * we are creating a new entry and make sure that we do any initial padding.
+ *
+ * PUBLIC: int __ham_make_dup __P((DB_ENV *,
+ * PUBLIC: const DBT *, DBT *d, void **, u_int32_t *));
+ */
+int
+__ham_make_dup(dbenv, notdup, duplicate, bufp, sizep)
+ DB_ENV *dbenv;
+ const DBT *notdup;
+ DBT *duplicate;
+ void **bufp;
+ u_int32_t *sizep;
+{
+ db_indx_t tsize, item_size;
+ int ret;
+ u_int8_t *p;
+
+ /* A partial put reserves doff leading bytes (zero-filled below). */
+ item_size = (db_indx_t)notdup->size;
+ if (F_ISSET(notdup, DB_DBT_PARTIAL))
+ item_size += notdup->doff;
+
+ /* Ensure the caller's reusable buffer can hold the framed item. */
+ tsize = DUP_SIZE(item_size);
+ if ((ret = __ham_init_dbt(dbenv, duplicate, tsize, bufp, sizep)) != 0)
+ return (ret);
+
+ duplicate->dlen = 0;
+ duplicate->flags = notdup->flags;
+ F_SET(duplicate, DB_DBT_PARTIAL);
+
+ /*
+ * On-page duplicate layout: [len][optional zero padding][data][len]
+ * -- the length is stored both before and after the data.
+ */
+ p = duplicate->data;
+ memcpy(p, &item_size, sizeof(db_indx_t));
+ p += sizeof(db_indx_t);
+ if (F_ISSET(notdup, DB_DBT_PARTIAL)) {
+ memset(p, 0, notdup->doff);
+ p += notdup->doff;
+ }
+ memcpy(p, notdup->data, notdup->size);
+ p += notdup->size;
+ memcpy(p, &item_size, sizeof(db_indx_t));
+
+ duplicate->doff = 0;
+ duplicate->dlen = notdup->size;
+
+ return (0);
+}
+
+/*
+ * __ham_check_move --
+ *
+ * Check if we can do whatever we need to on this page. If not,
+ * then we'll have to move the current element to a new page.
+ */
+static int
+__ham_check_move(dbc, add_len)
+ DBC *dbc;
+ u_int32_t add_len;
+{
+ DB *dbp;
+ DBT k, d;
+ DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ PAGE *next_pagep;
+ db_pgno_t next_pgno;
+ u_int32_t new_datalen, old_len, rectype;
+ u_int8_t *hk;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+
+ /*
+ * If the item is already off page duplicates or an offpage item,
+ * then we know we can do whatever we need to do in-place
+ */
+ if (HPAGE_PTYPE(hk) == H_OFFDUP || HPAGE_PTYPE(hk) == H_OFFPAGE)
+ return (0);
+
+ /* Projected on-page data length after the addition. */
+ old_len = LEN_HITEM(dbp, hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx));
+ new_datalen = old_len - HKEYDATA_SIZE(0) + add_len;
+ if (HPAGE_PTYPE(hk) != H_DUPLICATE)
+ new_datalen += DUP_SIZE(0);
+
+ /*
+ * We need to add a new page under two conditions:
+ * 1. The addition makes the total data length cross the BIG
+ * threshold and the OFFDUP structure won't fit on this page.
+ * 2. The addition does not make the total data cross the
+ * threshold, but the new data won't fit on the page.
+ * If neither of these is true, then we can return.
+ */
+ if (ISBIG(hcp, new_datalen) && (old_len > HOFFDUP_SIZE ||
+ HOFFDUP_SIZE - old_len <= P_FREESPACE(dbp, hcp->page)))
+ return (0);
+
+ if (!ISBIG(hcp, new_datalen) && add_len <= P_FREESPACE(dbp, hcp->page))
+ return (0);
+
+ /*
+ * If we get here, then we need to move the item to a new page.
+ * Check if there are more pages in the chain. We now need to
+ * update new_datalen to include the size of both the key and
+ * the data that we need to move.
+ */
+
+ new_datalen = ISBIG(hcp, new_datalen) ?
+ HOFFDUP_SIZE : HKEYDATA_SIZE(new_datalen);
+ new_datalen += LEN_HITEM(dbp, hcp->page, dbp->pgsize, H_KEYINDEX(hcp->indx));
+
+ /* Walk the overflow-page chain looking for room. */
+ next_pagep = NULL;
+ for (next_pgno = NEXT_PGNO(hcp->page); next_pgno != PGNO_INVALID;
+ next_pgno = NEXT_PGNO(next_pagep)) {
+ if (next_pagep != NULL &&
+ (ret = mpf->put(mpf, next_pagep, 0)) != 0)
+ return (ret);
+
+ if ((ret = mpf->get(mpf,
+ &next_pgno, DB_MPOOL_CREATE, &next_pagep)) != 0)
+ return (ret);
+
+ if (P_FREESPACE(dbp, next_pagep) >= new_datalen)
+ break;
+ }
+
+ /* No more pages, add one. */
+ if (next_pagep == NULL && (ret = __ham_add_ovflpage(dbc,
+ hcp->page, 0, &next_pagep)) != 0)
+ return (ret);
+
+ /* Add new page at the end of the chain. */
+ if (P_FREESPACE(dbp, next_pagep) < new_datalen && (ret =
+ __ham_add_ovflpage(dbc, next_pagep, 1, &next_pagep)) != 0) {
+ (void)mpf->put(mpf, next_pagep, 0);
+ return (ret);
+ }
+
+ /* Copy the item to the new page. */
+ /* Log the put before mutating the page (write-ahead logging). */
+ if (DBC_LOGGING(dbc)) {
+ rectype = PUTPAIR;
+ k.flags = 0;
+ d.flags = 0;
+ if (HPAGE_PTYPE(
+ H_PAIRKEY(dbp, hcp->page, hcp->indx)) == H_OFFPAGE) {
+ rectype |= PAIR_KEYMASK;
+ k.data = H_PAIRKEY(dbp, hcp->page, hcp->indx);
+ k.size = HOFFPAGE_SIZE;
+ } else {
+ k.data =
+ HKEYDATA_DATA(H_PAIRKEY(dbp, hcp->page, hcp->indx));
+ k.size =
+ LEN_HKEY(dbp, hcp->page, dbp->pgsize, hcp->indx);
+ }
+
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
+ rectype |= PAIR_DATAMASK;
+ d.data = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ d.size = HOFFPAGE_SIZE;
+ } else {
+ if (HPAGE_PTYPE(H_PAIRDATA(dbp, hcp->page, hcp->indx))
+ == H_DUPLICATE)
+ rectype |= PAIR_DUPMASK;
+ d.data =
+ HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ d.size = LEN_HDATA(dbp, hcp->page,
+ dbp->pgsize, hcp->indx);
+ }
+
+ if ((ret = __ham_insdel_log(dbp,
+ dbc->txn, &new_lsn, 0, rectype, PGNO(next_pagep),
+ (u_int32_t)NUM_ENT(next_pagep), &LSN(next_pagep),
+ &k, &d)) != 0) {
+ (void)mpf->put(mpf, next_pagep, 0);
+ return (ret);
+ }
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(next_pagep) = new_lsn; /* Structure assignment. */
+
+ __ham_copy_item(dbp, hcp->page, H_KEYINDEX(hcp->indx), next_pagep);
+ __ham_copy_item(dbp, hcp->page, H_DATAINDEX(hcp->indx), next_pagep);
+
+ /*
+ * We've just manually inserted a key and set of data onto
+ * next_pagep; however, it's possible that our caller will
+ * return without further modifying the new page, for instance
+ * if DB_NODUPDATA is set and our new item is a duplicate duplicate.
+ * Thus, to be on the safe side, we need to mark the page dirty
+ * here. [#2996]
+ *
+ * Note that __ham_del_pair should dirty the page we're moving
+ * the items from, so we need only dirty the new page ourselves.
+ */
+ if ((ret = mpf->set(mpf, next_pagep, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+
+ /* Update all cursors that used to point to this item. */
+ if ((ret = __ham_c_chgpg(dbc, PGNO(hcp->page), H_KEYINDEX(hcp->indx),
+ PGNO(next_pagep), NUM_ENT(next_pagep) - 2)) != 0)
+ goto out;
+
+ /* Now delete the pair from the current page. */
+ ret = __ham_del_pair(dbc, 0);
+
+ /*
+ * __ham_del_pair decremented nelem. This is incorrect; we
+ * manually copied the element elsewhere, so the total number
+ * of elements hasn't changed. Increment it again.
+ *
+ * !!!
+ * Note that we still have the metadata page pinned, and
+ * __ham_del_pair dirtied it, so we don't need to set the dirty
+ * flag again.
+ */
+ if (!STD_LOCKING(dbc))
+ hcp->hdr->nelem++;
+
+out:
+ /* Release the old page and repoint the cursor at the moved pair. */
+ (void)mpf->put(mpf, hcp->page, DB_MPOOL_DIRTY);
+ hcp->page = next_pagep;
+ hcp->pgno = PGNO(hcp->page);
+ hcp->indx = NUM_ENT(hcp->page) - 2;
+ F_SET(hcp, H_EXPAND);
+ F_CLR(hcp, H_DELETED);
+
+ return (ret);
+}
+
+/*
+ * __ham_move_offpage --
+ * Replace an onpage set of duplicates with the OFFDUP structure
+ * that references the duplicate page.
+ *
+ * XXX
+ * This is really just a special case of __onpage_replace; we should
+ * probably combine them.
+ *
+ */
+static int
+__ham_move_offpage(dbc, pagep, ndx, pgno)
+ DBC *dbc;
+ PAGE *pagep;
+ u_int32_t ndx;
+ db_pgno_t pgno;
+{
+ DB *dbp;
+ DBT new_dbt;
+ DBT old_dbt;
+ HOFFDUP od;
+ db_indx_t i, *inp;
+ int32_t shrink;
+ u_int8_t *src;
+ int ret;
+
+ /* Build the HOFFDUP entry that will replace the on-page set. */
+ dbp = dbc->dbp;
+ od.type = H_OFFDUP;
+ UMRW_SET(od.unused[0]);
+ UMRW_SET(od.unused[1]);
+ UMRW_SET(od.unused[2]);
+ od.pgno = pgno;
+ ret = 0;
+
+ /* Log the replacement before touching the page. */
+ if (DBC_LOGGING(dbc)) {
+ new_dbt.data = &od;
+ new_dbt.size = HOFFDUP_SIZE;
+ old_dbt.data = P_ENTRY(dbp, pagep, ndx);
+ old_dbt.size = LEN_HITEM(dbp, pagep, dbp->pgsize, ndx);
+ if ((ret = __ham_replace_log(dbp, dbc->txn, &LSN(pagep), 0,
+ PGNO(pagep), (u_int32_t)ndx, &LSN(pagep), -1,
+ &old_dbt, &new_dbt, 0)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
+
+ shrink = LEN_HITEM(dbp, pagep, dbp->pgsize, ndx) - HOFFDUP_SIZE;
+ inp = P_INP(dbp, pagep);
+
+ if (shrink != 0) {
+ /*
+ * Copy data. Slide everything below the old item up by
+ * "shrink" bytes to reclaim the freed space.
+ */
+ src = (u_int8_t *)(pagep) + HOFFSET(pagep);
+ memmove(src + shrink, src, inp[ndx] - HOFFSET(pagep));
+ HOFFSET(pagep) += shrink;
+
+ /* Update index table. */
+ for (i = ndx; i < NUM_ENT(pagep); i++)
+ inp[i] += shrink;
+ }
+
+ /* Now copy the offdup entry onto the page. */
+ memcpy(P_ENTRY(dbp, pagep, ndx), &od, HOFFDUP_SIZE);
+ return (ret);
+}
+
+/*
+ * __ham_dsearch:
+ * Locate a particular duplicate in a duplicate set. Make sure that
+ * we exit with the cursor set appropriately.
+ *
+ * PUBLIC: void __ham_dsearch
+ * PUBLIC: __P((DBC *, DBT *, u_int32_t *, int *, u_int32_t));
+ */
+void
+__ham_dsearch(dbc, dbt, offp, cmpp, flags)
+ DBC *dbc;
+ DBT *dbt;
+ u_int32_t *offp, flags;
+ int *cmpp;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ DBT cur;
+ db_indx_t i, len;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+ u_int8_t *data;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ /* Fall back to the default byte-wise compare for unsorted sets. */
+ func = dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare;
+
+ /* H_CONTINUE means resume scanning from the saved offset. */
+ i = F_ISSET(hcp, H_CONTINUE) ? hcp->dup_off: 0;
+ data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx)) + i;
+ hcp->dup_tlen = LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
+ while (i < hcp->dup_tlen) {
+ /* Elements are framed [len][data][len]; see __ham_make_dup. */
+ memcpy(&len, data, sizeof(db_indx_t));
+ data += sizeof(db_indx_t);
+ cur.data = data;
+ cur.size = (u_int32_t)len;
+
+ /*
+ * If we find an exact match, we're done. If in a sorted
+ * duplicate set and the item is larger than our test item,
+ * we're done. In the latter case, if permitting partial
+ * matches, it's not a failure.
+ */
+ *cmpp = func(dbp, dbt, &cur);
+ if (*cmpp == 0)
+ break;
+ if (*cmpp < 0 && dbp->dup_compare != NULL) {
+ if (flags == DB_GET_BOTH_RANGE)
+ *cmpp = 0;
+ break;
+ }
+
+ i += len + 2 * sizeof(db_indx_t);
+ data += len + sizeof(db_indx_t);
+ }
+
+ /*
+ * NOTE(review): if the duplicate set were empty the loop body never
+ * runs and "len" would be used uninitialized below -- presumably
+ * callers guarantee a non-empty set; confirm before relying on it.
+ */
+ *offp = i;
+ hcp->dup_off = i;
+ hcp->dup_len = len;
+ F_SET(hcp, H_ISDUP);
+}
+
+#ifdef DEBUG
+/*
+ * __ham_cprint --
+ * Display the current cursor list.
+ *
+ * PUBLIC: void __ham_cprint __P((DBC *));
+ */
+void
+__ham_cprint(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *cp;
+
+ cp = (HASH_CURSOR *)dbc->internal;
+
+ /* One line per cursor: addresses, page/index position, state. */
+ fprintf(stderr, "%#0lx->%#0lx: page: %lu index: %lu",
+ P_TO_ULONG(dbc), P_TO_ULONG(cp), (u_long)cp->pgno,
+ (u_long)cp->indx);
+ if (F_ISSET(cp, H_DELETED))
+ fprintf(stderr, " (deleted)");
+ fprintf(stderr, "\n");
+}
+#endif /* DEBUG */
+
+/*
+ * __ham_dcursor --
+ *
+ * Create an off page duplicate cursor for this cursor.
+ */
+static int
+__ham_dcursor(dbc, pgno, indx)
+ DBC *dbc;
+ db_pgno_t pgno;
+ u_int32_t indx;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ BTREE_CURSOR *dcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /* Open (or replace) the off-page duplicate cursor on this cursor. */
+ if ((ret = __db_c_newopd(dbc, pgno, hcp->opd, &hcp->opd)) != 0)
+ return (ret);
+
+ /* Position the new btree-duplicate cursor at pgno/indx. */
+ dcp = (BTREE_CURSOR *)hcp->opd->internal;
+ dcp->pgno = pgno;
+ dcp->indx = indx;
+
+ if (dbp->dup_compare == NULL) {
+ /*
+ * Converting to off-page Recno trees is tricky. The
+ * record number for the cursor is the index + 1 (to
+ * convert to 1-based record numbers).
+ */
+ dcp->recno = indx + 1;
+ }
+
+ /*
+ * Transfer the deleted flag from the top-level cursor to the
+ * created one.
+ */
+ if (F_ISSET(hcp, H_DELETED)) {
+ F_SET(dcp, C_DELETED);
+ F_CLR(hcp, H_DELETED);
+ }
+
+ return (0);
+}
+
+/*
+ * __ham_c_chgpg --
+ * Adjust the cursors after moving an item to a new page. We only
+ * move cursors that are pointing at this one item and are not
+ * deleted; since we only touch non-deleted cursors, and since
+ * (by definition) no item existed at the pgno/indx we're moving the
+ * item to, we're guaranteed that all the cursors we affect here or
+ * on abort really do refer to this one item.
+ */
+static int
+__ham_c_chgpg(dbc, old_pgno, old_index, new_pgno, new_index)
+ DBC *dbc;
+ db_pgno_t old_pgno, new_pgno;
+ u_int32_t old_index, new_index;
+{
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ DBC *cp;
+ HASH_CURSOR *hcp;
+ int found, ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ /*
+ * Walk every open handle on this file and every active cursor on
+ * each handle, moving cursors parked on the old pgno/indx.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ hcp = (HASH_CURSOR *)cp->internal;
+
+ /*
+ * If a cursor is deleted, it doesn't refer to this
+ * item--it just happens to have the same indx, but
+ * it points to a former neighbor. Don't move it.
+ */
+ if (F_ISSET(hcp, H_DELETED))
+ continue;
+
+ if (hcp->pgno == old_pgno) {
+ if (hcp->indx == old_index) {
+ hcp->pgno = new_pgno;
+ hcp->indx = new_index;
+ } else
+ continue;
+ /* Foreign-transaction cursors force logging. */
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ /* Log the cursor adjustment so abort can undo it. */
+ if (found != 0 && DBC_LOGGING(dbc)) {
+ if ((ret = __ham_chgpg_log(dbp, my_txn, &lsn, 0, DB_HAM_CHGPG,
+ old_pgno, new_pgno, old_index, new_index)) != 0)
+ return (ret);
+ }
+ return (0);
+}
diff --git a/storage/bdb/hash/hash_func.c b/storage/bdb/hash/hash_func.c
new file mode 100644
index 00000000000..c6cc2ad4460
--- /dev/null
+++ b/storage/bdb/hash/hash_func.c
@@ -0,0 +1,245 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_func.c,v 11.12 2002/03/28 19:49:42 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __ham_func2 --
+ * Phong Vo's linear congruential hash.
+ *
+ * PUBLIC: u_int32_t __ham_func2 __P((DB *, const void *, u_int32_t));
+ */
+#define DCHARHASH(h, c) ((h) = 0x63c63cd9*(h) + 0x9c39c33d + (c))
+
+u_int32_t
+__ham_func2(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *e, *k;
+ u_int32_t h;
+ u_int8_t c;
+
+ /* dbp is unused; quiet the compiler. */
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ k = key;
+ e = k + len;
+ for (h = 0; k != e;) {
+ c = *k++;
+ /*
+ * NOTE(review): k > e can never hold here (k stops at e),
+ * so this break looks like dead code carried over from a
+ * NUL-terminated-string variant of this hash -- confirm.
+ */
+ if (!c && k > e)
+ break;
+ DCHARHASH(h, c);
+ }
+ return (h);
+}
+
+/*
+ * __ham_func3 --
+ * Ozan Yigit's original sdbm hash.
+ *
+ * Ugly, but fast. Break the string up into 8 byte units. On the first time
+ * through the loop get the "leftover bytes" (strlen % 8). On every other
+ * iteration, perform 8 HASHC's so we handle all 8 bytes. Essentially, this
+ * saves us 7 cmp & branch instructions.
+ *
+ * PUBLIC: u_int32_t __ham_func3 __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_func3(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *k;
+ u_int32_t n, loop;
+
+ /* dbp is unused; quiet the compiler. */
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ if (len == 0)
+ return (0);
+
+#define HASHC n = *k++ + 65599 * n
+ n = 0;
+ k = key;
+
+ /*
+ * Duff's device: the case labels jump into the middle of the
+ * do-while to consume the len % 8 leftover bytes on the first
+ * pass; every later iteration handles a full 8 bytes.
+ */
+ loop = (len + 8 - 1) >> 3;
+ switch (len & (8 - 1)) {
+ case 0:
+ do {
+ HASHC;
+ case 7:
+ HASHC;
+ case 6:
+ HASHC;
+ case 5:
+ HASHC;
+ case 4:
+ HASHC;
+ case 3:
+ HASHC;
+ case 2:
+ HASHC;
+ case 1:
+ HASHC;
+ } while (--loop);
+ }
+ return (n);
+}
+
+/*
+ * __ham_func4 --
+ * Chris Torek's hash function. Although this function performs only
+ * slightly worse than __ham_func5 on strings, it performs horribly on
+ * numbers.
+ *
+ * PUBLIC: u_int32_t __ham_func4 __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_func4(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *k;
+ u_int32_t h, loop;
+
+ /* dbp is unused; quiet the compiler. */
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ if (len == 0)
+ return (0);
+
+#define HASH4a h = (h << 5) - h + *k++;
+#define HASH4b h = (h << 5) + h + *k++;
+#define HASH4 HASH4b
+ h = 0;
+ k = key;
+
+ /*
+ * Duff's device, same unrolling scheme as __ham_func3: handle the
+ * len % 8 leftover bytes first, then full 8-byte chunks.
+ */
+ loop = (len + 8 - 1) >> 3;
+ switch (len & (8 - 1)) {
+ case 0:
+ do {
+ HASH4;
+ case 7:
+ HASH4;
+ case 6:
+ HASH4;
+ case 5:
+ HASH4;
+ case 4:
+ HASH4;
+ case 3:
+ HASH4;
+ case 2:
+ HASH4;
+ case 1:
+ HASH4;
+ } while (--loop);
+ }
+ return (h);
+}
+
+/*
+ * Fowler/Noll/Vo hash
+ *
+ * The basis of the hash algorithm was taken from an idea sent by email to the
+ * IEEE Posix P1003.2 mailing list from Phong Vo (kpv@research.att.com) and
+ * Glenn Fowler (gsf@research.att.com). Landon Curt Noll (chongo@toad.com)
+ * later improved on their algorithm.
+ *
+ * The magic is in the interesting relationship between the special prime
+ * 16777619 (2^24 + 403) and 2^32 and 2^8.
+ *
+ * This hash produces the fewest collisions of any function that we've seen so
+ * far, and works well on both numbers and strings.
+ *
+ * PUBLIC: u_int32_t __ham_func5 __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_func5(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ const u_int8_t *k, *e;
+ u_int32_t h;
+
+ /* dbp is unused; quiet the compiler. */
+ if (dbp != NULL)
+ COMPQUIET(dbp, NULL);
+
+ /* FNV: multiply by the prime 16777619, then XOR in each byte. */
+ k = key;
+ e = k + len;
+ for (h = 0; k < e; ++k) {
+ h *= 16777619;
+ h ^= *k;
+ }
+ return (h);
+}
+
+/*
+ * __ham_test --
+ *
+ * PUBLIC: u_int32_t __ham_test __P((DB *, const void *, u_int32_t));
+ */
+u_int32_t
+__ham_test(dbp, key, len)
+ DB *dbp;
+ const void *key;
+ u_int32_t len;
+{
+ /* Trivial test hash: the key's first byte. dbp/len unused. */
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(len, 0);
+ return ((u_int32_t)*(char *)key);
+}
diff --git a/storage/bdb/hash/hash_meta.c b/storage/bdb/hash/hash_meta.c
new file mode 100644
index 00000000000..9f224454869
--- /dev/null
+++ b/storage/bdb/hash/hash_meta.c
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_meta.c,v 11.19 2002/06/03 14:22:15 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+
+/*
+ * Acquire the meta-data page.
+ *
+ * PUBLIC: int __ham_get_meta __P((DBC *));
+ */
+int
+__ham_get_meta(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ HASH *hashp;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ hashp = dbp->h_internal;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /* Read-lock the metadata page unless recovering/compensating. */
+ if (dbenv != NULL &&
+ STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE)) {
+ dbc->lock.pgno = hashp->meta_pgno;
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker,
+ DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
+ &dbc->lock_dbt, DB_LOCK_READ, &hcp->hlock)) != 0)
+ return (ret);
+ }
+
+ /* Pin the page; on failure, drop the lock we just acquired. */
+ if ((ret = mpf->get(mpf,
+ &hashp->meta_pgno, DB_MPOOL_CREATE, &(hcp->hdr))) != 0 &&
+ LOCK_ISSET(hcp->hlock))
+ (void)dbenv->lock_put(dbenv, &hcp->hlock);
+
+ return (ret);
+}
+
+/*
+ * Release the meta-data page.
+ *
+ * PUBLIC: int __ham_release_meta __P((DBC *));
+ */
+int
+__ham_release_meta(dbc)
+ DBC *dbc;
+{
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+
+ mpf = dbc->dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /* Unpin the page, flushing it as dirty if we modified it. */
+ if (hcp->hdr)
+ (void)mpf->put(mpf, hcp->hdr,
+ F_ISSET(hcp, H_DIRTY) ? DB_MPOOL_DIRTY : 0);
+ hcp->hdr = NULL;
+ /* Non-transactional cursors release the meta lock immediately. */
+ if (!F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE) &&
+ dbc->txn == NULL && LOCK_ISSET(hcp->hlock))
+ (void)dbc->dbp->dbenv->lock_put(dbc->dbp->dbenv, &hcp->hlock);
+ F_CLR(hcp, H_DIRTY);
+
+ return (0);
+}
+
+/*
+ * Mark the meta-data page dirty.
+ *
+ * PUBLIC: int __ham_dirty_meta __P((DBC *));
+ */
+int
+__ham_dirty_meta(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_LOCK _tmp;
+ HASH *hashp;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ hashp = dbp->h_internal;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /*
+ * Upgrade our hold on the metadata page to a write lock: acquire
+ * the write lock first, then release the old (read) lock and keep
+ * the new one in its place.
+ */
+ ret = 0;
+ if (STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE)) {
+ dbenv = dbp->dbenv;
+ dbc->lock.pgno = hashp->meta_pgno;
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker,
+ DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
+ &dbc->lock_dbt, DB_LOCK_WRITE, &_tmp)) == 0) {
+ ret = dbenv->lock_put(dbenv, &hcp->hlock);
+ hcp->hlock = _tmp;
+ }
+ }
+
+ /* H_DIRTY makes __ham_release_meta flush the page as dirty. */
+ if (ret == 0)
+ F_SET(hcp, H_DIRTY);
+ return (ret);
+}
diff --git a/storage/bdb/hash/hash_method.c b/storage/bdb/hash/hash_method.c
new file mode 100644
index 00000000000..9a6bf59536a
--- /dev/null
+++ b/storage/bdb/hash/hash_method.c
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_method.c,v 11.12 2002/03/27 04:32:12 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+
+static int __ham_set_h_ffactor __P((DB *, u_int32_t));
+static int __ham_set_h_hash
+ __P((DB *, u_int32_t(*)(DB *, const void *, u_int32_t)));
+static int __ham_set_h_nelem __P((DB *, u_int32_t));
+
+/*
+ * __ham_db_create --
+ * Hash specific initialization of the DB structure.
+ *
+ * PUBLIC: int __ham_db_create __P((DB *));
+ */
+int
+__ham_db_create(dbp)
+ DB *dbp;
+{
+ HASH *hashp;
+ int ret;
+
+ /* Allocate the hash-private portion of the DB handle. */
+ if ((ret = __os_malloc(dbp->dbenv,
+ sizeof(HASH), &dbp->h_internal)) != 0)
+ return (ret);
+
+ hashp = dbp->h_internal;
+
+ hashp->h_nelem = 0; /* Defaults. */
+ hashp->h_ffactor = 0;
+ hashp->h_hash = NULL;
+
+ /* Install the hash-specific configuration methods on the handle. */
+ dbp->set_h_ffactor = __ham_set_h_ffactor;
+ dbp->set_h_hash = __ham_set_h_hash;
+ dbp->set_h_nelem = __ham_set_h_nelem;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_db_close __P((DB *));
+ */
+int
+__ham_db_close(dbp)
+ DB *dbp;
+{
+ /* Free the hash-private portion of the DB handle, if allocated. */
+ if (dbp->h_internal == NULL)
+ return (0);
+ __os_free(dbp->dbenv, dbp->h_internal);
+ dbp->h_internal = NULL;
+ return (0);
+}
+
+/*
+ * __ham_set_h_ffactor --
+ * Set the fill factor.
+ */
+static int
+__ham_set_h_ffactor(dbp, h_ffactor)
+ DB *dbp;
+ u_int32_t h_ffactor;
+{
+ HASH *hashp;
+
+ /* Only legal before open, and only on a hash (or untyped) handle. */
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_h_ffactor");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ hashp = dbp->h_internal;
+ hashp->h_ffactor = h_ffactor;
+ return (0);
+}
+
+/*
+ * __ham_set_h_hash --
+ * Set the hash function.
+ */
+static int
+__ham_set_h_hash(dbp, func)
+ DB *dbp;
+ u_int32_t (*func) __P((DB *, const void *, u_int32_t));
+{
+ HASH *hashp;
+
+ /* Only legal before open, and only on a hash (or untyped) handle. */
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_h_hash");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ hashp = dbp->h_internal;
+ hashp->h_hash = func;
+ return (0);
+}
+
+/*
+ * __ham_set_h_nelem --
+ * Set the table size.
+ */
+static int
+__ham_set_h_nelem(dbp, h_nelem)
+ DB *dbp;
+ u_int32_t h_nelem;
+{
+ HASH *hashp;
+
+ /* Only legal before open, and only on a hash (or untyped) handle. */
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_h_nelem");
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ hashp = dbp->h_internal;
+ hashp->h_nelem = h_nelem;
+ return (0);
+}
diff --git a/storage/bdb/hash/hash_open.c b/storage/bdb/hash/hash_open.c
new file mode 100644
index 00000000000..f976f5b6816
--- /dev/null
+++ b/storage/bdb/hash/hash_open.c
@@ -0,0 +1,558 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_open.c,v 11.175 2002/09/04 19:06:44 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+
+static db_pgno_t __ham_init_meta __P((DB *, HMETA *, db_pgno_t, DB_LSN *));
+
+/*
+ * __ham_open --
+ *
+ * PUBLIC: int __ham_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char * name, db_pgno_t, u_int32_t));
+ */
+int
+__ham_open(dbp, txn, name, base_pgno, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HASH *hashp;
+ int ret, t_ret;
+
+ COMPQUIET(name, NULL);
+ dbenv = dbp->dbenv;
+ dbc = NULL;
+ mpf = dbp->mpf;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->stat = __ham_stat;
+
+ /*
+ * Get a cursor. If DB_CREATE is specified, we may be creating
+ * pages, and to do that safely in CDB we need a write cursor.
+ * In STD_LOCKING mode, we'll synchronize using the meta page
+ * lock instead.
+ */
+ if ((ret = dbp->cursor(dbp,
+ txn, &dbc, LF_ISSET(DB_CREATE) && CDB_LOCKING(dbenv) ?
+ DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hashp = dbp->h_internal;
+ hashp->meta_pgno = base_pgno;
+ /* Lock and fetch the meta-data page into hcp->hdr. */
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err1;
+
+ /* Initialize the hdr structure. */
+ if (hcp->hdr->dbmeta.magic == DB_HASHMAGIC) {
+ /* File exists, verify the data in the header. */
+ if (hashp->h_hash == NULL)
+ hashp->h_hash = hcp->hdr->dbmeta.version < 5
+ ? __ham_func4 : __ham_func5;
+ /*
+ * Verify that the configured hash function matches the one
+ * the file was created with, by hashing a known key and
+ * comparing against the stored h_charkey value.
+ */
+ if (!F_ISSET(dbp, DB_AM_RDONLY) && !IS_RECOVERING(dbenv) &&
+ hashp->h_hash(dbp,
+ CHARKEY, sizeof(CHARKEY)) != hcp->hdr->h_charkey) {
+ __db_err(dbp->dbenv,
+ "hash: incompatible hash function");
+ ret = EINVAL;
+ goto err2;
+ }
+ /* Propagate the on-disk flags into the in-memory handle. */
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUPSORT))
+ F_SET(dbp, DB_AM_DUPSORT);
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+
+ /* We must initialize last_pgno, it could be stale. */
+ if (!F_ISSET(dbp, DB_AM_RDONLY) &&
+ dbp->meta_pgno == PGNO_BASE_MD) {
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ goto err2;
+ mpf->last_pgno(mpf, &hcp->hdr->dbmeta.last_pgno);
+ }
+ } else if (!IS_RECOVERING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER))
+ DB_ASSERT(0);
+
+err2: /* Release the meta data page */
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+err1: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __ham_metachk --
+ *
+ * PUBLIC: int __ham_metachk __P((DB *, const char *, HMETA *));
+ */
+int
+__ham_metachk(dbp, name, hashm)
+ DB *dbp;
+ const char *name;
+ HMETA *hashm;
+{
+ DB_ENV *dbenv;
+ u_int32_t vers;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * At this point, all we know is that the magic number is for a Hash.
+ * Check the version, the database may be out of date.
+ */
+ vers = hashm->dbmeta.version;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(vers);
+ switch (vers) {
+ case 4:
+ case 5:
+ case 6:
+ __db_err(dbenv,
+ "%s: hash version %lu requires a version upgrade",
+ name, (u_long)vers);
+ return (DB_OLD_VERSION);
+ case 7:
+ case 8:
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unsupported hash version: %lu", name, (u_long)vers);
+ return (EINVAL);
+ }
+
+ /* Swap the page if we need to. */
+ if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __ham_mswap((PAGE *)hashm)) != 0)
+ return (ret);
+
+ /* Check the type. */
+ if (dbp->type != DB_HASH && dbp->type != DB_UNKNOWN)
+ return (EINVAL);
+ dbp->type = DB_HASH;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ /*
+ * Check application info against metadata info, and set info, flags,
+ * and type based on metadata info.
+ */
+ if ((ret = __db_fchk(dbenv,
+ "DB->open", hashm->dbmeta.flags,
+ DB_HASH_DUP | DB_HASH_SUBDB | DB_HASH_DUPSORT)) != 0)
+ return (ret);
+
+ /*
+ * For each feature flag (dup, subdb, dupsort): the on-disk state
+ * wins; it is an error to request a feature at open time that the
+ * existing database was not created with.
+ */
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ else
+ if (F_ISSET(dbp, DB_AM_DUP)) {
+ __db_err(dbenv,
+ "%s: DB_DUP specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+ else
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "%s: multiple databases specified but not supported in file",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ } else
+ if (dbp->dup_compare != NULL) {
+ __db_err(dbenv,
+ "%s: duplicate sort function specified but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ /* Set the page size. */
+ dbp->pgsize = hashm->dbmeta.pagesize;
+
+ /* Copy the file's ID. */
+ memcpy(dbp->fileid, hashm->dbmeta.uid, DB_FILE_ID_LEN);
+
+ return (0);
+}
+
+/*
+ * __ham_init_meta --
+ *
+ * Initialize a hash meta-data page. We assume that the meta-data page is
+ * contiguous with the initial buckets that we create. If that turns out
+ * to be false, we'll fix it up later. Return the initial number of buckets
+ * allocated.
+ */
+static db_pgno_t
+__ham_init_meta(dbp, meta, pgno, lsnp)
+ DB *dbp;
+ HMETA *meta;
+ db_pgno_t pgno;
+ DB_LSN *lsnp;
+{
+ HASH *hashp;
+ db_pgno_t nbuckets;
+ int i;
+ int32_t l2;
+
+ hashp = dbp->h_internal;
+ if (hashp->h_hash == NULL)
+ hashp->h_hash = DB_HASHVERSION < 5 ? __ham_func4 : __ham_func5;
+
+ /*
+ * Size the initial table: estimate the number of buckets from
+ * nelem/ffactor (when both are configured) and round up to the
+ * next power of two; otherwise default to two buckets (l2 == 1).
+ */
+ if (hashp->h_nelem != 0 && hashp->h_ffactor != 0) {
+ hashp->h_nelem = (hashp->h_nelem - 1) / hashp->h_ffactor + 1;
+ l2 = __db_log2(hashp->h_nelem > 2 ? hashp->h_nelem : 2);
+ } else
+ l2 = 1;
+ nbuckets = (db_pgno_t)(1 << l2);
+
+ memset(meta, 0, sizeof(HMETA));
+ meta->dbmeta.lsn = *lsnp;
+ meta->dbmeta.pgno = pgno;
+ meta->dbmeta.magic = DB_HASHMAGIC;
+ meta->dbmeta.version = DB_HASHVERSION;
+ meta->dbmeta.pagesize = dbp->pgsize;
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ meta->dbmeta.encrypt_alg =
+ ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
+ DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
+ meta->crypto_magic = meta->dbmeta.magic;
+ }
+ meta->dbmeta.type = P_HASHMETA;
+ meta->dbmeta.free = PGNO_INVALID;
+ meta->dbmeta.last_pgno = pgno;
+ meta->max_bucket = nbuckets - 1;
+ meta->high_mask = nbuckets - 1;
+ meta->low_mask = (nbuckets >> 1) - 1;
+ meta->ffactor = hashp->h_ffactor;
+ /* Record the hash of a known key so opens can verify the function. */
+ meta->h_charkey = hashp->h_hash(dbp, CHARKEY, sizeof(CHARKEY));
+ memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ if (F_ISSET(dbp, DB_AM_DUP))
+ F_SET(&meta->dbmeta, DB_HASH_DUP);
+ if (F_ISSET(dbp, DB_AM_SUBDB))
+ F_SET(&meta->dbmeta, DB_HASH_SUBDB);
+ if (dbp->dup_compare != NULL)
+ F_SET(&meta->dbmeta, DB_HASH_DUPSORT);
+
+ /*
+ * Create the first and second buckets pages so that we have the
+ * page numbers for them and we can store that page number in the
+ * meta-data header (spares[0]).
+ */
+ meta->spares[0] = pgno + 1;
+
+ /* Fill in the last fields of the meta data page. */
+ for (i = 1; i <= l2; i++)
+ meta->spares[i] = meta->spares[0];
+ for (; i < NCACHED; i++)
+ meta->spares[i] = PGNO_INVALID;
+
+ return (nbuckets);
+}
+
+/*
+ * __ham_new_file --
+ * Create the necessary pages to begin a new database file. If name
+ * is NULL, then this is an unnamed file, the mpf has been set in the dbp
+ * and we simply create the pages using mpool. In this case, we don't log
+ * because we never have to redo an unnamed create and the undo simply
+ * frees resources.
+ *
+ * This code appears more complex than it is because of the two cases (named
+ * and unnamed). The way to read the code is that for each page being created,
+ * there are three parts: 1) a "get page" chunk (which either uses malloc'd
+ * memory or calls mpf->get), 2) the initialization, and 3) the "put page"
+ * chunk which either does a fop write or an mpf->put.
+ *
+ * PUBLIC: int __ham_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__ham_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ DBT pdbt;
+ HMETA *meta;
+ PAGE *page;
+ int ret;
+ db_pgno_t lpgno;
+ void *buf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ meta = NULL;
+ page = NULL;
+ memset(&pdbt, 0, sizeof(pdbt));
+
+ /* Build meta-data page. */
+ if (name == NULL) {
+ /* Unnamed file: create the page directly in mpool. */
+ lpgno = PGNO_BASE_MD;
+ ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &meta);
+ } else {
+ /* Named file: build the page in a private buffer, then
+ * write it through the file-op layer so it can be logged. */
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.type = dbp->type;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pdbt.data = &pginfo;
+ pdbt.size = sizeof(pginfo);
+ ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
+ meta = (HMETA *)buf;
+ }
+ if (ret != 0)
+ return (ret);
+
+ LSN_NOT_LOGGED(lsn);
+ /* __ham_init_meta returns the number of initial buckets. */
+ lpgno = __ham_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
+ meta->dbmeta.last_pgno = lpgno;
+
+ if (name == NULL)
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv, txn, name,
+ DB_APP_DATA, fhp, 0, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ meta = NULL;
+
+ /* Now allocate the final hash bucket. */
+ if (name == NULL) {
+ if ((ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &page)) != 0)
+ goto err;
+ } else {
+#ifdef DIAGNOSTIC
+ memset(buf, 0, dbp->pgsize);
+#endif
+ page = (PAGE *)buf;
+ }
+
+ P_INIT(page, dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ LSN_NOT_LOGGED(page->lsn);
+
+ if (name == NULL)
+ ret = mpf->put(mpf, page, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, lpgno, buf, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv, txn, name,
+ DB_APP_DATA, fhp, lpgno * dbp->pgsize, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ page = NULL;
+
+err: /* Release whichever resources are still outstanding. */
+ if (name != NULL)
+ __os_free(dbenv, buf);
+ else {
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ if (page != NULL)
+ (void)mpf->put(mpf, page, 0);
+ }
+ return (ret);
+}
+
+/*
+ * __ham_new_subdb --
+ * Create the necessary pages to begin a new subdatabase.
+ *
+ * PUBLIC: int __ham_new_subdb __P((DB *, DB *, DB_TXN *));
+ */
+int
+__ham_new_subdb(mdbp, dbp, txn)
+ DB *mdbp, *dbp;
+ DB_TXN *txn;
+{
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK metalock, mmlock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DBMETA *mmeta;
+ HMETA *meta;
+ PAGE *h;
+ int i, ret, t_ret;
+ db_pgno_t lpgno, mpgno;
+
+ dbenv = mdbp->dbenv;
+ mpf = mdbp->mpf;
+ dbc = NULL;
+ meta = NULL;
+ mmeta = NULL;
+ LOCK_INIT(metalock);
+ LOCK_INIT(mmlock);
+
+ if ((ret = mdbp->cursor(mdbp, txn,
+ &dbc, CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ /* Get and lock the new meta data page. */
+ if ((ret = __db_lget(dbc,
+ 0, dbp->meta_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &dbp->meta_pgno, DB_MPOOL_CREATE, &meta)) != 0)
+ goto err;
+
+ /* Initialize the new meta-data page. */
+ lsn = meta->dbmeta.lsn;
+ lpgno = __ham_init_meta(dbp, meta, dbp->meta_pgno, &lsn);
+
+ /*
+ * We are about to allocate a set of contiguous buckets (lpgno
+ * worth). We need to get the master meta-data page to figure
+ * out where these pages are and to allocate them. So, lock and
+ * get the master meta data page.
+ */
+ mpgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc, 0, mpgno, DB_LOCK_WRITE, 0, &mmlock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &mpgno, 0, &mmeta)) != 0)
+ goto err;
+
+ /*
+ * Now update the hash meta-data page to reflect where the first
+ * set of buckets are actually located.
+ */
+ meta->spares[0] = mmeta->last_pgno + 1;
+ for (i = 0; i < NCACHED && meta->spares[i] != PGNO_INVALID; i++)
+ meta->spares[i] = meta->spares[0];
+
+ /* The new meta data page is now complete; log it. */
+ if ((ret = __db_log_page(mdbp,
+ txn, &meta->dbmeta.lsn, dbp->meta_pgno, (PAGE *)meta)) != 0)
+ goto err;
+
+ /* Reflect the group allocation. */
+ if (DBENV_LOGGING(dbenv))
+ if ((ret = __ham_groupalloc_log(mdbp, txn,
+ &LSN(mmeta), 0, &LSN(mmeta),
+ meta->spares[0], meta->max_bucket + 1, mmeta->free)) != 0)
+ goto err;
+
+ /* Release the new meta-data page. */
+ if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ meta = NULL;
+
+ /* Advance the master's last_pgno past the group-allocated buckets. */
+ mmeta->last_pgno +=lpgno;
+ lpgno = mmeta->last_pgno;
+
+ /* Now allocate the final hash bucket. */
+ if ((ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &h)) != 0)
+ goto err;
+ P_INIT(h, dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ LSN(h) = LSN(mmeta);
+ if ((ret = mpf->put(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+
+ /* Now put the master-metadata page back. */
+ if ((ret = mpf->put(mpf, mmeta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ mmeta = NULL;
+
+err: /* Unwind: put pages, drop locks, close the cursor; keep first error. */
+ if (mmeta != NULL)
+ if ((t_ret = mpf->put(mpf, mmeta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (LOCK_ISSET(mmlock))
+ if ((t_ret = __LPUT(dbc, mmlock)) != 0 && ret == 0)
+ ret = t_ret;
+ if (meta != NULL)
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (LOCK_ISSET(metalock))
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
diff --git a/storage/bdb/hash/hash_page.c b/storage/bdb/hash/hash_page.c
new file mode 100644
index 00000000000..6788129773f
--- /dev/null
+++ b/storage/bdb/hash/hash_page.c
@@ -0,0 +1,1862 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_page.c,v 11.87 2002/08/15 02:46:20 bostic Exp $";
+#endif /* not lint */
+
+/*
+ * PACKAGE: hashing
+ *
+ * DESCRIPTION:
+ * Page manipulation for hashing package.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+
+static int __ham_c_delpg
+ __P((DBC *, db_pgno_t, db_pgno_t, u_int32_t, db_ham_mode, u_int32_t *));
+
+/*
+ * PUBLIC: int __ham_item __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_pgno_t next_pgno;
+ int ret;
+
+ dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (F_ISSET(hcp, H_DELETED)) {
+ __db_err(dbp->dbenv, "Attempt to return a deleted item");
+ return (EINVAL);
+ }
+ F_CLR(hcp, H_OK | H_NOMORE);
+
+ /* Check if we need to get a page for this cursor. */
+ if ((ret = __ham_get_cpage(dbc, mode)) != 0)
+ return (ret);
+
+recheck:
+ /* Check if we are looking for space in which to insert an item. */
+ if (hcp->seek_size && hcp->seek_found_page == PGNO_INVALID &&
+ hcp->seek_size < P_FREESPACE(dbp, hcp->page))
+ hcp->seek_found_page = hcp->pgno;
+
+ /* Check for off-page duplicates. */
+ if (hcp->indx < NUM_ENT(hcp->page) &&
+ HPAGE_TYPE(dbp, hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP) {
+ /* Hand the off-page duplicate tree's root back to the caller. */
+ memcpy(pgnop,
+ HOFFDUP_PGNO(H_PAIRDATA(dbp, hcp->page, hcp->indx)),
+ sizeof(db_pgno_t));
+ F_SET(hcp, H_OK);
+ return (0);
+ }
+
+ /* Check if we need to go on to the next page. */
+ if (F_ISSET(hcp, H_ISDUP))
+ /*
+ * ISDUP is set, and offset is at the beginning of the datum.
+ * We need to grab the length of the datum, then set the datum
+ * pointer to be the beginning of the datum.
+ */
+ memcpy(&hcp->dup_len,
+ HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx)) +
+ hcp->dup_off, sizeof(db_indx_t));
+
+ if (hcp->indx >= (db_indx_t)NUM_ENT(hcp->page)) {
+ /* Fetch next page. */
+ if (NEXT_PGNO(hcp->page) == PGNO_INVALID) {
+ F_SET(hcp, H_NOMORE);
+ return (DB_NOTFOUND);
+ }
+ next_pgno = NEXT_PGNO(hcp->page);
+ hcp->indx = 0;
+ if ((ret = __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ return (ret);
+ /* Re-run the checks against the newly fetched page. */
+ goto recheck;
+ }
+
+ F_SET(hcp, H_OK);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __ham_item_reset __P((DBC *));
+ */
+int
+__ham_item_reset(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ ret = 0;
+ /* Release the currently pinned page, if any, then clear cursor state. */
+ if (hcp->page != NULL)
+ ret = mpf->put(mpf, hcp->page, 0);
+
+ __ham_item_init(dbc);
+ return (ret);
+}
+
+/*
+ * PUBLIC: void __ham_item_init __P((DBC *));
+ */
+void
+__ham_item_init(dbc)
+ DBC *dbc;
+{
+ HASH_CURSOR *hcp;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ /*
+ * If this cursor still holds any locks, we must
+ * release them if we are not running with transactions.
+ */
+ (void)__TLPUT(dbc, hcp->lock);
+
+ /*
+ * The following fields must *not* be initialized here
+ * because they may have meaning across inits.
+ * hlock, hdr, split_buf, stats
+ */
+ hcp->bucket = BUCKET_INVALID;
+ hcp->lbucket = BUCKET_INVALID;
+ LOCK_INIT(hcp->lock);
+ hcp->lock_mode = DB_LOCK_NG;
+ hcp->dup_off = 0;
+ hcp->dup_len = 0;
+ hcp->dup_tlen = 0;
+ hcp->seek_size = 0;
+ hcp->seek_found_page = PGNO_INVALID;
+ hcp->flags = 0;
+
+ /* Invalidate the cursor's page position. */
+ hcp->pgno = PGNO_INVALID;
+ hcp->indx = NDX_INVALID;
+ hcp->page = NULL;
+}
+
+/*
+ * Returns the last item in a bucket.
+ *
+ * PUBLIC: int __ham_item_last __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_last(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ HASH_CURSOR *hcp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_item_reset(dbc)) != 0)
+ return (ret);
+
+ /* Position on the last bucket and walk backward to its last item. */
+ hcp->bucket = hcp->hdr->max_bucket;
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ F_SET(hcp, H_OK);
+ return (__ham_item_prev(dbc, mode, pgnop));
+}
+
+/*
+ * PUBLIC: int __ham_item_first __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_first(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ HASH_CURSOR *hcp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_item_reset(dbc)) != 0)
+ return (ret);
+ F_SET(hcp, H_OK);
+ /* Position on bucket 0 and walk forward to its first item. */
+ hcp->bucket = 0;
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ return (__ham_item_next(dbc, mode, pgnop));
+}
+
+/*
+ * __ham_item_prev --
+ * Returns a pointer to key/data pair on a page. In the case of
+ * bigkeys, just returns the page number and index of the bigkey
+ * pointer pair.
+ *
+ * PUBLIC: int __ham_item_prev __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_prev(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ HASH_CURSOR *hcp;
+ db_pgno_t next_pgno;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+
+ /*
+ * There are 5 cases for backing up in a hash file.
+ * Case 1: In the middle of a page, no duplicates, just dec the index.
+ * Case 2: In the middle of a duplicate set, back up one.
+ * Case 3: At the beginning of a duplicate set, get out of set and
+ * back up to next key.
+ * Case 4: At the beginning of a page; go to previous page.
+ * Case 5: At the beginning of a bucket; go to prev bucket.
+ */
+ F_CLR(hcp, H_OK | H_NOMORE | H_DELETED);
+
+ if ((ret = __ham_get_cpage(dbc, mode)) != 0)
+ return (ret);
+
+ /*
+ * First handle the duplicates. Either you'll get the key here
+ * or you'll exit the duplicate set and drop into the code below
+ * to handle backing up through keys.
+ */
+ if (!F_ISSET(hcp, H_NEXT_NODUP) && F_ISSET(hcp, H_ISDUP)) {
+ if (HPAGE_TYPE(dbp, hcp->page, H_DATAINDEX(hcp->indx)) ==
+ H_OFFDUP) {
+ memcpy(pgnop,
+ HOFFDUP_PGNO(H_PAIRDATA(dbp, hcp->page, hcp->indx)),
+ sizeof(db_pgno_t));
+ F_SET(hcp, H_OK);
+ return (0);
+ }
+
+ /* Duplicates are on-page. */
+ if (hcp->dup_off != 0) {
+ /*
+ * The previous duplicate's length is stored just
+ * before the current offset; read it, then step the
+ * offset back over that duplicate.
+ */
+ memcpy(&hcp->dup_len, HKEYDATA_DATA(
+ H_PAIRDATA(dbp, hcp->page, hcp->indx))
+ + hcp->dup_off - sizeof(db_indx_t),
+ sizeof(db_indx_t));
+ hcp->dup_off -=
+ DUP_SIZE(hcp->dup_len);
+ return (__ham_item(dbc, mode, pgnop));
+ }
+ }
+
+ /*
+ * If we get here, we are not in a duplicate set, and just need
+ * to back up the cursor. There are still three cases:
+ * midpage, beginning of page, beginning of bucket.
+ */
+
+ if (F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else
+ /*
+ * We are no longer in a dup set; flag this so the dup code
+ * will reinitialize should we stumble upon another one.
+ */
+ F_CLR(hcp, H_ISDUP);
+
+ if (hcp->indx == 0) { /* Beginning of page. */
+ hcp->pgno = PREV_PGNO(hcp->page);
+ if (hcp->pgno == PGNO_INVALID) {
+ /* Beginning of bucket. */
+ F_SET(hcp, H_NOMORE);
+ return (DB_NOTFOUND);
+ } else if ((ret =
+ __ham_next_cpage(dbc, hcp->pgno, 0)) != 0)
+ return (ret);
+ else
+ hcp->indx = NUM_ENT(hcp->page);
+ }
+
+ /*
+ * Either we've got the cursor set up to be decremented, or we
+ * have to find the end of a bucket.
+ */
+ if (hcp->indx == NDX_INVALID) {
+ DB_ASSERT(hcp->page != NULL);
+
+ /* Chase the overflow chain to the bucket's last page. */
+ hcp->indx = NUM_ENT(hcp->page);
+ for (next_pgno = NEXT_PGNO(hcp->page);
+ next_pgno != PGNO_INVALID;
+ next_pgno = NEXT_PGNO(hcp->page)) {
+ if ((ret = __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ return (ret);
+ hcp->indx = NUM_ENT(hcp->page);
+ }
+
+ if (hcp->indx == 0) {
+ /* Bucket was empty. */
+ F_SET(hcp, H_NOMORE);
+ return (DB_NOTFOUND);
+ }
+ }
+
+ /* Step back one key/data pair (entries come in pairs of two). */
+ hcp->indx -= 2;
+
+ return (__ham_item(dbc, mode, pgnop));
+}
+
+/*
+ * Sets the cursor to the next key/data pair on a page.
+ *
+ * PUBLIC: int __ham_item_next __P((DBC *, db_lockmode_t, db_pgno_t *));
+ */
+int
+__ham_item_next(dbc, mode, pgnop)
+ DBC *dbc;
+ db_lockmode_t mode;
+ db_pgno_t *pgnop;
+{
+ HASH_CURSOR *hcp;
+ int ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_cpage(dbc, mode)) != 0)
+ return (ret);
+
+ /*
+ * Deleted on-page duplicates are a weird case. If we delete the last
+ * one, then our cursor is at the very end of a duplicate set and
+ * we actually need to go on to the next key.
+ */
+ if (F_ISSET(hcp, H_DELETED)) {
+ if (hcp->indx != NDX_INVALID &&
+ F_ISSET(hcp, H_ISDUP) &&
+ HPAGE_TYPE(dbc->dbp, hcp->page, H_DATAINDEX(hcp->indx))
+ == H_DUPLICATE && hcp->dup_tlen == hcp->dup_off) {
+ if (F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else {
+ /* Leave the dup set; advance to the next pair. */
+ F_CLR(hcp, H_ISDUP);
+ hcp->indx += 2;
+ }
+ } else if (!F_ISSET(hcp, H_ISDUP) && F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else if (F_ISSET(hcp, H_ISDUP) &&
+ F_ISSET(hcp, H_NEXT_NODUP)) {
+ F_CLR(hcp, H_ISDUP);
+ hcp->indx += 2;
+ }
+ F_CLR(hcp, H_DELETED);
+ } else if (hcp->indx == NDX_INVALID) {
+ /* Uninitialized cursor: start at the first pair on the page. */
+ hcp->indx = 0;
+ F_CLR(hcp, H_ISDUP);
+ } else if (F_ISSET(hcp, H_NEXT_NODUP)) {
+ hcp->indx += 2;
+ F_CLR(hcp, H_ISDUP);
+ } else if (F_ISSET(hcp, H_ISDUP) && hcp->dup_tlen != 0) {
+ /* Walk forward within an on-page duplicate set. */
+ if (hcp->dup_off + DUP_SIZE(hcp->dup_len) >=
+ hcp->dup_tlen && F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ }
+ hcp->dup_off += DUP_SIZE(hcp->dup_len);
+ if (hcp->dup_off >= hcp->dup_tlen) {
+ /* Exhausted the dup set; move to the next pair. */
+ F_CLR(hcp, H_ISDUP);
+ hcp->indx += 2;
+ }
+ } else if (F_ISSET(hcp, H_DUPONLY)) {
+ F_CLR(hcp, H_OK);
+ F_SET(hcp, H_NOMORE);
+ return (0);
+ } else {
+ /* Plain advance: entries are key/data pairs, so step by 2. */
+ hcp->indx += 2;
+ F_CLR(hcp, H_ISDUP);
+ }
+
+ return (__ham_item(dbc, mode, pgnop));
+}
+
+/*
+ * PUBLIC: void __ham_putitem __P((DB *, PAGE *p, const DBT *, int));
+ *
+ * This is a little bit sleazy in that we're overloading the meaning
+ * of the H_OFFPAGE type here. When we recover deletes, we have the
+ * entire entry instead of having only the DBT, so we'll pass type
+ * H_OFFPAGE to mean, "copy the whole entry" as opposed to constructing
+ * an H_KEYDATA around it.
+ */
+void
+__ham_putitem(dbp, p, dbt, type)
+ DB *dbp;
+ PAGE *p;
+ const DBT *dbt;
+ int type;
+{
+ u_int16_t n, off;
+ db_indx_t *inp;
+
+ n = NUM_ENT(p);
+ inp = P_INP(dbp, p);
+
+ /* Put the item element on the page. */
+ if (type == H_OFFPAGE) {
+ /* Recovery path: dbt already holds a complete entry. */
+ off = HOFFSET(p) - dbt->size;
+ HOFFSET(p) = inp[n] = off;
+ memcpy(P_ENTRY(dbp, p, n), dbt->data, dbt->size);
+ } else {
+ /* Normal path: wrap the DBT in an HKEYDATA entry. */
+ off = HOFFSET(p) - HKEYDATA_SIZE(dbt->size);
+ HOFFSET(p) = inp[n] = off;
+ PUT_HKEYDATA(P_ENTRY(dbp, p, n), dbt->data, dbt->size, type);
+ }
+
+ /* Adjust page info. */
+ NUM_ENT(p) += 1;
+}
+
+/*
+ * PUBLIC: void __ham_reputpair __P((DB *, PAGE *,
+ * PUBLIC: u_int32_t, const DBT *, const DBT *));
+ *
+ * This is a special case to restore a key/data pair to its original
+ * location during recovery. We are guaranteed that the pair fits
+ * on the page and is not the last pair on the page (because if it's
+ * the last pair, the normal insert works).
+ */
+void
+__ham_reputpair(dbp, p, ndx, key, data)
+ DB *dbp;
+ PAGE *p;
+ u_int32_t ndx;
+ const DBT *key, *data;
+{
+ db_indx_t i, *inp, movebytes, newbytes;
+ size_t psize;
+ u_int8_t *from;
+
+ psize = dbp->pgsize;
+ inp = P_INP(dbp, p);
+ /* First shuffle the existing items up on the page. */
+ movebytes = (db_indx_t)(
+ (ndx == 0 ? psize : inp[H_DATAINDEX(ndx - 2)]) - HOFFSET(p));
+ newbytes = key->size + data->size;
+ from = (u_int8_t *)p + HOFFSET(p);
+ /* memmove: source and destination ranges overlap. */
+ memmove(from - newbytes, from, movebytes);
+
+ /*
+ * Adjust the indices and move them up 2 spaces. Note that we
+ * have to check the exit condition inside the loop just in case
+ * we are dealing with index 0 (db_indx_t's are unsigned).
+ */
+ for (i = NUM_ENT(p) - 1; ; i-- ) {
+ inp[i + 2] = inp[i] - newbytes;
+ if (i == H_KEYINDEX(ndx))
+ break;
+ }
+
+ /* Put the key and data on the page. */
+ inp[H_KEYINDEX(ndx)] = (db_indx_t)(
+ (ndx == 0 ? psize : inp[H_DATAINDEX(ndx - 2)]) - key->size);
+ inp[H_DATAINDEX(ndx)] = inp[H_KEYINDEX(ndx)] - data->size;
+ memcpy(P_ENTRY(dbp, p, H_KEYINDEX(ndx)), key->data, key->size);
+ memcpy(P_ENTRY(dbp, p, H_DATAINDEX(ndx)), data->data, data->size);
+
+ /* Adjust page info. */
+ HOFFSET(p) -= newbytes;
+ NUM_ENT(p) += 2;
+}
+
+/*
+ * PUBLIC: int __ham_del_pair __P((DBC *, int));
+ */
+int
+__ham_del_pair(dbc, reclaim_page)
+ DBC *dbc;
+ int reclaim_page;
+{
+ DB *dbp;
+ DBT data_dbt, key_dbt;
+ DB_LSN new_lsn, *n_lsn, tmp_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ PAGE *n_pagep, *nn_pagep, *p, *p_pagep;
+ db_ham_mode op;
+ db_indx_t ndx;
+ db_pgno_t chg_pgno, pgno, tmp_pgno;
+ int ret, t_ret;
+ u_int32_t order;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ n_pagep = p_pagep = nn_pagep = NULL;
+ ndx = hcp->indx;
+
+ if (hcp->page == NULL &&
+ (ret = mpf->get(mpf, &hcp->pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
+ return (ret);
+ p = hcp->page;
+
+ /*
+ * We optimize for the normal case which is when neither the key nor
+ * the data are large. In this case, we write a single log record
+ * and do the delete. If either is large, we'll call __big_delete
+ * to remove the big item and then update the page to remove the
+ * entry referring to the big item.
+ */
+ ret = 0;
+ if (HPAGE_PTYPE(H_PAIRKEY(dbp, p, ndx)) == H_OFFPAGE) {
+ memcpy(&pgno, HOFFPAGE_PGNO(P_ENTRY(dbp, p, H_KEYINDEX(ndx))),
+ sizeof(db_pgno_t));
+ ret = __db_doff(dbc, pgno);
+ }
+
+ if (ret == 0)
+ switch (HPAGE_PTYPE(H_PAIRDATA(dbp, p, ndx))) {
+ case H_OFFPAGE:
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(P_ENTRY(dbp, p, H_DATAINDEX(ndx))),
+ sizeof(db_pgno_t));
+ ret = __db_doff(dbc, pgno);
+ break;
+ case H_OFFDUP:
+ case H_DUPLICATE:
+ /*
+ * If we delete a pair that is/was a duplicate, then
+ * we had better clear the flag so that we update the
+ * cursor appropriately.
+ */
+ F_CLR(hcp, H_ISDUP);
+ break;
+ }
+
+ if (ret)
+ return (ret);
+
+ /* Now log the delete off this page. */
+ if (DBC_LOGGING(dbc)) {
+ key_dbt.data = P_ENTRY(dbp, p, H_KEYINDEX(ndx));
+ key_dbt.size = LEN_HITEM(dbp, p, dbp->pgsize, H_KEYINDEX(ndx));
+ data_dbt.data = P_ENTRY(dbp, p, H_DATAINDEX(ndx));
+ data_dbt.size = LEN_HITEM(dbp, p, dbp->pgsize, H_DATAINDEX(ndx));
+
+ if ((ret = __ham_insdel_log(dbp,
+ dbc->txn, &new_lsn, 0, DELPAIR, PGNO(p), (u_int32_t)ndx,
+ &LSN(p), &key_dbt, &data_dbt)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(p) = new_lsn;
+
+ /* Do the delete. */
+ __ham_dpair(dbp, p, ndx);
+
+ /*
+ * Mark item deleted so that we don't try to return it, and
+ * so that we update the cursor correctly on the next call
+ * to next.
+ */
+ F_SET(hcp, H_DELETED);
+ F_CLR(hcp, H_OK);
+
+ /*
+ * Update cursors that are on the page where the delete happened.
+ */
+ if ((ret = __ham_c_update(dbc, 0, 0, 0)) != 0)
+ return (ret);
+
+ /*
+ * If we are locking, we will not maintain this, because it is
+ * a hot spot.
+ *
+ * XXX
+ * Perhaps we can retain incremental numbers and apply them later.
+ */
+ if (!STD_LOCKING(dbc)) {
+ --hcp->hdr->nelem;
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ return (ret);
+ }
+
+ /*
+ * If we need to reclaim the page, then check if the page is empty.
+ * There are two cases. If it's empty and it's not the first page
+ * in the bucket (i.e., the bucket page) then we can simply remove
+ * it. If it is the first chain in the bucket, then we need to copy
+ * the second page into it and remove the second page.
+ * If it's the only page in the bucket, we leave it alone.
+ */
+ if (!reclaim_page ||
+ NUM_ENT(p) != 0 ||
+ (PREV_PGNO(p) == PGNO_INVALID && NEXT_PGNO(p) == PGNO_INVALID))
+ return (mpf->set(mpf, p, DB_MPOOL_DIRTY));
+
+ if (PREV_PGNO(p) == PGNO_INVALID) {
+ /*
+ * First page in chain is empty and we know that there
+ * are more pages in the chain.
+ */
+ if ((ret = mpf->get(mpf, &NEXT_PGNO(p), 0, &n_pagep)) != 0)
+ return (ret);
+
+ if (NEXT_PGNO(n_pagep) != PGNO_INVALID && (ret =
+ mpf->get(mpf, &NEXT_PGNO(n_pagep), 0, &nn_pagep)) != 0)
+ goto err;
+
+ if (DBC_LOGGING(dbc)) {
+ key_dbt.data = n_pagep;
+ key_dbt.size = dbp->pgsize;
+ if ((ret = __ham_copypage_log(dbp,
+ dbc->txn, &new_lsn, 0, PGNO(p),
+ &LSN(p), PGNO(n_pagep), &LSN(n_pagep),
+ NEXT_PGNO(n_pagep),
+ nn_pagep == NULL ? NULL : &LSN(nn_pagep),
+ &key_dbt)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(p) = new_lsn; /* Structure assignment. */
+ LSN(n_pagep) = new_lsn;
+ if (NEXT_PGNO(n_pagep) != PGNO_INVALID)
+ LSN(nn_pagep) = new_lsn;
+
+ if (nn_pagep != NULL) {
+ PREV_PGNO(nn_pagep) = PGNO(p);
+ if ((ret =
+ mpf->put(mpf, nn_pagep, DB_MPOOL_DIRTY)) != 0) {
+ nn_pagep = NULL;
+ goto err;
+ }
+ }
+
+ tmp_pgno = PGNO(p);
+ tmp_lsn = LSN(p);
+ memcpy(p, n_pagep, dbp->pgsize);
+ PGNO(p) = tmp_pgno;
+ LSN(p) = tmp_lsn;
+ PREV_PGNO(p) = PGNO_INVALID;
+
+ /*
+ * Update cursors to reflect the fact that records
+ * on the second page have moved to the first page.
+ */
+ if ((ret = __ham_c_delpg(dbc, PGNO(n_pagep),
+ PGNO(p), 0, DB_HAM_DELFIRSTPG, &order)) != 0)
+ goto err;
+
+ /*
+ * Update the cursor to reflect its new position.
+ */
+ hcp->indx = 0;
+ hcp->pgno = PGNO(p);
+ hcp->order += order;
+
+ if ((ret = mpf->set(mpf, p, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ if ((ret = __db_free(dbc, n_pagep)) != 0) {
+ n_pagep = NULL;
+ goto err;
+ }
+ } else {
+ if ((ret = mpf->get(mpf, &PREV_PGNO(p), 0, &p_pagep)) != 0)
+ goto err;
+
+ if (NEXT_PGNO(p) != PGNO_INVALID) {
+ if ((ret =
+ mpf->get(mpf, &NEXT_PGNO(p), 0, &n_pagep)) != 0)
+ goto err;
+ n_lsn = &LSN(n_pagep);
+ } else {
+ n_pagep = NULL;
+ n_lsn = NULL;
+ }
+
+ NEXT_PGNO(p_pagep) = NEXT_PGNO(p);
+ if (n_pagep != NULL)
+ PREV_PGNO(n_pagep) = PGNO(p_pagep);
+
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __ham_newpage_log(dbp, dbc->txn,
+ &new_lsn, 0, DELOVFL, PREV_PGNO(p), &LSN(p_pagep),
+ PGNO(p), &LSN(p), NEXT_PGNO(p), n_lsn)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(p_pagep) = new_lsn; /* Structure assignment. */
+ if (n_pagep)
+ LSN(n_pagep) = new_lsn;
+ LSN(p) = new_lsn;
+
+ if (NEXT_PGNO(p) == PGNO_INVALID) {
+ /*
+ * There is no next page; put the cursor on the
+ * previous page as if we'd deleted the last item
+ * on that page, with index after the last valid
+ * entry.
+ *
+ * The deleted flag was set up above.
+ */
+ hcp->pgno = PGNO(p_pagep);
+ hcp->indx = NUM_ENT(p_pagep);
+ op = DB_HAM_DELLASTPG;
+ } else {
+ /*
+ * There is a next page, so put the cursor at
+ * the beginning of it.
+ */
+ hcp->pgno = NEXT_PGNO(p);
+ hcp->indx = 0;
+ op = DB_HAM_DELMIDPG;
+ }
+
+ /*
+ * Since we are about to delete the cursor page and we have
+ * just moved the cursor, we need to make sure that the
+ * old page pointer isn't left hanging around in the cursor.
+ */
+ hcp->page = NULL;
+ chg_pgno = PGNO(p);
+ ret = __db_free(dbc, p);
+ if ((t_ret =
+ mpf->put(mpf, p_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if (n_pagep != NULL && (t_ret =
+ mpf->put(mpf, n_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ return (ret);
+ if ((ret = __ham_c_delpg(dbc,
+ chg_pgno, hcp->pgno, hcp->indx, op, &order)) != 0)
+ return (ret);
+ hcp->order += order;
+ }
+ return (ret);
+
+err: /* Clean up any pages. */
+ if (n_pagep != NULL)
+ (void)mpf->put(mpf, n_pagep, 0);
+ if (nn_pagep != NULL)
+ (void)mpf->put(mpf, nn_pagep, 0);
+ if (p_pagep != NULL)
+ (void)mpf->put(mpf, p_pagep, 0);
+ return (ret);
+}
+
+/*
+ * __ham_replpair --
+ * Given the key data indicated by the cursor, replace part/all of it
+ * according to the fields in the dbt.
+ *
+ * PUBLIC: int __ham_replpair __P((DBC *, DBT *, u_int32_t));
+ */
+int
+__ham_replpair(dbc, dbt, make_dup)
+ DBC *dbc;
+ DBT *dbt;
+ u_int32_t make_dup;
+{
+ DB *dbp;
+ DBT old_dbt, tdata, tmp;
+ DB_ENV *dbenv;
+ DB_LSN new_lsn;
+ HASH_CURSOR *hcp;
+ int32_t change; /* XXX: Possible overflow. */
+ u_int32_t dup_flag, len, memsize;
+ int beyond_eor, is_big, ret, type;
+ u_int8_t *beg, *dest, *end, *hk, *src;
+ void *memp;
+
+ /*
+ * Big item replacements are handled in generic code.
+ * Items that fit on the current page fall into 4 classes.
+ * 1. On-page element, same size
+ * 2. On-page element, new is bigger (fits)
+ * 3. On-page element, new is bigger (does not fit)
+ * 4. On-page element, old is bigger
+ * Numbers 1, 2, and 4 are essentially the same (and should
+ * be the common case). We handle case 3 as a delete and
+ * add.
+ */
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ /*
+ * We need to compute the number of bytes that we are adding or
+ * removing from the entry. Normally, we can simply subtract
+ * the number of bytes we are replacing (dbt->dlen) from the
+ * number of bytes we are inserting (dbt->size). However, if
+ * we are doing a partial put off the end of a record, then this
+ * formula doesn't work, because we are essentially adding
+ * new bytes.
+ */
+ change = dbt->size - dbt->dlen;
+
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ is_big = HPAGE_PTYPE(hk) == H_OFFPAGE;
+
+ if (is_big)
+ memcpy(&len, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ else
+ len = LEN_HKEYDATA(dbp, hcp->page,
+ dbp->pgsize, H_DATAINDEX(hcp->indx));
+
+ beyond_eor = dbt->doff + dbt->dlen > len;
+ if (beyond_eor)
+ change += dbt->doff + dbt->dlen - len;
+
+ if (change > (int32_t)P_FREESPACE(dbp, hcp->page) ||
+ beyond_eor || is_big) {
+ /*
+ * Case 3 -- two subcases.
+ * A. This is not really a partial operation, but an overwrite.
+ * Simple del and add works.
+ * B. This is a partial and we need to construct the data that
+ * we are really inserting (yuck).
+ * In both cases, we need to grab the key off the page (in
+ * some cases we could do this outside of this routine; for
+ * cleanliness we do it here. If you happen to be on a big
+ * key, this could be a performance hit).
+ */
+ memset(&tmp, 0, sizeof(tmp));
+ if ((ret =
+ __db_ret(dbp, hcp->page, H_KEYINDEX(hcp->indx),
+ &tmp, &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+ return (ret);
+
+ /* Preserve duplicate info. */
+ dup_flag = F_ISSET(hcp, H_ISDUP);
+ if (dbt->doff == 0 && dbt->dlen == len) {
+ ret = __ham_del_pair(dbc, 0);
+ if (ret == 0)
+ ret = __ham_add_el(dbc,
+ &tmp, dbt, dup_flag ? H_DUPLICATE : H_KEYDATA);
+ } else { /* Case B */
+ type = HPAGE_PTYPE(hk) != H_OFFPAGE ?
+ HPAGE_PTYPE(hk) : H_KEYDATA;
+ memset(&tdata, 0, sizeof(tdata));
+ memp = NULL;
+ memsize = 0;
+ if ((ret = __db_ret(dbp, hcp->page,
+ H_DATAINDEX(hcp->indx), &tdata, &memp, &memsize))
+ != 0)
+ goto err;
+
+ /* Now we can delete the item. */
+ if ((ret = __ham_del_pair(dbc, 0)) != 0) {
+ __os_free(dbenv, memp);
+ goto err;
+ }
+
+ /* Now shift old data around to make room for new. */
+ if (change > 0) {
+ if ((ret = __os_realloc(dbenv,
+ tdata.size + change, &tdata.data)) != 0)
+ return (ret);
+ memp = tdata.data;
+ memsize = tdata.size + change;
+ memset((u_int8_t *)tdata.data + tdata.size,
+ 0, change);
+ }
+ end = (u_int8_t *)tdata.data + tdata.size;
+
+ src = (u_int8_t *)tdata.data + dbt->doff + dbt->dlen;
+ if (src < end && tdata.size > dbt->doff + dbt->dlen) {
+ len = tdata.size - dbt->doff - dbt->dlen;
+ dest = src + change;
+ memmove(dest, src, len);
+ }
+ memcpy((u_int8_t *)tdata.data + dbt->doff,
+ dbt->data, dbt->size);
+ tdata.size += change;
+
+ /* Now add the pair. */
+ ret = __ham_add_el(dbc, &tmp, &tdata, type);
+ __os_free(dbenv, memp);
+ }
+ F_SET(hcp, dup_flag);
+err: return (ret);
+ }
+
+ /*
+ * Set up pointer into existing data. Do it before the log
+ * message so we can use it inside of the log setup.
+ */
+ beg = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ beg += dbt->doff;
+
+ /*
+ * If we are going to have to move bytes at all, figure out
+ * all the parameters here. Then log the call before moving
+ * anything around.
+ */
+ if (DBC_LOGGING(dbc)) {
+ old_dbt.data = beg;
+ old_dbt.size = dbt->dlen;
+ if ((ret = __ham_replace_log(dbp,
+ dbc->txn, &new_lsn, 0, PGNO(hcp->page),
+ (u_int32_t)H_DATAINDEX(hcp->indx), &LSN(hcp->page),
+ (u_int32_t)dbt->doff, &old_dbt, dbt, make_dup)) != 0)
+ return (ret);
+
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ LSN(hcp->page) = new_lsn; /* Structure assignment. */
+
+ __ham_onpage_replace(dbp, hcp->page, (u_int32_t)H_DATAINDEX(hcp->indx),
+ (int32_t)dbt->doff, change, dbt);
+
+ return (0);
+}
+
+/*
+ * Replace data on a page with new data, possibly growing or shrinking what's
+ * there. This is called on two different occasions. On one (from replpair)
+ * we are interested in changing only the data. On the other (from recovery)
+ * we are replacing the entire data (header and all) with a new element. In
+ * the latter case, the off argument is negative.
+ * pagep: the page that we're changing
+ * ndx: page index of the element that is growing/shrinking.
+ * off: Offset at which we are beginning the replacement.
+ * change: the number of bytes (+ or -) that the element is growing/shrinking.
+ * dbt: the new data that gets written at beg.
+ *
+ * PUBLIC: void __ham_onpage_replace __P((DB *, PAGE *, u_int32_t,
+ * PUBLIC: int32_t, int32_t, DBT *));
+ */
+void
+__ham_onpage_replace(dbp, pagep, ndx, off, change, dbt)
+ DB *dbp;
+ PAGE *pagep;
+ u_int32_t ndx;
+ int32_t off;
+ int32_t change;
+ DBT *dbt;
+{
+ db_indx_t i, *inp;
+ int32_t len;
+ size_t pgsize;
+ u_int8_t *src, *dest;
+ int zero_me;
+
+ pgsize = dbp->pgsize;
+ inp = P_INP(dbp, pagep);
+ if (change != 0) {
+ zero_me = 0;
+ src = (u_int8_t *)(pagep) + HOFFSET(pagep);
+ if (off < 0)
+ len = inp[ndx] - HOFFSET(pagep);
+ else if ((u_int32_t)off >=
+ LEN_HKEYDATA(dbp, pagep, pgsize, ndx)) {
+ len = (int32_t)(HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx))
+ + LEN_HKEYDATA(dbp, pagep, pgsize, ndx) - src);
+ zero_me = 1;
+ } else
+ len = (int32_t)(
+ (HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx)) + off) -
+ src);
+ dest = src - change;
+ memmove(dest, src, len);
+ if (zero_me)
+ memset(dest + len, 0, change);
+
+ /* Now update the indices. */
+ for (i = ndx; i < NUM_ENT(pagep); i++)
+ inp[i] -= change;
+ HOFFSET(pagep) -= change;
+ }
+ if (off >= 0)
+ memcpy(HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx)) + off,
+ dbt->data, dbt->size);
+ else
+ memcpy(P_ENTRY(dbp, pagep, ndx), dbt->data, dbt->size);
+}
+
+/*
+ * PUBLIC: int __ham_split_page __P((DBC *, u_int32_t, u_int32_t));
+ */
+int
+__ham_split_page(dbc, obucket, nbucket)
+ DBC *dbc;
+ u_int32_t obucket, nbucket;
+{
+ DB *dbp;
+ DBC **carray;
+ DBT key, page_dbt;
+ DB_ENV *dbenv;
+ DB_LOCK block;
+ DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp, *cp;
+ PAGE **pp, *old_pagep, *temp_pagep, *new_pagep;
+ db_indx_t n;
+ db_pgno_t bucket_pgno, npgno, next_pgno;
+ u_int32_t big_len, len;
+ int found, i, ret, t_ret;
+ void *big_buf;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ temp_pagep = old_pagep = new_pagep = NULL;
+ carray = NULL;
+ LOCK_INIT(block);
+
+ bucket_pgno = BUCKET_TO_PAGE(hcp, obucket);
+ if ((ret = __db_lget(dbc,
+ 0, bucket_pgno, DB_LOCK_WRITE, 0, &block)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf,
+ &bucket_pgno, DB_MPOOL_CREATE, &old_pagep)) != 0)
+ goto err;
+
+ /* Properly initialize the new bucket page. */
+ npgno = BUCKET_TO_PAGE(hcp, nbucket);
+ if ((ret = mpf->get(mpf, &npgno, DB_MPOOL_CREATE, &new_pagep)) != 0)
+ goto err;
+ P_INIT(new_pagep,
+ dbp->pgsize, npgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+
+ temp_pagep = hcp->split_buf;
+ memcpy(temp_pagep, old_pagep, dbp->pgsize);
+
+ if (DBC_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = old_pagep;
+ if ((ret = __ham_splitdata_log(dbp,
+ dbc->txn, &new_lsn, 0, SPLITOLD,
+ PGNO(old_pagep), &page_dbt, &LSN(old_pagep))) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ LSN(old_pagep) = new_lsn; /* Structure assignment. */
+
+ P_INIT(old_pagep, dbp->pgsize, PGNO(old_pagep), PGNO_INVALID,
+ PGNO_INVALID, 0, P_HASH);
+
+ big_len = 0;
+ big_buf = NULL;
+ key.flags = 0;
+ while (temp_pagep != NULL) {
+ if ((ret = __ham_get_clist(dbp,
+ PGNO(temp_pagep), NDX_INVALID, &carray)) != 0)
+ goto err;
+
+ for (n = 0; n < (db_indx_t)NUM_ENT(temp_pagep); n += 2) {
+ if ((ret = __db_ret(dbp, temp_pagep,
+ H_KEYINDEX(n), &key, &big_buf, &big_len)) != 0)
+ goto err;
+
+ if (__ham_call_hash(dbc, key.data, key.size) == obucket)
+ pp = &old_pagep;
+ else
+ pp = &new_pagep;
+
+ /*
+ * Figure out how many bytes we need on the new
+ * page to store the key/data pair.
+ */
+ len = LEN_HITEM(dbp, temp_pagep, dbp->pgsize,
+ H_DATAINDEX(n)) +
+ LEN_HITEM(dbp, temp_pagep, dbp->pgsize,
+ H_KEYINDEX(n)) +
+ 2 * sizeof(db_indx_t);
+
+ if (P_FREESPACE(dbp, *pp) < len) {
+ if (DBC_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = *pp;
+ if ((ret = __ham_splitdata_log(dbp,
+ dbc->txn, &new_lsn, 0,
+ SPLITNEW, PGNO(*pp), &page_dbt,
+ &LSN(*pp))) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+ LSN(*pp) = new_lsn;
+ if ((ret =
+ __ham_add_ovflpage(dbc, *pp, 1, pp)) != 0)
+ goto err;
+ }
+
+ /* Check if we need to update a cursor. */
+ if (carray != NULL) {
+ found = 0;
+ for (i = 0; carray[i] != NULL; i++) {
+ cp =
+ (HASH_CURSOR *)carray[i]->internal;
+ if (cp->pgno == PGNO(temp_pagep) &&
+ cp->indx == n) {
+ cp->pgno = PGNO(*pp);
+ cp->indx = NUM_ENT(*pp);
+ found = 1;
+ }
+ }
+ if (found && DBC_LOGGING(dbc) &&
+ IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret =
+ __ham_chgpg_log(dbp,
+ dbc->txn, &new_lsn, 0,
+ DB_HAM_SPLIT, PGNO(temp_pagep),
+ PGNO(*pp), n, NUM_ENT(*pp))) != 0)
+ goto err;
+ }
+ }
+ __ham_copy_item(dbp, temp_pagep, H_KEYINDEX(n), *pp);
+ __ham_copy_item(dbp, temp_pagep, H_DATAINDEX(n), *pp);
+ }
+ next_pgno = NEXT_PGNO(temp_pagep);
+
+ /* Clear temp_page; if it's a link overflow page, free it. */
+ if (PGNO(temp_pagep) != bucket_pgno && (ret =
+ __db_free(dbc, temp_pagep)) != 0) {
+ temp_pagep = NULL;
+ goto err;
+ }
+
+ if (next_pgno == PGNO_INVALID)
+ temp_pagep = NULL;
+ else if ((ret = mpf->get(
+ mpf, &next_pgno, DB_MPOOL_CREATE, &temp_pagep)) != 0)
+ goto err;
+
+ if (temp_pagep != NULL) {
+ if (DBC_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = temp_pagep;
+ if ((ret = __ham_splitdata_log(dbp,
+ dbc->txn, &new_lsn, 0,
+ SPLITOLD, PGNO(temp_pagep),
+ &page_dbt, &LSN(temp_pagep))) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+ LSN(temp_pagep) = new_lsn;
+ }
+
+ if (carray != NULL) /* We never knew its size. */
+ __os_free(dbenv, carray);
+ carray = NULL;
+ }
+ if (big_buf != NULL)
+ __os_free(dbenv, big_buf);
+
+ /*
+ * If the original bucket spanned multiple pages, then we've got
+ * a pointer to a page that used to be on the bucket chain. It
+ * should be deleted.
+ */
+ if (temp_pagep != NULL && PGNO(temp_pagep) != bucket_pgno &&
+ (ret = __db_free(dbc, temp_pagep)) != 0) {
+ temp_pagep = NULL;
+ goto err;
+ }
+
+ /*
+ * Write new buckets out.
+ */
+ if (DBC_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = old_pagep;
+ if ((ret = __ham_splitdata_log(dbp, dbc->txn,
+ &new_lsn, 0, SPLITNEW, PGNO(old_pagep), &page_dbt,
+ &LSN(old_pagep))) != 0)
+ goto err;
+ LSN(old_pagep) = new_lsn;
+
+ page_dbt.data = new_pagep;
+ if ((ret = __ham_splitdata_log(dbp, dbc->txn, &new_lsn, 0,
+ SPLITNEW, PGNO(new_pagep), &page_dbt,
+ &LSN(new_pagep))) != 0)
+ goto err;
+ LSN(new_pagep) = new_lsn;
+ } else {
+ LSN_NOT_LOGGED(LSN(old_pagep));
+ LSN_NOT_LOGGED(LSN(new_pagep));
+ }
+
+ ret = mpf->put(mpf, old_pagep, DB_MPOOL_DIRTY);
+ if ((t_ret =
+ mpf->put(mpf, new_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (0) {
+err: if (old_pagep != NULL)
+ (void)mpf->put(mpf, old_pagep, DB_MPOOL_DIRTY);
+ if (new_pagep != NULL)
+ (void)mpf->put(mpf, new_pagep, DB_MPOOL_DIRTY);
+ if (temp_pagep != NULL && PGNO(temp_pagep) != bucket_pgno)
+ (void)mpf->put(mpf, temp_pagep, DB_MPOOL_DIRTY);
+ }
+ if (LOCK_ISSET(block))
+ __TLPUT(dbc, block);
+ if (carray != NULL) /* We never knew its size. */
+ __os_free(dbenv, carray);
+ return (ret);
+}
+
+/*
+ * Add the given pair to the page. The page in question may already be
+ * held (i.e. it was already gotten). If it is, then the page is passed
+ * in via the pagep parameter. On return, pagep will contain the page
+ * to which we just added something. This allows us to link overflow
+ * pages and return the new page having correctly put the last page.
+ *
+ * PUBLIC: int __ham_add_el __P((DBC *, const DBT *, const DBT *, int));
+ */
+int
+__ham_add_el(dbc, key, val, type)
+ DBC *dbc;
+ const DBT *key, *val;
+ int type;
+{
+ const DBT *pkey, *pdata;
+ DB *dbp;
+ DBT key_dbt, data_dbt;
+ DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HOFFPAGE doff, koff;
+ db_pgno_t next_pgno, pgno;
+ u_int32_t data_size, key_size, pairsize, rectype;
+ int do_expand, is_keybig, is_databig, ret;
+ int key_type, data_type;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ do_expand = 0;
+
+ pgno = hcp->seek_found_page != PGNO_INVALID ?
+ hcp->seek_found_page : hcp->pgno;
+ if (hcp->page == NULL &&
+ (ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
+ return (ret);
+
+ key_size = HKEYDATA_PSIZE(key->size);
+ data_size = HKEYDATA_PSIZE(val->size);
+ is_keybig = ISBIG(hcp, key->size);
+ is_databig = ISBIG(hcp, val->size);
+ if (is_keybig)
+ key_size = HOFFPAGE_PSIZE;
+ if (is_databig)
+ data_size = HOFFPAGE_PSIZE;
+
+ pairsize = key_size + data_size;
+
+ /* Advance to first page in chain with room for item. */
+ while (H_NUMPAIRS(hcp->page) && NEXT_PGNO(hcp->page) != PGNO_INVALID) {
+ /*
+ * This may not be the end of the chain, but the pair may fit
+ * anyway. Check if it's a bigpair that fits or a regular
+ * pair that fits.
+ */
+ if (P_FREESPACE(dbp, hcp->page) >= pairsize)
+ break;
+ next_pgno = NEXT_PGNO(hcp->page);
+ if ((ret = __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ return (ret);
+ }
+
+ /*
+ * Check if we need to allocate a new page.
+ */
+ if (P_FREESPACE(dbp, hcp->page) < pairsize) {
+ do_expand = 1;
+ if ((ret = __ham_add_ovflpage(dbc,
+ (PAGE *)hcp->page, 1, (PAGE **)&hcp->page)) != 0)
+ return (ret);
+ hcp->pgno = PGNO(hcp->page);
+ }
+
+ /*
+ * Update cursor.
+ */
+ hcp->indx = NUM_ENT(hcp->page);
+ F_CLR(hcp, H_DELETED);
+ if (is_keybig) {
+ koff.type = H_OFFPAGE;
+ UMRW_SET(koff.unused[0]);
+ UMRW_SET(koff.unused[1]);
+ UMRW_SET(koff.unused[2]);
+ if ((ret = __db_poff(dbc, key, &koff.pgno)) != 0)
+ return (ret);
+ koff.tlen = key->size;
+ key_dbt.data = &koff;
+ key_dbt.size = sizeof(koff);
+ pkey = &key_dbt;
+ key_type = H_OFFPAGE;
+ } else {
+ pkey = key;
+ key_type = H_KEYDATA;
+ }
+
+ if (is_databig) {
+ doff.type = H_OFFPAGE;
+ UMRW_SET(doff.unused[0]);
+ UMRW_SET(doff.unused[1]);
+ UMRW_SET(doff.unused[2]);
+ if ((ret = __db_poff(dbc, val, &doff.pgno)) != 0)
+ return (ret);
+ doff.tlen = val->size;
+ data_dbt.data = &doff;
+ data_dbt.size = sizeof(doff);
+ pdata = &data_dbt;
+ data_type = H_OFFPAGE;
+ } else {
+ pdata = val;
+ data_type = type;
+ }
+
+ if (DBC_LOGGING(dbc)) {
+ rectype = PUTPAIR;
+ if (is_databig)
+ rectype |= PAIR_DATAMASK;
+ if (is_keybig)
+ rectype |= PAIR_KEYMASK;
+ if (type == H_DUPLICATE)
+ rectype |= PAIR_DUPMASK;
+
+ if ((ret = __ham_insdel_log(dbp, dbc->txn, &new_lsn, 0,
+ rectype, PGNO(hcp->page), (u_int32_t)NUM_ENT(hcp->page),
+ &LSN(hcp->page), pkey, pdata)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(hcp->page) = new_lsn; /* Structure assignment. */
+
+ __ham_putitem(dbp, hcp->page, pkey, key_type);
+ __ham_putitem(dbp, hcp->page, pdata, data_type);
+
+ /*
+ * For splits, we are going to update item_info's page number
+ * field, so that we can easily return to the same page the
+ * next time we come in here. For other operations, this shouldn't
+ * matter, since odds are this is the last thing that happens before
+ * we return to the user program.
+ */
+ hcp->pgno = PGNO(hcp->page);
+
+ /*
+ * XXX
+ * Maybe keep incremental numbers here.
+ */
+ if (!STD_LOCKING(dbc)) {
+ hcp->hdr->nelem++;
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ return (ret);
+ }
+
+ if (do_expand || (hcp->hdr->ffactor != 0 &&
+ (u_int32_t)H_NUMPAIRS(hcp->page) > hcp->hdr->ffactor))
+ F_SET(hcp, H_EXPAND);
+ return (0);
+}
+
+/*
+ * Special __putitem call used in splitting -- copies one entry to
+ * another. Works for all types of hash entries (H_OFFPAGE, H_KEYDATA,
+ * H_DUPLICATE, H_OFFDUP). Since we log splits at a high level, we
+ * do not need to do any logging here.
+ *
+ * PUBLIC: void __ham_copy_item __P((DB *, PAGE *, u_int32_t, PAGE *));
+ */
+void
+__ham_copy_item(dbp, src_page, src_ndx, dest_page)
+ DB *dbp;
+ PAGE *src_page;
+ u_int32_t src_ndx;
+ PAGE *dest_page;
+{
+ u_int32_t len;
+ size_t pgsize;
+ void *src, *dest;
+ db_indx_t *inp;
+
+ pgsize = dbp->pgsize;
+ inp = P_INP(dbp, dest_page);
+ /*
+ * Copy the key and data entries onto this new page.
+ */
+ src = P_ENTRY(dbp, src_page, src_ndx);
+
+ /* Set up space on dest. */
+ len = (u_int32_t)LEN_HITEM(dbp, src_page, pgsize, src_ndx);
+ HOFFSET(dest_page) -= len;
+ inp[NUM_ENT(dest_page)] = HOFFSET(dest_page);
+ dest = P_ENTRY(dbp, dest_page, NUM_ENT(dest_page));
+ NUM_ENT(dest_page)++;
+
+ memcpy(dest, src, len);
+}
+
+/*
+ *
+ * Returns:
+ * 0 on success
+ * non-zero error code on failure
+ *
+ * PUBLIC: int __ham_add_ovflpage __P((DBC *, PAGE *, int, PAGE **));
+ */
+int
+__ham_add_ovflpage(dbc, pagep, release, pp)
+ DBC *dbc;
+ PAGE *pagep;
+ int release;
+ PAGE **pp;
+{
+ DB *dbp;
+ DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *new_pagep;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
+ if ((ret = __db_new(dbc, P_HASH, &new_pagep)) != 0)
+ return (ret);
+
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __ham_newpage_log(dbp, dbc->txn, &new_lsn, 0,
+ PUTOVFL, PGNO(pagep), &LSN(pagep),
+ PGNO(new_pagep), &LSN(new_pagep), PGNO_INVALID, NULL)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(pagep) = LSN(new_pagep) = new_lsn;
+ NEXT_PGNO(pagep) = PGNO(new_pagep);
+
+ PREV_PGNO(new_pagep) = PGNO(pagep);
+
+ if (release)
+ ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY);
+
+ *pp = new_pagep;
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __ham_get_cpage __P((DBC *, db_lockmode_t));
+ */
+int
+__ham_get_cpage(dbc, mode)
+ DBC *dbc;
+ db_lockmode_t mode;
+{
+ DB *dbp;
+ DB_LOCK tmp_lock;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /*
+ * There are four cases with respect to buckets and locks.
+ * 1. If there is no lock held, then if we are locking, we should
+ * get the lock.
+ * 2. If there is a lock held, it's for the current bucket, and it's
+ * for the right mode, we don't need to do anything.
+ * 3. If there is a lock held for the current bucket but it's not
+ * strong enough, we need to upgrade.
+ * 4. If there is a lock, but it's for a different bucket, then we need
+ * to release the existing lock and get a new lock.
+ */
+ LOCK_INIT(tmp_lock);
+ if (STD_LOCKING(dbc)) {
+ if (hcp->lbucket != hcp->bucket && /* Case 4 */
+ (ret = __TLPUT(dbc, hcp->lock)) != 0)
+ return (ret);
+
+ if ((LOCK_ISSET(hcp->lock) &&
+ (hcp->lock_mode == DB_LOCK_READ &&
+ mode == DB_LOCK_WRITE))) {
+ /* Case 3. */
+ tmp_lock = hcp->lock;
+ LOCK_INIT(hcp->lock);
+ }
+
+ /* Acquire the lock. */
+ if (!LOCK_ISSET(hcp->lock))
+ /* Cases 1, 3, and 4. */
+ if ((ret = __ham_lock_bucket(dbc, mode)) != 0)
+ return (ret);
+
+ if (ret == 0) {
+ hcp->lock_mode = mode;
+ hcp->lbucket = hcp->bucket;
+ if (LOCK_ISSET(tmp_lock))
+ /* Case 3: release the original lock. */
+ ret =
+ dbp->dbenv->lock_put(dbp->dbenv, &tmp_lock);
+ } else if (LOCK_ISSET(tmp_lock))
+ hcp->lock = tmp_lock;
+ }
+
+ if (ret == 0 && hcp->page == NULL) {
+ if (hcp->pgno == PGNO_INVALID)
+ hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if ((ret = mpf->get(mpf,
+ &hcp->pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * Get a new page at the cursor, putting the last page if necessary.
+ * If the flag is set to H_ISDUP, then we are talking about the
+ * duplicate page, not the main page.
+ *
+ * PUBLIC: int __ham_next_cpage __P((DBC *, db_pgno_t, int));
+ */
+int
+__ham_next_cpage(dbc, pgno, dirty)
+ DBC *dbc;
+ db_pgno_t pgno;
+ int dirty;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ PAGE *p;
+ int ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if (hcp->page != NULL &&
+ (ret = mpf->put(mpf, hcp->page, dirty ? DB_MPOOL_DIRTY : 0)) != 0)
+ return (ret);
+ hcp->page = NULL;
+
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &p)) != 0)
+ return (ret);
+
+ hcp->page = p;
+ hcp->pgno = pgno;
+ hcp->indx = 0;
+
+ return (0);
+}
+
+/*
+ * __ham_lock_bucket --
+ * Get the lock on a particular bucket.
+ *
+ * PUBLIC: int __ham_lock_bucket __P((DBC *, db_lockmode_t));
+ */
+int
+__ham_lock_bucket(dbc, mode)
+ DBC *dbc;
+ db_lockmode_t mode;
+{
+ HASH_CURSOR *hcp;
+ db_pgno_t pgno;
+ int gotmeta, ret;
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ gotmeta = hcp->hdr == NULL ? 1 : 0;
+ if (gotmeta)
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+ pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ if (gotmeta)
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ return (ret);
+
+ ret = __db_lget(dbc, 0, pgno, mode, 0, &hcp->lock);
+
+ hcp->lock_mode = mode;
+ return (ret);
+}
+
+/*
+ * __ham_dpair --
+ * Delete a pair on a page, paying no attention to what the pair
+ * represents. The caller is responsible for freeing up duplicates
+ * or offpage entries that might be referenced by this pair.
+ *
+ * Recovery assumes that this may be called without the metadata
+ * page pinned.
+ *
+ * PUBLIC: void __ham_dpair __P((DB *, PAGE *, u_int32_t));
+ */
+void
+__ham_dpair(dbp, p, indx)
+ DB *dbp;
+ PAGE *p;
+ u_int32_t indx;
+{
+ db_indx_t delta, n, *inp;
+ u_int8_t *dest, *src;
+
+ inp = P_INP(dbp, p);
+ /*
+ * Compute "delta", the amount we have to shift all of the
+ * offsets. To find the delta, we just need to calculate
+ * the size of the pair of elements we are removing.
+ */
+ delta = H_PAIRSIZE(dbp, p, dbp->pgsize, indx);
+
+ /*
+ * The hard case: we want to remove something other than
+ * the last item on the page. We need to shift data and
+ * offsets down.
+ */
+ if ((db_indx_t)indx != NUM_ENT(p) - 2) {
+ /*
+ * Move the data: src is the first occupied byte on
+ * the page. (Length is delta.)
+ */
+ src = (u_int8_t *)p + HOFFSET(p);
+
+ /*
+ * Destination is delta bytes beyond src. This might
+ * be an overlapping copy, so we have to use memmove.
+ */
+ dest = src + delta;
+ memmove(dest, src, inp[H_DATAINDEX(indx)] - HOFFSET(p));
+ }
+
+ /* Adjust page metadata. */
+ HOFFSET(p) = HOFFSET(p) + delta;
+ NUM_ENT(p) = NUM_ENT(p) - 2;
+
+ /* Adjust the offsets. */
+ for (n = (db_indx_t)indx; n < (db_indx_t)(NUM_ENT(p)); n++)
+ inp[n] = inp[n + 2] + delta;
+
+}
+
+/*
+ * __ham_c_delpg --
+ *
+ * Adjust the cursors after we've emptied a page in a bucket, taking
+ * care that when we move cursors pointing to deleted items, their
+ * orders don't collide with the orders of cursors on the page we move
+ * them to (since after this function is called, cursors with the same
+ * index on the two pages will be otherwise indistinguishable--they'll
+ * all have pgno new_pgno). There are three cases:
+ *
+ * 1) The emptied page is the first page in the bucket. In this
+ * case, we've copied all the items from the second page into the
+ * first page, so the first page is new_pgno and the second page is
+ * old_pgno. new_pgno is empty, but can have deleted cursors
+ * pointing at indx 0, so we need to be careful of the orders
+ * there. This is DB_HAM_DELFIRSTPG.
+ *
+ * 2) The page is somewhere in the middle of a bucket. Our caller
+ * can just delete such a page, so it's old_pgno. old_pgno is
+ * empty, but may have deleted cursors pointing at indx 0, so we
+ * need to be careful of indx 0 when we move those cursors to
+ * new_pgno. This is DB_HAM_DELMIDPG.
+ *
+ * 3) The page is the last in a bucket. Again the empty page is
+ * old_pgno, and again it should only have cursors that are deleted
+ * and at indx == 0. This time, though, there's no next page to
+ * move them to, so we set them to indx == num_ent on the previous
+ * page--and indx == num_ent is the index whose cursors we need to
+ * be careful of. This is DB_HAM_DELLASTPG.
+ */
+static int
+__ham_c_delpg(dbc, old_pgno, new_pgno, num_ent, op, orderp)
+ DBC *dbc;
+ db_pgno_t old_pgno, new_pgno;
+ u_int32_t num_ent;
+ db_ham_mode op;
+ u_int32_t *orderp;
+{
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ DBC *cp;
+ HASH_CURSOR *hcp;
+ int found, ret;
+ db_indx_t indx;
+ u_int32_t order;
+
+ /* Which is the worrisome index? */
+ indx = (op == DB_HAM_DELLASTPG) ? num_ent : 0;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ /*
+ * Find the highest order of any cursor our movement
+ * may collide with.
+ */
+ order = 1;
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+ hcp = (HASH_CURSOR *)cp->internal;
+ if (hcp->pgno == new_pgno) {
+ if (hcp->indx == indx &&
+ F_ISSET(hcp, H_DELETED) &&
+ hcp->order >= order)
+ order = hcp->order + 1;
+ DB_ASSERT(op != DB_HAM_DELFIRSTPG ||
+ hcp->indx == NDX_INVALID ||
+ (hcp->indx == 0 &&
+ F_ISSET(hcp, H_DELETED)));
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ hcp = (HASH_CURSOR *)cp->internal;
+
+ if (hcp->pgno == old_pgno) {
+ switch (op) {
+ case DB_HAM_DELFIRSTPG:
+ /*
+ * We're moving all items,
+ * regardless of index.
+ */
+ hcp->pgno = new_pgno;
+
+ /*
+ * But we have to be careful of
+ * the order values.
+ */
+ if (hcp->indx == indx)
+ hcp->order += order;
+ break;
+ case DB_HAM_DELMIDPG:
+ hcp->pgno = new_pgno;
+ DB_ASSERT(hcp->indx == 0 &&
+ F_ISSET(hcp, H_DELETED));
+ hcp->order += order;
+ break;
+ case DB_HAM_DELLASTPG:
+ hcp->pgno = new_pgno;
+ DB_ASSERT(hcp->indx == 0 &&
+ F_ISSET(hcp, H_DELETED));
+ hcp->indx = indx;
+ hcp->order += order;
+ break;
+ default:
+ DB_ASSERT(0);
+ return (__db_panic(dbenv, EINVAL));
+ }
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(dbc)) {
+ if ((ret = __ham_chgpg_log(dbp, my_txn, &lsn, 0, op,
+ old_pgno, new_pgno, indx, order)) != 0)
+ return (ret);
+ }
+ *orderp = order;
+ return (0);
+}
diff --git a/storage/bdb/hash/hash_rec.c b/storage/bdb/hash/hash_rec.c
new file mode 100644
index 00000000000..24d3473c508
--- /dev/null
+++ b/storage/bdb/hash/hash_rec.c
@@ -0,0 +1,1156 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_rec.c,v 11.69 2002/09/03 14:12:49 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+
+static int __ham_alloc_pages __P((DB *, __ham_groupalloc_args *, DB_LSN *));
+
+/*
+ * __ham_insdel_recover --
+ *
+ * PUBLIC: int __ham_insdel_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_insdel_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t flags, opcode;
+ int cmp_n, cmp_p, ret, type;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_insdel_print);
+ REC_INTRO(__ham_insdel_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ /*
+ * cmp_p == 0: on-disk page is exactly at the LSN logged before
+ * the update, so the change may be re-applied (redo).
+ * cmp_n == 0: the page already carries this record's LSN, so the
+ * change may be rolled back (undo).
+ */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+ /*
+ * Two possible things going on:
+ * redo a delete/undo a put: delete the item from the page.
+ * redo a put/undo a delete: add the item to the page.
+ * If we are undoing a delete, then the information logged is the
+ * entire entry off the page, not just the data of a dbt. In
+ * this case, we want to copy it back onto the page verbatim.
+ * We do this by calling __putitem with the type H_OFFPAGE instead
+ * of H_KEYDATA.
+ */
+ opcode = OPCODE_OF(argp->opcode);
+
+ flags = 0;
+ if ((opcode == DELPAIR && cmp_n == 0 && DB_UNDO(op)) ||
+ (opcode == PUTPAIR && cmp_p == 0 && DB_REDO(op))) {
+ /*
+ * Need to redo a PUT or undo a delete. If we are undoing a
+ * delete, we've got to restore the item back to its original
+ * position. That's a royal pain in the butt (because we do
+ * not store item lengths on the page), but there's no choice.
+ */
+ if (opcode != DELPAIR ||
+ argp->ndx == (u_int32_t)NUM_ENT(pagep)) {
+ __ham_putitem(file_dbp, pagep, &argp->key,
+ DB_UNDO(op) || PAIR_ISKEYBIG(argp->opcode) ?
+ H_OFFPAGE : H_KEYDATA);
+
+ if (PAIR_ISDATADUP(argp->opcode))
+ type = H_DUPLICATE;
+ else if (DB_UNDO(op) || PAIR_ISDATABIG(argp->opcode))
+ type = H_OFFPAGE;
+ else
+ type = H_KEYDATA;
+ __ham_putitem(file_dbp, pagep, &argp->data, type);
+ } else
+ (void)__ham_reputpair(file_dbp, pagep,
+ argp->ndx, &argp->key, &argp->data);
+
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+
+ } else if ((opcode == DELPAIR && cmp_p == 0 && DB_REDO(op)) ||
+ (opcode == PUTPAIR && cmp_n == 0 && DB_UNDO(op))) {
+ /* Need to undo a put or redo a delete. */
+ __ham_dpair(file_dbp, pagep, argp->ndx);
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ /* flags is either 0 (no change) or DB_MPOOL_DIRTY (page modified). */
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /* Return the previous LSN. */
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_newpage_recover --
+ * This log message is used when we add/remove overflow pages. This
+ * message takes care of the pointer chains, not the data on the pages.
+ *
+ * PUBLIC: int __ham_newpage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_newpage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t flags;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_newpage_print);
+ REC_INTRO(__ham_newpage_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->new_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto ppage;
+ } else if ((ret = mpf->get(mpf,
+ &argp->new_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ /*
+ * There are potentially three pages we need to check: the one
+ * that we created/deleted, the one before it and the one after
+ * it.
+ */
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ /*
+ * Throughout this routine, a non-zero "flags" doubles as the
+ * "page was modified" marker: it both selects DB_MPOOL_DIRTY on
+ * the mpool put and gates the LSN update below.
+ */
+ flags = 0;
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DELOVFL)) {
+ /* Redo a create new page or undo a delete new page. */
+ P_INIT(pagep, file_dbp->pgsize, argp->new_pgno,
+ argp->prev_pgno, argp->next_pgno, 0, P_HASH);
+ flags = DB_MPOOL_DIRTY;
+ } else if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DELOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
+ /*
+ * Redo a delete or undo a create new page. All we
+ * really need to do is change the LSN.
+ */
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
+
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /* Now do the prev page: keep its forward link consistent. */
+ppage: if (argp->prev_pgno != PGNO_INVALID) {
+ if ((ret = mpf->get(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ ret = 0;
+ goto npage;
+ } else if ((ret = mpf->get(mpf,
+ &argp->prev_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
+ flags = 0;
+
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DELOVFL)) {
+ /* Redo a create new page or undo a delete new page. */
+ pagep->next_pgno = argp->new_pgno;
+ flags = DB_MPOOL_DIRTY;
+ } else if ((cmp_p == 0 &&
+ DB_REDO(op) && argp->opcode == DELOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
+ /* Redo a delete or undo a create new page. */
+ pagep->next_pgno = argp->next_pgno;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->prevlsn;
+
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+ }
+
+ /* Now time to do the next page: keep its back link consistent. */
+npage: if (argp->next_pgno != PGNO_INVALID) {
+ if ((ret = mpf->get(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist.
+ * That is equivalent to having a pagelsn of 0,
+ * so we would not have to undo anything. In
+ * this case, don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->next_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+ flags = 0;
+
+ if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == PUTOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DELOVFL)) {
+ /* Redo a create new page or undo a delete new page. */
+ pagep->prev_pgno = argp->new_pgno;
+ flags = DB_MPOOL_DIRTY;
+ } else if ((cmp_p == 0 &&
+ DB_REDO(op) && argp->opcode == DELOVFL) ||
+ (cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
+ /* Redo a delete or undo a create new page. */
+ pagep->prev_pgno = argp->prev_pgno;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags)
+ LSN(pagep) = DB_REDO(op) ? *lsnp : argp->nextlsn;
+
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+ }
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_replace_recover --
+ * This log message refers to partial puts that are local to a single
+ * page. You can think of them as special cases of the more general
+ * insdel log message.
+ *
+ * PUBLIC: int __ham_replace_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_replace_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_replace_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ DBT dbt;
+ PAGE *pagep;
+ u_int32_t flags;
+ int32_t grow;
+ int cmp_n, cmp_p, ret;
+ u_int8_t *hk;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_replace_print);
+ REC_INTRO(__ham_replace_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ memset(&dbt, 0, sizeof(dbt));
+ flags = 0;
+ grow = 1;
+
+ /*
+ * grow is the signed on-page size delta handed to
+ * __ham_onpage_replace; a negative value shrinks the item.
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Reapply the change as specified. */
+ dbt.data = argp->newitem.data;
+ dbt.size = argp->newitem.size;
+ grow = argp->newitem.size - argp->olditem.size;
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Undo the already applied change. */
+ dbt.data = argp->olditem.data;
+ dbt.size = argp->olditem.size;
+ grow = argp->olditem.size - argp->newitem.size;
+ LSN(pagep) = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+
+ if (flags) {
+ __ham_onpage_replace(file_dbp, pagep,
+ argp->ndx, argp->off, grow, &dbt);
+ /* makedup: the replace also toggled the item's on-page type. */
+ if (argp->makedup) {
+ hk = P_ENTRY(file_dbp, pagep, argp->ndx);
+ if (DB_REDO(op))
+ HPAGE_PTYPE(hk) = H_DUPLICATE;
+ else
+ HPAGE_PTYPE(hk) = H_KEYDATA;
+ }
+ }
+
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_splitdata_recover --
+ *
+ * PUBLIC: int __ham_splitdata_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_splitdata_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t flags;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_splitdata_print);
+ REC_INTRO(__ham_splitdata_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ /*
+ * There are two types of log messages here, one for the old page
+ * and one for the new pages created. The original image in the
+ * SPLITOLD record is used for undo. The image in the SPLITNEW
+ * is used for redo. We should never have a case where there is
+ * a redo operation and the SPLITOLD record is on disk, but not
+ * the SPLITNEW record. Therefore, we only have work to do when
+ * redo NEW messages and undo OLD messages, but we have to update
+ * LSNs in both cases.
+ */
+ flags = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ if (argp->opcode == SPLITNEW)
+ /* Need to redo the split described. */
+ memcpy(pagep, argp->pageimage.data,
+ argp->pageimage.size);
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ if (argp->opcode == SPLITOLD) {
+ /* Put back the old image. */
+ memcpy(pagep, argp->pageimage.data,
+ argp->pageimage.size);
+ } else
+ /* Undoing SPLITNEW: reset to a fresh empty hash page. */
+ P_INIT(pagep, file_dbp->pgsize, argp->pgno,
+ PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ LSN(pagep) = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_copypage_recover --
+ * Recovery function for copypage.
+ *
+ * PUBLIC: int __ham_copypage_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_copypage_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ u_int32_t flags;
+ int cmp_n, cmp_p, ret;
+
+ pagep = NULL;
+ COMPQUIET(info, NULL);
+
+ REC_PRINT(__ham_copypage_print);
+ REC_INTRO(__ham_copypage_read, 1);
+
+ flags = 0;
+
+ /*
+ * Three pages participate: the bucket page (pgno), the page whose
+ * contents were copied into it (next_pgno), and the page after
+ * that (nnext_pgno), whose back pointer must be fixed up.
+ */
+
+ /* This is the bucket page. */
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto donext;
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ memcpy(pagep, argp->page.data, argp->page.size);
+ PGNO(pagep) = argp->pgno;
+ PREV_PGNO(pagep) = PGNO_INVALID;
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ P_INIT(pagep, file_dbp->pgsize, argp->pgno, PGNO_INVALID,
+ argp->next_pgno, 0, P_HASH);
+ LSN(pagep) = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+donext: /* Now fix up the "next" page. */
+ if ((ret = mpf->get(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ ret = 0;
+ goto do_nn;
+ } else if ((ret = mpf->get(mpf,
+ &argp->next_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ /* For REDO just update the LSN. For UNDO copy page back. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nextlsn);
+ flags = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->page.data, argp->page.size);
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /* Now fix up the next's next page. */
+do_nn: if (argp->nnext_pgno == PGNO_INVALID)
+ goto done;
+
+ if ((ret = mpf->get(mpf, &argp->nnext_pgno, 0, &pagep)) != 0) {
+ if (DB_UNDO(op)) {
+ /*
+ * We are undoing and the page doesn't exist. That
+ * is equivalent to having a pagelsn of 0, so we
+ * would not have to undo anything. In this case,
+ * don't bother creating a page.
+ */
+ goto done;
+ } else if ((ret = mpf->get(mpf,
+ &argp->nnext_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->nnextlsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->nnextlsn);
+
+ flags = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ PREV_PGNO(pagep) = argp->pgno;
+ LSN(pagep) = *lsnp;
+ flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ PREV_PGNO(pagep) = argp->next_pgno;
+ LSN(pagep) = argp->nnextlsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+ pagep = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __ham_metagroup_recover --
+ * Recovery function for metagroup.
+ *
+ * PUBLIC: int __ham_metagroup_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_metagroup_args *argp;
+ HASH_CURSOR *hcp;
+ DB *file_dbp;
+ DBMETA *mmeta;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ u_int32_t flags, mmeta_flags;
+ int cmp_n, cmp_p, did_recover, groupgrow, ret;
+
+ COMPQUIET(info, NULL);
+ mmeta_flags = 0;
+ mmeta = NULL;
+ REC_PRINT(__ham_metagroup_print);
+ REC_INTRO(__ham_metagroup_read, 1);
+
+ /*
+ * This logs the virtual create of pages pgno to pgno + bucket
+ * Since the mpool page-allocation is not really able to be
+ * transaction protected, we can never undo it. Even in an abort,
+ * we have to allocate these pages to the hash table if they
+ * were actually created. In particular, during disaster
+ * recovery the metapage may be before this point if we
+ * are rolling backward. If the file has not been extended
+ * then the metapage could not have been updated.
+ * The log record contains:
+ * bucket: new bucket being allocated.
+ * pgno: page number of the new bucket.
+ * if bucket is a power of 2, then we allocated a whole batch of
+ * pages; if it's not, then we simply allocated one new page.
+ */
+ groupgrow = (u_int32_t)(1 << __db_log2(argp->bucket + 1)) ==
+ argp->bucket + 1;
+ pgno = argp->pgno;
+ if (argp->newalloc)
+ pgno += argp->bucket;
+
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
+
+ flags = 0;
+ if ((cmp_p == 0 && DB_REDO(op)) || (cmp_n == 0 && DB_UNDO(op))) {
+ /*
+ * We need to make sure that we redo the allocation of the
+ * pages.
+ */
+ if (DB_REDO(op))
+ pagep->lsn = *lsnp;
+ else
+ pagep->lsn = argp->pagelsn;
+ flags = DB_MPOOL_DIRTY;
+ }
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
+ goto out;
+
+ /* Now we have to update the meta-data page. */
+ hcp = (HASH_CURSOR *)dbc->internal;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto out;
+ cmp_n = log_compare(lsnp, &hcp->hdr->dbmeta.lsn);
+ cmp_p = log_compare(&hcp->hdr->dbmeta.lsn, &argp->metalsn);
+ CHECK_LSN(op, cmp_p, &hcp->hdr->dbmeta.lsn, &argp->metalsn);
+ did_recover = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Redo the actual updating of bucket counts. */
+ ++hcp->hdr->max_bucket;
+ if (groupgrow) {
+ hcp->hdr->low_mask = hcp->hdr->high_mask;
+ hcp->hdr->high_mask =
+ (argp->bucket + 1) | hcp->hdr->low_mask;
+ }
+ hcp->hdr->dbmeta.lsn = *lsnp;
+ did_recover = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Undo the actual updating of bucket counts. */
+ --hcp->hdr->max_bucket;
+ if (groupgrow) {
+ hcp->hdr->high_mask = hcp->hdr->low_mask;
+ hcp->hdr->low_mask = hcp->hdr->high_mask >> 1;
+ }
+ hcp->hdr->dbmeta.lsn = argp->metalsn;
+ did_recover = 1;
+ }
+
+ /*
+ * Now we need to fix up the spares array. Each entry in the
+ * spares array indicates the beginning page number for the
+ * indicated doubling. We need to fill this in whenever the
+ * spares array is invalid, since we never reclaim pages from
+ * the spares array and we have to allocate the pages to the
+ * spares array in both the redo and undo cases.
+ */
+ if (argp->newalloc &&
+ hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] == PGNO_INVALID) {
+ hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] =
+ argp->pgno - argp->bucket - 1;
+ did_recover = 1;
+ }
+
+ /*
+ * Finally, we need to potentially fix up the last_pgno field
+ * in the master meta-data page (which may or may not be the
+ * same as the hash header page).
+ */
+ if (argp->mmpgno != argp->mpgno) {
+ if ((ret =
+ mpf->get(mpf, &argp->mmpgno, 0, (PAGE **)&mmeta)) != 0)
+ goto out;
+ mmeta_flags = 0;
+ cmp_n = log_compare(lsnp, &mmeta->lsn);
+ cmp_p = log_compare(&mmeta->lsn, &argp->mmetalsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ mmeta->lsn = *lsnp;
+ mmeta_flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ mmeta->lsn = argp->mmetalsn;
+ mmeta_flags = DB_MPOOL_DIRTY;
+ }
+ } else
+ mmeta = (DBMETA *)hcp->hdr;
+
+ /*
+ * NOTE(review): when mmpgno == mpgno, mmeta aliases the pinned
+ * hash header and the last_pgno change below is only flushed via
+ * the H_DIRTY flag set when did_recover is non-zero — confirm
+ * that newalloc implies did_recover in that path.
+ */
+ if (argp->newalloc) {
+ if (mmeta->last_pgno < pgno)
+ mmeta->last_pgno = pgno;
+ mmeta_flags = DB_MPOOL_DIRTY;
+ }
+
+ if (argp->mmpgno != argp->mpgno &&
+ (ret = mpf->put(mpf, mmeta, mmeta_flags)) != 0)
+ goto out;
+ mmeta = NULL;
+
+ if (did_recover)
+ F_SET(hcp, H_DIRTY);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (mmeta != NULL)
+ (void)mpf->put(mpf, mmeta, 0);
+ if (dbc != NULL)
+ (void)__ham_release_meta(dbc);
+ if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC)
+ ret = 0;
+
+ REC_CLOSE;
+}
+
+/*
+ * __ham_groupalloc_recover --
+ * Recover the batch creation of a set of pages for a new database.
+ *
+ * PUBLIC: int __ham_groupalloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_groupalloc_args *argp;
+ DBMETA *mmeta;
+ DB_MPOOLFILE *mpf;
+ DB *file_dbp;
+ DBC *dbc;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, modified, ret;
+
+ mmeta = NULL;
+ modified = 0;
+ REC_PRINT(__ham_groupalloc_print);
+ REC_INTRO(__ham_groupalloc_read, 0);
+
+ pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &pgno, 0, &mmeta)) != 0) {
+ if (DB_REDO(op)) {
+ /* Page should have existed. */
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ } else {
+ ret = 0;
+ goto done;
+ }
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(mmeta));
+ cmp_p = log_compare(&LSN(mmeta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(mmeta), &argp->meta_lsn);
+
+ /*
+ * Basically, we used mpool to allocate a chunk of pages.
+ * We need to either add those to a free list (in the undo
+ * case) or initialize them (in the redo case).
+ *
+ * If we are redoing and this is a hash subdatabase, it's possible
+ * that the pages were never allocated, so we'd better check for
+ * that and handle it here.
+ */
+ if (DB_REDO(op)) {
+ if ((ret = __ham_alloc_pages(file_dbp, argp, lsnp)) != 0)
+ goto out;
+ if (cmp_p == 0) {
+ LSN(mmeta) = *lsnp;
+ modified = 1;
+ }
+ } else if (DB_UNDO(op)) {
+ /*
+ * Reset the last page back to its preallocation state.
+ */
+ pgno = argp->start_pgno + argp->num - 1;
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) == 0) {
+
+ if (log_compare(&pagep->lsn, lsnp) == 0)
+ ZERO_LSN(pagep->lsn);
+
+ if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ } else if (ret != DB_PAGE_NOTFOUND)
+ goto out;
+ /*
+ * Always put the pages into the limbo list and free them later.
+ */
+ if ((ret = __db_add_limbo(dbenv,
+ info, argp->fileid, argp->start_pgno, argp->num)) != 0)
+ goto out;
+ if (cmp_n == 0) {
+ LSN(mmeta) = argp->meta_lsn;
+ modified = 1;
+ }
+ }
+
+ /* Fall through: done/out share the meta-page put below. */
+done: if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+out: if (mmeta != NULL)
+ (void)mpf->put(mpf, mmeta, modified ? DB_MPOOL_DIRTY : 0);
+
+ if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC)
+ ret = 0;
+ REC_CLOSE;
+}
+
+/*
+ * __ham_alloc_pages --
+ *
+ * Called during redo of a file create. We create new pages in the file
+ * using the MPOOL_NEW_GROUP flag. We then log the meta-data page with a
+ * __crdel_metasub message. If we manage to crash without the newly written
+ * pages getting to disk (I'm not sure this can happen anywhere except our
+ * test suite?!), then we need to go through a recreate the final pages.
+ * Hash normally has holes in its files and handles them appropriately.
+ */
+static int
+__ham_alloc_pages(dbp, argp, lsnp)
+ DB *dbp;
+ __ham_groupalloc_args *argp;
+ DB_LSN *lsnp;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t last_pgno;
+ int ret;
+
+ mpf = dbp->mpf;
+
+ /*
+ * Fetch the final page of the group allocation.  If it is present
+ * and initialized (holds entries or a non-zero LSN), the earlier
+ * allocation made it to disk and there is nothing left to do.
+ */
+ last_pgno = argp->start_pgno + argp->num - 1;
+ if ((ret = mpf->get(mpf, &last_pgno, 0, &pagep)) == 0) {
+ if (!(NUM_ENT(pagep) == 0 && IS_ZERO_LSN(pagep->lsn)))
+ return (mpf->put(mpf, pagep, 0));
+ } else if ((ret =
+ mpf->get(mpf, &last_pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
+ /* Could not even create the page: report and fail. */
+ __db_pgerr(dbp, last_pgno, ret);
+ return (ret);
+ }
+
+ /* Set the page up as a fresh hash page and write it back dirty. */
+ P_INIT(pagep, dbp->pgsize, last_pgno,
+ PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ pagep->lsn = *lsnp;
+ return (mpf->put(mpf, pagep, DB_MPOOL_DIRTY));
+}
+
+/*
+ * __ham_curadj_recover --
+ * Undo cursor adjustments if a subtransaction fails.
+ *
+ * PUBLIC: int __ham_curadj_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_curadj_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_curadj_args *argp;
+ DB_MPOOLFILE *mpf;
+ DB *file_dbp;
+ DBC *dbc;
+ int ret;
+ HASH_CURSOR *hcp;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__ham_curadj_print);
+ REC_INTRO(__ham_curadj_read, 0);
+
+ /* Cursor adjustments are only rolled back on transaction abort. */
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ /*
+ * Undo the adjustment by reinitializing the cursor to look like
+ * the one that was used to do the adjustment, then invert the
+ * add flag so that the update below undoes the adjustment.
+ */
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hcp->pgno = argp->pgno;
+ hcp->indx = argp->indx;
+ hcp->dup_off = argp->dup_off;
+ hcp->order = argp->order;
+ if (!argp->add)
+ F_SET(hcp, H_DELETED);
+ (void)__ham_c_update(dbc, argp->len, !argp->add, argp->is_dup);
+
+done: *lsnp = argp->prev_lsn;
+out: REC_CLOSE;
+}
+
+/*
+ * __ham_chgpg_recover --
+ * Undo cursor adjustments if a subtransaction fails.
+ *
+ * PUBLIC: int __ham_chgpg_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __ham_chgpg_args *argp;
+ BTREE_CURSOR *opdcp;
+ DB_MPOOLFILE *mpf;
+ DB *file_dbp, *ldbp;
+ DBC *dbc;
+ int ret;
+ DBC *cp;
+ HASH_CURSOR *lcp;
+ u_int32_t order, indx;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__ham_chgpg_print);
+ REC_INTRO(__ham_chgpg_read, 0);
+
+ /* Cursor adjustments are only rolled back on transaction abort. */
+ if (op != DB_TXN_ABORT)
+ goto done;
+
+ /* Overloaded fields for DB_HAM_DEL*PG */
+ indx = argp->old_indx;
+ order = argp->new_indx;
+
+ /*
+ * Walk every open handle on the same underlying file (matched by
+ * adj_fileid) and adjust each of its active hash cursors back to
+ * the pre-operation position, according to argp->mode.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, file_dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == file_dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, file_dbp->mutexp);
+
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ lcp = (HASH_CURSOR *)cp->internal;
+
+ switch (argp->mode) {
+ case DB_HAM_DELFIRSTPG:
+ if (lcp->pgno != argp->new_pgno)
+ break;
+ if (lcp->indx != indx ||
+ !F_ISSET(lcp, H_DELETED) ||
+ lcp->order >= order) {
+ lcp->pgno = argp->old_pgno;
+ if (lcp->indx == indx)
+ lcp->order -= order;
+ }
+ break;
+ case DB_HAM_DELMIDPG:
+ case DB_HAM_DELLASTPG:
+ if (lcp->pgno == argp->new_pgno &&
+ lcp->indx == indx &&
+ F_ISSET(lcp, H_DELETED) &&
+ lcp->order >= order) {
+ lcp->pgno = argp->old_pgno;
+ lcp->order -= order;
+ lcp->indx = 0;
+ }
+ break;
+ case DB_HAM_CHGPG:
+ /*
+ * If we're doing a CHGPG, we're undoing
+ * the move of a non-deleted item to a
+ * new page. Any cursors with the deleted
+ * flag set do not belong to this item;
+ * don't touch them.
+ */
+ if (F_ISSET(lcp, H_DELETED))
+ break;
+ /* FALLTHROUGH */
+ case DB_HAM_SPLIT:
+ if (lcp->pgno == argp->new_pgno &&
+ lcp->indx == argp->new_indx) {
+ lcp->indx = argp->old_indx;
+ lcp->pgno = argp->old_pgno;
+ }
+ break;
+ case DB_HAM_DUP:
+ /* Only off-page-duplicate cursors are affected. */
+ if (lcp->opd == NULL)
+ break;
+ opdcp = (BTREE_CURSOR *)lcp->opd->internal;
+ if (opdcp->pgno != argp->new_pgno ||
+ opdcp->indx != argp->new_indx)
+ break;
+
+ if (F_ISSET(opdcp, C_DELETED))
+ F_SET(lcp, H_DELETED);
+ /*
+ * We can't close a cursor while we have the
+ * dbp mutex locked, since c_close reacquires
+ * it. It should be safe to drop the mutex
+ * here, though, since newly opened cursors
+ * are put only at the end of the tailq and
+ * the cursor we're adjusting can't be closed
+ * under us.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, file_dbp->mutexp);
+ if ((ret = lcp->opd->c_close(lcp->opd)) != 0)
+ goto out;
+ MUTEX_THREAD_LOCK(dbenv, file_dbp->mutexp);
+ lcp->opd = NULL;
+ break;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, file_dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+done: *lsnp = argp->prev_lsn;
+out: REC_CLOSE;
+}
diff --git a/storage/bdb/hash/hash_reclaim.c b/storage/bdb/hash/hash_reclaim.c
new file mode 100644
index 00000000000..ac90ffff08a
--- /dev/null
+++ b/storage/bdb/hash/hash_reclaim.c
@@ -0,0 +1,111 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_reclaim.c,v 11.12 2002/03/28 19:49:43 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+
+/*
+ * __ham_reclaim --
+ * Reclaim the pages from a subdatabase and return them to the
+ * parent free list. For now, we link each freed page on the list
+ * separately. If people really store hash databases in subdatabases
+ * and do a lot of creates and deletes, this is going to be a problem,
+ * because hash needs chunks of contiguous storage. We may eventually
+ * need to go to a model where we maintain the free list with chunks of
+ * contiguous pages as well.
+ *
+ * PUBLIC: int __ham_reclaim __P((DB *, DB_TXN *txn));
+ */
+int
+__ham_reclaim(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBC *dbc;
+ HASH_CURSOR *hcp;
+ int ret;
+
+ /* Open up a cursor that we'll use for traversing. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err;
+
+ if ((ret = __ham_traverse(dbc,
+ DB_LOCK_WRITE, __db_reclaim_callback, dbc, 1)) != 0)
+ goto err;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto err;
+ return (0);
+
+err: if (hcp->hdr != NULL)
+ (void)__ham_release_meta(dbc);
+ (void)dbc->c_close(dbc);
+ return (ret);
+}
+
+/*
+ * __ham_truncate --
+ * Reclaim the pages from a subdatabase and return them to the
+ * parent free list.
+ *
+ * PUBLIC: int __ham_truncate __P((DB *, DB_TXN *txn, u_int32_t *));
+ */
+int
+__ham_truncate(dbp, txn, countp)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp;
+{
+ DBC *dbc;
+ HASH_CURSOR *hcp;
+ db_trunc_param trunc;
+ int ret;
+
+ /* Open up a cursor that we'll use for traversing. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err;
+
+ trunc.count = 0;
+ trunc.dbc = dbc;
+
+ if ((ret = __ham_traverse(dbc,
+ DB_LOCK_WRITE, __db_truncate_callback, &trunc, 1)) != 0)
+ goto err;
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto err;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ *countp = trunc.count;
+ return (0);
+
+err: if (hcp->hdr != NULL)
+ (void)__ham_release_meta(dbc);
+ (void)dbc->c_close(dbc);
+ return (ret);
+}
diff --git a/storage/bdb/hash/hash_stat.c b/storage/bdb/hash/hash_stat.c
new file mode 100644
index 00000000000..f9ee1d099cb
--- /dev/null
+++ b/storage/bdb/hash/hash_stat.c
@@ -0,0 +1,372 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_stat.c,v 11.48 2002/08/06 06:11:28 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+
+static int __ham_stat_callback __P((DB *, PAGE *, void *, int *));
+
+/*
+ * __ham_stat --
+ * Gather/print the hash statistics
+ *
+ * PUBLIC: int __ham_stat __P((DB *, void *, u_int32_t));
+ */
+int
+__ham_stat(dbp, spp, flags)
+ DB *dbp;
+ void *spp;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_HASH_STAT *sp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ PAGE *h;
+ db_pgno_t pgno;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ mpf = dbp->mpf;
+ sp = NULL;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_statchk(dbp, flags)) != 0)
+ return (ret);
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err;
+
+ /* Allocate and clear the structure. */
+ if ((ret = __os_umalloc(dbenv, sizeof(*sp), &sp)) != 0)
+ goto err;
+ memset(sp, 0, sizeof(*sp));
+ /* Copy the fields that we have. */
+ sp->hash_nkeys = hcp->hdr->dbmeta.key_count;
+ sp->hash_ndata = hcp->hdr->dbmeta.record_count;
+ sp->hash_pagesize = dbp->pgsize;
+ sp->hash_buckets = hcp->hdr->max_bucket + 1;
+ sp->hash_magic = hcp->hdr->dbmeta.magic;
+ sp->hash_version = hcp->hdr->dbmeta.version;
+ sp->hash_metaflags = hcp->hdr->dbmeta.flags;
+ sp->hash_ffactor = hcp->hdr->ffactor;
+
+ if (flags == DB_FAST_STAT || flags == DB_CACHED_COUNTS)
+ goto done;
+
+ /* Walk the free list, counting pages. */
+ for (sp->hash_free = 0, pgno = hcp->hdr->dbmeta.free;
+ pgno != PGNO_INVALID;) {
+ ++sp->hash_free;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ pgno = h->next_pgno;
+ (void)mpf->put(mpf, h, 0);
+ }
+
+ /* Now traverse the rest of the table. */
+ sp->hash_nkeys = 0;
+ sp->hash_ndata = 0;
+ if ((ret = __ham_traverse(dbc,
+ DB_LOCK_READ, __ham_stat_callback, sp, 0)) != 0)
+ goto err;
+
+ if (!F_ISSET(dbp, DB_AM_RDONLY)) {
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ goto err;
+ hcp->hdr->dbmeta.key_count = sp->hash_nkeys;
+ hcp->hdr->dbmeta.record_count = sp->hash_ndata;
+ }
+
+done:
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto err;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+
+ *(DB_HASH_STAT **)spp = sp;
+ return (0);
+
+err: if (sp != NULL)
+ __os_ufree(dbenv, sp);
+ if (hcp->hdr != NULL)
+ (void)__ham_release_meta(dbc);
+ (void)dbc->c_close(dbc);
+ return (ret);
+
+}
+
+/*
+ * __ham_traverse
+ * Traverse an entire hash table. We use the callback so that we
+ * can use this both for stat collection and for deallocation.
+ *
+ * PUBLIC: int __ham_traverse __P((DBC *, db_lockmode_t,
+ * PUBLIC: int (*)(DB *, PAGE *, void *, int *), void *, int));
+ */
+int
+__ham_traverse(dbc, mode, callback, cookie, look_past_max)
+ DBC *dbc;
+ db_lockmode_t mode;
+ int (*callback) __P((DB *, PAGE *, void *, int *));
+ void *cookie;
+ int look_past_max;
+{
+ DB *dbp;
+ DBC *opd;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HKEYDATA *hk;
+ db_pgno_t pgno, opgno;
+ int did_put, i, ret, t_ret;
+ u_int32_t bucket, spares_entry;
+
+ dbp = dbc->dbp;
+ opd = NULL;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
+ ret = 0;
+
+ /*
+ * In a perfect world, we could simply read each page in the file
+ * and look at its page type to tally the information necessary.
+ * Unfortunately, the bucket locking that hash tables do to make
+ * locking easy, makes this a pain in the butt. We have to traverse
+ * duplicate, overflow and big pages from the bucket so that we
+ * don't access anything that isn't properly locked.
+ *
+ */
+ for (bucket = 0;; bucket++) {
+ /*
+ * We put the loop exit condition check here, because
+ * it made for a really vile extended ?: that made SCO's
+ * compiler drop core.
+ *
+ * If look_past_max is not set, we can stop at max_bucket;
+ * if it is set, we need to include pages that are part of
+ * the current doubling but beyond the highest bucket we've
+ * split into, as well as pages from a "future" doubling
+ * that may have been created within an aborted
+ * transaction. To do this, keep looping (and incrementing
+ * bucket) until the corresponding spares array entries
+ * cease to be defined.
+ */
+ if (look_past_max) {
+ spares_entry = __db_log2(bucket + 1);
+ if (spares_entry >= NCACHED ||
+ hcp->hdr->spares[spares_entry] == 0)
+ break;
+ } else {
+ if (bucket > hcp->hdr->max_bucket)
+ break;
+ }
+
+ hcp->bucket = bucket;
+ hcp->pgno = pgno = BUCKET_TO_PAGE(hcp, bucket);
+ for (ret = __ham_get_cpage(dbc, mode); ret == 0;
+ ret = __ham_next_cpage(dbc, pgno, 0)) {
+
+ /*
+ * If we are cleaning up pages past the max_bucket,
+ * then they may be on the free list and have their
+			 * next pointers set, but they should be ignored.  In
+ * fact, we really ought to just skip anybody who is
+ * not a valid page.
+ */
+ if (TYPE(hcp->page) == P_INVALID)
+ break;
+ pgno = NEXT_PGNO(hcp->page);
+
+ /*
+ * Go through each item on the page checking for
+ * duplicates (in which case we have to count the
+ * duplicate pages) or big key/data items (in which
+ * case we have to count those pages).
+ */
+ for (i = 0; i < NUM_ENT(hcp->page); i++) {
+ hk = (HKEYDATA *)P_ENTRY(dbp, hcp->page, i);
+ switch (HPAGE_PTYPE(hk)) {
+ case H_OFFDUP:
+ memcpy(&opgno, HOFFDUP_PGNO(hk),
+ sizeof(db_pgno_t));
+ if ((ret = __db_c_newopd(dbc,
+ opgno, NULL, &opd)) != 0)
+ return (ret);
+ if ((ret = __bam_traverse(opd,
+ DB_LOCK_READ, opgno,
+ callback, cookie))
+ != 0)
+ goto err;
+ if ((ret = opd->c_close(opd)) != 0)
+ return (ret);
+ opd = NULL;
+ break;
+ case H_OFFPAGE:
+ /*
+ * We are about to get a big page
+ * which will use the same spot that
+ * the current page uses, so we need
+ * to restore the current page before
+ * looking at it again.
+ */
+ memcpy(&opgno, HOFFPAGE_PGNO(hk),
+ sizeof(db_pgno_t));
+ if ((ret = __db_traverse_big(dbp,
+ opgno, callback, cookie)) != 0)
+ goto err;
+ break;
+ case H_KEYDATA:
+ break;
+ }
+ }
+
+ /* Call the callback on main pages. */
+ if ((ret = callback(dbp,
+ hcp->page, cookie, &did_put)) != 0)
+ goto err;
+
+ if (did_put)
+ hcp->page = NULL;
+ if (pgno == PGNO_INVALID)
+ break;
+ }
+ if (ret != 0)
+ goto err;
+
+ if (STD_LOCKING(dbc))
+ (void)dbp->dbenv->lock_put(dbp->dbenv, &hcp->lock);
+
+ if (hcp->page != NULL) {
+ if ((ret = mpf->put(mpf, hcp->page, 0)) != 0)
+ return (ret);
+ hcp->page = NULL;
+ }
+
+ }
+err: if (opd != NULL &&
+ (t_ret = opd->c_close(opd)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+static int
+__ham_stat_callback(dbp, pagep, cookie, putp)
+ DB *dbp;
+ PAGE *pagep;
+ void *cookie;
+ int *putp;
+{
+ DB_HASH_STAT *sp;
+ DB_BTREE_STAT bstat;
+ db_indx_t indx, len, off, tlen, top;
+ u_int8_t *hk;
+ int ret;
+
+ *putp = 0;
+ sp = cookie;
+
+ switch (pagep->type) {
+ case P_INVALID:
+ /*
+ * Hash pages may be wholly zeroed; this is not a bug.
+ * Obviously such pages have no data, so we can just proceed.
+ */
+ break;
+ case P_HASH:
+ /*
+ * We count the buckets and the overflow pages
+ * separately and tally their bytes separately
+ * as well. We need to figure out if this page
+ * is a bucket.
+ */
+ if (PREV_PGNO(pagep) == PGNO_INVALID)
+ sp->hash_bfree += P_FREESPACE(dbp, pagep);
+ else {
+ sp->hash_overflows++;
+ sp->hash_ovfl_free += P_FREESPACE(dbp, pagep);
+ }
+ top = NUM_ENT(pagep);
+ /* Correct for on-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ switch (*H_PAIRDATA(dbp, pagep, indx)) {
+ case H_OFFDUP:
+ case H_OFFPAGE:
+ break;
+ case H_KEYDATA:
+ sp->hash_ndata++;
+ break;
+ case H_DUPLICATE:
+ tlen = LEN_HDATA(dbp, pagep, 0, indx);
+ hk = H_PAIRDATA(dbp, pagep, indx);
+ for (off = 0; off < tlen;
+ off += len + 2 * sizeof (db_indx_t)) {
+ sp->hash_ndata++;
+ memcpy(&len,
+ HKEYDATA_DATA(hk)
+ + off, sizeof(db_indx_t));
+ }
+ }
+ }
+ sp->hash_nkeys += H_NUMPAIRS(pagep);
+ break;
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_LBTREE:
+ case P_LRECNO:
+ case P_LDUP:
+ /*
+ * These are all btree pages; get a correct
+ * cookie and call them. Then add appropriate
+ * fields into our stat structure.
+ */
+ memset(&bstat, 0, sizeof(bstat));
+ bstat.bt_dup_pgfree = 0;
+ bstat.bt_int_pgfree = 0;
+ bstat.bt_leaf_pgfree = 0;
+ bstat.bt_ndata = 0;
+ if ((ret = __bam_stat_callback(dbp, pagep, &bstat, putp)) != 0)
+ return (ret);
+ sp->hash_dup++;
+ sp->hash_dup_free += bstat.bt_leaf_pgfree +
+ bstat.bt_dup_pgfree + bstat.bt_int_pgfree;
+ sp->hash_ndata += bstat.bt_ndata;
+ break;
+ case P_OVERFLOW:
+ sp->hash_bigpages++;
+ sp->hash_big_bfree += P_OVFLSPACE(dbp, dbp->pgsize, pagep);
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, pagep->pgno));
+ }
+
+ return (0);
+}
diff --git a/storage/bdb/hash/hash_upgrade.c b/storage/bdb/hash/hash_upgrade.c
new file mode 100644
index 00000000000..2dd21d7b644
--- /dev/null
+++ b/storage/bdb/hash/hash_upgrade.c
@@ -0,0 +1,266 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_upgrade.c,v 11.32 2002/08/06 05:34:58 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/db_upgrade.h"
+
+/*
+ * __ham_30_hashmeta --
+ * Upgrade the database from version 4/5 to version 6.
+ *
+ * PUBLIC: int __ham_30_hashmeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__ham_30_hashmeta(dbp, real_name, obuf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *obuf;
+{
+ DB_ENV *dbenv;
+ HASHHDR *oldmeta;
+ HMETA30 newmeta;
+ u_int32_t *o_spares, *n_spares;
+ u_int32_t fillf, maxb, nelem;
+ int i, max_entry, ret;
+
+ dbenv = dbp->dbenv;
+ memset(&newmeta, 0, sizeof(newmeta));
+
+ oldmeta = (HASHHDR *)obuf;
+
+ /*
+ * The first 32 bytes are similar. The only change is the version
+ * and that we removed the ovfl_point and have the page type now.
+ */
+
+ newmeta.dbmeta.lsn = oldmeta->lsn;
+ newmeta.dbmeta.pgno = oldmeta->pgno;
+ newmeta.dbmeta.magic = oldmeta->magic;
+ newmeta.dbmeta.version = 6;
+ newmeta.dbmeta.pagesize = oldmeta->pagesize;
+ newmeta.dbmeta.type = P_HASHMETA;
+
+ /* Move flags */
+ newmeta.dbmeta.flags = oldmeta->flags;
+
+ /* Copy the free list, which has changed its name but works the same. */
+ newmeta.dbmeta.free = oldmeta->last_freed;
+
+ /* Copy: max_bucket, high_mask, low-mask, ffactor, nelem, h_charkey */
+ newmeta.max_bucket = oldmeta->max_bucket;
+ newmeta.high_mask = oldmeta->high_mask;
+ newmeta.low_mask = oldmeta->low_mask;
+ newmeta.ffactor = oldmeta->ffactor;
+ newmeta.nelem = oldmeta->nelem;
+ newmeta.h_charkey = oldmeta->h_charkey;
+
+ /*
+ * There was a bug in 2.X versions where the nelem could go negative.
+ * In general, this is considered "bad." If it does go negative
+ * (that is, very large and positive), we'll die trying to dump and
+ * load this database. So, let's see if we can fix it here.
+ */
+ nelem = newmeta.nelem;
+ fillf = newmeta.ffactor;
+ maxb = newmeta.max_bucket;
+
+ if ((fillf != 0 && fillf * maxb < 2 * nelem) ||
+ (fillf == 0 && nelem > 0x8000000))
+ newmeta.nelem = 0;
+
+ /*
+ * We now have to convert the spares array. The old spares array
+ * contained the total number of extra pages allocated prior to
+ * the bucket that begins the next doubling. The new spares array
+ * contains the page number of the first bucket in the next doubling
+ * MINUS the bucket number of that bucket.
+ */
+ o_spares = oldmeta->spares;
+ n_spares = newmeta.spares;
+ max_entry = __db_log2(maxb + 1); /* highest spares entry in use */
+ n_spares[0] = 1;
+ for (i = 1; i < NCACHED && i <= max_entry; i++)
+ n_spares[i] = 1 + o_spares[i - 1];
+
+ /* Replace the unique ID. */
+ if ((ret = __os_fileid(dbenv, real_name, 1, newmeta.dbmeta.uid)) != 0)
+ return (ret);
+
+ /* Overwrite the original. */
+ memcpy(oldmeta, &newmeta, sizeof(newmeta));
+
+ return (0);
+}
+
+/*
+ * __ham_30_sizefix --
+ * Make sure that all hash pages belonging to the current
+ * hash doubling are within the bounds of the file.
+ *
+ * PUBLIC: int __ham_30_sizefix __P((DB *, DB_FH *, char *, u_int8_t *));
+ */
+int
+__ham_30_sizefix(dbp, fhp, realname, metabuf)
+ DB *dbp;
+ DB_FH *fhp;
+ char *realname;
+ u_int8_t *metabuf;
+{
+ u_int8_t buf[DB_MAX_PGSIZE];
+ DB_ENV *dbenv;
+ HMETA30 *meta;
+ db_pgno_t last_actual, last_desired;
+ int ret;
+ size_t nw;
+ u_int32_t pagesize;
+
+ dbenv = dbp->dbenv;
+ memset(buf, 0, DB_MAX_PGSIZE);
+
+ meta = (HMETA30 *)metabuf;
+ pagesize = meta->dbmeta.pagesize;
+
+ /*
+ * Get the last page number. To do this, we'll need dbp->pgsize
+ * to be set right, so slam it into place.
+ */
+ dbp->pgsize = pagesize;
+ if ((ret = __db_lastpgno(dbp, realname, fhp, &last_actual)) != 0)
+ return (ret);
+
+ /*
+ * The last bucket in the doubling is equal to high_mask; calculate
+ * the page number that implies.
+ */
+ last_desired = BS_TO_PAGE(meta->high_mask, meta->spares);
+
+ /*
+ * If last_desired > last_actual, we need to grow the file. Write
+ * a zeroed page where last_desired would go.
+ */
+ if (last_desired > last_actual) {
+ if ((ret = __os_seek(dbenv,
+ fhp, pagesize, last_desired, 0, 0, DB_OS_SEEK_SET)) != 0)
+ return (ret);
+ if ((ret = __os_write(dbenv, fhp, buf, pagesize, &nw)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __ham_31_hashmeta --
+ * Upgrade the database from version 6 to version 7.
+ *
+ * PUBLIC: int __ham_31_hashmeta
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__ham_31_hashmeta(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ HMETA31 *newmeta;
+ HMETA30 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+ COMPQUIET(fhp, NULL);
+
+ newmeta = (HMETA31 *)h;
+ oldmeta = (HMETA30 *)h;
+
+ /*
+ * Copy the fields down the page.
+ * The fields may overlap so start at the bottom and use memmove().
+ */
+ memmove(newmeta->spares, oldmeta->spares, sizeof(oldmeta->spares));
+ newmeta->h_charkey = oldmeta->h_charkey;
+ newmeta->nelem = oldmeta->nelem;
+ newmeta->ffactor = oldmeta->ffactor;
+ newmeta->low_mask = oldmeta->low_mask;
+ newmeta->high_mask = oldmeta->high_mask;
+ newmeta->max_bucket = oldmeta->max_bucket;
+ memmove(newmeta->dbmeta.uid,
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
+ newmeta->dbmeta.record_count = 0;
+ newmeta->dbmeta.key_count = 0;
+ ZERO_LSN(newmeta->dbmeta.unused3);
+
+ /* Update the version. */
+ newmeta->dbmeta.version = 7;
+
+ /* Upgrade the flags. */
+ if (LF_ISSET(DB_DUPSORT))
+ F_SET(&newmeta->dbmeta, DB_HASH_DUPSORT);
+
+ *dirtyp = 1;
+ return (0);
+}
+
+/*
+ * __ham_31_hash --
+ * Upgrade the database hash leaf pages.
+ *
+ * PUBLIC: int __ham_31_hash
+ * PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+ */
+int
+__ham_31_hash(dbp, real_name, flags, fhp, h, dirtyp)
+ DB *dbp;
+ char *real_name;
+ u_int32_t flags;
+ DB_FH *fhp;
+ PAGE *h;
+ int *dirtyp;
+{
+ HKEYDATA *hk;
+ db_pgno_t pgno, tpgno;
+ db_indx_t indx;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ for (indx = 0; indx < NUM_ENT(h); indx += 2) {
+ hk = (HKEYDATA *)H_PAIRDATA(dbp, h, indx);
+ if (HPAGE_PTYPE(hk) == H_OFFDUP) {
+ memcpy(&pgno, HOFFDUP_PGNO(hk), sizeof(db_pgno_t));
+ tpgno = pgno;
+ if ((ret = __db_31_offdup(dbp, real_name, fhp,
+ LF_ISSET(DB_DUPSORT) ? 1 : 0, &tpgno)) != 0)
+ break;
+ if (pgno != tpgno) {
+ *dirtyp = 1;
+ memcpy(HOFFDUP_PGNO(hk),
+ &tpgno, sizeof(db_pgno_t));
+ }
+ }
+ }
+
+ return (ret);
+}
diff --git a/storage/bdb/hash/hash_verify.c b/storage/bdb/hash/hash_verify.c
new file mode 100644
index 00000000000..e6f5a2b0d65
--- /dev/null
+++ b/storage/bdb/hash/hash_verify.c
@@ -0,0 +1,1079 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: hash_verify.c,v 1.53 2002/08/06 05:35:02 bostic Exp $
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_verify.c,v 1.53 2002/08/06 05:35:02 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+
+static int __ham_dups_unsorted __P((DB *, u_int8_t *, u_int32_t));
+static int __ham_vrfy_bucket __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
+ u_int32_t));
+static int __ham_vrfy_item __P((DB *,
+ VRFY_DBINFO *, db_pgno_t, PAGE *, u_int32_t, u_int32_t));
+
+/*
+ * __ham_vrfy_meta --
+ * Verify the hash-specific part of a metadata page.
+ *
+ * Note that unlike btree, we don't save things off, because we
+ * will need most everything again to verify each page and the
+ * amount of state here is significant.
+ *
+ * PUBLIC: int __ham_vrfy_meta __P((DB *, VRFY_DBINFO *, HMETA *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__ham_vrfy_meta(dbp, vdp, m, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ HMETA *m;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ HASH *hashp;
+ VRFY_PAGEINFO *pip;
+ int i, ret, t_ret, isbad;
+ u_int32_t pwr, mbucket;
+ u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ isbad = 0;
+
+ hashp = dbp->h_internal;
+
+ if (hashp != NULL && hashp->h_hash != NULL)
+ hfunc = hashp->h_hash;
+ else
+ hfunc = __ham_func5;
+
+ /*
+ * If we haven't already checked the common fields in pagezero,
+ * check them.
+ */
+ if (!F_ISSET(pip, VRFY_INCOMPLETE) &&
+ (ret = __db_vrfy_meta(dbp, vdp, &m->dbmeta, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /* h_charkey */
+ if (!LF_ISSET(DB_NOORDERCHK))
+ if (m->h_charkey != hfunc(dbp, CHARKEY, sizeof(CHARKEY))) {
+ EPRINT((dbp->dbenv,
+"Page %lu: database has different custom hash function; reverify with DB_NOORDERCHK set",
+ (u_long)pgno));
+ /*
+ * Return immediately; this is probably a sign
+ * of user error rather than database corruption, so
+ * we want to avoid extraneous errors.
+ */
+ isbad = 1;
+ goto err;
+ }
+
+ /* max_bucket must be less than the last pgno. */
+ if (m->max_bucket > vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: Impossible max_bucket %lu on meta page",
+ (u_long)pgno, (u_long)m->max_bucket));
+ /*
+ * Most other fields depend somehow on max_bucket, so
+ * we just return--there will be lots of extraneous
+ * errors.
+ */
+ isbad = 1;
+ goto err;
+ }
+
+ /*
+ * max_bucket, high_mask and low_mask: high_mask must be one
+ * less than the next power of two above max_bucket, and
+ * low_mask must be one less than the power of two below it.
+ *
+ *
+ */
+ pwr = (m->max_bucket == 0) ? 1 : 1 << __db_log2(m->max_bucket + 1);
+ if (m->high_mask != pwr - 1) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: incorrect high_mask %lu, should be %lu",
+ (u_long)pgno, (u_long)m->high_mask, (u_long)pwr - 1));
+ isbad = 1;
+ }
+ pwr >>= 1;
+ if (m->low_mask != pwr - 1) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: incorrect low_mask %lu, should be %lu",
+ (u_long)pgno, (u_long)m->low_mask, (u_long)pwr - 1));
+ isbad = 1;
+ }
+
+ /* ffactor: no check possible. */
+ pip->h_ffactor = m->ffactor;
+
+ /*
+ * nelem: just make sure it's not astronomical for now. This is the
+ * same check that hash_upgrade does, since there was a bug in 2.X
+ * which could make nelem go "negative".
+ */
+ if (m->nelem > 0x80000000) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: suspiciously high nelem of %lu",
+ (u_long)pgno, (u_long)m->nelem));
+ isbad = 1;
+ pip->h_nelem = 0;
+ } else
+ pip->h_nelem = m->nelem;
+
+ /* flags */
+ if (F_ISSET(&m->dbmeta, DB_HASH_DUP))
+ F_SET(pip, VRFY_HAS_DUPS);
+ if (F_ISSET(&m->dbmeta, DB_HASH_DUPSORT))
+ F_SET(pip, VRFY_HAS_DUPSORT);
+ /* XXX: Why is the DB_HASH_SUBDB flag necessary? */
+
+ /* spares array */
+ for (i = 0; m->spares[i] != 0 && i < NCACHED; i++) {
+ /*
+ * We set mbucket to the maximum bucket that would use a given
+ * spares entry; we want to ensure that it's always less
+ * than last_pgno.
+ */
+ mbucket = (1 << i) - 1;
+ if (BS_TO_PAGE(mbucket, m->spares) > vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: spares array entry %d is invalid",
+ (u_long)pgno, i));
+ isbad = 1;
+ }
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_vrfy --
+ * Verify hash page.
+ *
+ * PUBLIC: int __ham_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ham_vrfy(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ u_int32_t ent, himark, inpend;
+ db_indx_t *inp;
+ int isbad, ret, t_ret;
+
+ isbad = 0;
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ /* Sanity check our flags and page type. */
+ if ((ret = __db_fchk(dbp->dbenv, "__ham_vrfy",
+ flags, DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE)) != 0)
+ goto err;
+
+ if (TYPE(h) != P_HASH) {
+ TYPE_ERR_PRINT(dbp->dbenv, "__ham_vrfy", pgno, TYPE(h));
+ DB_ASSERT(0);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* Verify and save off fields common to all PAGEs. */
+ if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * Verify inp[]. Each offset from 0 to NUM_ENT(h) must be lower
+ * than the previous one, higher than the current end of the inp array,
+ * and lower than the page size.
+ *
+ * In any case, we return immediately if things are bad, as it would
+ * be unsafe to proceed.
+ */
+ inp = P_INP(dbp, h);
+ for (ent = 0, himark = dbp->pgsize,
+ inpend = (u_int32_t)((u_int8_t *)inp - (u_int8_t *)h);
+ ent < NUM_ENT(h); ent++)
+ if (inp[ent] >= himark) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu is out of order or nonsensical",
+ (u_long)pgno, (u_long)ent));
+ isbad = 1;
+ goto err;
+ } else if (inpend >= himark) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: entries array collided with data",
+ (u_long)pgno));
+ isbad = 1;
+ goto err;
+
+ } else {
+ himark = inp[ent];
+ inpend += sizeof(db_indx_t);
+ if ((ret = __ham_vrfy_item(
+ dbp, vdp, pgno, h, ent, flags)) != 0)
+ goto err;
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_vrfy_item --
+ * Given a hash page and an offset, sanity-check the item itself,
+ * and save off any overflow items or off-page dup children as necessary.
+ */
+static int
+__ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ PAGE *h;
+ u_int32_t i, flags;
+{
+ HOFFPAGE hop;
+ HOFFDUP hod;
+ VRFY_CHILDINFO child;
+ VRFY_PAGEINFO *pip;
+ db_indx_t offset, len, dlen, elen;
+ int ret, t_ret;
+ u_int8_t *databuf;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+
+ switch (HPAGE_TYPE(dbp, h, i)) {
+ case H_KEYDATA:
+ /* Nothing to do here--everything but the type field is data */
+ break;
+ case H_DUPLICATE:
+ /* Are we a datum or a key? Better be the former. */
+ if (i % 2 == 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash key stored as duplicate item %lu",
+ (u_long)pip->pgno, (u_long)i));
+ }
+ /*
+ * Dups are encoded as a series within a single HKEYDATA,
+ * in which each dup is surrounded by a copy of its length
+ * on either side (so that the series can be walked in either
+		 * direction).  We loop through this series and make sure
+ * each dup is reasonable.
+ *
+ * Note that at this point, we've verified item i-1, so
+ * it's safe to use LEN_HKEYDATA (which looks at inp[i-1]).
+ */
+ len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
+ databuf = HKEYDATA_DATA(P_ENTRY(dbp, h, i));
+ for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
+ memcpy(&dlen, databuf + offset, sizeof(db_indx_t));
+
+ /* Make sure the length is plausible. */
+ if (offset + DUP_SIZE(dlen) > len) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate item %lu has bad length",
+ (u_long)pip->pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+
+ /*
+ * Make sure the second copy of the length is the
+ * same as the first.
+ */
+ memcpy(&elen,
+ databuf + offset + dlen + sizeof(db_indx_t),
+ sizeof(db_indx_t));
+ if (elen != dlen) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicate item %lu has two different lengths",
+ (u_long)pip->pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ }
+ F_SET(pip, VRFY_HAS_DUPS);
+ if (!LF_ISSET(DB_NOORDERCHK) &&
+ __ham_dups_unsorted(dbp, databuf, len))
+ F_SET(pip, VRFY_DUPS_UNSORTED);
+ break;
+ case H_OFFPAGE:
+ /* Offpage item. Make sure pgno is sane, save off. */
+ memcpy(&hop, P_ENTRY(dbp, h, i), HOFFPAGE_SIZE);
+ if (!IS_VALID_PGNO(hop.pgno) || hop.pgno == pip->pgno ||
+ hop.pgno == PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: offpage item %lu has bad pgno %lu",
+ (u_long)pip->pgno, (u_long)i, (u_long)hop.pgno));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ child.pgno = hop.pgno;
+ child.type = V_OVERFLOW;
+ child.tlen = hop.tlen; /* This will get checked later. */
+ if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
+ goto err;
+ break;
+ case H_OFFDUP:
+ /* Offpage duplicate item. Same drill. */
+ memcpy(&hod, P_ENTRY(dbp, h, i), HOFFDUP_SIZE);
+ if (!IS_VALID_PGNO(hod.pgno) || hod.pgno == pip->pgno ||
+ hod.pgno == PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: offpage item %lu has bad page number",
+ (u_long)pip->pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ goto err;
+ }
+ memset(&child, 0, sizeof(VRFY_CHILDINFO));
+ child.pgno = hod.pgno;
+ child.type = V_DUPLICATE;
+ if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
+ goto err;
+ F_SET(pip, VRFY_HAS_DUPS);
+ break;
+ default:
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %i has bad type",
+ (u_long)pip->pgno, (u_long)i));
+ ret = DB_VERIFY_BAD;
+ break;
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __ham_vrfy_structure --
+ * Verify the structure of a hash database.
+ *
+ * PUBLIC: int __ham_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ * PUBLIC: u_int32_t));
+ */
+int
+__ham_vrfy_structure(dbp, vdp, meta_pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t meta_pgno;
+ u_int32_t flags;
+{
+ DB *pgset;
+ DB_MPOOLFILE *mpf;
+ HMETA *m;
+ PAGE *h;
+ VRFY_PAGEINFO *pip;
+ int isbad, p, ret, t_ret;
+ db_pgno_t pgno;
+ u_int32_t bucket, spares_entry;
+
+ mpf = dbp->mpf;
+ pgset = vdp->pgset;
+ h = NULL;
+ ret = isbad = 0;
+
+ if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, &p)) != 0)
+ return (ret);
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: Hash meta page referenced twice",
+ (u_long)meta_pgno));
+ return (DB_VERIFY_BAD);
+ }
+ if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0)
+ return (ret);
+
+ /* Get the meta page; we'll need it frequently. */
+ if ((ret = mpf->get(mpf, &meta_pgno, 0, &m)) != 0)
+ return (ret);
+
+ /* Loop through bucket by bucket. */
+ for (bucket = 0; bucket <= m->max_bucket; bucket++)
+ if ((ret =
+ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ /*
+ * There may be unused hash pages corresponding to buckets
+ * that have been allocated but not yet used. These may be
+ * part of the current doubling above max_bucket, or they may
+ * correspond to buckets that were used in a transaction
+ * that then aborted.
+ *
+ * Loop through them, as far as the spares array defines them,
+ * and make sure they're all empty.
+ *
+ * Note that this should be safe, since we've already verified
+ * that the spares array is sane.
+ */
+ for (bucket = m->max_bucket + 1; spares_entry = __db_log2(bucket + 1),
+ spares_entry < NCACHED && m->spares[spares_entry] != 0; bucket++) {
+ pgno = BS_TO_PAGE(bucket, m->spares);
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ goto err;
+
+ /* It's okay if these pages are totally zeroed; unmark it. */
+ F_CLR(pip, VRFY_IS_ALLZEROES);
+
+ /* It's also OK if this page is simply invalid. */
+ if (pip->type == P_INVALID) {
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv,
+ vdp, pip)) != 0)
+ goto err;
+ continue;
+ }
+
+ if (pip->type != P_HASH) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash bucket %lu maps to non-hash page",
+ (u_long)pgno, (u_long)bucket));
+ isbad = 1;
+ } else if (pip->entries != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: non-empty page in unused hash bucket %lu",
+ (u_long)pgno, (u_long)bucket));
+ isbad = 1;
+ } else {
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ goto err;
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: above max_bucket referenced",
+ (u_long)pgno));
+ isbad = 1;
+ } else {
+ if ((ret =
+ __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ goto err;
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv,
+ vdp, pip)) != 0)
+ goto err;
+ continue;
+ }
+ }
+
+ /* If we got here, it's an error. */
+ (void)__db_vrfy_putpageinfo(dbp->dbenv, vdp, pip);
+ goto err;
+ }
+
+err: if ((t_ret = mpf->put(mpf, m, 0)) != 0)
+ return (t_ret);
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0)
+ return (t_ret);
+ return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD: ret);
+}
+
+/*
+ * __ham_vrfy_bucket --
+ * Verify a given bucket.
+ */
+static int
+__ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ HMETA *m;
+ u_int32_t bucket, flags;
+{
+ HASH *hashp;
+ VRFY_CHILDINFO *child;
+ VRFY_PAGEINFO *mip, *pip;
+ int ret, t_ret, isbad, p;
+ db_pgno_t pgno, next_pgno;
+ DBC *cc;
+ u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
+
+ isbad = 0;
+ pip = NULL;
+ cc = NULL;
+
+ hashp = dbp->h_internal;
+ if (hashp != NULL && hashp->h_hash != NULL)
+ hfunc = hashp->h_hash;
+ else
+ hfunc = __ham_func5;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, PGNO(m), &mip)) != 0)
+ return (ret);
+
+ /* Calculate the first pgno for this bucket. */
+ pgno = BS_TO_PAGE(bucket, m->spares);
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ goto err;
+
+ /* Make sure we got a plausible page number. */
+ if (pgno > vdp->last_pgno || pip->type != P_HASH) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: impossible first page in bucket %lu",
+ (u_long)pgno, (u_long)bucket));
+ /* Unsafe to continue. */
+ isbad = 1;
+ goto err;
+ }
+
+ if (pip->prev_pgno != PGNO_INVALID) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: first page in hash bucket %lu has a prev_pgno",
+ (u_long)pgno, (u_long)bucket));
+ isbad = 1;
+ }
+
+ /*
+ * Set flags for dups and sorted dups.
+ */
+ flags |= F_ISSET(mip, VRFY_HAS_DUPS) ? ST_DUPOK : 0;
+ flags |= F_ISSET(mip, VRFY_HAS_DUPSORT) ? ST_DUPSORT : 0;
+
+ /* Loop until we find a fatal bug, or until we run out of pages. */
+ for (;;) {
+ /* Provide feedback on our progress to the application. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ if ((ret = __db_vrfy_pgset_get(vdp->pgset, pgno, &p)) != 0)
+ goto err;
+ if (p != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash page referenced twice",
+ (u_long)pgno));
+ isbad = 1;
+ /* Unsafe to continue. */
+ goto err;
+ } else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, pgno)) != 0)
+ goto err;
+
+ /*
+ * Hash pages that nothing has ever hashed to may never
+ * have actually come into existence, and may appear to be
+ * entirely zeroed. This is acceptable, and since there's
+ * no real way for us to know whether this has actually
+ * occurred, we clear the "wholly zeroed" flag on every
+ * hash page. A wholly zeroed page, by nature, will appear
+ * to have no flags set and zero entries, so should
+ * otherwise verify correctly.
+ */
+ F_CLR(pip, VRFY_IS_ALLZEROES);
+
+ /* If we have dups, our meta page had better know about it. */
+ if (F_ISSET(pip, VRFY_HAS_DUPS) &&
+ !F_ISSET(mip, VRFY_HAS_DUPS)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: duplicates present in non-duplicate database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ /*
+ * If the database has sorted dups, this page had better
+ * not have unsorted ones.
+ */
+ if (F_ISSET(mip, VRFY_HAS_DUPSORT) &&
+ F_ISSET(pip, VRFY_DUPS_UNSORTED)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: unsorted dups in sorted-dup database",
+ (u_long)pgno));
+ isbad = 1;
+ }
+
+ /* Walk overflow chains and offpage dup trees. */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ goto err;
+ for (ret = __db_vrfy_ccset(cc, pip->pgno, &child); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &child))
+ if (child->type == V_OVERFLOW) {
+ if ((ret = __db_vrfy_ovfl_structure(dbp, vdp,
+ child->pgno, child->tlen, flags)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ } else if (child->type == V_DUPLICATE) {
+ if ((ret = __db_vrfy_duptype(dbp,
+ vdp, child->pgno, flags)) != 0) {
+ isbad = 1;
+ continue;
+ }
+ if ((ret = __bam_vrfy_subtree(dbp, vdp,
+ child->pgno, NULL, NULL,
+ flags | ST_RECNUM | ST_DUPSET | ST_TOPLEVEL,
+ NULL, NULL, NULL)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+ }
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ goto err;
+ cc = NULL;
+
+ /* If it's safe to check that things hash properly, do so. */
+ if (isbad == 0 && !LF_ISSET(DB_NOORDERCHK) &&
+ (ret = __ham_vrfy_hashing(dbp, pip->entries,
+ m, bucket, pgno, flags, hfunc)) != 0) {
+ if (ret == DB_VERIFY_BAD)
+ isbad = 1;
+ else
+ goto err;
+ }
+
+ next_pgno = pip->next_pgno;
+ ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip);
+
+ pip = NULL;
+ if (ret != 0)
+ goto err;
+
+ if (next_pgno == PGNO_INVALID)
+ break; /* End of the bucket. */
+
+ /* We already checked this, but just in case... */
+ if (!IS_VALID_PGNO(next_pgno)) {
+ DB_ASSERT(0);
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash page has bad next_pgno",
+ (u_long)pgno));
+ isbad = 1;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0)
+ goto err;
+
+ if (pip->prev_pgno != pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash page has bad prev_pgno",
+ (u_long)next_pgno));
+ isbad = 1;
+ }
+ pgno = next_pgno;
+ }
+
+err: if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
+ ret = t_ret;
+ if (mip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, mip)) != 0) && ret == 0)
+ ret = t_ret;
+ if (pip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0) && ret == 0)
+ ret = t_ret;
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_vrfy_hashing --
+ * Verify that all items on a given hash page hash correctly.
+ *
+ * PUBLIC: int __ham_vrfy_hashing __P((DB *,
+ * PUBLIC: u_int32_t, HMETA *, u_int32_t, db_pgno_t, u_int32_t,
+ * PUBLIC: u_int32_t (*) __P((DB *, const void *, u_int32_t))));
+ */
+int
+__ham_vrfy_hashing(dbp, nentries, m, thisbucket, pgno, flags, hfunc)
+ DB *dbp;
+ u_int32_t nentries;
+ HMETA *m;
+ u_int32_t thisbucket;
+ db_pgno_t pgno;
+ u_int32_t flags;
+ u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
+{
+ DBT dbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_indx_t i;
+ int ret, t_ret, isbad;
+ u_int32_t hval, bucket;
+
+ mpf = dbp->mpf;
+ ret = isbad = 0;
+
+ memset(&dbt, 0, sizeof(DBT));
+ F_SET(&dbt, DB_DBT_REALLOC);
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+
+ for (i = 0; i < nentries; i += 2) {
+ /*
+ * We've already verified the page integrity and that of any
+ * overflow chains linked off it; it is therefore safe to use
+ * __db_ret. It's also not all that much slower, since we have
+ * to copy every hash item to deal with alignment anyway; we
+ * can tweak this a bit if this proves to be a bottleneck,
+ * but for now, take the easy route.
+ */
+ if ((ret = __db_ret(dbp, h, i, &dbt, NULL, NULL)) != 0)
+ goto err;
+ hval = hfunc(dbp, dbt.data, dbt.size);
+
+ bucket = hval & m->high_mask;
+ if (bucket > m->max_bucket)
+ bucket = bucket & m->low_mask;
+
+ if (bucket != thisbucket) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: item %lu hashes incorrectly",
+ (u_long)pgno, (u_long)i));
+ isbad = 1;
+ }
+ }
+
+err: if (dbt.data != NULL)
+ __os_ufree(dbp->dbenv, dbt.data);
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0)
+ return (t_ret);
+
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __ham_salvage --
+ * Safely dump out anything that looks like a key on an alleged
+ * hash page.
+ *
+ * PUBLIC: int __ham_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
+ * PUBLIC: void *, int (*)(void *, const void *), u_int32_t));
+ */
+int
+__ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ db_pgno_t pgno;
+ PAGE *h;
+ void *handle;
+ int (*callback) __P((void *, const void *));
+ u_int32_t flags;
+{
+ DBT dbt, unkdbt;
+ db_pgno_t dpgno;
+ int ret, err_ret, t_ret;
+ u_int32_t himark, tlen;
+ u_int8_t *hk;
+ void *buf;
+ u_int32_t dlen, len, i;
+
+ memset(&dbt, 0, sizeof(DBT));
+ dbt.flags = DB_DBT_REALLOC;
+
+ memset(&unkdbt, 0, sizeof(DBT));
+ unkdbt.size = (u_int32_t)strlen("UNKNOWN") + 1;
+ unkdbt.data = "UNKNOWN";
+
+ err_ret = 0;
+
+ /*
+ * Allocate a buffer for overflow items. Start at one page;
+ * __db_safe_goff will realloc as needed.
+ */
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &buf)) != 0)
+ return (ret);
+
+ himark = dbp->pgsize;
+ for (i = 0;; i++) {
+ /* If we're not aggressive, break when we hit NUM_ENT(h). */
+ if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h))
+ break;
+
+ /* Verify the current item. */
+ ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 0, flags, &himark, NULL);
+ /* If this returned a fatality, it's time to break. */
+ if (ret == DB_VERIFY_FATAL)
+ break;
+
+ if (ret == 0) {
+ hk = P_ENTRY(dbp, h, i);
+ len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
+ if ((u_int32_t)(hk + len - (u_int8_t *)h) >
+ dbp->pgsize) {
+ /*
+ * Item is unsafely large; either continue
+ * or set it to the whole page, depending on
+ * aggressiveness.
+ */
+ if (!LF_ISSET(DB_AGGRESSIVE))
+ continue;
+ len = dbp->pgsize -
+ (u_int32_t)(hk - (u_int8_t *)h);
+ err_ret = DB_VERIFY_BAD;
+ }
+ switch (HPAGE_PTYPE(hk)) {
+ default:
+ if (!LF_ISSET(DB_AGGRESSIVE))
+ break;
+ err_ret = DB_VERIFY_BAD;
+ /* FALLTHROUGH */
+ case H_KEYDATA:
+keydata: memcpy(buf, HKEYDATA_DATA(hk), len);
+ dbt.size = len;
+ dbt.data = buf;
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ case H_OFFPAGE:
+ if (len < HOFFPAGE_SIZE) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+ memcpy(&dpgno,
+ HOFFPAGE_PGNO(hk), sizeof(dpgno));
+ if ((ret = __db_safe_goff(dbp, vdp,
+ dpgno, &dbt, &buf, flags)) != 0) {
+ err_ret = ret;
+ (void)__db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, vdp);
+ break;
+ }
+ if ((ret = __db_prdbt(&dbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ break;
+ case H_OFFDUP:
+ if (len < HOFFPAGE_SIZE) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+ memcpy(&dpgno,
+ HOFFPAGE_PGNO(hk), sizeof(dpgno));
+ /* UNKNOWN iff pgno is bad or we're a key. */
+ if (!IS_VALID_PGNO(dpgno) || (i % 2 == 0)) {
+ if ((ret = __db_prdbt(&unkdbt, 0, " ",
+ handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ } else if ((ret = __db_salvage_duptree(dbp,
+ vdp, dpgno, &dbt, handle, callback,
+ flags | SA_SKIPFIRSTKEY)) != 0)
+ err_ret = ret;
+ break;
+ case H_DUPLICATE:
+ /*
+ * We're a key; printing dups will seriously
+ * foul the output. If we're being aggressive,
+ * pretend this is a key and let the app.
+ * programmer sort out the mess.
+ */
+ if (i % 2 == 0) {
+ err_ret = ret;
+ if (LF_ISSET(DB_AGGRESSIVE))
+ goto keydata;
+ break;
+ }
+
+ /* Too small to have any data. */
+ if (len <
+ HKEYDATA_SIZE(2 * sizeof(db_indx_t))) {
+ err_ret = DB_VERIFY_BAD;
+ continue;
+ }
+
+ /* Loop until we hit the total length. */
+ for (tlen = 0; tlen + sizeof(db_indx_t) < len;
+ tlen += dlen) {
+ tlen += sizeof(db_indx_t);
+ memcpy(&dlen, hk, sizeof(db_indx_t));
+ /*
+ * If dlen is too long, print all the
+ * rest of the dup set in a chunk.
+ */
+ if (dlen + tlen > len)
+ dlen = len - tlen;
+ memcpy(buf, hk + tlen, dlen);
+ dbt.size = dlen;
+ dbt.data = buf;
+ if ((ret = __db_prdbt(&dbt, 0, " ",
+ handle, callback, 0, vdp)) != 0)
+ err_ret = ret;
+ tlen += sizeof(db_indx_t);
+ }
+ break;
+ }
+ }
+ }
+
+ __os_free(dbp->dbenv, buf);
+ if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
+ return (t_ret);
+ return ((ret == 0 && err_ret != 0) ? err_ret : ret);
+}
+
+/*
+ * __ham_meta2pgset --
+ * Return the set of hash pages corresponding to the given
+ * known-good meta page.
+ *
+ * PUBLIC: int __ham_meta2pgset __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
+ * PUBLIC: DB *));
+ */
+int __ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ HMETA *hmeta;
+ u_int32_t flags;
+ DB *pgset;
+{
+ DB_MPOOLFILE *mpf;
+ PAGE *h;
+ db_pgno_t pgno;
+ u_int32_t bucket, totpgs;
+ int ret, val;
+
+ /*
+ * We don't really need flags, but leave them for consistency with
+ * __bam_meta2pgset.
+ */
+ COMPQUIET(flags, 0);
+
+ DB_ASSERT(pgset != NULL);
+
+ mpf = dbp->mpf;
+ totpgs = 0;
+
+ /*
+ * Loop through all the buckets, pushing onto pgset the corresponding
+ * page(s) for each one.
+ */
+ for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
+ pgno = BS_TO_PAGE(bucket, hmeta->spares);
+
+ /*
+ * We know the initial pgno is safe because the spares array has
+ * been verified.
+ *
+ * Safely walk the list of pages in this bucket.
+ */
+ for (;;) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ return (ret);
+ if (TYPE(h) == P_HASH) {
+
+ /*
+ * Make sure we don't go past the end of
+ * pgset.
+ */
+ if (++totpgs > vdp->last_pgno) {
+ (void)mpf->put(mpf, h, 0);
+ return (DB_VERIFY_BAD);
+ }
+ if ((ret =
+ __db_vrfy_pgset_inc(pgset, pgno)) != 0) {
+ (void)mpf->put(mpf, h, 0);
+ return (ret);
+ }
+
+ pgno = NEXT_PGNO(h);
+ } else
+ pgno = PGNO_INVALID;
+
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ return (ret);
+
+ /* If the new pgno is wonky, go onto the next bucket. */
+ if (!IS_VALID_PGNO(pgno) ||
+ pgno == PGNO_INVALID)
+ break;
+
+ /*
+ * If we've touched this page before, we have a cycle;
+ * go on to the next bucket.
+ */
+ if ((ret = __db_vrfy_pgset_get(pgset, pgno, &val)) != 0)
+ return (ret);
+ if (val != 0)
+ break;
+ }
+ }
+ return (0);
+}
+
+/*
+ * __ham_dups_unsorted --
+ * Takes a known-safe hash duplicate set and its total length.
+ * Returns 1 if there are out-of-order duplicates in this set,
+ * 0 if there are not.
+ */
+static int
+__ham_dups_unsorted(dbp, buf, len)
+ DB *dbp;
+ u_int8_t *buf;
+ u_int32_t len;
+{
+ DBT a, b;
+ db_indx_t offset, dlen;
+ int (*func) __P((DB *, const DBT *, const DBT *));
+
+ memset(&a, 0, sizeof(DBT));
+ memset(&b, 0, sizeof(DBT));
+
+ func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
+
+ /*
+ * Loop through the dup set until we hit the end or we find
+ * a pair of dups that's out of order. b is always the current
+ * dup, a the one before it.
+ */
+ for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
+ memcpy(&dlen, buf + offset, sizeof(db_indx_t));
+ b.data = buf + offset + sizeof(db_indx_t);
+ b.size = dlen;
+
+ if (a.data != NULL && func(dbp, &a, &b) > 0)
+ return (1);
+
+ a.data = b.data;
+ a.size = b.size;
+ }
+
+ return (0);
+}
diff --git a/storage/bdb/hmac/hmac.c b/storage/bdb/hmac/hmac.c
new file mode 100644
index 00000000000..d39a154ec63
--- /dev/null
+++ b/storage/bdb/hmac/hmac.c
@@ -0,0 +1,207 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * Some parts of this code originally written by Adam Stubblefield,
+ * astubble@rice.edu.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hmac.c,v 1.25 2002/09/10 02:40:40 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h" /* for hash.h only */
+#include "dbinc/hash.h"
+#include "dbinc/hmac.h"
+
+#define HMAC_OUTPUT_SIZE 20
+#define HMAC_BLOCK_SIZE 64
+
+static void __db_hmac __P((u_int8_t *, u_int8_t *, size_t, u_int8_t *));
+
+/*
+ * !!!
+ * All of these functions use a ctx structure on the stack. The __db_SHA1Init
+ * call does not initialize the 64-byte buffer portion of it. The
+ * underlying SHA1 functions will properly pad the buffer if the data length
+ * is less than 64-bytes, so there isn't a chance of reading uninitialized
+ * memory. Although it would be cleaner to do a memset(ctx.buffer, 0, 64)
+ * we do not want to incur that penalty if we don't have to for performance.
+ */
+
+/*
+ * __db_hmac --
+ * Do a hashed MAC.
+ */
+static void
+__db_hmac(k, data, data_len, mac)
+ u_int8_t *k, *data, *mac;
+ size_t data_len;
+{
+ SHA1_CTX ctx;
+ u_int8_t key[HMAC_BLOCK_SIZE];
+ u_int8_t ipad[HMAC_BLOCK_SIZE];
+ u_int8_t opad[HMAC_BLOCK_SIZE];
+ u_int8_t tmp[HMAC_OUTPUT_SIZE];
+ int i;
+
+ memset(key, 0x00, HMAC_BLOCK_SIZE);
+ memset(ipad, 0x36, HMAC_BLOCK_SIZE);
+ memset(opad, 0x5C, HMAC_BLOCK_SIZE);
+
+ memcpy(key, k, HMAC_OUTPUT_SIZE);
+
+ for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
+ ipad[i] ^= key[i];
+ opad[i] ^= key[i];
+ }
+
+ __db_SHA1Init(&ctx);
+ __db_SHA1Update(&ctx, ipad, HMAC_BLOCK_SIZE);
+ __db_SHA1Update(&ctx, data, data_len);
+ __db_SHA1Final(tmp, &ctx);
+ __db_SHA1Init(&ctx);
+ __db_SHA1Update(&ctx, opad, HMAC_BLOCK_SIZE);
+ __db_SHA1Update(&ctx, tmp, HMAC_OUTPUT_SIZE);
+ __db_SHA1Final(mac, &ctx);
+ return;
+}
+
+/*
+ * __db_chksum --
+ * Create a MAC/SHA1 checksum.
+ *
+ * PUBLIC: void __db_chksum __P((u_int8_t *, size_t, u_int8_t *, u_int8_t *));
+ */
+void
+__db_chksum(data, data_len, mac_key, store)
+ u_int8_t *data;
+ size_t data_len;
+ u_int8_t *mac_key;
+ u_int8_t *store;
+{
+ int sumlen;
+ u_int32_t hash4;
+ u_int8_t tmp[DB_MAC_KEY];
+
+ /*
+ * Since the checksum might be on a page of data we are checksumming
+ * we might be overwriting after checksumming, we zero-out the
+ * checksum value so that we can have a known value there when
+ * we verify the checksum.
+ */
+ if (mac_key == NULL)
+ sumlen = sizeof(u_int32_t);
+ else
+ sumlen = DB_MAC_KEY;
+ memset(store, 0, sumlen);
+ if (mac_key == NULL) {
+ /* Just a hash, no MAC */
+ hash4 = __ham_func4(NULL, data, (u_int32_t)data_len);
+ memcpy(store, &hash4, sumlen);
+ } else {
+ memset(tmp, 0, DB_MAC_KEY);
+ __db_hmac(mac_key, data, data_len, tmp);
+ memcpy(store, tmp, sumlen);
+ }
+ return;
+}
+/*
+ * __db_derive_mac --
+ * Create a MAC/SHA1 key.
+ *
+ * PUBLIC: void __db_derive_mac __P((u_int8_t *, size_t, u_int8_t *));
+ */
+void
+__db_derive_mac(passwd, plen, mac_key)
+ u_int8_t *passwd;
+ size_t plen;
+ u_int8_t *mac_key;
+{
+ SHA1_CTX ctx;
+
+ /* Compute the MAC key. mac_key must be 20 bytes. */
+ __db_SHA1Init(&ctx);
+ __db_SHA1Update(&ctx, passwd, plen);
+ __db_SHA1Update(&ctx, (u_int8_t *)DB_MAC_MAGIC, strlen(DB_MAC_MAGIC));
+ __db_SHA1Update(&ctx, passwd, plen);
+ __db_SHA1Final(mac_key, &ctx);
+
+ return;
+}
+
+/*
+ * __db_check_chksum --
+ * Verify a checksum.
+ *
+ * Return 0 on success, >0 (errno) on error, -1 on checksum mismatch.
+ *
+ * PUBLIC: int __db_check_chksum __P((DB_ENV *,
+ * PUBLIC: DB_CIPHER *, u_int8_t *, void *, size_t, int));
+ */
+int
+__db_check_chksum(dbenv, db_cipher, chksum, data, data_len, is_hmac)
+ DB_ENV *dbenv;
+ DB_CIPHER *db_cipher;
+ u_int8_t *chksum;
+ void *data;
+ size_t data_len;
+ int is_hmac;
+{
+ int ret;
+ size_t sum_len;
+ u_int32_t hash4;
+ u_int8_t *mac_key, old[DB_MAC_KEY], new[DB_MAC_KEY];
+
+ /*
+ * If we are just doing checksumming and not encryption, then checksum
+ * is 4 bytes. Otherwise, it is DB_MAC_KEY size. Check for illegal
+ * combinations of crypto/non-crypto checksums.
+ */
+ if (is_hmac == 0) {
+ if (db_cipher != NULL) {
+ __db_err(dbenv,
+ "Unencrypted checksum with a supplied encryption key");
+ return (EINVAL);
+ }
+ sum_len = sizeof(u_int32_t);
+ mac_key = NULL;
+ } else {
+ if (db_cipher == NULL) {
+ __db_err(dbenv,
+ "Encrypted checksum: no encryption key specified");
+ return (EINVAL);
+ }
+ sum_len = DB_MAC_KEY;
+ mac_key = db_cipher->mac_key;
+ }
+
+ /*
+ * !!!
+ * Since the checksum might be on the page, we need to have known data
+ * there so that we can generate the same original checksum. We zero
+ * it out, just like we do in __db_chksum above.
+ */
+ memcpy(old, chksum, sum_len);
+ memset(chksum, 0, sum_len);
+ if (mac_key == NULL) {
+ /* Just a hash, no MAC */
+ hash4 = __ham_func4(NULL, data, (u_int32_t)data_len);
+ ret = memcmp((u_int32_t *)old, &hash4, sum_len) ? -1 : 0;
+ } else {
+ __db_hmac(mac_key, data, data_len, new);
+ ret = memcmp(old, new, sum_len) ? -1 : 0;
+ }
+
+ return (ret);
+}
diff --git a/storage/bdb/hmac/sha1.c b/storage/bdb/hmac/sha1.c
new file mode 100644
index 00000000000..2f2c806a21f
--- /dev/null
+++ b/storage/bdb/hmac/sha1.c
@@ -0,0 +1,294 @@
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: sha1.c,v 1.13 2002/04/09 13:40:36 sue Exp $";
+#endif /* not lint */
+/*
+SHA-1 in C
+By Steve Reid <sreid@sea-to-sky.net>
+100% Public Domain
+
+-----------------
+Modified 7/98
+By James H. Brown <jbrown@burgoyne.com>
+Still 100% Public Domain
+
+Corrected a problem which generated improper hash values on 16 bit machines
+Routine SHA1Update changed from
+ void SHA1Update(SHA1_CTX* context, unsigned char* data, unsigned int
+len)
+to
+ void SHA1Update(SHA1_CTX* context, unsigned char* data, unsigned
+long len)
+
+The 'len' parameter was declared an int which works fine on 32 bit machines.
+However, on 16 bit machines an int is too small for the shifts being done
+against
+it. This caused the hash function to generate incorrect values if len was
+greater than 8191 (8K - 1) due to the 'len << 3' on line 3 of SHA1Update().
+
+Since the file IO in main() reads 16K at a time, any file 8K or larger would
+be guaranteed to generate the wrong hash (e.g. Test Vector #3, a million
+"a"s).
+
+I also changed the declaration of variables i & j in SHA1Update to
+unsigned long from unsigned int for the same reason.
+
+These changes should make no difference to any 32 bit implementations since
+an
+int and a long are the same size in those environments.
+
+--
+I also corrected a few compiler warnings generated by Borland C.
+1. Added #include <process.h> for exit() prototype
+2. Removed unused variable 'j' in SHA1Final
+3. Changed exit(0) to return(0) at end of main.
+
+ALL changes I made can be located by searching for comments containing 'JHB'
+-----------------
+Modified 8/98
+By Steve Reid <sreid@sea-to-sky.net>
+Still 100% public domain
+
+1- Removed #include <process.h> and used return() instead of exit()
+2- Fixed overwriting of finalcount in SHA1Final() (discovered by Chris Hall)
+3- Changed email address from steve@edmweb.com to sreid@sea-to-sky.net
+
+-----------------
+Modified 4/01
+By Saul Kravitz <Saul.Kravitz@celera.com>
+Still 100% PD
+Modified to run on Compaq Alpha hardware.
+
+
+*/
+
+/*
+Test Vectors (from FIPS PUB 180-1)
+"abc"
+ A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
+"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
+ 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
+A million repetitions of "a"
+ 34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
+*/
+
+#define SHA1HANDSOFF
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/hmac.h"
+
+/* #include <process.h> */ /* prototype for exit() - JHB */
+/* Using return() instead of exit() - SWR */
+
+#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
+
+/* blk0() and blk() perform the initial expand. */
+/* I got the idea of expanding during the round function from SSLeay */
+#define blk0(i) is_bigendian ? block->l[i] : \
+ (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
+ |(rol(block->l[i],8)&0x00FF00FF))
+#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
+ ^block->l[(i+2)&15]^block->l[i&15],1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
+#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
+#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
+
+
+#ifdef VERBOSE /* SAK */
+/* Debug-only: dump the SHA1 bit count and five state words to stdout. */
+static void __db_SHAPrintContext __P((SHA1_CTX *, char *));
+static void
+__db_SHAPrintContext(context, msg)
+ SHA1_CTX *context;
+ char *msg;
+{
+ printf("%s (%d,%d) %x %x %x %x %x\n",
+ msg,
+ context->count[0], context->count[1],
+ context->state[0],
+ context->state[1],
+ context->state[2],
+ context->state[3],
+ context->state[4]);
+}
+#endif
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+
+/*
+ * __db_SHA1Transform --
+ *	Hash one 512-bit block into the five 32-bit state words.
+ *
+ * "buffer" is 64 bytes of message; "state" is updated in place.
+ * With SHA1HANDSOFF defined the input is copied to a local workspace,
+ * since blk0()/blk() byte-swap the block in place on little-endian
+ * machines.
+ *
+ * PUBLIC: void __db_SHA1Transform __P((u_int32_t *, unsigned char *));
+ */
+void
+__db_SHA1Transform(state, buffer)
+ u_int32_t *state;
+ unsigned char *buffer;
+{
+u_int32_t a, b, c, d, e;
+typedef union {
+ unsigned char c[64];
+ u_int32_t l[16];
+} CHAR64LONG16;
+CHAR64LONG16* block;
+/* Cached endianness: -1 until first call, then 0 or 1. */
+static int is_bigendian = -1;
+#ifdef SHA1HANDSOFF
+ unsigned char workspace[64];
+
+ block = (CHAR64LONG16*)workspace;
+ memcpy(block, buffer, 64);
+#else
+ block = (CHAR64LONG16*)buffer;
+#endif
+ if (is_bigendian == -1)
+ is_bigendian = __db_isbigendian();
+ /* Copy context->state[] to working vars */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
+ R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
+ R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
+ R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
+ R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
+ R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
+ R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
+ R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
+ R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
+ R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
+ R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
+ R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
+ R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
+ R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
+ R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
+ R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
+ R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
+ R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
+ R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
+ R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
+ /* Add the working vars back into context.state[] */
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ /* Wipe variables */
+ a = b = c = d = e = 0;
+}
+
+
+/* SHA1Init - Initialize new context */
+
+/*
+ * __db_SHA1Init --
+ *	Reset a SHA-1 context to the standard FIPS 180-1 initial state.
+ *
+ * PUBLIC: void __db_SHA1Init __P((SHA1_CTX *));
+ */
+void
+__db_SHA1Init(context)
+ SHA1_CTX *context;
+{
+ /* No bits hashed yet. */
+ context->count[0] = 0;
+ context->count[1] = 0;
+ /* SHA1 initialization constants (FIPS PUB 180-1). */
+ context->state[0] = 0x67452301;
+ context->state[1] = 0xEFCDAB89;
+ context->state[2] = 0x98BADCFE;
+ context->state[3] = 0x10325476;
+ context->state[4] = 0xC3D2E1F0;
+}
+
+
+/* Run your data through this. */
+
+/*
+ * __db_SHA1Update --
+ *	Run your data through this.
+ *
+ * Absorbs "len" bytes of "data" into the context, buffering partial
+ * 64-byte blocks and calling __db_SHA1Transform for each full block.
+ *
+ * PUBLIC: void __db_SHA1Update __P((SHA1_CTX *, unsigned char *,
+ * PUBLIC: size_t));
+ */
+void
+__db_SHA1Update(context, data, len)
+ SHA1_CTX *context;
+ unsigned char *data;
+ size_t len;
+{
+u_int32_t i, j; /* JHB */
+
+#ifdef VERBOSE
+ __db_SHAPrintContext(context, "before");
+#endif
+ /* j = number of bytes already buffered (count is in bits). */
+ j = (context->count[0] >> 3) & 63;
+ /* 64-bit bit count kept in two 32-bit halves; carry on overflow. */
+ if ((context->count[0] += (u_int32_t)len << 3) < (len << 3)) context->count[1]++;
+ context->count[1] += (u_int32_t)(len >> 29);
+ if ((j + len) > 63) {
+ /* Fill the buffered block, then hash full blocks directly. */
+ memcpy(&context->buffer[j], data, (i = 64-j));
+ __db_SHA1Transform(context->state, context->buffer);
+ for ( ; i + 63 < len; i += 64) {
+ __db_SHA1Transform(context->state, &data[i]);
+ }
+ j = 0;
+ }
+ else i = 0;
+ /* Buffer whatever is left over for the next call. */
+ memcpy(&context->buffer[j], &data[i], len - i);
+#ifdef VERBOSE
+ __db_SHAPrintContext(context, "after ");
+#endif
+}
+
+
+/* Add padding and return the message digest. */
+
+/*
+ * __db_SHA1Final --
+ *	Add padding and return the message digest.
+ *
+ * Appends the 0x80 pad byte, zero-pads to 56 mod 64, appends the
+ * big-endian 64-bit bit count, writes the 20-byte digest to "digest",
+ * and wipes the context.  The context cannot be reused afterward
+ * without calling __db_SHA1Init again.
+ *
+ * PUBLIC: void __db_SHA1Final __P((unsigned char *, SHA1_CTX *));
+ */
+void
+__db_SHA1Final(digest, context)
+ unsigned char *digest;
+ SHA1_CTX *context;
+{
+u_int32_t i; /* JHB */
+unsigned char finalcount[8];
+
+ /* Capture the bit count big-endian before padding modifies it. */
+ for (i = 0; i < 8; i++) {
+ finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)]
+ >> ((3-(i & 3)) * 8) ) & 255); /* Endian independent */
+ }
+ __db_SHA1Update(context, (unsigned char *)"\200", 1);
+ while ((context->count[0] & 504) != 448) {
+ __db_SHA1Update(context, (unsigned char *)"\0", 1);
+ }
+ __db_SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform()
+*/
+ /* Serialize the state words big-endian into the output digest. */
+ for (i = 0; i < 20; i++) {
+ digest[i] = (unsigned char)
+ ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
+ }
+ /* Wipe variables */
+ i = 0; /* JHB */
+ memset(context->buffer, 0, 64);
+ memset(context->state, 0, 20);
+ memset(context->count, 0, 8);
+ memset(finalcount, 0, 8); /* SWR */
+#ifdef SHA1HANDSOFF /* make SHA1Transform overwrite its own static vars */
+ __db_SHA1Transform(context->state, context->buffer);
+#endif
+}
+
+/*************************************************************/
+
diff --git a/storage/bdb/hsearch/hsearch.c b/storage/bdb/hsearch/hsearch.c
new file mode 100644
index 00000000000..9760aeeb9e8
--- /dev/null
+++ b/storage/bdb/hsearch/hsearch.c
@@ -0,0 +1,160 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hsearch.c,v 11.12 2002/02/22 01:55:57 mjc Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+#include "db_int.h"
+
+static DB *dbp;
+static ENTRY retval;
+
+/*
+ * Translate HSEARCH calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ *
+ * EXTERN: #if DB_DBM_HSEARCH != 0
+ *
+ * EXTERN: int __db_hcreate __P((size_t));
+ * EXTERN: ENTRY *__db_hsearch __P((ENTRY, ACTION));
+ * EXTERN: void __db_hdestroy __P((void));
+ *
+ * EXTERN: #endif
+ */
+int
+__db_hcreate(nel)
+ size_t nel;
+{
+ int ret;
+
+ /*
+ * hcreate() emulation: create the process-wide hash DB sized for
+ * "nel" elements.  Returns nonzero on success, 0 on failure, with
+ * errno set on failure (the inverse of most DB return conventions).
+ */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ __os_set_errno(ret);
+ return (1);
+ }
+
+ /* Small pages and a high fill factor suit typical hsearch use. */
+ if ((ret = dbp->set_pagesize(dbp, 512)) != 0 ||
+ (ret = dbp->set_h_ffactor(dbp, 16)) != 0 ||
+ (ret = dbp->set_h_nelem(dbp, (u_int32_t)nel)) != 0 ||
+ (ret = dbp->open(dbp,
+ NULL, NULL, NULL, DB_HASH, DB_CREATE, __db_omode("rw----"))) != 0)
+ __os_set_errno(ret);
+
+ /*
+ * !!!
+ * Hsearch returns 0 on error, not 1.
+ */
+ return (ret == 0 ? 1 : 0);
+}
+
+ENTRY *
+__db_hsearch(item, action)
+ ENTRY item;
+ ACTION action;
+{
+ DBT key, val;
+ int ret;
+
+ /*
+ * hsearch() emulation: look up (FIND) or insert (ENTER) item.key in
+ * the table created by __db_hcreate.  Returns a pointer to a static
+ * ENTRY on success, NULL with errno set on failure.
+ */
+ if (dbp == NULL) {
+ __os_set_errno(EINVAL);
+ return (NULL);
+ }
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ /* Keys and data are NUL-terminated strings, terminator included. */
+ key.data = item.key;
+ key.size = (u_int32_t)strlen(item.key) + 1;
+
+ switch (action) {
+ case ENTER:
+ val.data = item.data;
+ val.size = (u_int32_t)strlen(item.data) + 1;
+
+ /*
+ * Try and add the key to the database. If we fail because
+ * the key already exists, return the existing key.
+ */
+ if ((ret =
+ dbp->put(dbp, NULL, &key, &val, DB_NOOVERWRITE)) == 0)
+ break;
+ if (ret == DB_KEYEXIST &&
+ (ret = dbp->get(dbp, NULL, &key, &val, 0)) == 0)
+ break;
+ /*
+ * The only possible DB error is DB_NOTFOUND, and it can't
+ * happen. Check for a DB error, and lie if we find one.
+ */
+ __os_set_errno(ret > 0 ? ret : EINVAL);
+ return (NULL);
+ case FIND:
+ if ((ret = dbp->get(dbp, NULL, &key, &val, 0)) != 0) {
+ if (ret != DB_NOTFOUND)
+ __os_set_errno(ret);
+ return (NULL);
+ }
+ item.data = (char *)val.data;
+ break;
+ default:
+ __os_set_errno(EINVAL);
+ return (NULL);
+ }
+ /* Hand back the (static, not thread-safe) result entry. */
+ retval.key = item.key;
+ retval.data = item.data;
+ return (&retval);
+}
+
+/*
+ * __db_hdestroy --
+ *	hdestroy() emulation: discard the process-wide table, if any.
+ */
+void
+__db_hdestroy()
+{
+ DB *ldbp;
+
+ if ((ldbp = dbp) != NULL) {
+ dbp = NULL;
+ (void)ldbp->close(ldbp, 0);
+ }
+}
diff --git a/storage/bdb/libdb_java/checkapi.prl b/storage/bdb/libdb_java/checkapi.prl
new file mode 100644
index 00000000000..a27b8ffd107
--- /dev/null
+++ b/storage/bdb/libdb_java/checkapi.prl
@@ -0,0 +1,134 @@
+#
+# Released to public domain by Donald Anderson dda@world.std.com
+# No warranties.
+#
+# Perl script to check for matching of JNI interfaces to implementation.
+# We check all .cpp arguments and .h arguments and make sure that for
+# each .h declaration (marked by JNIEXPORT keyword), there is a .cpp
+# definition for the same function (also marked by JNIEXPORT keyword),
+# and vice versa. Definitions and declarations are determined solely
+# by whether they are in a .h or .cpp file - we don't do any further
+# analysis.
+#
+# Some additions made to help with Berkeley DB sources:
+#
+# Berkeley DB Java sources use JAVADB_*_ACCESS #defines
+# to quickly define routine access functions.
+
+# Pass 1: scan every argument file and collect normalized JNI signatures.
+# %lines maps signature -> (.h declaration count minus .cpp definition
+# count); a signature that matches on both sides nets out to zero.
+foreach $file (<@ARGV>) { # glob allows direct use from Win* makefiles
+ open (FILE, $file) || die "$file: cannot open\n";
+ # Does this file contribute declarations (.h) or definitions (.cpp)?
+ $dot_h = 0;
+ if ($file =~ /.*[hH]$/) {
+ $dot_h = 1;
+ }
+ $in_def = 0;
+nextline:
+ # Accumulate each JNIEXPORT / JAVADB_*_ACCESS construct into $def,
+ # line by line, until the ')' closing its parameter list is seen.
+ while (<FILE>) {
+ chop;
+ if (/JNIEXPORT/ || /^JAVADB_.*_ACCESS/) {
+ $in_def = 1;
+ $def = "";
+ }
+ if ($in_def == 1) {
+ $def .= " $_";
+ }
+ # A ')' ends the current construct: normalize and record it.
+ if (/\)/) {
+ $line = "";
+ $in_def = 0;
+ if ($def eq "") {
+ next nextline;
+ }
+ $_ = $def;
+ # remove comments
+ s@/\*[^*]*\*/@@g;
+ # Collapse whitespace and canonicalize "JNIEnv *" and the
+ # spacing around punctuation so .h and .cpp text compares
+ # equal regardless of formatting.
+ s@[ ][ ]*@ @g;
+ s@^[ ]@@g;
+ s@[ ]$@@g;
+ s@JNIEnv *\* *@JNIEnv @g;
+ s@([,*()]) @\1@g;
+ s@ ([,*()])@\1@g;
+
+ s/JAVADB_WO_ACCESS_METHOD/JAVADB_WO_ACCESS/;
+
+ # JAVADB_*_ACCESS macros expand to a getter and/or setter;
+ # rewrite the macro invocation into a comma list, split it,
+ # and synthesize the JNI prototypes it would generate.
+ if (/^JAVADB_.*_ACCESS/) {
+ s@ *@ @g;
+ s@_ACCESS_STRING\(([^,]*),@_ACCESS(\1,jstring,@;
+ s@_ACCESS_BEFORE_APPINIT@_ACCESS@;
+ s@_ACCESS\(@,normal,@;
+ s@JAVADB_@@;
+ s@\)@,@;
+ @vars = split(/,/);
+ # vars[0] is the access mode: RW, RO or WO.
+ $get = 0;
+ $set = 0;
+ if (@vars[0] eq "RW") {
+ $get = 1;
+ $set = 1;
+ }
+ if (@vars[0] eq "RO") {
+ $get = 1;
+ }
+ if (@vars[0] eq "WO") {
+ $set = 1;
+ }
+ if ($get == 0 && $set == 0) {
+ print "Invalid use of JAVADB_ macro\n";
+ }
+ if ($set == 1) {
+ $line = "JNIEXPORT void JNICALL Java_com_sleepycat_db_@vars[2]_set_1@vars[4](JNIEnv,jobject,@vars[3])";
+ }
+ if ($get == 1) {
+ $line2 = "JNIEXPORT @vars[3] JNICALL Java_com_sleepycat_db_@vars[2]_get_1@vars[4](JNIEnv,jobject)";
+ }
+ }
+ else {
+ # Plain prototype: drop parameter names (keep types) and
+ # any trailing semicolon.
+ s@([,(][a-zA-Z0-9_]*) [a-zA-Z0-9_]*@\1@g;
+ s@;$@@g;
+ $line = $_;
+ }
+
+ $def = "";
+
+ # Tally each synthesized signature: +1 for a declaration,
+ # -1 for a definition.  $line2 (the getter) is tracked
+ # separately so one macro can contribute both.
+ if ($line ne "") {
+ if ($lines{$line} eq "") {
+ $lines{$line} = 0;
+ }
+ if ($dot_h == 1) {
+ $lines{$line} += 1;
+ }
+ else {
+ $lines{$line} -= 1;
+ }
+ $line = "";
+ }
+ if ($line2 ne "") {
+ if ($lines{$line2} eq "") {
+ $lines{$line2} = 0;
+ }
+ if ($dot_h == 1) {
+ $lines{$line2} += 1;
+ }
+ else {
+ $lines{$line2} -= 1;
+ }
+ $line2 = "";
+ }
+ }
+ }
+ close (FILE);
+}
+
+$status = 0;
+# Pass 2: report every unbalanced signature.  A positive count means a
+# .h declaration with no matching .cpp definition; a negative count the
+# reverse.  Fix: the messages interpolated "$lines${key}" -- the unrelated,
+# never-assigned scalar $lines followed by the key -- which warns under -w
+# and fails under "use strict"; the intended interpolation is just $key.
+foreach $key (sort keys %lines) {
+ if ($lines{$key} != 0) {
+ if ($lines{$key} > 0) {
+ print "Missing .cpp implementation: $key\n";
+ $status = 1;
+ }
+ else {
+ print "Missing .h declaration: $key\n";
+ $status = 1;
+ }
+ }
+}
+
+# Nonzero exit if any mismatch was found, so makefiles can fail the build.
+exit ($status);
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_Db.h b/storage/bdb/libdb_java/com_sleepycat_db_Db.h
new file mode 100644
index 00000000000..0787ae87aed
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_Db.h
@@ -0,0 +1,598 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_Db */
+
+#ifndef _Included_com_sleepycat_db_Db
+#define _Included_com_sleepycat_db_Db
+#ifdef __cplusplus
+extern "C" {
+#endif
+#undef com_sleepycat_db_Db_DB_BTREE
+#define com_sleepycat_db_Db_DB_BTREE 1L
+#undef com_sleepycat_db_Db_DB_DONOTINDEX
+#define com_sleepycat_db_Db_DB_DONOTINDEX -30999L
+#undef com_sleepycat_db_Db_DB_HASH
+#define com_sleepycat_db_Db_DB_HASH 2L
+#undef com_sleepycat_db_Db_DB_KEYEMPTY
+#define com_sleepycat_db_Db_DB_KEYEMPTY -30998L
+#undef com_sleepycat_db_Db_DB_KEYEXIST
+#define com_sleepycat_db_Db_DB_KEYEXIST -30997L
+#undef com_sleepycat_db_Db_DB_LOCK_DEADLOCK
+#define com_sleepycat_db_Db_DB_LOCK_DEADLOCK -30996L
+#undef com_sleepycat_db_Db_DB_LOCK_NOTGRANTED
+#define com_sleepycat_db_Db_DB_LOCK_NOTGRANTED -30995L
+#undef com_sleepycat_db_Db_DB_NOSERVER
+#define com_sleepycat_db_Db_DB_NOSERVER -30994L
+#undef com_sleepycat_db_Db_DB_NOSERVER_HOME
+#define com_sleepycat_db_Db_DB_NOSERVER_HOME -30993L
+#undef com_sleepycat_db_Db_DB_NOSERVER_ID
+#define com_sleepycat_db_Db_DB_NOSERVER_ID -30992L
+#undef com_sleepycat_db_Db_DB_NOTFOUND
+#define com_sleepycat_db_Db_DB_NOTFOUND -30991L
+#undef com_sleepycat_db_Db_DB_OLD_VERSION
+#define com_sleepycat_db_Db_DB_OLD_VERSION -30990L
+#undef com_sleepycat_db_Db_DB_PAGE_NOTFOUND
+#define com_sleepycat_db_Db_DB_PAGE_NOTFOUND -30989L
+#undef com_sleepycat_db_Db_DB_QUEUE
+#define com_sleepycat_db_Db_DB_QUEUE 4L
+#undef com_sleepycat_db_Db_DB_RECNO
+#define com_sleepycat_db_Db_DB_RECNO 3L
+#undef com_sleepycat_db_Db_DB_REP_DUPMASTER
+#define com_sleepycat_db_Db_DB_REP_DUPMASTER -30988L
+#undef com_sleepycat_db_Db_DB_REP_HOLDELECTION
+#define com_sleepycat_db_Db_DB_REP_HOLDELECTION -30987L
+#undef com_sleepycat_db_Db_DB_REP_NEWMASTER
+#define com_sleepycat_db_Db_DB_REP_NEWMASTER -30986L
+#undef com_sleepycat_db_Db_DB_REP_NEWSITE
+#define com_sleepycat_db_Db_DB_REP_NEWSITE -30985L
+#undef com_sleepycat_db_Db_DB_REP_OUTDATED
+#define com_sleepycat_db_Db_DB_REP_OUTDATED -30984L
+#undef com_sleepycat_db_Db_DB_RUNRECOVERY
+#define com_sleepycat_db_Db_DB_RUNRECOVERY -30982L
+#undef com_sleepycat_db_Db_DB_SECONDARY_BAD
+#define com_sleepycat_db_Db_DB_SECONDARY_BAD -30981L
+#undef com_sleepycat_db_Db_DB_TXN_ABORT
+#define com_sleepycat_db_Db_DB_TXN_ABORT 0L
+#undef com_sleepycat_db_Db_DB_TXN_APPLY
+#define com_sleepycat_db_Db_DB_TXN_APPLY 1L
+#undef com_sleepycat_db_Db_DB_TXN_BACKWARD_ROLL
+#define com_sleepycat_db_Db_DB_TXN_BACKWARD_ROLL 3L
+#undef com_sleepycat_db_Db_DB_TXN_FORWARD_ROLL
+#define com_sleepycat_db_Db_DB_TXN_FORWARD_ROLL 4L
+#undef com_sleepycat_db_Db_DB_TXN_PRINT
+#define com_sleepycat_db_Db_DB_TXN_PRINT 8L
+#undef com_sleepycat_db_Db_DB_UNKNOWN
+#define com_sleepycat_db_Db_DB_UNKNOWN 5L
+#undef com_sleepycat_db_Db_DB_VERIFY_BAD
+#define com_sleepycat_db_Db_DB_VERIFY_BAD -30980L
+/* Inaccessible static: DB_AFTER */
+/* Inaccessible static: DB_AGGRESSIVE */
+/* Inaccessible static: DB_APPEND */
+/* Inaccessible static: DB_ARCH_ABS */
+/* Inaccessible static: DB_ARCH_DATA */
+/* Inaccessible static: DB_ARCH_LOG */
+/* Inaccessible static: DB_AUTO_COMMIT */
+/* Inaccessible static: DB_BEFORE */
+/* Inaccessible static: DB_CACHED_COUNTS */
+/* Inaccessible static: DB_CDB_ALLDB */
+/* Inaccessible static: DB_CHKSUM_SHA1 */
+/* Inaccessible static: DB_CLIENT */
+/* Inaccessible static: DB_CONSUME */
+/* Inaccessible static: DB_CONSUME_WAIT */
+/* Inaccessible static: DB_CREATE */
+/* Inaccessible static: DB_CURRENT */
+/* Inaccessible static: DB_CXX_NO_EXCEPTIONS */
+/* Inaccessible static: DB_DBT_MALLOC */
+/* Inaccessible static: DB_DBT_PARTIAL */
+/* Inaccessible static: DB_DBT_REALLOC */
+/* Inaccessible static: DB_DBT_USERMEM */
+/* Inaccessible static: DB_DIRECT */
+/* Inaccessible static: DB_DIRECT_DB */
+/* Inaccessible static: DB_DIRECT_LOG */
+/* Inaccessible static: DB_DIRTY_READ */
+/* Inaccessible static: DB_DUP */
+/* Inaccessible static: DB_DUPSORT */
+/* Inaccessible static: DB_EID_BROADCAST */
+/* Inaccessible static: DB_EID_INVALID */
+/* Inaccessible static: DB_ENCRYPT */
+/* Inaccessible static: DB_ENCRYPT_AES */
+/* Inaccessible static: DB_EXCL */
+/* Inaccessible static: DB_FAST_STAT */
+/* Inaccessible static: DB_FIRST */
+/* Inaccessible static: DB_FLUSH */
+/* Inaccessible static: DB_FORCE */
+/* Inaccessible static: DB_GET_BOTH */
+/* Inaccessible static: DB_GET_BOTH_RANGE */
+/* Inaccessible static: DB_GET_RECNO */
+/* Inaccessible static: DB_INIT_CDB */
+/* Inaccessible static: DB_INIT_LOCK */
+/* Inaccessible static: DB_INIT_LOG */
+/* Inaccessible static: DB_INIT_MPOOL */
+/* Inaccessible static: DB_INIT_TXN */
+/* Inaccessible static: DB_JOINENV */
+/* Inaccessible static: DB_JOIN_ITEM */
+/* Inaccessible static: DB_JOIN_NOSORT */
+/* Inaccessible static: DB_KEYFIRST */
+/* Inaccessible static: DB_KEYLAST */
+/* Inaccessible static: DB_LAST */
+/* Inaccessible static: DB_LOCKDOWN */
+/* Inaccessible static: DB_LOCK_DEFAULT */
+/* Inaccessible static: DB_LOCK_EXPIRE */
+/* Inaccessible static: DB_LOCK_GET */
+/* Inaccessible static: DB_LOCK_GET_TIMEOUT */
+/* Inaccessible static: DB_LOCK_IREAD */
+/* Inaccessible static: DB_LOCK_IWR */
+/* Inaccessible static: DB_LOCK_IWRITE */
+/* Inaccessible static: DB_LOCK_MAXLOCKS */
+/* Inaccessible static: DB_LOCK_MINLOCKS */
+/* Inaccessible static: DB_LOCK_MINWRITE */
+/* Inaccessible static: DB_LOCK_NOWAIT */
+/* Inaccessible static: DB_LOCK_OLDEST */
+/* Inaccessible static: DB_LOCK_PUT */
+/* Inaccessible static: DB_LOCK_PUT_ALL */
+/* Inaccessible static: DB_LOCK_PUT_OBJ */
+/* Inaccessible static: DB_LOCK_RANDOM */
+/* Inaccessible static: DB_LOCK_READ */
+/* Inaccessible static: DB_LOCK_TIMEOUT */
+/* Inaccessible static: DB_LOCK_WRITE */
+/* Inaccessible static: DB_LOCK_YOUNGEST */
+/* Inaccessible static: DB_MULTIPLE */
+/* Inaccessible static: DB_MULTIPLE_KEY */
+/* Inaccessible static: DB_NEXT */
+/* Inaccessible static: DB_NEXT_DUP */
+/* Inaccessible static: DB_NEXT_NODUP */
+/* Inaccessible static: DB_NODUPDATA */
+/* Inaccessible static: DB_NOLOCKING */
+/* Inaccessible static: DB_NOMMAP */
+/* Inaccessible static: DB_NOORDERCHK */
+/* Inaccessible static: DB_NOOVERWRITE */
+/* Inaccessible static: DB_NOPANIC */
+/* Inaccessible static: DB_NOSYNC */
+/* Inaccessible static: DB_ODDFILESIZE */
+/* Inaccessible static: DB_ORDERCHKONLY */
+/* Inaccessible static: DB_OVERWRITE */
+/* Inaccessible static: DB_PANIC_ENVIRONMENT */
+/* Inaccessible static: DB_POSITION */
+/* Inaccessible static: DB_PREV */
+/* Inaccessible static: DB_PREV_NODUP */
+/* Inaccessible static: DB_PRINTABLE */
+/* Inaccessible static: DB_PRIORITY_DEFAULT */
+/* Inaccessible static: DB_PRIORITY_HIGH */
+/* Inaccessible static: DB_PRIORITY_LOW */
+/* Inaccessible static: DB_PRIORITY_VERY_HIGH */
+/* Inaccessible static: DB_PRIORITY_VERY_LOW */
+/* Inaccessible static: DB_PRIVATE */
+/* Inaccessible static: DB_RDONLY */
+/* Inaccessible static: DB_RECNUM */
+/* Inaccessible static: DB_RECORDCOUNT */
+/* Inaccessible static: DB_RECOVER */
+/* Inaccessible static: DB_RECOVER_FATAL */
+/* Inaccessible static: DB_REGION_INIT */
+/* Inaccessible static: DB_RENUMBER */
+/* Inaccessible static: DB_REP_CLIENT */
+/* Inaccessible static: DB_REP_LOGSONLY */
+/* Inaccessible static: DB_REP_MASTER */
+/* Inaccessible static: DB_REP_PERMANENT */
+/* Inaccessible static: DB_REP_UNAVAIL */
+/* Inaccessible static: DB_REVSPLITOFF */
+/* Inaccessible static: DB_RMW */
+/* Inaccessible static: DB_SALVAGE */
+/* Inaccessible static: DB_SET */
+/* Inaccessible static: DB_SET_LOCK_TIMEOUT */
+/* Inaccessible static: DB_SET_RANGE */
+/* Inaccessible static: DB_SET_RECNO */
+/* Inaccessible static: DB_SET_TXN_TIMEOUT */
+/* Inaccessible static: DB_SNAPSHOT */
+/* Inaccessible static: DB_STAT_CLEAR */
+/* Inaccessible static: DB_SYSTEM_MEM */
+/* Inaccessible static: DB_THREAD */
+/* Inaccessible static: DB_TRUNCATE */
+/* Inaccessible static: DB_TXN_NOSYNC */
+/* Inaccessible static: DB_TXN_NOWAIT */
+/* Inaccessible static: DB_TXN_SYNC */
+/* Inaccessible static: DB_TXN_WRITE_NOSYNC */
+/* Inaccessible static: DB_UPGRADE */
+/* Inaccessible static: DB_USE_ENVIRON */
+/* Inaccessible static: DB_USE_ENVIRON_ROOT */
+/* Inaccessible static: DB_VERB_CHKPOINT */
+/* Inaccessible static: DB_VERB_DEADLOCK */
+/* Inaccessible static: DB_VERB_RECOVERY */
+/* Inaccessible static: DB_VERB_REPLICATION */
+/* Inaccessible static: DB_VERB_WAITSFOR */
+/* Inaccessible static: DB_VERIFY */
+/* Inaccessible static: DB_VERSION_MAJOR */
+/* Inaccessible static: DB_VERSION_MINOR */
+/* Inaccessible static: DB_VERSION_PATCH */
+/* Inaccessible static: DB_WRITECURSOR */
+/* Inaccessible static: DB_XA_CREATE */
+/* Inaccessible static: DB_XIDDATASIZE */
+/* Inaccessible static: DB_YIELDCPU */
+/* Inaccessible static: already_loaded_ */
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _init
+ * Signature: (Lcom/sleepycat/db/DbEnv;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _notify_internal
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1notify_1internal
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _associate
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Db;Lcom/sleepycat/db/DbSecondaryKeyCreate;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1associate
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _close
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: cursor
+ * Signature: (Lcom/sleepycat/db/DbTxn;I)Lcom/sleepycat/db/Dbc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_cursor
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: del
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_del
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: err
+ * Signature: (ILjava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_err
+ (JNIEnv *, jobject, jint, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: errx
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_errx
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: fd
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _finalize
+ * Signature: (Lcom/sleepycat/db/DbErrcall;Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
+ (JNIEnv *, jobject, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get_byteswapped
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_Db_get_1byteswapped
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get_type
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1type
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: join
+ * Signature: ([Lcom/sleepycat/db/Dbc;I)Lcom/sleepycat/db/Dbc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
+ (JNIEnv *, jobject, jobjectArray, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: key_range
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/DbKeyRange;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _open
+ * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
+ (JNIEnv *, jobject, jobject, jstring, jstring, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: pget
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_pget
+ (JNIEnv *, jobject, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: put
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _rename
+ * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1rename
+ (JNIEnv *, jobject, jstring, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: _remove
+ * Signature: (Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1remove
+ (JNIEnv *, jobject, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: append_recno_changed
+ * Signature: (Lcom/sleepycat/db/DbAppendRecno;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_append_1recno_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: bt_compare_changed
+ * Signature: (Lcom/sleepycat/db/DbBtreeCompare;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_bt_maxkey
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1bt_1maxkey
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_bt_minkey
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1bt_1minkey
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: bt_prefix_changed
+ * Signature: (Lcom/sleepycat/db/DbBtreePrefix;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_cachesize
+ * Signature: (III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1cachesize
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_cache_priority
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1cache_1priority
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: dup_compare_changed
+ * Signature: (Lcom/sleepycat/db/DbDupCompare;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_encrypt
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1encrypt
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: feedback_changed
+ * Signature: (Lcom/sleepycat/db/DbFeedback;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_flags
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1flags
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: get_flags_raw
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1flags_1raw
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_h_ffactor
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1h_1ffactor
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: hash_changed
+ * Signature: (Lcom/sleepycat/db/DbHash;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_hash_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_h_nelem
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1h_1nelem
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_lorder
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1lorder
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_pagesize
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1pagesize
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_delim
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1delim
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_len
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1len
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_pad
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1pad
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_re_source
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1re_1source
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: set_q_extentsize
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1q_1extentsize
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: stat
+ * Signature: (I)Ljava/lang/Object;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: sync
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_sync
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: truncate
+ * Signature: (Lcom/sleepycat/db/DbTxn;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_truncate
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: upgrade
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: verify
+ * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/io/OutputStream;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
+ (JNIEnv *, jobject, jstring, jstring, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: one_time_init
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_one_1time_1init
+ (JNIEnv *, jclass);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h b/storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h
new file mode 100644
index 00000000000..f239dfc7593
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h
@@ -0,0 +1,581 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbEnv */
+
+#ifndef _Included_com_sleepycat_db_DbEnv
+#define _Included_com_sleepycat_db_DbEnv
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _close
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: dbremove
+ * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbremove
+ (JNIEnv *, jobject, jobject, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: dbrename
+ * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbrename
+ (JNIEnv *, jobject, jobject, jstring, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: err
+ * Signature: (ILjava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_err
+ (JNIEnv *, jobject, jint, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: errx
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_errx
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _finalize
+ * Signature: (Lcom/sleepycat/db/DbErrcall;Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1finalize
+ (JNIEnv *, jobject, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _init
+ * Signature: (Lcom/sleepycat/db/DbErrcall;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _init_using_db
+ * Signature: (Lcom/sleepycat/db/DbErrcall;Lcom/sleepycat/db/Db;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
+ (JNIEnv *, jobject, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _init_using_xa
+ * Signature: (Lcom/sleepycat/db/DbErrcall;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1xa
+ (JNIEnv *, jobject, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _notify_db_close
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1notify_1db_1close
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: open
+ * Signature: (Ljava/lang/String;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
+ (JNIEnv *, jobject, jstring, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: remove
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_cachesize
+ * Signature: (III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1cachesize
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_encrypt
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1encrypt
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _set_errcall
+ * Signature: (Lcom/sleepycat/db/DbErrcall;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errcall
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _set_errpfx
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: feedback_changed
+ * Signature: (Lcom/sleepycat/db/DbEnvFeedback;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_verbose
+ * Signature: (IZ)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1verbose
+ (JNIEnv *, jobject, jint, jboolean);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_data_dir
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1data_1dir
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_bsize
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1bsize
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_dir
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1dir
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_max
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1max
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_regionmax
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1regionmax
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_conflicts
+ * Signature: ([[B)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
+ (JNIEnv *, jobject, jobjectArray);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_detect
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1detect
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max_lockers
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1lockers
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max_locks
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1locks
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_lk_max_objects
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1max_1objects
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_mp_mmapsize
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1mp_1mmapsize
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_flags
+ * Signature: (IZ)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1flags
+ (JNIEnv *, jobject, jint, jboolean);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_rep_limit
+ * Signature: (II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1rep_1limit
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_transport_changed
+ * Signature: (ILcom/sleepycat/db/DbRepTransport;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_rep_1transport_1changed
+ (JNIEnv *, jobject, jint, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_rpc_server
+ * Signature: (Lcom/sleepycat/db/DbClient;Ljava/lang/String;JJI)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1rpc_1server
+ (JNIEnv *, jobject, jobject, jstring, jlong, jlong, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_shm_key
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1shm_1key
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_tas_spins
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tas_1spins
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_timeout
+ * Signature: (JI)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1timeout
+ (JNIEnv *, jobject, jlong, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_tmp_dir
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tmp_1dir
+ (JNIEnv *, jobject, jstring);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: app_dispatch_changed
+ * Signature: (Lcom/sleepycat/db/DbAppDispatch;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_app_1dispatch_1changed
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_tx_max
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tx_1max
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: _set_tx_timestamp
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1tx_1timestamp
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_major
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1major
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_minor
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1minor
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_patch
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1patch
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: get_version_string
+ * Signature: ()Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1string
+ (JNIEnv *, jclass);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: strerror
+ * Signature: (I)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_strerror
+ (JNIEnv *, jclass, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_detect
+ * Signature: (II)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1detect
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_get
+ * Signature: (IILcom/sleepycat/db/Dbt;I)Lcom/sleepycat/db/DbLock;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
+ (JNIEnv *, jobject, jint, jint, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_put
+ * Signature: (Lcom/sleepycat/db/DbLock;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1put
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_id
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_id_free
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1id_1free
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_stat
+ * Signature: (I)Lcom/sleepycat/db/DbLockStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_vec
+ * Signature: (II[Lcom/sleepycat/db/DbLockRequest;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1vec
+ (JNIEnv *, jobject, jint, jint, jobjectArray, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_archive
+ * Signature: (I)[Ljava/lang/String;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_compare
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/DbLsn;)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
+ (JNIEnv *, jclass, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_cursor
+ * Signature: (I)Lcom/sleepycat/db/DbLogc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1cursor
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_file
+ * Signature: (Lcom/sleepycat/db/DbLsn;)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_flush
+ * Signature: (Lcom/sleepycat/db/DbLsn;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1flush
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_put
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: log_stat
+ * Signature: (I)Lcom/sleepycat/db/DbLogStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: memp_stat
+ * Signature: (I)Lcom/sleepycat/db/DbMpoolStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: memp_fstat
+ * Signature: (I)[Lcom/sleepycat/db/DbMpoolFStat;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: memp_trickle
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_elect
+ * Signature: (III)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_rep_1elect
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_process_message
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/DbEnv$RepProcessMessage;)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_rep_1process_1message
+ (JNIEnv *, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_start
+ * Signature: (Lcom/sleepycat/db/Dbt;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_rep_1start
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_stat
+ * Signature: (I)Lcom/sleepycat/db/DbRepStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_rep_1stat
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_begin
+ * Signature: (Lcom/sleepycat/db/DbTxn;I)Lcom/sleepycat/db/DbTxn;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_checkpoint
+ * Signature: (III)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_txn_1checkpoint
+ (JNIEnv *, jobject, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_recover
+ * Signature: (II)[Lcom/sleepycat/db/DbPreplist;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_txn_1recover
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: txn_stat
+ * Signature: (I)Lcom/sleepycat/db/DbTxnStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
+ (JNIEnv *, jobject, jint);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbLock.h b/storage/bdb/libdb_java/com_sleepycat_db_DbLock.h
new file mode 100644
index 00000000000..9f3d77d44bc
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbLock.h
@@ -0,0 +1,21 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLock */
+
+#ifndef _Included_com_sleepycat_db_DbLock
+#define _Included_com_sleepycat_db_DbLock
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLock
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h b/storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h
new file mode 100644
index 00000000000..8d029c761ba
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h
@@ -0,0 +1,37 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLogc */
+
+#ifndef _Included_com_sleepycat_db_DbLogc
+#define _Included_com_sleepycat_db_DbLogc
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: close
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h b/storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h
new file mode 100644
index 00000000000..080fa0a8758
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h
@@ -0,0 +1,29 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLsn */
+
+#ifndef _Included_com_sleepycat_db_DbLsn
+#define _Included_com_sleepycat_db_DbLsn
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLsn
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_finalize
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbLsn
+ * Method: init_lsn
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_init_1lsn
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h b/storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h
new file mode 100644
index 00000000000..59641c041a4
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h
@@ -0,0 +1,61 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbTxn */
+
+#ifndef _Included_com_sleepycat_db_DbTxn
+#define _Included_com_sleepycat_db_DbTxn
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: abort
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_abort
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: commit
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_commit
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: discard
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_discard
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: id
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: prepare
+ * Signature: ([B)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
+ (JNIEnv *, jobject, jbyteArray);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
+ * Method: set_timeout
+ * Signature: (JI)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_set_1timeout
+ (JNIEnv *, jobject, jlong, jint);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h b/storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h
new file mode 100644
index 00000000000..7f8495590c0
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h
@@ -0,0 +1,22 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbUtil */
+
+#ifndef _Included_com_sleepycat_db_DbUtil
+#define _Included_com_sleepycat_db_DbUtil
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Inaccessible static: big_endian */
+/*
+ * Class: com_sleepycat_db_DbUtil
+ * Method: is_big_endian
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_DbUtil_is_1big_1endian
+ (JNIEnv *, jclass);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_Dbc.h b/storage/bdb/libdb_java/com_sleepycat_db_Dbc.h
new file mode 100644
index 00000000000..447ab234844
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_Dbc.h
@@ -0,0 +1,77 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_Dbc */
+
+#ifndef _Included_com_sleepycat_db_Dbc
+#define _Included_com_sleepycat_db_Dbc
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: close
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_close
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: count
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_count
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: del
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_del
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: dup
+ * Signature: (I)Lcom/sleepycat/db/Dbc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Dbc_dup
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: pget
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_pget
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: put
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_put
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_Dbt.h b/storage/bdb/libdb_java/com_sleepycat_db_Dbt.h
new file mode 100644
index 00000000000..c09bd8e6131
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_Dbt.h
@@ -0,0 +1,37 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_Dbt */
+
+#ifndef _Included_com_sleepycat_db_Dbt
+#define _Included_com_sleepycat_db_Dbt
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: init
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Dbt
+ * Method: create_data
+ * Signature: ()[B
+ */
+JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_create_1data
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h b/storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
new file mode 100644
index 00000000000..00e9e2e6893
--- /dev/null
+++ b/storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
@@ -0,0 +1,95 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_xa_DbXAResource */
+
+#ifndef _Included_com_sleepycat_db_xa_DbXAResource
+#define _Included_com_sleepycat_db_xa_DbXAResource
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Inaccessible static: unique_rmid */
+/* Inaccessible static: class_00024com_00024sleepycat_00024db_00024xa_00024DbXAResource */
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _init
+ * Signature: (Ljava/lang/String;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1init
+ (JNIEnv *, jobject, jstring, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _close
+ * Signature: (Ljava/lang/String;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1close
+ (JNIEnv *, jobject, jstring, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _commit
+ * Signature: (Ljavax/transaction/xa/Xid;IZ)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1commit
+ (JNIEnv *, jobject, jobject, jint, jboolean);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _end
+ * Signature: (Ljavax/transaction/xa/Xid;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1end
+ (JNIEnv *, jobject, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _forget
+ * Signature: (Ljavax/transaction/xa/Xid;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1forget
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _prepare
+ * Signature: (Ljavax/transaction/xa/Xid;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_xa_DbXAResource__1prepare
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _recover
+ * Signature: (II)[Ljavax/transaction/xa/Xid;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_xa_DbXAResource__1recover
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _rollback
+ * Signature: (Ljavax/transaction/xa/Xid;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1rollback
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _start
+ * Signature: (Ljavax/transaction/xa/Xid;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1start
+ (JNIEnv *, jobject, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: xa_attach
+ * Signature: (Ljavax/transaction/xa/Xid;Ljava/lang/Integer;)Lcom/sleepycat/db/xa/DbXAResource$DbAttach;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_xa_DbXAResource_xa_1attach
+ (JNIEnv *, jclass, jobject, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/storage/bdb/libdb_java/java_Db.c b/storage/bdb/libdb_java/java_Db.c
new file mode 100644
index 00000000000..465c40f7d5a
--- /dev/null
+++ b/storage/bdb/libdb_java/java_Db.c
@@ -0,0 +1,982 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_Db.c,v 11.80 2002/08/29 14:22:23 margo Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc_auto/db_ext.h"
+#include "java_util.h"
+#include "java_stat_auto.h"
+#include "com_sleepycat_db_Db.h"
+
+/* This struct is used in Db.verify and its callback */
+struct verify_callback_struct {
+ JNIEnv *env;
+ jobject streamobj;
+ jbyteArray bytes;
+ int nbytes;
+ jmethodID writemid;
+};
+
+JAVADB_GET_FLD(Db, jint, flags_1raw, DB, flags)
+
+JAVADB_SET_METH(Db, jint, flags, DB, flags)
+JAVADB_SET_METH(Db, jint, h_1ffactor, DB, h_ffactor)
+JAVADB_SET_METH(Db, jint, h_1nelem, DB, h_nelem)
+JAVADB_SET_METH(Db, jint, lorder, DB, lorder)
+JAVADB_SET_METH(Db, jint, re_1delim, DB, re_delim)
+JAVADB_SET_METH(Db, jint, re_1len, DB, re_len)
+JAVADB_SET_METH(Db, jint, re_1pad, DB, re_pad)
+JAVADB_SET_METH(Db, jint, q_1extentsize, DB, q_extentsize)
+JAVADB_SET_METH(Db, jint, bt_1maxkey, DB, bt_maxkey)
+JAVADB_SET_METH(Db, jint, bt_1minkey, DB, bt_minkey)
+
+/*
+ * This only gets called once ever, at the beginning of execution
+ * and can be used to initialize unchanging methodIds, fieldIds, etc.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_one_1time_1init
+ (JNIEnv *jnienv, /*Db.class*/ jclass jthisclass)
+{
+ COMPQUIET(jthisclass, NULL);
+
+ one_time_init(jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbEnv*/ jobject jdbenv, jint flags)
+{
+ int err;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+ DB_ENV *dbenv;
+
+ dbenv = get_DB_ENV(jnienv, jdbenv);
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ DB_ASSERT(dbinfo == NULL);
+
+ err = db_create(&db, dbenv, flags);
+ if (verify_return(jnienv, err, 0)) {
+ set_private_dbobj(jnienv, name_DB, jthis, db);
+ dbinfo = dbji_construct(jnienv, jthis, flags);
+ set_private_info(jnienv, name_DB, jthis, dbinfo);
+ db->api_internal = dbinfo;
+ }
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1associate
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /* DbTxn */ jobject jtxn,
+ /*Db*/ jobject jsecondary, /*DbSecondaryKeyCreate*/ jobject jcallback,
+ jint flags)
+{
+ DB *db, *secondary;
+ DB_JAVAINFO *second_info;
+ DB_TXN *txn;
+
+ db = get_DB(jnienv, jthis);
+ txn = get_DB_TXN(jnienv, jtxn);
+ secondary = get_DB(jnienv, jsecondary);
+
+ second_info = (DB_JAVAINFO*)secondary->api_internal;
+ dbji_set_assoc_object(second_info, jnienv, db, txn, secondary,
+ jcallback, flags);
+
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jint flags)
+{
+ int err;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ /*
+ * Null out the private data to indicate the DB is invalid.
+ * We do this in advance to help guard against multithreading
+ * issues.
+ */
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+
+ err = db->close(db, flags);
+ verify_return(jnienv, err, 0);
+ dbji_dealloc(dbinfo, jnienv);
+
+ return (err);
+}
+
+/*
+ * We are being notified that the parent DbEnv has closed.
+ * Zero out the pointer to the DB, since it is no longer
+ * valid, to prevent mistakes. The user will get a null
+ * pointer exception if they try to use this Db again.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1notify_1internal
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_append_1recno_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbAppendRecno*/ jobject jcallback)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_append_recno_object(dbinfo, jnienv, db, jcallback);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbBtreeCompare*/ jobject jbtcompare)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_bt_compare_object(dbinfo, jnienv, db, jbtcompare);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbBtreePrefix*/ jobject jbtprefix)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_bt_prefix_object(dbinfo, jnienv, db, jbtprefix);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_cursor
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid, jint flags)
+{
+ int err;
+ DBC *dbc;
+ DB *db = get_DB(jnienv, jthis);
+ DB_TXN *dbtxnid = get_DB_TXN(jnienv, txnid);
+
+ if (!verify_non_null(jnienv, db))
+ return (NULL);
+ err = db->cursor(db, dbtxnid, &dbc, flags);
+ verify_return(jnienv, err, 0);
+ return (get_Dbc(jnienv, dbc));
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_del
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, jint dbflags)
+{
+ int err;
+ DB_TXN *dbtxnid;
+ DB *db;
+ LOCKED_DBT lkey;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+ if (locked_dbt_get(&lkey, jnienv, db->dbenv, key, inOp) != 0)
+ goto out;
+
+ err = db->del(db, dbtxnid, &lkey.javainfo->dbt, dbflags);
+ if (!DB_RETOK_DBDEL(err))
+ verify_return(jnienv, err, 0);
+
+ out:
+ locked_dbt_put(&lkey, jnienv, db->dbenv);
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbDupCompare*/ jobject jdupcompare)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_dup_compare_object(dbinfo, jnienv, db, jdupcompare);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_err
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jint ecode, jstring msg)
+{
+ DB *db;
+ LOCKED_STRING ls_msg;
+
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
+ goto out;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ goto out;
+
+ db->err(db, ecode, "%s", ls_msg.string);
+
+ out:
+ locked_string_put(&ls_msg, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_errx
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring msg)
+{
+ LOCKED_STRING ls_msg;
+ DB *db = get_DB(jnienv, jthis);
+
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
+ goto out;
+ if (!verify_non_null(jnienv, db))
+ goto out;
+
+ db->errx(db, "%s", ls_msg.string);
+
+ out:
+ locked_string_put(&ls_msg, jnienv);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
+ (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+ int err;
+ int return_value = 0;
+ DB *db = get_DB(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, db))
+ return (0);
+
+ err = db->fd(db, &return_value);
+ verify_return(jnienv, err, 0);
+
+ return (return_value);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1encrypt
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring jpasswd, jint flags)
+{
+ int err;
+ DB *db;
+ LOCKED_STRING ls_passwd;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+ if (locked_string_get(&ls_passwd, jnienv, jpasswd) != 0)
+ goto out;
+
+ err = db->set_encrypt(db, ls_passwd.string, flags);
+ verify_return(jnienv, err, 0);
+
+out: locked_string_put(&ls_passwd, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbFeedback*/ jobject jfeedback)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_feedback_object(dbinfo, jnienv, db, jfeedback);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err, op_flags, retry;
+ DB *db;
+ DB_ENV *dbenv;
+ OpKind keyop, dataop;
+ DB_TXN *dbtxnid;
+ LOCKED_DBT lkey, ldata;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ goto out3;
+ dbenv = db->dbenv;
+
+ /* Depending on flags, the key may be input/output. */
+ keyop = inOp;
+ dataop = outOp;
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH) {
+ keyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+ for (retry = 0; retry < 3; retry++) {
+ err = db->get(db,
+ dbtxnid, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ out3:
+ if (!DB_RETOK_DBGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_hash_1changed
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbHash*/ jobject jhash)
+{
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
+ dbji_set_h_hash_object(dbinfo, jnienv, db, jhash);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*Dbc[]*/ jobjectArray curslist,
+ jint flags)
+{
+ int err;
+ DB *db;
+ int count;
+ DBC **newlist;
+ DBC *dbc;
+ int i;
+ int size;
+
+ db = get_DB(jnienv, jthis);
+ count = (*jnienv)->GetArrayLength(jnienv, curslist);
+ size = sizeof(DBC *) * (count+1);
+ if ((err = __os_malloc(db->dbenv, size, &newlist)) != 0) {
+ if (!verify_return(jnienv, err, 0))
+ return (NULL);
+ }
+
+ /* Convert the java array of Dbc's to a C array of DBC's. */
+ for (i = 0; i < count; i++) {
+ jobject jobj =
+ (*jnienv)->GetObjectArrayElement(jnienv, curslist, i);
+ if (jobj == 0) {
+ /*
+ * An embedded null in the array is treated
+ * as an endpoint.
+ */
+ newlist[i] = 0;
+ break;
+ }
+ else {
+ newlist[i] = get_DBC(jnienv, jobj);
+ }
+ }
+ newlist[count] = 0;
+
+ if (!verify_non_null(jnienv, db))
+ return (NULL);
+
+ err = db->join(db, newlist, &dbc, flags);
+ verify_return(jnienv, err, 0);
+ __os_free(db->dbenv, newlist);
+
+ return (get_Dbc(jnienv, dbc));
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject jkey, jobject /*DbKeyRange*/ range, jint flags)
+{
+ int err;
+ DB *db;
+ DB_TXN *dbtxnid;
+ LOCKED_DBT lkey;
+ DB_KEY_RANGE result;
+ jfieldID fid;
+ jclass krclass;
+
+ db = get_DB(jnienv, jthis);
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+ if (!verify_non_null(jnienv, db))
+ return;
+ if (!verify_non_null(jnienv, range))
+ return;
+ if (locked_dbt_get(&lkey, jnienv, db->dbenv, jkey, inOp) != 0)
+ goto out;
+ err = db->key_range(db, dbtxnid, &lkey.javainfo->dbt, &result, flags);
+ if (verify_return(jnienv, err, 0)) {
+ /* fill in the values of the DbKeyRange structure */
+ if ((krclass = get_class(jnienv, "DbKeyRange")) == NULL)
+ return; /* An exception has been posted. */
+ fid = (*jnienv)->GetFieldID(jnienv, krclass, "less", "D");
+ (*jnienv)->SetDoubleField(jnienv, range, fid, result.less);
+ fid = (*jnienv)->GetFieldID(jnienv, krclass, "equal", "D");
+ (*jnienv)->SetDoubleField(jnienv, range, fid, result.equal);
+ fid = (*jnienv)->GetFieldID(jnienv, krclass, "greater", "D");
+ (*jnienv)->SetDoubleField(jnienv, range, fid, result.greater);
+ }
+ out:
+ locked_dbt_put(&lkey, jnienv, db->dbenv);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_pget
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject rkey, /*Dbt*/ jobject data, jint flags)
+{
+ int err, op_flags, retry;
+ DB *db;
+ DB_ENV *dbenv;
+ OpKind keyop, rkeyop, dataop;
+ DB_TXN *dbtxnid;
+ LOCKED_DBT lkey, lrkey, ldata;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ goto out4;
+ dbenv = db->dbenv;
+
+ /* Depending on flags, the key may be input/output. */
+ keyop = inOp;
+ rkeyop = outOp;
+ dataop = outOp;
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH) {
+ keyop = inOutOp;
+ rkeyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out3;
+ if (locked_dbt_get(&lrkey, jnienv, dbenv, rkey, rkeyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+ for (retry = 0; retry < 3; retry++) {
+ err = db->pget(db, dbtxnid, &lkey.javainfo->dbt,
+ &lrkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&lrkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lrkey, jnienv, dbenv);
+ out3:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ out4:
+ if (!DB_RETOK_DBGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &lrkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+/*
+ * Db.put: store a key/data pair via DB->put, returning the DB error
+ * code (0 on success).  For DB_APPEND the key DBT is an output
+ * parameter (DB fills in the allocated record number).
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
+  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+   /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+	int err;
+	DB *db;
+	DB_ENV *dbenv;
+	DB_TXN *dbtxnid;
+	LOCKED_DBT lkey, ldata;
+	OpKind keyop;
+
+	err = 0;
+	db = get_DB(jnienv, jthis);
+	dbtxnid = get_DB_TXN(jnienv, txnid);
+	if (!verify_non_null(jnienv, db))
+		return (0);	/* error will be thrown, retval doesn't matter */
+	dbenv = db->dbenv;
+
+	/*
+	 * For DB_APPEND, the key may be output-only; for all other flags,
+	 * it's input-only.
+	 */
+	if ((flags & DB_OPFLAGS_MASK) == DB_APPEND)
+		keyop = outOp;
+	else
+		keyop = inOp;
+
+	if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+		goto out2;
+	if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
+		goto out1;
+
+	/* NOTE(review): redundant re-check; db was already verified above. */
+	if (!verify_non_null(jnienv, db))
+		goto out1;
+
+	err = db->put(db,
+	    dbtxnid, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+	if (!DB_RETOK_DBPUT(err))
+		verify_return(jnienv, err, 0);
+
+ out1:
+	locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+	locked_dbt_put(&lkey, jnienv, dbenv);
+	return (err);
+}
+
+/*
+ * Db._remove: wrap DB->remove.  The underlying C handle is consumed
+ * by remove(), so the private pointer is zeroed and the per-object
+ * java info is released on every path out of this function.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1remove
+  (JNIEnv *jnienv, /*Db*/ jobject jthis,
+   jstring file, jstring database, jint flags)
+{
+	int err;
+	DB *db;
+	DB_JAVAINFO *dbinfo;
+	LOCKED_STRING ls_file;
+	LOCKED_STRING ls_database;
+
+	db = get_DB(jnienv, jthis);
+	dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+
+	if (!verify_non_null(jnienv, db))
+		return;
+	if (locked_string_get(&ls_file, jnienv, file) != 0)
+		goto out2;
+	if (locked_string_get(&ls_database, jnienv, database) != 0)
+		goto out1;
+	err = db->remove(db, ls_file.string, ls_database.string, flags);
+
+	/* The handle is gone regardless of err; detach it from the object. */
+	set_private_dbobj(jnienv, name_DB, jthis, 0);
+	verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+
+ out1:
+	locked_string_put(&ls_database, jnienv);
+ out2:
+	locked_string_put(&ls_file, jnienv);
+
+	dbji_dealloc(dbinfo, jnienv);
+}
+
+/*
+ * Db._rename: wrap DB->rename.  Like _remove, the C handle is
+ * consumed, so the private pointer and java info are released here.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1rename
+  (JNIEnv *jnienv, /*Db*/ jobject jthis,
+   jstring file, jstring database, jstring newname, jint flags)
+{
+	int err;
+	DB *db;
+	DB_JAVAINFO *dbinfo;
+	LOCKED_STRING ls_file;
+	LOCKED_STRING ls_database;
+	LOCKED_STRING ls_newname;
+
+	db = get_DB(jnienv, jthis);
+	dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, db))
+		return;
+	if (locked_string_get(&ls_file, jnienv, file) != 0)
+		goto out3;
+	if (locked_string_get(&ls_database, jnienv, database) != 0)
+		goto out2;
+	if (locked_string_get(&ls_newname, jnienv, newname) != 0)
+		goto out1;
+
+	err = db->rename(db, ls_file.string, ls_database.string,
+	    ls_newname.string, flags);
+
+	verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+	set_private_dbobj(jnienv, name_DB, jthis, 0);
+
+ out1:
+	locked_string_put(&ls_newname, jnienv);
+ out2:
+	locked_string_put(&ls_database, jnienv);
+ out3:
+	locked_string_put(&ls_file, jnienv);
+
+	dbji_dealloc(dbinfo, jnienv);
+}
+
+JAVADB_METHOD(Db_set_1pagesize, (JAVADB_ARGS, jlong pagesize), DB,
+ set_pagesize, (c_this, (u_int32_t)pagesize))
+JAVADB_METHOD(Db_set_1cachesize,
+ (JAVADB_ARGS, jint gbytes, jint bytes, jint ncaches), DB,
+ set_cachesize, (c_this, gbytes, bytes, ncaches))
+JAVADB_METHOD(Db_set_1cache_1priority, (JAVADB_ARGS, jint priority), DB,
+ set_cache_priority, (c_this, (DB_CACHE_PRIORITY)priority))
+
+/*
+ * Db.set_re_source: set the backing text source file for recno
+ * databases.  A null jstring maps to a null source name.
+ */
+JNIEXPORT void JNICALL
+  Java_com_sleepycat_db_Db_set_1re_1source
+  (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring re_source)
+{
+	int err;
+	DB *db;
+
+	db = get_DB(jnienv, jthis);
+	if (verify_non_null(jnienv, db)) {
+
+		/* XXX does the string from get_c_string ever get freed? */
+		if (re_source != NULL)
+			err = db->set_re_source(db,
+			    get_c_string(jnienv, re_source));
+		else
+			err = db->set_re_source(db, 0);
+
+		verify_return(jnienv, err, 0);
+	}
+}
+
+/*
+ * Db.stat: return a type-specific statistics object (DbBtreeStat,
+ * DbHashStat or DbQueueStat) for this database, or NULL with a Java
+ * exception pending on error.
+ *
+ * Fix: the stat structure allocated by db->stat() is now freed
+ * unconditionally once we own it; the old code keyed the free on a
+ * per-type bytesize and leaked statp when get_type() reported an
+ * unrecognized DBTYPE (the default branch).
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
+  (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+	DB *db;
+	DB_BTREE_STAT *bstp;
+	DB_HASH_STAT *hstp;
+	DB_QUEUE_STAT *qstp;
+	DBTYPE dbtype;
+	jobject retval;
+	jclass dbclass;
+	void *statp;
+
+	retval = NULL;
+	statp = NULL;
+
+	db = get_DB(jnienv, jthis);
+	if (!verify_non_null(jnienv, db))
+		return (NULL);
+
+	if (verify_return(jnienv, db->stat(db, &statp, flags), 0) &&
+	    verify_return(jnienv, db->get_type(db, &dbtype), 0)) {
+		switch (dbtype) {
+		/* Btree and recno share the same stat structure */
+		case DB_BTREE:
+		case DB_RECNO:
+			bstp = (DB_BTREE_STAT *)statp;
+			retval = create_default_object(jnienv,
+						       name_DB_BTREE_STAT);
+			if ((dbclass =
+			    get_class(jnienv, name_DB_BTREE_STAT)) == NULL)
+				break;	/* An exception has been posted. */
+
+			__jv_fill_bt_stat(jnienv, dbclass, retval, bstp);
+			break;
+
+		/* Hash stat structure */
+		case DB_HASH:
+			hstp = (DB_HASH_STAT *)statp;
+			retval = create_default_object(jnienv,
+						       name_DB_HASH_STAT);
+			if ((dbclass =
+			    get_class(jnienv, name_DB_HASH_STAT)) == NULL)
+				break;	/* An exception has been posted. */
+
+			__jv_fill_h_stat(jnienv, dbclass, retval, hstp);
+			break;
+
+		case DB_QUEUE:
+			qstp = (DB_QUEUE_STAT *)statp;
+			retval = create_default_object(jnienv,
+						       name_DB_QUEUE_STAT);
+			if ((dbclass =
+			    get_class(jnienv, name_DB_QUEUE_STAT)) == NULL)
+				break;	/* An exception has been posted. */
+
+			__jv_fill_qam_stat(jnienv, dbclass, retval, qstp);
+			break;
+
+		/* That's all the database types we're aware of! */
+		default:
+			report_exception(jnienv,
+					 "Db.stat not implemented for types"
+					 " other than BTREE, HASH, QUEUE,"
+					 " and RECNO",
+					 EINVAL, 0);
+			break;
+		}
+		/*
+		 * db->stat allocated statp in all of the above cases;
+		 * free it regardless of which branch was taken.
+		 */
+		if (statp != NULL)
+			__os_ufree(db->dbenv, statp);
+	}
+	return (retval);
+}
+
+JAVADB_METHOD(Db_sync, (JAVADB_ARGS, jint flags), DB,
+ sync, (c_this, flags))
+
+/*
+ * Db.get_byteswapped: true iff the underlying file's byte order is
+ * swapped relative to the host.
+ */
+JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_Db_get_1byteswapped
+  (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+	DB *db;
+	int err, isbyteswapped;
+
+	/* This value should never be seen, because of the exception. */
+	isbyteswapped = 0;
+
+	db = get_DB(jnienv, jthis);
+	if (!verify_non_null(jnienv, db))
+		return (0);
+
+	err = db->get_byteswapped(db, &isbyteswapped);
+	(void)verify_return(jnienv, err, 0);
+
+	return ((jboolean)isbyteswapped);
+}
+
+/*
+ * Db.get_type: return the DBTYPE of the open database
+ * (DB_UNKNOWN if the call fails and an exception is posted).
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1type
+  (JNIEnv *jnienv, /*Db*/ jobject jthis)
+{
+	DB *db;
+	int err;
+	DBTYPE dbtype;
+
+	/* This value should never be seen, because of the exception. */
+	dbtype = DB_UNKNOWN;
+
+	db = get_DB(jnienv, jthis);
+	if (!verify_non_null(jnienv, db))
+		return (0);
+
+	err = db->get_type(db, &dbtype);
+	(void)verify_return(jnienv, err, 0);
+
+	return ((jint)dbtype);
+}
+
+/*
+ * Db._open: wrap DB->open.  DB_THREAD is always OR'd into the flags
+ * because Java callers are assumed to be multithreaded.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
+  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+   jstring file, jstring database, jint type, jint flags, jint mode)
+{
+	int err;
+	DB *db;
+	DB_TXN *dbtxnid;
+	LOCKED_STRING ls_file;
+	LOCKED_STRING ls_database;
+
+	/* Java is assumed to be threaded */
+	flags |= DB_THREAD;
+
+	db = get_DB(jnienv, jthis);
+
+	dbtxnid = get_DB_TXN(jnienv, txnid);
+	if (locked_string_get(&ls_file, jnienv, file) != 0)
+		goto out2;
+	if (locked_string_get(&ls_database, jnienv, database) != 0)
+		goto out1;
+	if (verify_non_null(jnienv, db)) {
+		err = db->open(db, dbtxnid, ls_file.string, ls_database.string,
+		    (DBTYPE)type, flags, mode);
+		verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+	}
+ out1:
+	locked_string_put(&ls_database, jnienv);
+ out2:
+	locked_string_put(&ls_file, jnienv);
+}
+
+/*
+ * Db.truncate: discard all records from the database, returning the
+ * number of records discarded (0 if the handle is invalid).
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_truncate
+  (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject jtxnid, jint flags)
+{
+	int err;
+	DB *db;
+	u_int32_t count;
+	DB_TXN *dbtxnid;
+
+	db = get_DB(jnienv, jthis);
+	dbtxnid = get_DB_TXN(jnienv, jtxnid);
+	count = 0;
+	if (verify_non_null(jnienv, db)) {
+		err = db->truncate(db, dbtxnid, &count, flags);
+		verify_return(jnienv, err, 0);
+	}
+	return (jint)count;
+}
+
+/*
+ * Db.upgrade: upgrade the database file `name' to the current format.
+ *
+ * Fix: the old code fell through to locked_string_put() on an
+ * uninitialized LOCKED_STRING when the DB handle was null (undefined
+ * behavior); a null handle now returns immediately after posting the
+ * exception, matching the pattern used by the other methods here.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
+  (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring name,
+   jint flags)
+{
+	int err;
+	DB *db = get_DB(jnienv, jthis);
+	LOCKED_STRING ls_name;
+
+	if (!verify_non_null(jnienv, db))
+		return;		/* exception posted; ls_name never acquired */
+	if (locked_string_get(&ls_name, jnienv, name) != 0)
+		goto out;
+	err = db->upgrade(db, ls_name.string, flags);
+	verify_return(jnienv, err, 0);
+ out:
+	locked_string_put(&ls_name, jnienv);
+}
+
+/*
+ * Callback invoked repeatedly by __db_verify_internal with chunks of
+ * verification output.  Grows the shared jbyteArray when a chunk does
+ * not fit, then forwards the text (without its trailing NUL) to the
+ * user's OutputStream.write(byte[], int, int).
+ * Returns EIO if a Java exception is pending, else 0.
+ */
+static int java_verify_callback(void *handle, const void *str_arg)
+{
+	char *str;
+	struct verify_callback_struct *vc;
+	int len;
+	JNIEnv *jnienv;
+
+	str = (char *)str_arg;
+	vc = (struct verify_callback_struct *)handle;
+	jnienv = vc->env;
+	len = strlen(str)+1;
+	if (len > vc->nbytes) {
+		vc->nbytes = len;
+		vc->bytes = (*jnienv)->NewByteArray(jnienv, len);
+	}
+
+	/* vc->bytes may be NULL if NewByteArray failed; skip the write. */
+	if (vc->bytes != NULL) {
+		(*jnienv)->SetByteArrayRegion(jnienv, vc->bytes, 0, len,
+		    (jbyte*)str);
+		(*jnienv)->CallVoidMethod(jnienv, vc->streamobj,
+		    vc->writemid, vc->bytes, 0, len-1);
+	}
+
+	if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
+		return (EIO);
+
+	return (0);
+}
+
+/*
+ * Db.verify: run database verification, streaming the textual report
+ * to the supplied java.io.OutputStream via java_verify_callback.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
+  (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring name,
+   jstring subdb, jobject stream, jint flags)
+{
+	int err;
+	DB *db;
+	LOCKED_STRING ls_name;
+	LOCKED_STRING ls_subdb;
+	struct verify_callback_struct vcs;
+	jclass streamclass;
+
+	db = get_DB(jnienv, jthis);
+	if (!verify_non_null(jnienv, db))
+		return;
+	if (locked_string_get(&ls_name, jnienv, name) != 0)
+		goto out2;
+	if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
+		goto out1;
+
+	/* set up everything we need for the callbacks */
+	vcs.env = jnienv;
+	vcs.streamobj = stream;
+	vcs.nbytes = 100;
+	if ((vcs.bytes = (*jnienv)->NewByteArray(jnienv, vcs.nbytes)) == NULL)
+		goto out1;
+
+	/* get the method ID for OutputStream.write(byte[], int, int); */
+	streamclass = (*jnienv)->FindClass(jnienv, "java/io/OutputStream");
+	vcs.writemid = (*jnienv)->GetMethodID(jnienv, streamclass,
+	    "write", "([BII)V");
+
+	/* invoke verify - this will invoke the callback repeatedly. */
+	err = __db_verify_internal(db, ls_name.string, ls_subdb.string,
+	    &vcs, java_verify_callback, flags);
+	verify_return(jnienv, err, 0);
+
+out1:
+	locked_string_put(&ls_subdb, jnienv);
+out2:
+	locked_string_put(&ls_name, jnienv);
+}
+
+/*
+ * Db._finalize: called by the garbage collector.  We cannot safely
+ * auto-close the handle here (see the comment below), so an unclosed
+ * Db is only reported via the errcall and then detached.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
+  (JNIEnv *jnienv, jobject jthis,
+   jobject /*DbErrcall*/ errcall, jstring errpfx)
+{
+	DB_JAVAINFO *dbinfo;
+	DB *db;
+
+	dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+	db = get_DB(jnienv, jthis);
+	DB_ASSERT(dbinfo != NULL);
+
+	/*
+	 * Note: We can never be sure if the underlying DB is attached to
+	 * a DB_ENV that was already closed.  Sure, that's a user error,
+	 * but it shouldn't crash the VM.  Therefore, we cannot just
+	 * automatically close if the handle indicates we are not yet
+	 * closed.  The best we can do is detect this and report it.
+	 */
+	if (db != NULL) {
+		/* If this error occurs, this object was never closed. */
+		report_errcall(jnienv, errcall, errpfx,
+		    "Db.finalize: open Db object destroyed");
+	}
+
+	/* Shouldn't see this object again, but just in case */
+	set_private_dbobj(jnienv, name_DB, jthis, 0);
+	set_private_info(jnienv, name_DB, jthis, 0);
+
+	dbji_destroy(dbinfo, jnienv);
+}
diff --git a/storage/bdb/libdb_java/java_DbEnv.c b/storage/bdb/libdb_java/java_DbEnv.c
new file mode 100644
index 00000000000..651c38a0e3d
--- /dev/null
+++ b/storage/bdb/libdb_java/java_DbEnv.c
@@ -0,0 +1,1450 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbEnv.c,v 11.105 2002/08/29 14:22:23 margo Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "java_stat_auto.h"
+#include "com_sleepycat_db_DbEnv.h"
+
+/* We keep these lined up, and alphabetical by field name,
+ * for comparison with C++'s list.
+ */
+JAVADB_SET_METH_STR(DbEnv, data_1dir, DB_ENV, data_dir)
+JAVADB_SET_METH(DbEnv, jint, lg_1bsize, DB_ENV, lg_bsize)
+JAVADB_SET_METH_STR(DbEnv, lg_1dir, DB_ENV, lg_dir)
+JAVADB_SET_METH(DbEnv, jint, lg_1max, DB_ENV, lg_max)
+JAVADB_SET_METH(DbEnv, jint, lg_1regionmax, DB_ENV, lg_regionmax)
+JAVADB_SET_METH(DbEnv, jint, lk_1detect, DB_ENV, lk_detect)
+JAVADB_SET_METH(DbEnv, jint, lk_1max, DB_ENV, lk_max)
+JAVADB_SET_METH(DbEnv, jint, lk_1max_1locks, DB_ENV, lk_max_locks)
+JAVADB_SET_METH(DbEnv, jint, lk_1max_1lockers, DB_ENV, lk_max_lockers)
+JAVADB_SET_METH(DbEnv, jint, lk_1max_1objects, DB_ENV, lk_max_objects)
+/* mp_mmapsize is declared below, it needs an extra cast */
+JAVADB_SET_METH_STR(DbEnv, tmp_1dir, DB_ENV, tmp_dir)
+JAVADB_SET_METH(DbEnv, jint, tx_1max, DB_ENV, tx_max)
+
+/*
+ * C error callback installed on every DB_ENV.  The "prefix" argument
+ * is really the DB_ENV_JAVAINFO pointer (set in DbEnv_initialize),
+ * which lets us route the message to the Java errcall object.
+ */
+static void DbEnv_errcall_callback(const char *prefix, char *message)
+{
+	JNIEnv *jnienv;
+	DB_ENV_JAVAINFO *envinfo = (DB_ENV_JAVAINFO *)prefix;
+	jstring pre;
+
+	/*
+	 * Note: these error cases are "impossible", and would
+	 * normally warrant an exception.  However, without
+	 * a jnienv, we cannot throw an exception...
+	 * We don't want to trap or exit, since the point of
+	 * this facility is for the user to completely control
+	 * error situations.
+	 */
+	if (envinfo == NULL) {
+		/*
+		 * Something is *really* wrong here, the
+		 * prefix is set in every environment created.
+		 */
+		fprintf(stderr, "Error callback failed!\n");
+		fprintf(stderr, "error: %s\n", message);
+		return;
+	}
+
+	/* Should always succeed... */
+	jnienv = dbjie_get_jnienv(envinfo);
+
+	if (jnienv == NULL) {
+
+		/* But just in case... */
+		fprintf(stderr, "Cannot attach to current thread!\n");
+		fprintf(stderr, "error: %s\n", message);
+		return;
+	}
+
+	pre = dbjie_get_errpfx(envinfo, jnienv);
+	report_errcall(jnienv, dbjie_get_errcall(envinfo), pre, message);
+}
+
+/*
+ * Attach a freshly created DB_ENV to its Java wrapper: build the
+ * DB_ENV_JAVAINFO, install it as the error "prefix" plus the error
+ * callback, and store the native pointer in the object's private field.
+ */
+static void DbEnv_initialize(JNIEnv *jnienv, DB_ENV *dbenv,
+			     /*DbEnv*/ jobject jenv,
+			     /*DbErrcall*/ jobject jerrcall,
+			     int is_dbopen)
+{
+	DB_ENV_JAVAINFO *envinfo;
+
+	envinfo = get_DB_ENV_JAVAINFO(jnienv, jenv);
+	DB_ASSERT(envinfo == NULL);
+	envinfo = dbjie_construct(jnienv, jenv, jerrcall, is_dbopen);
+	set_private_info(jnienv, name_DB_ENV, jenv, envinfo);
+	dbenv->set_errpfx(dbenv, (const char*)envinfo);
+	dbenv->set_errcall(dbenv, DbEnv_errcall_callback);
+	dbenv->api2_internal = envinfo;
+	set_private_dbobj(jnienv, name_DB_ENV, jenv, dbenv);
+}
+
+/*
+ * This is called when this DbEnv was made on behalf of a Db
+ * created directly (without a parent DbEnv), and the Db is
+ * being closed.  We'll zero out the pointer to the DB_ENV,
+ * since it is no longer valid, to prevent mistakes.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1notify_1db_1close
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+	DB_ENV_JAVAINFO *dbenvinfo;
+
+	/* Detach the native pointer first, then free the java info. */
+	set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (dbenvinfo != NULL)
+		dbjie_dealloc(dbenvinfo, jnienv);
+}
+
+/* DbEnv.feedback_changed: (re)install the Java feedback callback object. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis,
+   /*DbEnvFeedback*/ jobject jfeedback)
+{
+	DB_ENV *dbenv;
+	DB_ENV_JAVAINFO *dbenvinfo;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv) ||
+	    !verify_non_null(jnienv, dbenvinfo))
+		return;
+
+	dbjie_set_feedback_object(dbenvinfo, jnienv, dbenv, jfeedback);
+}
+
+/* DbEnv._init: create the underlying DB_ENV and bind it to this object. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject /*DbErrcall*/ jerrcall,
+   jint flags)
+{
+	int err;
+	DB_ENV *dbenv;
+
+	err = db_env_create(&dbenv, flags);
+	if (verify_return(jnienv, err, 0))
+		DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 0);
+}
+
+/*
+ * DbEnv._init_using_db: bind this DbEnv wrapper to the implicit
+ * environment of an already-created Db.
+ *
+ * Fix: get_DB's result is now checked before dereferencing db->dbenv;
+ * the old code crashed the VM on a null/closed Db handle, unlike
+ * every other method in this file.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject /*DbErrcall*/ jerrcall,
+   /*Db*/ jobject jdb)
+{
+	DB_ENV *dbenv;
+	DB *db;
+
+	db = get_DB(jnienv, jdb);
+	if (!verify_non_null(jnienv, db))
+		return;		/* exception posted */
+	dbenv = db->dbenv;
+	DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 0);
+}
+
+/* DbEnv.open: wrap DB_ENV->open; DB_THREAD is forced on for Java. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring db_home,
+   jint flags, jint mode)
+{
+	int err;
+	DB_ENV *dbenv;
+	LOCKED_STRING ls_home;
+	DB_ENV_JAVAINFO *dbenvinfo;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv) ||
+	    !verify_non_null(jnienv, dbenvinfo))
+		return;
+	if (locked_string_get(&ls_home, jnienv, db_home) != 0)
+		goto out;
+
+	/* Java is assumed to be threaded. */
+	flags |= DB_THREAD;
+
+	err = dbenv->open(dbenv, ls_home.string, flags, mode);
+	verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
+ out:
+	locked_string_put(&ls_home, jnienv);
+}
+
+/*
+ * DbEnv.remove: wrap DB_ENV->remove.  The C handle is consumed by
+ * remove(), so the private pointer and java info are released here.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring db_home, jint flags)
+{
+	DB_ENV *dbenv;
+	DB_ENV_JAVAINFO *dbenvinfo;
+	LOCKED_STRING ls_home;
+	int err = 0;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return;
+	if (locked_string_get(&ls_home, jnienv, db_home) != 0)
+		goto out;
+
+	err = dbenv->remove(dbenv, ls_home.string, flags);
+	set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+
+	verify_return(jnienv, err, 0);
+ out:
+	locked_string_put(&ls_home, jnienv);
+
+	if (dbenvinfo != NULL)
+		dbjie_dealloc(dbenvinfo, jnienv);
+}
+
+/*
+ * DbEnv._close: wrap DB_ENV->close.  The handle is invalid afterwards
+ * whatever close returns, so we detach and free before throwing.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+	int err;
+	DB_ENV *dbenv;
+	DB_ENV_JAVAINFO *dbenvinfo;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return;
+
+	err = dbenv->close(dbenv, flags);
+	set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+
+	if (dbenvinfo != NULL)
+		dbjie_dealloc(dbenvinfo, jnienv);
+
+	/* Throw an exception if the close failed. */
+	verify_return(jnienv, err, 0);
+}
+
+/* DbEnv.dbremove: remove a database (or sub-database) within this env. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbremove
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject jtxn,
+   jstring name, jstring subdb, jint flags)
+{
+	LOCKED_STRING ls_name, ls_subdb;
+	DB_ENV *dbenv;
+	DB_TXN *txn;
+	int err;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return;
+	txn = get_DB_TXN(jnienv, jtxn);
+	if (locked_string_get(&ls_name, jnienv, name) != 0)
+		return;
+	if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
+		goto err1;
+
+	err = dbenv->dbremove(dbenv, txn, ls_name.string, ls_subdb.string,
+	    flags);
+
+	/* Throw an exception if the dbremove failed. */
+	verify_return(jnienv, err, 0);
+
+	locked_string_put(&ls_subdb, jnienv);
+err1:	locked_string_put(&ls_name, jnienv);
+}
+
+/* DbEnv.dbrename: rename a database (or sub-database) within this env. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbrename
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject jtxn,
+   jstring name, jstring subdb, jstring newname, jint flags)
+{
+	LOCKED_STRING ls_name, ls_subdb, ls_newname;
+	DB_ENV *dbenv;
+	DB_TXN *txn;
+	int err;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return;
+	txn = get_DB_TXN(jnienv, jtxn);
+	if (locked_string_get(&ls_name, jnienv, name) != 0)
+		return;
+	if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
+		goto err2;
+	if (locked_string_get(&ls_newname, jnienv, newname) != 0)
+		goto err1;
+
+	err = dbenv->dbrename(dbenv, txn, ls_name.string, ls_subdb.string,
+	    ls_newname.string, flags);
+
+	/* Throw an exception if the dbrename failed. */
+	verify_return(jnienv, err, 0);
+
+	locked_string_put(&ls_newname, jnienv);
+err1:	locked_string_put(&ls_subdb, jnienv);
+err2:	locked_string_put(&ls_name, jnienv);
+}
+
+/* DbEnv.err: forward a message plus error code to the env's error stream. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_err
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint ecode, jstring msg)
+{
+	LOCKED_STRING ls_msg;
+	DB_ENV *dbenv;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return;
+
+	if (locked_string_get(&ls_msg, jnienv, msg) != 0)
+		goto out;
+
+	dbenv->err(dbenv, ecode, "%s", ls_msg.string);
+ out:
+	locked_string_put(&ls_msg, jnienv);
+}
+
+/* DbEnv.errx: forward a message (no error code) to the env's error stream. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_errx
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring msg)
+{
+	LOCKED_STRING ls_msg;
+	DB_ENV *dbenv;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return;
+
+	if (locked_string_get(&ls_msg, jnienv, msg) != 0)
+		goto out;
+
+	dbenv->errx(dbenv, "%s", ls_msg.string);
+ out:
+	locked_string_put(&ls_msg, jnienv);
+}
+
+/*static*/
+/* DbEnv.strerror: map a DB error code to its message string. */
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_strerror
+  (JNIEnv *jnienv, jclass jthis_class, jint ecode)
+{
+	const char *message;
+
+	COMPQUIET(jthis_class, NULL);
+	message = db_strerror(ecode);
+	return (get_java_string(jnienv, message));
+}
+
+JAVADB_METHOD(DbEnv_set_1cachesize,
+ (JAVADB_ARGS, jint gbytes, jint bytes, jint ncaches), DB_ENV,
+ set_cachesize, (c_this, gbytes, bytes, ncaches))
+
+/* DbEnv.set_encrypt: wrap DB_ENV->set_encrypt with the given password. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1encrypt
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring jpasswd, jint flags)
+{
+	int err;
+	DB_ENV *dbenv;
+	LOCKED_STRING ls_passwd;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return;
+	if (locked_string_get(&ls_passwd, jnienv, jpasswd) != 0)
+		goto out;
+
+	err = dbenv->set_encrypt(dbenv, ls_passwd.string, flags);
+	verify_return(jnienv, err, 0);
+
+out:	locked_string_put(&ls_passwd, jnienv);
+}
+
+JAVADB_METHOD(DbEnv_set_1flags,
+ (JAVADB_ARGS, jint flags, jboolean onoff), DB_ENV,
+ set_flags, (c_this, flags, onoff ? 1 : 0))
+
+JAVADB_METHOD(DbEnv_set_1mp_1mmapsize, (JAVADB_ARGS, jlong value), DB_ENV,
+ set_mp_mmapsize, (c_this, (size_t)value))
+
+JAVADB_METHOD(DbEnv_set_1tas_1spins, (JAVADB_ARGS, jint spins), DB_ENV,
+ set_tas_spins, (c_this, (u_int32_t)spins))
+
+JAVADB_METHOD(DbEnv_set_1timeout,
+ (JAVADB_ARGS, jlong timeout, jint flags), DB_ENV,
+ set_timeout, (c_this, (u_int32_t)timeout, flags))
+
+/*
+ * DbEnv.set_lk_conflicts: flatten the len x len Java byte[][] conflict
+ * matrix into one malloc'd u_char array and hand it to
+ * DB_ENV->set_lk_conflicts; ownership of the array passes to the
+ * java info struct via dbjie_set_conflict.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobjectArray array)
+{
+	DB_ENV *dbenv;
+	DB_ENV_JAVAINFO *dbenvinfo;
+	int err;
+	jsize i, len;
+	u_char *newarr;
+	int bytesize;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv) ||
+	    !verify_non_null(jnienv, dbenvinfo))
+		return;
+
+	len = (*jnienv)->GetArrayLength(jnienv, array);
+	bytesize = sizeof(u_char) * len * len;
+
+	if ((err = __os_malloc(dbenv, bytesize, &newarr)) != 0) {
+		if (!verify_return(jnienv, err, 0))
+			return;
+	}
+
+	/* Copy each row of the Java matrix into the flat C array.
+	 * NOTE(review): the subArray local refs are not deleted here;
+	 * presumably acceptable for small matrices -- confirm. */
+	for (i=0; i<len; i++) {
+		jobject subArray =
+		    (*jnienv)->GetObjectArrayElement(jnienv, array, i);
+		(*jnienv)->GetByteArrayRegion(jnienv, (jbyteArray)subArray,
+		    0, len,
+		    (jbyte *)&newarr[i*len]);
+	}
+	dbjie_set_conflict(dbenvinfo, newarr, bytesize);
+	err = dbenv->set_lk_conflicts(dbenv, newarr, len);
+	verify_return(jnienv, err, 0);
+}
+
+/*
+ * DbEnv.rep_elect: hold a replication election; returns the elected
+ * environment id (DB_EID_INVALID on a null handle).
+ */
+JNIEXPORT jint JNICALL
+  Java_com_sleepycat_db_DbEnv_rep_1elect
+  (JNIEnv *jnienv, /* DbEnv */ jobject jthis, jint nsites, jint pri,
+   jint timeout)
+{
+	DB_ENV *dbenv;
+	int err, id;
+
+	if (!verify_non_null(jnienv, jthis))
+		return (DB_EID_INVALID);
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+
+	err = dbenv->rep_elect(dbenv, (int)nsites,
+	    (int)pri, (u_int32_t)timeout, &id);
+	verify_return(jnienv, err, 0);
+
+	return ((jint)id);
+}
+
+/*
+ * DbEnv.rep_process_message: feed an incoming replication message to
+ * DB.  On DB_REP_NEWMASTER the new master's env id is stored back into
+ * the RepProcessMessage result object; other non-OK codes throw.
+ */
+JNIEXPORT jint JNICALL
+  Java_com_sleepycat_db_DbEnv_rep_1process_1message
+  (JNIEnv *jnienv, /* DbEnv */ jobject jthis, /* Dbt */ jobject control,
+   /* Dbt */ jobject rec, /* RepProcessMessage */ jobject result)
+{
+	DB_ENV *dbenv;
+	LOCKED_DBT cdbt, rdbt;
+	int err, envid;
+
+	if (!verify_non_null(jnienv, jthis) || !verify_non_null(jnienv, result))
+		return (-1);
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	err = 0;
+
+	/* The DBTs are always inputs. */
+	if (locked_dbt_get(&cdbt, jnienv, dbenv, control, inOp) != 0)
+		goto out2;
+	if (locked_dbt_get(&rdbt, jnienv, dbenv, rec, inOp) != 0)
+		goto out1;
+
+	envid = (*jnienv)->GetIntField(jnienv,
+	    result, fid_RepProcessMessage_envid);
+
+	err = dbenv->rep_process_message(dbenv, &cdbt.javainfo->dbt,
+	    &rdbt.javainfo->dbt, &envid);
+
+	if (err == DB_REP_NEWMASTER)
+		(*jnienv)->SetIntField(jnienv,
+		    result, fid_RepProcessMessage_envid, envid);
+	else if (!DB_RETOK_REPPMSG(err))
+		verify_return(jnienv, err, 0);
+
+out1:	locked_dbt_put(&rdbt, jnienv, dbenv);
+out2:	locked_dbt_put(&cdbt, jnienv, dbenv);
+
+	return (err);
+}
+
+/*
+ * DbEnv.rep_start: declare this env a replication client or master.
+ * A null cookie Dbt is passed through to DB as a NULL DBT.
+ */
+JNIEXPORT void JNICALL
+  Java_com_sleepycat_db_DbEnv_rep_1start
+  (JNIEnv *jnienv, /* DbEnv */ jobject jthis, /* Dbt */ jobject cookie,
+   jint flags)
+{
+	DB_ENV *dbenv;
+	DBT *dbtp;
+	LOCKED_DBT ldbt;
+	int err;
+
+	if (!verify_non_null(jnienv, jthis))
+		return;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+
+	/* The Dbt cookie may be null;  if so, pass in a NULL DBT. */
+	if (cookie != NULL) {
+		if (locked_dbt_get(&ldbt, jnienv, dbenv, cookie, inOp) != 0)
+			goto out;
+		dbtp = &ldbt.javainfo->dbt;
+	} else
+		dbtp = NULL;
+
+	err = dbenv->rep_start(dbenv, dbtp, flags);
+	verify_return(jnienv, err, 0);
+
+out:	if (cookie != NULL)
+		locked_dbt_put(&ldbt, jnienv, dbenv);
+}
+
+/*
+ * DbEnv.rep_stat: return a filled DbRepStat object, or NULL with an
+ * exception pending.  The C stat struct is freed before returning.
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_rep_1stat
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+	int err;
+	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+	DB_REP_STAT *statp = NULL;
+	jobject retval = NULL;
+	jclass dbclass;
+
+	if (!verify_non_null(jnienv, dbenv))
+		return (NULL);
+
+	err = dbenv->rep_stat(dbenv, &statp, (u_int32_t)flags);
+	if (verify_return(jnienv, err, 0)) {
+		if ((dbclass = get_class(jnienv, name_DB_REP_STAT)) == NULL ||
+		    (retval =
+		    create_default_object(jnienv, name_DB_REP_STAT)) == NULL)
+			goto err;	/* An exception has been posted. */
+
+		__jv_fill_rep_stat(jnienv, dbclass, retval, statp);
+
+err:		__os_ufree(dbenv, statp);
+	}
+	return (retval);
+}
+
+/* DbEnv.set_rep_limit: bound the data sent per replication response. */
+JNIEXPORT void JNICALL
+Java_com_sleepycat_db_DbEnv_set_1rep_1limit
+  (JNIEnv *jnienv, /* DbEnv */ jobject jthis, jint gbytes, jint bytes)
+{
+	DB_ENV *dbenv;
+	int err;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+
+	if (verify_non_null(jnienv, dbenv)) {
+		err = dbenv->set_rep_limit(dbenv,
+		    (u_int32_t)gbytes, (u_int32_t)bytes);
+		verify_return(jnienv, err, 0);
+	}
+}
+
+/* DbEnv.rep_transport_changed: (re)install the Java replication transport. */
+JNIEXPORT void JNICALL
+  Java_com_sleepycat_db_DbEnv_rep_1transport_1changed
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint envid,
+   /* DbRepTransport */ jobject jreptransport)
+{
+	DB_ENV *dbenv;
+	DB_ENV_JAVAINFO *dbenvinfo;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv) ||
+	    !verify_non_null(jnienv, dbenvinfo) ||
+	    !verify_non_null(jnienv, jreptransport))
+		return;
+
+	dbjie_set_rep_transport_object(dbenvinfo,
+	    jnienv, dbenv, envid, jreptransport);
+}
+
+/*
+ * DbEnv.set_rpc_server: connect this environment to an RPC server.
+ * NOTE(review): `host' obtained from GetStringUTFChars is never
+ * released (leaks on every path), and jhost is not checked for null
+ * before use -- verify set_rpc_server's copy semantics and repair.
+ */
+JNIEXPORT void JNICALL
+  Java_com_sleepycat_db_DbEnv_set_1rpc_1server
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbClient*/ jobject jclient,
+   jstring jhost, jlong tsec, jlong ssec, jint flags)
+{
+	int err;
+	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+	const char *host = (*jnienv)->GetStringUTFChars(jnienv, jhost, NULL);
+
+	if (jclient != NULL) {
+		report_exception(jnienv, "DbEnv.set_rpc_server client arg "
+		    "must be null; reserved for future use",
+		    EINVAL, 0);
+		return;
+	}
+	if (verify_non_null(jnienv, dbenv)) {
+		err = dbenv->set_rpc_server(dbenv, NULL, host,
+		    (long)tsec, (long)ssec, flags);
+
+		/* Throw an exception if the call failed. */
+		verify_return(jnienv, err, 0);
+	}
+}
+
+JAVADB_METHOD(DbEnv_set_1shm_1key, (JAVADB_ARGS, jlong shm_key), DB_ENV,
+ set_shm_key, (c_this, (long)shm_key))
+
+/* DbEnv._set_tx_timestamp: recover to the given time (seconds since epoch). */
+JNIEXPORT void JNICALL
+  Java_com_sleepycat_db_DbEnv__1set_1tx_1timestamp
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jlong seconds)
+{
+	int err;
+	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+	time_t time = seconds;
+
+	if (verify_non_null(jnienv, dbenv)) {
+		err = dbenv->set_tx_timestamp(dbenv, &time);
+
+		/* Throw an exception if the call failed. */
+		verify_return(jnienv, err, 0);
+	}
+}
+
+JAVADB_METHOD(DbEnv_set_1verbose,
+ (JAVADB_ARGS, jint which, jboolean onoff), DB_ENV,
+ set_verbose, (c_this, which, onoff ? 1 : 0))
+
+/*
+ * The four static accessors below expose the compile-time
+ * DB_VERSION_* constants of the library this JNI layer was built
+ * against; none of them touches a DB_ENV handle.
+ */
+/*static*/
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1major
+  (JNIEnv * jnienv, jclass this_class)
+{
+	COMPQUIET(jnienv, NULL);
+	COMPQUIET(this_class, NULL);
+
+	return (DB_VERSION_MAJOR);
+}
+
+/*static*/
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1minor
+  (JNIEnv * jnienv, jclass this_class)
+{
+	COMPQUIET(jnienv, NULL);
+	COMPQUIET(this_class, NULL);
+
+	return (DB_VERSION_MINOR);
+}
+
+/*static*/
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1patch
+  (JNIEnv * jnienv, jclass this_class)
+{
+	COMPQUIET(jnienv, NULL);
+	COMPQUIET(this_class, NULL);
+
+	return (DB_VERSION_PATCH);
+}
+
+/*static*/
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1string
+  (JNIEnv *jnienv, jclass this_class)
+{
+	COMPQUIET(this_class, NULL);
+
+	return ((*jnienv)->NewStringUTF(jnienv, DB_VERSION_STRING));
+}
+
+/* DbEnv.lock_id: allocate and return a new locker id (-1 on null handle). */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+{
+	int err;
+	u_int32_t id;
+	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+
+	if (!verify_non_null(jnienv, dbenv))
+		return (-1);
+	err = dbenv->lock_id(dbenv, &id);
+	verify_return(jnienv, err, 0);
+	return (id);
+}
+
+JAVADB_METHOD(DbEnv_lock_1id_1free, (JAVADB_ARGS, jint id), DB_ENV,
+ lock_id_free, (c_this, id))
+
+/*
+ * DbEnv.lock_stat: return a filled DbLockStat object, or NULL with an
+ * exception pending.  The C stat struct is freed before returning.
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+	int err;
+	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+	DB_LOCK_STAT *statp = NULL;
+	jobject retval = NULL;
+	jclass dbclass;
+
+	if (!verify_non_null(jnienv, dbenv))
+		return (NULL);
+
+	err = dbenv->lock_stat(dbenv, &statp, (u_int32_t)flags);
+	if (verify_return(jnienv, err, 0)) {
+		if ((dbclass = get_class(jnienv, name_DB_LOCK_STAT)) == NULL ||
+		    (retval =
+		    create_default_object(jnienv, name_DB_LOCK_STAT)) == NULL)
+			goto err;	/* An exception has been posted. */
+
+		__jv_fill_lock_stat(jnienv, dbclass, retval, statp);
+
+err:		__os_ufree(dbenv, statp);
+	}
+	return (retval);
+}
+
+/*
+ * DbEnv.lock_detect: run deadlock detection with the given policy;
+ * returns the number of lock requests rejected/aborted.
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1detect
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint atype, jint flags)
+{
+	int err;
+	DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+	int aborted;
+
+	if (!verify_non_null(jnienv, dbenv))
+		return (0);
+	err = dbenv->lock_detect(dbenv, atype, flags, &aborted);
+	verify_return(jnienv, err, 0);
+	return (aborted);
+}
+
+/*
+ * DbEnv.lock_get: acquire a lock on `obj' for the given locker and
+ * mode, returning a DbLock wrapper, or NULL with an exception pending
+ * (a NotGranted exception for DB_LOCK_NOTGRANTED).
+ *
+ * Fix: the malloc'd DB_LOCK was leaked on every failure path
+ * (locked_dbt_get failure, not-granted, or lock_get error); it is now
+ * freed whenever no DbLock object took ownership of it.
+ */
+JNIEXPORT /*DbLock*/ jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
+  (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*u_int32_t*/ jint locker,
+   jint flags, /*const Dbt*/ jobject obj, /*db_lockmode_t*/ jint lock_mode)
+{
+	int err;
+	DB_ENV *dbenv;
+	DB_LOCK *dblock;
+	LOCKED_DBT lobj;
+	/*DbLock*/ jobject retval;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return (NULL);
+
+	if ((err = __os_malloc(dbenv, sizeof(DB_LOCK), &dblock)) != 0)
+		if (!verify_return(jnienv, err, 0))
+			return (NULL);
+
+	memset(dblock, 0, sizeof(DB_LOCK));
+	err = 0;
+	retval = NULL;
+	if (locked_dbt_get(&lobj, jnienv, dbenv, obj, inOp) != 0)
+		goto out;
+
+	err = dbenv->lock_get(dbenv, locker, flags, &lobj.javainfo->dbt,
+	    (db_lockmode_t)lock_mode, dblock);
+
+	if (err == DB_LOCK_NOTGRANTED)
+		report_notgranted_exception(jnienv,
+		    "DbEnv.lock_get not granted",
+		    DB_LOCK_GET, lock_mode, obj,
+		    NULL, -1);
+	else if (verify_return(jnienv, err, 0)) {
+		/* Success: the DbLock object now owns the DB_LOCK. */
+		retval = create_default_object(jnienv, name_DB_LOCK);
+		set_private_dbobj(jnienv, name_DB_LOCK, retval, dblock);
+	}
+
+ out:
+	locked_dbt_put(&lobj, jnienv, dbenv);
+	/* No DbLock took ownership; release the DB_LOCK storage. */
+	if (retval == NULL)
+		__os_free(dbenv, dblock);
+	return (retval);
+}
+
+/*
+ * DbEnv.lock_vec: perform a batch of lock requests atomically.
+ * Completed DB_LOCK_GET/DB_LOCK_GET_TIMEOUT requests get a freshly
+ * allocated java DbLock stored back into the request; completed
+ * DB_LOCK_PUT requests have their DbLock storage released.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1vec
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*u_int32_t*/ jint locker,
+	jint flags, /*const Dbt*/ jobjectArray list, jint offset, jint count)
+{
+	DB_ENV *dbenv;
+	DB_LOCKREQ *lockreq;
+	DB_LOCKREQ *prereq;	/* preprocessed requests */
+	DB_LOCKREQ *failedreq;
+	DB_LOCK *lockp;
+	LOCKED_DBT *locked_dbts;
+	int err;
+	int alloc_err;
+	int i;
+	size_t bytesize;
+	size_t ldbtsize;
+	jobject jlockreq;
+	db_lockop_t op;
+	jobject jobj;
+	jobject jlock;
+	int completed;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		goto out0;
+
+	if ((*jnienv)->GetArrayLength(jnienv, list) < offset + count) {
+		report_exception(jnienv,
+		    "DbEnv.lock_vec array not large enough",
+		    0, 0);
+		goto out0;
+	}
+
+	bytesize = sizeof(DB_LOCKREQ) * count;
+	if ((err = __os_malloc(dbenv, bytesize, &lockreq)) != 0) {
+		verify_return(jnienv, err, 0);
+		goto out0;
+	}
+	memset(lockreq, 0, bytesize);
+
+	ldbtsize = sizeof(LOCKED_DBT) * count;
+	if ((err = __os_malloc(dbenv, ldbtsize, &locked_dbts)) != 0) {
+		verify_return(jnienv, err, 0);
+		goto out1;
+	}
+	/* BUGFIX: this used to re-clear lockreq with the wrong size,
+	 * leaving locked_dbts uninitialized. */
+	memset(locked_dbts, 0, ldbtsize);
+	prereq = &lockreq[0];
+
+	/* fill in the lockreq array */
+	for (i = 0, prereq = &lockreq[0]; i < count; i++, prereq++) {
+		jlockreq = (*jnienv)->GetObjectArrayElement(jnienv, list,
+		    offset + i);
+		if (jlockreq == NULL) {
+			report_exception(jnienv,
+			    "DbEnv.lock_vec list entry is null",
+			    0, 0);
+			goto out2;
+		}
+		op = (*jnienv)->GetIntField(jnienv, jlockreq,
+		    fid_DbLockRequest_op);
+		prereq->op = op;
+
+		switch (op) {
+		case DB_LOCK_GET_TIMEOUT:
+			/* Needed: mode, timeout, obj.  Returned: lock. */
+			/* BUGFIX: the timeout used to be stored into
+			 * prereq->op, clobbering the operation code. */
+			prereq->timeout = (*jnienv)->GetIntField(jnienv,
+			    jlockreq, fid_DbLockRequest_timeout);
+			/* FALLTHROUGH */
+		case DB_LOCK_GET:
+			/* Needed: mode, obj.  Returned: lock. */
+			prereq->mode = (*jnienv)->GetIntField(jnienv, jlockreq,
+			    fid_DbLockRequest_mode);
+			jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
+			    fid_DbLockRequest_obj);
+			if ((err = locked_dbt_get(&locked_dbts[i], jnienv,
+			    dbenv, jobj, inOp)) != 0)
+				goto out2;
+			prereq->obj = &locked_dbts[i].javainfo->dbt;
+			break;
+		case DB_LOCK_PUT:
+			/* Needed: lock.  Ignored: mode, obj. */
+			jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
+			    fid_DbLockRequest_lock);
+			if (!verify_non_null(jnienv, jlock))
+				goto out2;
+			lockp = get_DB_LOCK(jnienv, jlock);
+			if (!verify_non_null(jnienv, lockp))
+				goto out2;
+
+			prereq->lock = *lockp;
+			break;
+		case DB_LOCK_PUT_ALL:
+		case DB_LOCK_TIMEOUT:
+			/* Needed: (none).  Ignored: lock, mode, obj. */
+			break;
+		case DB_LOCK_PUT_OBJ:
+			/* Needed: obj.  Ignored: lock, mode. */
+			jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
+			    fid_DbLockRequest_obj);
+			if ((err = locked_dbt_get(&locked_dbts[i], jnienv,
+			    dbenv, jobj, inOp)) != 0)
+				goto out2;
+			prereq->obj = &locked_dbts[i].javainfo->dbt;
+			break;
+		default:
+			report_exception(jnienv,
+			    "DbEnv.lock_vec bad op value",
+			    0, 0);
+			goto out2;
+		}
+	}
+
+	err = dbenv->lock_vec(dbenv, locker, flags, lockreq, count, &failedreq);
+	if (err == 0)
+		completed = count;
+	else
+		completed = failedreq - lockreq;
+
+	/* do post processing for any and all requests that completed */
+	for (i = 0; i < completed; i++) {
+		op = lockreq[i].op;
+		if (op == DB_LOCK_PUT) {
+			/*
+			 * After a successful put, the DbLock can no longer
+			 * be used, so we release the storage related to it.
+			 */
+			jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
+			    list, i + offset);
+			jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
+			    fid_DbLockRequest_lock);
+			lockp = get_DB_LOCK(jnienv, jlock);
+			__os_free(NULL, lockp);
+			set_private_dbobj(jnienv, name_DB_LOCK, jlock, 0);
+		}
+		/* BUGFIX: DB_LOCK_GET_TIMEOUT also returns a lock that
+		 * must be stored; it used to be silently dropped. */
+		else if (op == DB_LOCK_GET || op == DB_LOCK_GET_TIMEOUT) {
+			/*
+			 * Store the lock that was obtained.
+			 * We need to create storage for it since
+			 * the lockreq array only exists during this
+			 * method call.
+			 */
+			alloc_err = __os_malloc(dbenv, sizeof(DB_LOCK), &lockp);
+			if (!verify_return(jnienv, alloc_err, 0))
+				goto out2;
+
+			*lockp = lockreq[i].lock;
+
+			jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
+			    list, i + offset);
+			jlock = create_default_object(jnienv, name_DB_LOCK);
+			set_private_dbobj(jnienv, name_DB_LOCK, jlock, lockp);
+			(*jnienv)->SetObjectField(jnienv, jlockreq,
+			    fid_DbLockRequest_lock,
+			    jlock);
+		}
+	}
+
+	/* If one of the locks was not granted, build the exception now. */
+	if (err == DB_LOCK_NOTGRANTED && i < count) {
+		jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
+		    list, i + offset);
+		jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
+		    fid_DbLockRequest_obj);
+		jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
+		    fid_DbLockRequest_lock);
+		report_notgranted_exception(jnienv,
+		    "DbEnv.lock_vec incomplete",
+		    lockreq[i].op,
+		    lockreq[i].mode,
+		    jobj,
+		    jlock,
+		    i);
+	}
+	else
+		verify_return(jnienv, err, 0);
+
+ out2:
+	/* Free the dbts that we have locked */
+	for (i = 0 ; i < (prereq - lockreq); i++) {
+		/* BUGFIX: DB_LOCK_GET_TIMEOUT entries also hold a locked
+		 * DBT and used to be leaked here. */
+		if ((op = lockreq[i].op) == DB_LOCK_GET ||
+		    op == DB_LOCK_GET_TIMEOUT ||
+		    op == DB_LOCK_PUT_OBJ)
+			locked_dbt_put(&locked_dbts[i], jnienv, dbenv);
+	}
+	__os_free(dbenv, locked_dbts);
+
+ out1:
+	__os_free(dbenv, lockreq);
+
+ out0:
+	return;
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1put
+	(JNIEnv *jnienv, jobject jthis, /*DbLock*/ jobject jlock)
+{
+	DB_ENV *env;
+	DB_LOCK *lockp;
+	int ret;
+
+	/* Release the lock represented by the given DbLock object. */
+	env = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, env))
+		return;
+
+	lockp = get_DB_LOCK(jnienv, jlock);
+	if (!verify_non_null(jnienv, lockp))
+		return;
+
+	ret = env->lock_put(env, lockp);
+	if (!verify_return(jnienv, ret, 0))
+		return;
+
+	/*
+	 * A successfully released DbLock may not be used again, so the
+	 * DB_LOCK storage (allocated in DbEnv.lock_get()) is freed and
+	 * the java object's native pointer is cleared.
+	 */
+	__os_free(NULL, lockp);
+	set_private_dbobj(jnienv, name_DB_LOCK, jlock, 0);
+}
+
+/*
+ * DbEnv.log_archive: return log-related file names as a java
+ * String[], copied from DB's NULL-terminated list.
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+	int err, len, i;
+	char** ret;
+	jclass stringClass;
+	jobjectArray strarray;
+	DB_ENV *dbenv;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	strarray = NULL;
+	if (!verify_non_null(jnienv, dbenv))
+		return (0);
+	err = dbenv->log_archive(dbenv, &ret, flags);
+	if (!verify_return(jnienv, err, 0))
+		return (0);
+
+	/* A NULL list means no files matched; java sees a null array. */
+	if (ret != NULL) {
+		len = 0;
+		while (ret[len] != NULL)
+			len++;
+		stringClass = (*jnienv)->FindClass(jnienv, "java/lang/String");
+		if ((strarray = (*jnienv)->NewObjectArray(jnienv,
+		    len, stringClass, 0)) == NULL)
+			goto out;
+		for (i=0; i<len; i++) {
+			jstring str = (*jnienv)->NewStringUTF(jnienv, ret[i]);
+			(*jnienv)->SetObjectArrayElement(jnienv, strarray,
+			    i, str);
+		}
+		/* NOTE(review): `ret' is allocated by DB and does not
+		 * appear to be freed here -- confirm ownership. */
+	}
+out:	return (strarray);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
+	(JNIEnv *jnienv, jclass jthis_class,
+	/*DbLsn*/ jobject lsn0, /*DbLsn*/ jobject lsn1)
+{
+	DB_LSN *lsnp0, *lsnp1;
+
+	/* Static method: compare two LSNs; no DbEnv is required. */
+	COMPQUIET(jthis_class, NULL);
+	lsnp0 = get_DB_LSN(jnienv, lsn0);
+	lsnp1 = get_DB_LSN(jnienv, lsn1);
+	return (log_compare(lsnp0, lsnp1));
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1cursor
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+	DB_ENV *env;
+	DB_LOGC *logc;
+	int ret;
+
+	/* Open a native log cursor and wrap it in a java DbLogc. */
+	env = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, env))
+		return (NULL);
+	ret = env->log_cursor(env, &logc, flags);
+	verify_return(jnienv, ret, 0);
+	return (get_DbLogc(jnienv, logc));
+}
+
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn)
+{
+	DB_ENV *env;
+	DB_LSN *lsnp;
+	char name[FILENAME_MAX+1] = "";
+	int ret;
+
+	/* Map an LSN to the name of the log file that contains it. */
+	env = get_DB_ENV(jnienv, jthis);
+	lsnp = get_DB_LSN(jnienv, lsn);
+	if (!verify_non_null(jnienv, env))
+		return (NULL);
+
+	ret = env->log_file(env, lsnp, name, FILENAME_MAX);
+	verify_return(jnienv, ret, 0);
+	name[FILENAME_MAX] = '\0';	/* defensive termination */
+	return (get_java_string(jnienv, name));
+}
+
+/* DbEnv.log_flush(DbLsn): flush the log up to the given LSN (macro wrapper). */
+JAVADB_METHOD(DbEnv_log_1flush,
+	(JAVADB_ARGS, /*DbLsn*/ jobject lsn), DB_ENV,
+	log_flush, (c_this, get_DB_LSN(jnienv, lsn)))
+
+/*
+ * DbEnv.log_put: append a log record; the record's LSN is returned
+ * through the (required, non-null) lsn argument.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn,
+	/*DbDbt*/ jobject data, jint flags)
+{
+	int err;
+	DB_ENV *dbenv;
+	DB_LSN *dblsn;
+	LOCKED_DBT ldata;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	dblsn = get_DB_LSN(jnienv, lsn);
+	if (!verify_non_null(jnienv, dbenv))
+		return;
+
+	/* log_put's DB_LSN argument may not be NULL. */
+	if (!verify_non_null(jnienv, dblsn))
+		return;
+
+	/* Pin the java Dbt's bytes for the duration of the call. */
+	if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
+		goto out;
+
+	err = dbenv->log_put(dbenv, dblsn, &ldata.javainfo->dbt, flags);
+	verify_return(jnienv, err, 0);
+ out:
+	locked_dbt_put(&ldata, jnienv, dbenv);
+}
+
+/*
+ * DbEnv.log_stat: return logging subsystem statistics as a
+ * DbLogStat object, or NULL on error (an exception is posted).
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+	int err;
+	DB_ENV *dbenv;
+	DB_LOG_STAT *statp;
+	jobject retval;
+	jclass dbclass;
+
+	retval = NULL;
+	statp = NULL;
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return (NULL);
+
+	err = dbenv->log_stat(dbenv, &statp, (u_int32_t)flags);
+	if (verify_return(jnienv, err, 0)) {
+		if ((dbclass = get_class(jnienv, name_DB_LOG_STAT)) == NULL ||
+		    (retval =
+		    create_default_object(jnienv, name_DB_LOG_STAT)) == NULL)
+			goto err;	/* An exception has been posted. */
+
+		__jv_fill_log_stat(jnienv, dbclass, retval, statp);
+
+		/* The DB-allocated statistics are freed on all paths. */
+err:		__os_ufree(dbenv, statp);
+	}
+	return (retval);
+}
+
+/*
+ * DbEnv.memp_stat: return memory-pool statistics as a DbMpoolStat
+ * object, or NULL on error (an exception is posted).
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+	int err;
+	jclass dbclass;
+	DB_ENV *dbenv;
+	DB_MPOOL_STAT *statp;
+	jobject retval;
+
+	retval = NULL;
+	statp = NULL;
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return (NULL);
+
+	/* Only the pool-wide stats are requested (fstat argument is 0). */
+	err = dbenv->memp_stat(dbenv, &statp, 0, (u_int32_t)flags);
+	if (verify_return(jnienv, err, 0)) {
+		if ((dbclass = get_class(jnienv, name_DB_MPOOL_STAT)) == NULL ||
+		    (retval =
+		    create_default_object(jnienv, name_DB_MPOOL_STAT)) == NULL)
+			goto err;	/* An exception has been posted. */
+
+		__jv_fill_mpool_stat(jnienv, dbclass, retval, statp);
+
+		/* The DB-allocated statistics are freed on all paths. */
+err:		__os_ufree(dbenv, statp);
+	}
+	return (retval);
+}
+
+/*
+ * DbEnv.memp_fstat: per-file memory pool statistics, returned as an
+ * array of DbMpoolFStat objects (one per underlying file).
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+	int err, i, len;
+	jclass fstat_class;
+	DB_ENV *dbenv;
+	DB_MPOOL_FSTAT **fstatp;
+	jobjectArray retval;
+	jfieldID filename_id;
+	jstring jfilename;
+
+	fstatp = NULL;
+	retval = NULL;
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return (NULL);
+
+	err = dbenv->memp_stat(dbenv, 0, &fstatp, (u_int32_t)flags);
+	if (verify_return(jnienv, err, 0)) {
+		/* The per-file statistics list is NULL-terminated. */
+		len = 0;
+		while (fstatp[len] != NULL)
+			len++;
+		if ((fstat_class =
+		    get_class(jnienv, name_DB_MPOOL_FSTAT)) == NULL ||
+		    (retval = (*jnienv)->NewObjectArray(jnienv, len,
+		    fstat_class, 0)) == NULL)
+			goto err;
+
+		/*
+		 * PERF: the field ID belongs to the class, not to any one
+		 * object, so look it up once instead of on every loop
+		 * iteration as before.
+		 */
+		filename_id = (*jnienv)->GetFieldID(jnienv,
+		    fstat_class, "file_name", string_signature);
+
+		for (i=0; i<len; i++) {
+			jobject obj;
+			if ((obj = create_default_object(jnienv,
+			    name_DB_MPOOL_FSTAT)) == NULL)
+				goto err;
+			(*jnienv)->SetObjectArrayElement(jnienv, retval,
+			    i, obj);
+
+			/* Set the string field. */
+			jfilename = get_java_string(jnienv,
+			    fstatp[i]->file_name);
+			(*jnienv)->SetObjectField(jnienv, obj,
+			    filename_id, jfilename);
+			set_int_field(jnienv, fstat_class, obj,
+			    "st_pagesize", fstatp[i]->st_pagesize);
+			set_int_field(jnienv, fstat_class, obj,
+			    "st_cache_hit", fstatp[i]->st_cache_hit);
+			set_int_field(jnienv, fstat_class, obj,
+			    "st_cache_miss", fstatp[i]->st_cache_miss);
+			set_int_field(jnienv, fstat_class, obj,
+			    "st_map", fstatp[i]->st_map);
+			set_int_field(jnienv, fstat_class, obj,
+			    "st_page_create", fstatp[i]->st_page_create);
+			set_int_field(jnienv, fstat_class, obj,
+			    "st_page_in", fstatp[i]->st_page_in);
+			set_int_field(jnienv, fstat_class, obj,
+			    "st_page_out", fstatp[i]->st_page_out);
+			__os_ufree(dbenv, fstatp[i]);
+		}
+err:		__os_ufree(dbenv, fstatp);
+	}
+	return (retval);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint pct)
+{
+	DB_ENV *env;
+	int nwrote, ret;
+
+	/* Write dirty pages until pct percent of the pool is clean;
+	 * return the number of pages written. */
+	nwrote = 0;
+	env = get_DB_ENV(jnienv, jthis);
+	if (verify_non_null(jnienv, env)) {
+		ret = env->memp_trickle(env, pct, &nwrote);
+		verify_return(jnienv, ret, 0);
+	}
+	return (nwrote);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject pid, jint flags)
+{
+	DB_ENV *env;
+	DB_TXN *parent, *txn;
+	int ret;
+
+	/* Begin a transaction, optionally as a child of `pid'. */
+	env = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, env))
+		return (0);
+
+	parent = get_DB_TXN(jnienv, pid);
+	txn = 0;
+	ret = env->txn_begin(env, parent, &txn, flags);
+	if (!verify_return(jnienv, ret, 0))
+		return (0);
+	return (get_DbTxn(jnienv, txn));
+}
+
+/* DbEnv.txn_checkpoint: checkpoint the transaction subsystem (macro wrapper). */
+JAVADB_METHOD(DbEnv_txn_1checkpoint,
+	(JAVADB_ARGS, jint kbyte, jint min, jint flags), DB_ENV,
+	txn_checkpoint, (c_this, kbyte, min, flags))
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_app_1dispatch_1changed
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbFeedback*/ jobject jappdispatch)
+{
+	DB_ENV *env;
+	DB_ENV_JAVAINFO *envinfo;
+
+	/* Install the java-side application recovery dispatcher. */
+	env = get_DB_ENV(jnienv, jthis);
+	envinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (verify_non_null(jnienv, env) &&
+	    verify_non_null(jnienv, envinfo))
+		dbjie_set_app_dispatch_object(envinfo, jnienv, env,
+		    jappdispatch);
+}
+
+/*
+ * DbEnv.txn_recover: return up to `count' prepared-but-unresolved
+ * transactions as a DbPreplist[] of (DbTxn, gid) pairs.
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_txn_1recover
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint count, jint flags)
+{
+	int err;
+	DB_ENV *dbenv;
+	DB_PREPLIST *preps;
+	long retcount;
+	int i;
+	char signature[128];
+	size_t bytesize;
+	jobject retval;
+	jobject obj;
+	jobject txnobj;
+	jbyteArray bytearr;
+	jclass preplist_class;
+	jfieldID txn_fieldid;
+	jfieldID gid_fieldid;
+
+	retval = NULL;
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return (NULL);
+
+	/*
+	 * We need to allocate some local storage for the
+	 * returned preplist, and that requires us to do
+	 * our own argument validation.
+	 */
+	if (count <= 0) {
+		verify_return(jnienv, EINVAL, 0);
+		goto out;
+	}
+
+	bytesize = sizeof(DB_PREPLIST) * count;
+	if ((err = __os_malloc(dbenv, bytesize, &preps)) != 0) {
+		verify_return(jnienv, err, 0);
+		goto out;
+	}
+
+	err = dbenv->txn_recover(dbenv, preps, count, &retcount, flags);
+
+	if (verify_return(jnienv, err, 0)) {
+		if ((preplist_class =
+		    get_class(jnienv, name_DB_PREPLIST)) == NULL ||
+		    (retval = (*jnienv)->NewObjectArray(jnienv, retcount,
+		    preplist_class, 0)) == NULL)
+			goto err;
+
+		/* Field IDs are per-class; compute them once, outside the loop. */
+		(void)snprintf(signature, sizeof(signature),
+		    "L%s%s;", DB_PACKAGE_NAME, name_DB_TXN);
+		txn_fieldid = (*jnienv)->GetFieldID(jnienv, preplist_class,
+		    "txn", signature);
+		gid_fieldid = (*jnienv)->GetFieldID(jnienv, preplist_class,
+		    "gid", "[B");
+
+		for (i=0; i<retcount; i++) {
+			/*
+			 * First, make a blank DbPreplist object
+			 * and set the array entry.
+			 */
+			if ((obj = create_default_object(jnienv,
+			    name_DB_PREPLIST)) == NULL)
+				goto err;
+			(*jnienv)->SetObjectArrayElement(jnienv,
+			    retval, i, obj);
+
+			/* Set the txn field. */
+			txnobj = get_DbTxn(jnienv, preps[i].txn);
+			(*jnienv)->SetObjectField(jnienv,
+			    obj, txn_fieldid, txnobj);
+
+			/* Build the gid array and set the field. */
+			if ((bytearr = (*jnienv)->NewByteArray(jnienv,
+			    sizeof(preps[i].gid))) == NULL)
+				goto err;
+			(*jnienv)->SetByteArrayRegion(jnienv, bytearr, 0,
+			    sizeof(preps[i].gid), (jbyte *)&preps[i].gid[0]);
+			(*jnienv)->SetObjectField(jnienv, obj,
+			    gid_fieldid, bytearr);
+		}
+	}
+	/* err frees the local preplist, then falls through to out. */
+err:	__os_free(dbenv, preps);
+out:	return (retval);
+}
+
+/*
+ * DbEnv.txn_stat: return transaction statistics as a DbTxnStat
+ * object, including the nested st_txnarray of active transactions.
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
+{
+	int err;
+	DB_ENV *dbenv;
+	DB_TXN_STAT *statp;
+	jobject retval, obj;
+	jclass dbclass, active_class;
+	char active_signature[512];
+	jfieldID arrid;
+	jobjectArray actives;
+	unsigned int i;
+
+	retval = NULL;
+	statp = NULL;
+	dbenv = get_DB_ENV(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbenv))
+		return (NULL);
+
+	err = dbenv->txn_stat(dbenv, &statp, (u_int32_t)flags);
+	if (verify_return(jnienv, err, 0)) {
+		if ((dbclass = get_class(jnienv, name_DB_TXN_STAT)) == NULL ||
+		    (retval =
+		    create_default_object(jnienv, name_DB_TXN_STAT)) == NULL)
+			goto err;
+
+		/* Set the individual fields */
+		__jv_fill_txn_stat(jnienv, dbclass, retval, statp);
+
+		if ((active_class =
+		    get_class(jnienv, name_DB_TXN_STAT_ACTIVE)) == NULL ||
+		    (actives = (*jnienv)->NewObjectArray(jnienv,
+		    statp->st_nactive, active_class, 0)) == NULL)
+			goto err;
+
+		/*
+		 * Set the st_txnarray field.  This is a little more involved
+		 * than other fields, since the type is an array, so none
+		 * of our utility functions help.
+		 */
+		(void)snprintf(active_signature, sizeof(active_signature),
+		    "[L%s%s;", DB_PACKAGE_NAME, name_DB_TXN_STAT_ACTIVE);
+
+		arrid = (*jnienv)->GetFieldID(jnienv, dbclass, "st_txnarray",
+		    active_signature);
+		(*jnienv)->SetObjectField(jnienv, retval, arrid, actives);
+
+		/* Now fill the in the elements of st_txnarray. */
+		for (i=0; i<statp->st_nactive; i++) {
+			obj = create_default_object(jnienv,
+			    name_DB_TXN_STAT_ACTIVE);
+			(*jnienv)->SetObjectArrayElement(jnienv,
+			    actives, i, obj);
+
+			set_int_field(jnienv, active_class, obj,
+			    "txnid", statp->st_txnarray[i].txnid);
+			set_int_field(jnienv, active_class, obj, "parentid",
+			    statp->st_txnarray[i].parentid);
+			set_lsn_field(jnienv, active_class, obj,
+			    "lsn", statp->st_txnarray[i].lsn);
+		}
+
+		/* The DB-allocated statistics are freed on all paths. */
+err:		__os_ufree(dbenv, statp);
+	}
+	return (retval);
+}
+
+/* See discussion on errpfx, errcall in DB_ENV_JAVAINFO */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errcall
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobject errcall)
+{
+	DB_ENV *env;
+	DB_ENV_JAVAINFO *envinfo;
+
+	/* Route future error messages to the given java callback. */
+	env = get_DB_ENV(jnienv, jthis);
+	envinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, env) ||
+	    !verify_non_null(jnienv, envinfo))
+		return;
+	dbjie_set_errcall(envinfo, jnienv, errcall);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring str)
+{
+	DB_ENV *env;
+	DB_ENV_JAVAINFO *envinfo;
+
+	/* Set the prefix prepended to future error messages. */
+	env = get_DB_ENV(jnienv, jthis);
+	envinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	if (!verify_non_null(jnienv, env) ||
+	    !verify_non_null(jnienv, envinfo))
+		return;
+	dbjie_set_errpfx(envinfo, jnienv, str);
+}
+
+/*
+ * DbEnv._finalize: GC finalizer.  Warns via the saved errcall if the
+ * environment was never closed, severs the java<->native links, and
+ * destroys the per-environment java info.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1finalize
+	(JNIEnv *jnienv, /*DbEnv*/ jobject jthis,
+	jobject /*DbErrcall*/ errcall, jstring errpfx)
+{
+	DB_ENV *dbenv;
+	DB_ENV_JAVAINFO *envinfo;
+
+	dbenv = get_DB_ENV(jnienv, jthis);
+	envinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+	DB_ASSERT(envinfo != NULL);
+
+	/* Note: We detect and report unclosed DbEnvs. */
+	if (dbenv != NULL && envinfo != NULL && !dbjie_is_dbopen(envinfo)) {
+
+		/* If this error occurs, this object was never closed. */
+		report_errcall(jnienv, errcall, errpfx,
+		    "DbEnv.finalize: open DbEnv object destroyed");
+	}
+
+	/* Shouldn't see this object again, but just in case */
+	set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
+	set_private_info(jnienv, name_DB_ENV, jthis, 0);
+
+	dbjie_destroy(envinfo, jnienv);
+}
diff --git a/storage/bdb/libdb_java/java_DbLock.c b/storage/bdb/libdb_java/java_DbLock.c
new file mode 100644
index 00000000000..00a9836bfa0
--- /dev/null
+++ b/storage/bdb/libdb_java/java_DbLock.c
@@ -0,0 +1,30 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbLock.c,v 11.12 2002/02/28 21:27:38 ubell Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLock.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
+	(JNIEnv *jnienv, jobject jthis)
+{
+	DB_LOCK *lockp;
+
+	/*
+	 * GC finalizer: release the native DB_LOCK storage if this lock
+	 * was never put, then clear the native pointer defensively.
+	 */
+	lockp = get_DB_LOCK(jnienv, jthis);
+	if (lockp != NULL)
+		__os_free(NULL, lockp);
+	set_private_dbobj(jnienv, name_DB_LOCK, jthis, 0);	/* paranoia */
+}
diff --git a/storage/bdb/libdb_java/java_DbLogc.c b/storage/bdb/libdb_java/java_DbLogc.c
new file mode 100644
index 00000000000..69294d9baac
--- /dev/null
+++ b/storage/bdb/libdb_java/java_DbLogc.c
@@ -0,0 +1,110 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbLogc.c,v 11.6 2002/07/02 12:03:03 mjc Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLogc.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
+	(JNIEnv *jnienv, jobject jthis, jint flags)
+{
+	DB_LOGC *logc;
+	int ret;
+
+	/* Close the log cursor and clear the java object's native pointer. */
+	logc = get_DB_LOGC(jnienv, jthis);
+	if (!verify_non_null(jnienv, logc))
+		return;
+	ret = logc->close(logc, flags);
+	if (verify_return(jnienv, ret, 0))
+		set_private_dbobj(jnienv, name_DB_LOGC, jthis, 0);
+}
+
+/*
+ * DbLogc.get: retrieve a log record into `data', positioning by
+ * `lsn' and `flags'; retries with a larger buffer on ENOMEM.
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
+	(JNIEnv *jnienv, jobject jthis,
+	/*DbLsn*/ jobject lsn, /*Dbt*/ jobject data, jint flags)
+{
+	int err, retry;
+	DB_LOGC *dblogc;
+	DB_LSN *dblsn;
+	LOCKED_DBT ldata;
+	OpKind dataop;
+
+	/* The data Dbt is write-only: the log record is returned in it. */
+	err = 0;
+	dataop = outOp;
+
+	dblogc = get_DB_LOGC(jnienv, jthis);
+	dblsn = get_DB_LSN(jnienv, lsn);
+
+	/*
+	 * BUGFIX: verify dblogc before it is used; the original code
+	 * read dblogc->dbenv first, dereferencing NULL for a closed or
+	 * invalid cursor (and then "put" a never-acquired DBT).
+	 */
+	if (!verify_non_null(jnienv, dblogc))
+		return (err);
+
+	if (locked_dbt_get(&ldata, jnienv, dblogc->dbenv, data, dataop) != 0)
+		goto out1;
+
+	for (retry = 0; retry < 3; retry++) {
+		err = dblogc->get(dblogc, dblsn, &ldata.javainfo->dbt, flags);
+
+		/*
+		 * If we failed due to lack of memory in our DBT arrays,
+		 * retry.
+		 */
+		if (err != ENOMEM)
+			break;
+		if (!locked_dbt_realloc(&ldata, jnienv, dblogc->dbenv))
+			break;
+	}
+ out1:
+	locked_dbt_put(&ldata, jnienv, dblogc->dbenv);
+	if (!DB_RETOK_LGGET(err)) {
+		if (verify_dbt(jnienv, err, &ldata))
+			verify_return(jnienv, err, 0);
+	}
+	return (err);
+}
+
+/* DbLogc.finalize: GC finalizer; diagnostics-only, frees nothing. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
+	(JNIEnv *jnienv, jobject jthis)
+{
+	/*
+	 * Free any data related to DB_LOGC here.
+	 * If we ever have java-only data embedded in the DB_LOGC
+	 * and need to do this, we'll have to track DbLogc's
+	 * according to which DbEnv owns them, just as
+	 * we track Db's according to which DbEnv owns them.
+	 * That's necessary to avoid double freeing that
+	 * comes about when closes interact with GC.
+	 */
+
+#ifdef DIAGNOSTIC
+	/* In diagnostic builds, warn about cursors never closed. */
+	DB_LOGC *dblogc;
+
+	dblogc = get_DB_LOGC(jnienv, jthis);
+	if (dblogc != NULL)
+		fprintf(stderr, "Java API: DbLogc has not been closed\n");
+#else
+
+	COMPQUIET(jnienv, NULL);
+	COMPQUIET(jthis, NULL);
+
+#endif
+}
diff --git a/storage/bdb/libdb_java/java_DbLsn.c b/storage/bdb/libdb_java/java_DbLsn.c
new file mode 100644
index 00000000000..d53082826f4
--- /dev/null
+++ b/storage/bdb/libdb_java/java_DbLsn.c
@@ -0,0 +1,43 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbLsn.c,v 11.12 2002/05/07 16:12:41 dda Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h> /* needed for FILENAME_MAX */
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLsn.h"
+
+/* DbLsn.init_lsn: intentionally a no-op (see comment below). */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_init_1lsn
+	(JNIEnv *jnienv, /*DbLsn*/ jobject jthis)
+{
+	/*
+	 * Note: the DB_LSN object stored in the private_dbobj_
+	 * is allocated in get_DbLsn() or get_DB_LSN().
+	 */
+
+	COMPQUIET(jnienv, NULL);
+	COMPQUIET(jthis, NULL);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_finalize
+	(JNIEnv *jnienv, jobject jthis)
+{
+	DB_LSN *lsnp;
+
+	/* Free the DB_LSN allocated in get_DbLsn()/get_DB_LSN(). */
+	lsnp = get_DB_LSN(jnienv, jthis);
+	if (lsnp != NULL)
+		(void)__os_free(NULL, lsnp);
+}
diff --git a/storage/bdb/libdb_java/java_DbTxn.c b/storage/bdb/libdb_java/java_DbTxn.c
new file mode 100644
index 00000000000..51195501b77
--- /dev/null
+++ b/storage/bdb/libdb_java/java_DbTxn.c
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbTxn.c,v 11.16 2002/08/06 05:19:05 bostic Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbTxn.h"
+
+/* Thin macro-generated wrappers: DbTxn.abort / commit / discard. */
+JAVADB_METHOD(DbTxn_abort, (JAVADB_ARGS), DB_TXN,
+	abort, (c_this))
+JAVADB_METHOD(DbTxn_commit, (JAVADB_ARGS, jint flags), DB_TXN,
+	commit, (c_this, flags))
+JAVADB_METHOD(DbTxn_discard, (JAVADB_ARGS, jint flags), DB_TXN,
+	discard, (c_this, flags))
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
+	(JNIEnv *jnienv, jobject jthis)
+{
+	DB_TXN *txn;
+
+	/* Return the transaction's id, or -1 if the handle is invalid. */
+	txn = get_DB_TXN(jnienv, jthis);
+	if (!verify_non_null(jnienv, txn))
+		return (-1);
+
+	/* No error to check for from DB_TXN->id */
+	return (txn->id(txn));
+}
+
+/*
+ * DbTxn.prepare: first phase of two-phase commit.  The global id must
+ * be a byte[] of at least DB_XIDDATASIZE (128) bytes.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
+	(JNIEnv *jnienv, jobject jthis, jbyteArray gid)
+{
+	int err;
+	DB_TXN *dbtxn;
+	jbyte *c_array;
+
+	dbtxn = get_DB_TXN(jnienv, jthis);
+	if (!verify_non_null(jnienv, dbtxn))
+		return;
+
+	if (gid == NULL ||
+	    (*jnienv)->GetArrayLength(jnienv, gid) < DB_XIDDATASIZE) {
+		report_exception(jnienv, "DbTxn.prepare gid array "
+		    "must be >= 128 bytes", EINVAL, 0);
+		return;
+	}
+	/* Pin the java byte[] while DB reads the gid from it. */
+	c_array = (*jnienv)->GetByteArrayElements(jnienv, gid, NULL);
+	err = dbtxn->prepare(dbtxn, (u_int8_t *)c_array);
+	(*jnienv)->ReleaseByteArrayElements(jnienv, gid, c_array, 0);
+	verify_return(jnienv, err, 0);
+}
+
+/* DbTxn.set_timeout: wrapper over DB_TXN->set_timeout (macro-generated). */
+JAVADB_METHOD(DbTxn_set_1timeout,
+	(JAVADB_ARGS, jlong timeout, jint flags), DB_TXN,
+	set_timeout, (c_this, (u_int32_t)timeout, flags))
diff --git a/storage/bdb/libdb_java/java_DbUtil.c b/storage/bdb/libdb_java/java_DbUtil.c
new file mode 100644
index 00000000000..edcbc6d9f15
--- /dev/null
+++ b/storage/bdb/libdb_java/java_DbUtil.c
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbUtil.c,v 1.5 2002/01/11 15:52:44 bostic Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbUtil.h"
+
+JNIEXPORT jboolean JNICALL
+Java_com_sleepycat_db_DbUtil_is_1big_1endian (JNIEnv *jnienv,
+    jclass jthis_class)
+{
+	/* Report the host byte order to the java layer. */
+	COMPQUIET(jnienv, NULL);
+	COMPQUIET(jthis_class, NULL);
+
+	if (__db_isbigendian())
+		return (JNI_TRUE);
+	return (JNI_FALSE);
+}
diff --git a/storage/bdb/libdb_java/java_DbXAResource.c b/storage/bdb/libdb_java/java_DbXAResource.c
new file mode 100644
index 00000000000..609529bfe83
--- /dev/null
+++ b/storage/bdb/libdb_java/java_DbXAResource.c
@@ -0,0 +1,288 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbXAResource.c,v 11.6 2002/08/06 05:19:06 bostic Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+#include "java_util.h"
+#include "dbinc/xa.h"
+#include "dbinc_auto/xa_ext.h"
+#include "com_sleepycat_db_xa_DbXAResource.h"
+
+/*
+ * DbXAResource._init: open the XA resource manager for `home' and
+ * create the java DbEnv attached to the DB_ENV made by __db_xa_open.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1init
+	(JNIEnv *jnienv, jobject jthis, jstring home, jint rmid, jint flags)
+{
+	int err;
+	LOCKED_STRING ls_home;
+	jclass cl;
+	jmethodID mid;
+
+	COMPQUIET(jthis, NULL);
+	if (locked_string_get(&ls_home, jnienv, home) != 0)
+		goto out;
+	if ((err = __db_xa_open((char *)ls_home.string,
+	    rmid, flags)) != XA_OK) {
+		/* NOTE(review): execution continues after a failed
+		 * xa_open -- confirm that is intended. */
+		verify_return(jnienv, err, EXCEPTION_XA);
+	}
+
+	/*
+	 * Now create the DbEnv object, it will get attached
+	 * to the DB_ENV just made in __db_xa_open.
+	 */
+	if ((cl = get_class(jnienv, name_DB_ENV)) == NULL)
+		goto out;
+
+	mid = (*jnienv)->GetStaticMethodID(jnienv, cl,
+	    "_create_DbEnv_for_XA", "(II)V");
+	(*jnienv)->CallStaticVoidMethod(jnienv, cl, mid, 0, rmid);
+
+ out:
+	locked_string_put(&ls_home, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1close
+	(JNIEnv *jnienv, jobject jthis, jstring home, jint rmid, jint flags)
+{
+	LOCKED_STRING ls_home;
+	int ret;
+
+	/* Detach from the XA resource manager for `home'. */
+	COMPQUIET(jthis, NULL);
+	if (locked_string_get(&ls_home, jnienv, home) != 0)
+		goto out;
+	ret = __db_xa_close((char *)ls_home.string, rmid, flags);
+	if (ret != XA_OK)
+		verify_return(jnienv, ret, EXCEPTION_XA);
+ out:
+	locked_string_put(&ls_home, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1commit
+	(JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid,
+	jboolean onePhase)
+{
+	XID xid;
+	long commit_flags;
+	int ret;
+
+	/* Commit the XA transaction, one- or two-phase. */
+	COMPQUIET(jthis, NULL);
+	if (!get_XID(jnienv, jxid, &xid))
+		return;
+	commit_flags = (onePhase == JNI_TRUE) ? TMONEPHASE : 0;
+	if ((ret = __db_xa_commit(&xid, rmid, commit_flags)) != XA_OK)
+		verify_return(jnienv, ret, EXCEPTION_XA);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1end
+	(JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid, jint flags)
+{
+	XID xid;
+	int ret;
+
+	/* Disassociate the current thread from the XA transaction. */
+	COMPQUIET(jthis, NULL);
+	if (!get_XID(jnienv, jxid, &xid))
+		return;
+	ret = __db_xa_end(&xid, rmid, flags);
+	if (ret != XA_OK)
+		verify_return(jnienv, ret, EXCEPTION_XA);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1forget
+	(JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
+{
+	XID xid;
+	int ret;
+
+	/* Forget a heuristically-completed XA transaction. */
+	COMPQUIET(jthis, NULL);
+	if (!get_XID(jnienv, jxid, &xid))
+		return;
+	ret = __db_xa_forget(&xid, rmid, 0);
+	if (ret != XA_OK)
+		verify_return(jnienv, ret, EXCEPTION_XA);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_xa_DbXAResource__1prepare
+	(JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
+{
+	XID xid;
+	int ret;
+
+	/* Prepare phase; XA_RDONLY is a successful outcome, not an error. */
+	COMPQUIET(jthis, NULL);
+	if (!get_XID(jnienv, jxid, &xid))
+		return (0);
+	ret = __db_xa_prepare(&xid, rmid, 0);
+	if (ret != XA_OK && ret != XA_RDONLY)
+		verify_return(jnienv, ret, EXCEPTION_XA);
+
+	return (ret);
+}
+
+/*
+ * DbXAResource._recover: collect all prepared-but-unresolved XIDs for
+ * the resource manager and return them as a DbXid[].
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_xa_DbXAResource__1recover
+	(JNIEnv *jnienv, jobject jthis, jint rmid, jint flags)
+{
+	XID *xids;
+	int err;
+	int total;
+	int cnt;
+	int i;
+	int curflags;
+	size_t nbytes;
+	jclass xid_class;
+	jmethodID mid;
+	jobject obj;
+	jobjectArray retval;
+
+	COMPQUIET(jthis, NULL);
+	total = 0;
+	cnt = 0;
+	xids = NULL;
+	retval = NULL;
+	flags &= ~(DB_FIRST | DB_LAST | DB_NEXT);
+
+	/* Repeatedly call __db_xa_recover to fill up an array of XIDs */
+	curflags = flags | DB_FIRST;
+	do {
+		total += cnt;
+		nbytes = sizeof(XID) * (total + 10);
+		if ((err = __os_realloc(NULL, nbytes, &xids)) != 0) {
+			if (xids != NULL)
+				__os_free(NULL, xids);
+			verify_return(jnienv, XAER_NOTA, EXCEPTION_XA);
+			return (NULL);
+		}
+		cnt = __db_xa_recover(&xids[total], 10, rmid, curflags);
+		curflags = flags | DB_NEXT;
+	} while (cnt > 0);
+
+	if (cnt < 0) {
+		verify_return(jnienv, cnt, EXCEPTION_XA);
+		goto out;
+	}
+
+	/* Create the java DbXid array and fill it up */
+	if ((xid_class = get_class(jnienv, name_DB_XID)) == NULL)
+		goto out;
+	mid = (*jnienv)->GetMethodID(jnienv, xid_class, "<init>",
+	    "(I[B[B)V");
+	if ((retval = (*jnienv)->NewObjectArray(jnienv, total, xid_class, 0))
+	    == NULL)
+		goto out;
+
+	for (i = 0; i < total; i++) {
+		jobject gtrid;
+		jobject bqual;
+		jsize gtrid_len;
+		jsize bqual_len;
+
+		gtrid_len = (jsize)xids[i].gtrid_length;
+		bqual_len = (jsize)xids[i].bqual_length;
+		gtrid = (*jnienv)->NewByteArray(jnienv, gtrid_len);
+		bqual = (*jnienv)->NewByteArray(jnienv, bqual_len);
+		if (gtrid == NULL || bqual == NULL)
+			goto out;
+		(*jnienv)->SetByteArrayRegion(jnienv, gtrid, 0, gtrid_len,
+		    (jbyte *)&xids[i].data[0]);
+		(*jnienv)->SetByteArrayRegion(jnienv, bqual, 0, bqual_len,
+		    (jbyte *)&xids[i].data[gtrid_len]);
+		if ((obj = (*jnienv)->NewObject(jnienv, xid_class, mid,
+		    (jint)xids[i].formatID, gtrid, bqual)) == NULL)
+			goto out;
+		(*jnienv)->SetObjectArrayElement(jnienv, retval, i, obj);
+	}
+out:	/*
+	 * BUGFIX: the XID array used to be freed before the loop above
+	 * read it (a use-after-free); free it only once we are done.
+	 * This also plugs a leak on the JNI-failure paths.
+	 */
+	if (xids != NULL)
+		__os_free(NULL, xids);
+	return (retval);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1rollback
+	(JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
+{
+	XID xid;
+	int ret;
+
+	/* Roll back the XA transaction identified by jxid. */
+	COMPQUIET(jthis, NULL);
+	if (!get_XID(jnienv, jxid, &xid))
+		return;
+	ret = __db_xa_rollback(&xid, rmid, 0);
+	if (ret != XA_OK)
+		verify_return(jnienv, ret, EXCEPTION_XA);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1start
+	(JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid, jint flags)
+{
+	XID xid;
+	int ret;
+
+	/* Associate the current thread with the XA transaction. */
+	COMPQUIET(jthis, NULL);
+	if (!get_XID(jnienv, jxid, &xid))
+		return;
+	ret = __db_xa_start(&xid, rmid, flags);
+	if (ret != XA_OK)
+		verify_return(jnienv, ret, EXCEPTION_XA);
+}
+
+/*
+ * DbXAResource.xa_attach: look up the (env, txn) pair associated with
+ * an XID and/or resource-manager id and return them wrapped in a
+ * DbXAResource attach object; NULL if there is no current transaction.
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_xa_DbXAResource_xa_1attach
+	(JNIEnv *jnienv, jclass jthisclass, jobject jxid, jobject jrmid)
+{
+	XID xid;
+	XID *xidp;
+	int ret;
+	DB_ENV *env;
+	DB_TXN *txn;
+	int rmid;
+	int *rmidp;
+	jobject jtxn;
+	jobject jenv;
+	jclass cl;
+	jmethodID mid;
+
+	COMPQUIET(jthisclass, NULL);
+	/* Both jxid and jrmid are optional; NULL means "unspecified". */
+	if (jxid == NULL) {
+		xidp = NULL;
+	}
+	else {
+		xidp = &xid;
+		if (!get_XID(jnienv, jxid, &xid))
+			return (NULL);
+	}
+	if (jrmid == NULL) {
+		rmidp = NULL;
+	}
+	else {
+		rmidp = &rmid;
+		rmid = (int)(*jnienv)->CallIntMethod(jnienv, jrmid,
+		    mid_Integer_intValue);
+	}
+
+	if ((ret = db_env_xa_attach(rmidp, xidp, &env, &txn)) != 0) {
+		/*
+		 * DB_NOTFOUND is a normal return, it means we
+		 * have no current transaction,
+		 */
+		if (ret != DB_NOTFOUND)
+			verify_return(jnienv, ret, 0);
+		return (NULL);
+	}
+
+	jenv = ((DB_ENV_JAVAINFO *)env->api2_internal)->jenvref;
+	jtxn = get_DbTxn(jnienv, txn);
+	if ((cl = get_class(jnienv, name_DB_XAATTACH)) == NULL)
+		return (NULL);
+	/* NOTE(review): `mid' is not checked for NULL before use --
+	 * confirm the constructor signature always resolves. */
+	mid = (*jnienv)->GetMethodID(jnienv, cl, "<init>",
+	    "(Lcom/sleepycat/db/DbEnv;Lcom/sleepycat/db/DbTxn;)V");
+	return (*jnienv)->NewObject(jnienv, cl, mid, jenv, jtxn);
+}
diff --git a/storage/bdb/libdb_java/java_Dbc.c b/storage/bdb/libdb_java/java_Dbc.c
new file mode 100644
index 00000000000..63ab368fc03
--- /dev/null
+++ b/storage/bdb/libdb_java/java_Dbc.c
@@ -0,0 +1,278 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_Dbc.c,v 11.23 2002/08/06 05:19:06 bostic Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_Dbc.h"
+
+/* JNI: Dbc.close -- close the cursor; on success clear the native handle. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_close
+ (JNIEnv *jnienv, jobject jthis)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dbc))
+ return;
+ err = dbc->c_close(dbc);
+ if (verify_return(jnienv, err, 0)) {
+ set_private_dbobj(jnienv, name_DBC, jthis, 0);
+ }
+}
+
+/* JNI: Dbc.count -- number of duplicates at the current cursor position. */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_count
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+ db_recno_t count;
+
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ err = dbc->c_count(dbc, &count, flags);
+ verify_return(jnienv, err, 0);
+ return (count);
+}
+
+/* JNI: Dbc.del -- generated wrapper forwarding to DBC->c_del(). */
+JAVADB_METHOD_INT(Dbc_del, (JAVADB_ARGS, jint flags), DBC,
+ c_del, (c_this, flags), DB_RETOK_DBCDEL)
+
+/* JNI: Dbc.dup -- duplicate the cursor and wrap the new DBC in a Java Dbc. */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Dbc_dup
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DBC *dbc = get_DBC(jnienv, jthis);
+ DBC *dbc_ret = NULL;
+
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ err = dbc->c_dup(dbc, &dbc_ret, flags);
+ if (!verify_return(jnienv, err, 0))
+ return (0);
+
+ return (get_Dbc(jnienv, dbc_ret));
+}
+
+/*
+ * JNI: Dbc.get -- cursor retrieval.  The operation flags determine whether
+ * key/data are inputs, outputs, or both; the Dbts are pinned via LOCKED_DBT
+ * for the duration of the call and released through goto-chained cleanup.
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
+ (JNIEnv *jnienv, jobject jthis,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err, retry, op_flags;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ LOCKED_DBT lkey, ldata;
+ OpKind keyop, dataop;
+
+ /*
+ * Depending on flags, the user may be supplying the key,
+ * or else we may have to retrieve it.
+ */
+ err = 0;
+ keyop = outOp;
+ dataop = outOp;
+
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET) {
+ keyop = inOp;
+ }
+ else if (op_flags == DB_SET_RANGE ||
+ op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH || op_flags == DB_GET_BOTH_RANGE) {
+ keyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbc = get_DBC(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ dbenv = dbc->dbp->dbenv;
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dbc))
+ goto out1;
+
+ /* Up to three attempts: each ENOMEM grows the Java-side DBT arrays. */
+ for (retry = 0; retry < 3; retry++) {
+ err = dbc->c_get(dbc,
+ &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv,
+ dbenv) && !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ if (!DB_RETOK_DBCGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+/*
+ * JNI: Dbc.pget -- like Dbc.get but for secondary databases: also returns
+ * the primary key (pkey).  Same LOCKED_DBT pinning, ENOMEM-retry and
+ * goto-chained cleanup discipline as Dbc.get above.
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_pget
+ (JNIEnv *jnienv, jobject jthis,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject pkey, /*Dbt*/ jobject data, jint flags)
+{
+ int err, retry, op_flags;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ LOCKED_DBT lkey, lpkey, ldata;
+ OpKind keyop, pkeyop, dataop;
+
+ /*
+ * Depending on flags, the user may be supplying the key,
+ * or else we may have to retrieve it.
+ */
+ err = 0;
+ keyop = outOp;
+ pkeyop = outOp;
+ dataop = outOp;
+
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET) {
+ keyop = inOp;
+ }
+ else if (op_flags == DB_SET_RANGE ||
+ op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH || op_flags == DB_GET_BOTH_RANGE) {
+ pkeyop = inOutOp;
+ keyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbc = get_DBC(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ dbenv = dbc->dbp->dbenv;
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out3;
+ if (locked_dbt_get(&lpkey, jnienv, dbenv, pkey, pkeyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dbc))
+ goto out1;
+
+ /* Up to three attempts: each ENOMEM grows the Java-side DBT arrays. */
+ for (retry = 0; retry < 3; retry++) {
+ err = dbc->c_pget(dbc, &lkey.javainfo->dbt,
+ &lpkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&lpkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lpkey, jnienv, dbenv);
+ out3:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ if (!DB_RETOK_DBCGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &lpkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+/*
+ * JNI: Dbc.put -- store at the cursor.  For recno databases with
+ * DB_BEFORE/DB_AFTER the key is an output (the new record number),
+ * otherwise an input.
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_put
+ (JNIEnv *jnienv, jobject jthis,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject data, jint flags)
+{
+ int err;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ LOCKED_DBT lkey, ldata;
+ OpKind keyop;
+
+ err = 0;
+ dbc = get_DBC(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ dbenv = dbc->dbp->dbenv;
+ keyop = (dbc->dbp->type == DB_RECNO &&
+ (flags == DB_BEFORE || flags == DB_AFTER)) ? outOp : inOp;
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dbc))
+ goto out1;
+ err = dbc->c_put(dbc, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+ if (!DB_RETOK_DBCPUT(err))
+ verify_return(jnienv, err, 0);
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ return (err);
+}
+
+/*
+ * JNI: Dbc.finalize -- GC hook; currently only warns (in DIAGNOSTIC
+ * builds) about cursors that were never closed.
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ /*
+ * Free any data related to DBC here.
+ * If we ever have java-only data embedded in the DBC
+ * and need to do this, we'll have to track Dbc's
+ * according to which Db owns them, just as
+ * we track Db's according to which DbEnv owns them.
+ * That's necessary to avoid double freeing that
+ * comes about when closes interact with GC.
+ */
+
+#ifdef DIAGNOSTIC
+ DBC *dbc;
+
+ dbc = get_DBC(jnienv, jthis);
+ if (dbc != NULL)
+ fprintf(stderr, "Java API: Dbc has not been closed\n");
+#else
+
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis, NULL);
+
+#endif
+}
diff --git a/storage/bdb/libdb_java/java_Dbt.c b/storage/bdb/libdb_java/java_Dbt.c
new file mode 100644
index 00000000000..d21109f3408
--- /dev/null
+++ b/storage/bdb/libdb_java/java_Dbt.c
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_Dbt.c,v 11.18 2002/06/20 11:11:55 mjc Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_Dbt.h"
+
+/* JNI: Dbt constructor hook -- allocate and attach the native DBT_JAVAINFO. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *dbtji;
+
+ dbtji = dbjit_construct();
+ set_private_dbobj(jnienv, name_DBT, jthis, dbtji);
+}
+
+/*
+ * JNI: Dbt.create_data -- copy the native DBT's current data into a fresh
+ * Java byte[]; returns NULL on allocation failure (exception pending).
+ */
+JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_create_1data
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *db_this;
+ jbyteArray arr = NULL;
+ int len;
+
+ db_this = get_DBT_JAVAINFO(jnienv, jthis);
+ if (verify_non_null(jnienv, db_this)) {
+ len = db_this->dbt.size;
+ if ((arr = (*jnienv)->NewByteArray(jnienv, len)) == NULL)
+ goto out;
+ (*jnienv)->SetByteArrayRegion(jnienv, arr, 0, len,
+ db_this->dbt.data);
+ }
+out: return (arr);
+}
+
+/* JNI: Dbt.finalize -- release the native DBT_JAVAINFO when GC'd. */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ DBT_JAVAINFO *dbtji;
+
+ dbtji = get_DBT_JAVAINFO(jnienv, jthis);
+ if (dbtji) {
+ /* Free any data related to DBT here */
+ dbjit_destroy(dbtji);
+ }
+}
diff --git a/storage/bdb/libdb_java/java_info.c b/storage/bdb/libdb_java/java_info.c
new file mode 100644
index 00000000000..22fcbd23d46
--- /dev/null
+++ b/storage/bdb/libdb_java/java_info.c
@@ -0,0 +1,1125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_info.c,v 11.46 2002/08/29 14:22:23 margo Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+
+/****************************************************************
+ *
+ * Callback functions
+ *
+ * These are the C trampolines registered with the DB/DB_ENV handles;
+ * each recovers the per-handle JAVAINFO struct and forwards to the
+ * corresponding dbji_/dbjie_ dispatcher, which re-enters Java.
+ */
+
+/* Secondary-key extraction trampoline (DB->associate). */
+static int Db_assoc_callback(DB *db,
+    const DBT *key,
+    const DBT *data,
+    DBT *retval)
+{
+ DB_JAVAINFO *dbinfo;
+
+ DB_ASSERT(db != NULL);
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_assoc(dbinfo, db, dbinfo->jdbref,
+    key, data, retval));
+}
+
+/* Progress-feedback trampoline (DB->set_feedback). */
+static void Db_feedback_callback(DB *db, int opcode, int percent)
+{
+ DB_JAVAINFO *dbinfo;
+
+ DB_ASSERT(db != NULL);
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ dbji_call_feedback(dbinfo, db, dbinfo->jdbref, opcode, percent);
+}
+
+/* Recno-append trampoline (DB->set_append_recno). */
+static int Db_append_recno_callback(DB *db, DBT *dbt, db_recno_t recno)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_append_recno(dbinfo, db, dbinfo->jdbref, dbt, recno));
+}
+
+/* Btree key-comparison trampoline (DB->set_bt_compare). */
+static int Db_bt_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_bt_compare(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
+}
+
+/* Btree prefix-length trampoline (DB->set_bt_prefix). */
+static size_t Db_bt_prefix_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_bt_prefix(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
+}
+
+/* Duplicate-comparison trampoline (DB->set_dup_compare). */
+static int Db_dup_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_dup_compare(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
+}
+
+/* Hash-function trampoline (DB->set_h_hash). */
+static u_int32_t Db_h_hash_callback(DB *db, const void *data, u_int32_t len)
+{
+ DB_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_h_hash(dbinfo, db, dbinfo->jdbref, data, len));
+}
+
+/* Environment progress-feedback trampoline (DB_ENV->set_feedback). */
+static void DbEnv_feedback_callback(DB_ENV *dbenv, int opcode, int percent)
+{
+ DB_ENV_JAVAINFO *dbinfo;
+
+ DB_ASSERT(dbenv != NULL);
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
+ dbjie_call_feedback(dbinfo, dbenv, dbinfo->jenvref, opcode, percent);
+}
+
+/*
+ * Replication send trampoline (DB_ENV->set_rep_transport).
+ * NOTE(review): this passes (envid, flags) while dbjie_call_rep_transport's
+ * parameter list below is declared (..., int flags, int envid) -- the names
+ * appear swapped between caller and callee.  Presumably the values reach the
+ * Java send() method in its expected order anyway; verify against the Java
+ * DbRepTransport interface before touching either side.
+ */
+static int DbEnv_rep_transport_callback(DB_ENV *dbenv,
+     const DBT *control, const DBT *rec,
+     int envid, u_int32_t flags)
+{
+ DB_ENV_JAVAINFO *dbinfo;
+
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
+ return (dbjie_call_rep_transport(dbinfo, dbenv,
+     dbinfo->jenvref, control, rec, envid, (int)flags));
+}
+
+/* Transaction-recovery dispatch trampoline (DB_ENV->set_app_dispatch). */
+static int DbEnv_app_dispatch_callback(DB_ENV *dbenv, DBT *dbt,
+          DB_LSN *lsn, db_recops recops)
+{
+ DB_ENV_JAVAINFO *dbinfo;
+
+ DB_ASSERT(dbenv != NULL);
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
+ return (dbjie_call_app_dispatch(dbinfo, dbenv, dbinfo->jenvref, dbt,
+     lsn, recops));
+}
+
+/****************************************************************
+ *
+ * Implementation of class DBT_javainfo
+ */
+
+/* Allocate and zero a DBT_JAVAINFO; returns NULL on allocation failure. */
+DBT_JAVAINFO *
+dbjit_construct()
+{
+ DBT_JAVAINFO *dbjit;
+ int err;
+
+ /*XXX should return err*/
+ if ((err = __os_malloc(NULL, sizeof(DBT_JAVAINFO), &dbjit)) != 0)
+  return (NULL);
+
+ memset(dbjit, 0, sizeof(DBT_JAVAINFO));
+ return (dbjit);
+}
+
+/* Free a DBT_JAVAINFO; it must not still be pinned (LOCKED). */
+void dbjit_destroy(DBT_JAVAINFO *dbjit)
+{
+ DB_ASSERT(!F_ISSET(dbjit, DBT_JAVAINFO_LOCKED));
+ /* Extra paranoia */
+ memset(dbjit, 0, sizeof(DBT_JAVAINFO));
+ (void)__os_free(NULL, dbjit);
+}
+
+/****************************************************************
+ *
+ * Implementation of class DB_ENV_JAVAINFO
+ */
+
+/*
+ * create/initialize an object
+ *
+ * Caches the JavaVM pointer (so callbacks can attach their thread later)
+ * and takes global references on the default error callback and the Java
+ * DbEnv, so they survive across native calls.  Returns NULL on failure
+ * (exception posted when the VM lookup fails).
+ */
+DB_ENV_JAVAINFO *
+dbjie_construct(JNIEnv *jnienv,
+  jobject jenv,
+  jobject default_errcall,
+  int is_dbopen)
+{
+ DB_ENV_JAVAINFO *dbjie;
+ int err;
+
+ /*XXX should return err*/
+ if ((err = __os_malloc(NULL, sizeof(DB_ENV_JAVAINFO), &dbjie)) != 0)
+  return (NULL);
+ memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
+ dbjie->is_dbopen = is_dbopen;
+
+ if ((*jnienv)->GetJavaVM(jnienv, &dbjie->javavm) != 0) {
+  __os_free(NULL, dbjie);
+  report_exception(jnienv, "cannot get Java VM", 0, 0);
+  return (NULL);
+ }
+
+ /*
+ * The default error call just prints to the 'System.err'
+ * stream. If the user does set_errcall to null, we'll
+ * want to have a reference to set it back to.
+ *
+ * Why do we have always set db_errcall to our own callback?
+ * Because it makes the interaction between setting the
+ * error prefix, error stream, and user's error callback
+ * that much easier.
+ */
+ dbjie->default_errcall = NEW_GLOBAL_REF(jnienv, default_errcall);
+ dbjie->errcall = NEW_GLOBAL_REF(jnienv, default_errcall);
+ dbjie->jenvref = NEW_GLOBAL_REF(jnienv, jenv);
+ return (dbjie);
+}
+
+/* release all objects held by this one: global refs and owned buffers */
+void dbjie_dealloc(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
+{
+ if (dbjie->feedback != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbjie->feedback);
+  dbjie->feedback = NULL;
+ }
+ if (dbjie->app_dispatch != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbjie->app_dispatch);
+  dbjie->app_dispatch = NULL;
+ }
+ if (dbjie->errcall != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbjie->errcall);
+  dbjie->errcall = NULL;
+ }
+ if (dbjie->default_errcall != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbjie->default_errcall);
+  dbjie->default_errcall = NULL;
+ }
+ if (dbjie->jenvref != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbjie->jenvref);
+  dbjie->jenvref = NULL;
+ }
+
+ if (dbjie->conflict != NULL) {
+  __os_free(NULL, dbjie->conflict);
+  dbjie->conflict = NULL;
+  dbjie->conflict_size = 0;
+ }
+ if (dbjie->errpfx != NULL) {
+  __os_free(NULL, dbjie->errpfx);
+  dbjie->errpfx = NULL;
+ }
+}
+
+/* free this object, releasing anything allocated on its behalf */
+void dbjie_destroy(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
+{
+ dbjie_dealloc(dbjie, jnienv);
+
+ /* Extra paranoia */
+ memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
+ (void)__os_free(NULL, dbjie);
+}
+
+/*
+ * Attach to the current thread that is running and
+ * return that. We use the java virtual machine
+ * that we saved in the constructor.
+ */
+JNIEnv *
+dbjie_get_jnienv(DB_ENV_JAVAINFO *dbjie)
+{
+ /*
+ * Note:
+ * Different versions of the JNI disagree on the signature
+ * for AttachCurrentThread. The most recent documentation
+ * seems to say that (JNIEnv **) is correct, but newer
+ * JNIs seem to use (void **), oddly enough.
+ */
+#ifdef JNI_VERSION_1_2
+ void *attachret = 0;
+#else
+ JNIEnv *attachret = 0;
+#endif
+
+ /*
+ * This should always succeed, as we are called via
+ * some Java activity. I think therefore I am (a thread).
+ */
+ if ((*dbjie->javavm)->AttachCurrentThread(dbjie->javavm, &attachret, 0)
+     != 0)
+  return (0);
+
+ return ((JNIEnv *)attachret);
+}
+
+/* Return the current error prefix as a Java String (NULL if unset). */
+jstring
+dbjie_get_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
+{
+ return (get_java_string(jnienv, dbjie->errpfx));
+}
+
+/* Swap the stored error-callback global ref; NULL restores the default. */
+void
+dbjie_set_errcall(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jobject new_errcall)
+{
+ /*
+ * If the new_errcall is null, we'll set the error call
+ * to the default one.
+ */
+ if (new_errcall == NULL)
+  new_errcall = dbjie->default_errcall;
+
+ DELETE_GLOBAL_REF(jnienv, dbjie->errcall);
+ dbjie->errcall = NEW_GLOBAL_REF(jnienv, new_errcall);
+}
+
+/* Replace the C copy of the error prefix (owned by this struct). */
+void
+dbjie_set_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jstring errpfx)
+{
+ if (dbjie->errpfx != NULL)
+  __os_free(NULL, dbjie->errpfx);
+
+ if (errpfx)
+  dbjie->errpfx = get_c_string(jnienv, errpfx);
+ else
+  dbjie->errpfx = NULL;
+}
+
+/* Take ownership of a new lock-conflict matrix, freeing any previous one. */
+void
+dbjie_set_conflict(DB_ENV_JAVAINFO *dbjie, u_char *newarr, size_t size)
+{
+ if (dbjie->conflict != NULL)
+  (void)__os_free(NULL, dbjie->conflict);
+ dbjie->conflict = newarr;
+ dbjie->conflict_size = size;
+}
+
+/*
+ * Install (or clear, when jfeedback is NULL) the environment feedback
+ * callback, keeping a global ref to the Java object for later dispatch.
+ */
+void dbjie_set_feedback_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+          DB_ENV *dbenv, jobject jfeedback)
+{
+ int err;
+
+ if (dbjie->feedback != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbjie->feedback);
+ }
+ if (jfeedback == NULL) {
+  if ((err = dbenv->set_feedback(dbenv, NULL)) != 0)
+   report_exception(jnienv, "set_feedback failed",
+        err, 0);
+ }
+ else {
+  if ((err = dbenv->set_feedback(dbenv,
+      DbEnv_feedback_callback)) != 0)
+   report_exception(jnienv, "set_feedback failed",
+        err, 0);
+ }
+
+ dbjie->feedback = NEW_GLOBAL_REF(jnienv, jfeedback);
+}
+
+/*
+ * Dispatch an environment feedback callback into Java: attach the current
+ * thread, look up DbEnvFeedback.feedback(DbEnv, int, int) and invoke it.
+ * Failures are reported to stderr only (we are deep inside a C callback).
+ */
+void dbjie_call_feedback(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
+    int opcode, int percent)
+{
+ JNIEnv *jnienv;
+ jclass feedback_class;
+ jmethodID id;
+
+ COMPQUIET(dbenv, NULL);
+ jnienv = dbjie_get_jnienv(dbjie);
+ if (jnienv == NULL) {
+  fprintf(stderr, "Cannot attach to current thread!\n");
+  return;
+ }
+
+ if ((feedback_class =
+     get_class(jnienv, name_DbEnvFeedback)) == NULL) {
+  fprintf(stderr, "Cannot find callback class %s\n",
+      name_DbEnvFeedback);
+  return; /* An exception has been posted. */
+ }
+ id = (*jnienv)->GetMethodID(jnienv, feedback_class,
+        "feedback",
+        "(Lcom/sleepycat/db/DbEnv;II)V");
+ if (!id) {
+  fprintf(stderr, "Cannot find callback method feedback\n");
+  return;
+ }
+
+ (*jnienv)->CallVoidMethod(jnienv, dbjie->feedback, id,
+      jenv, (jint)opcode, (jint)percent);
+}
+
+/*
+ * Install the replication transport callback and keep a global ref to
+ * the Java DbRepTransport object.
+ */
+void dbjie_set_rep_transport_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+        DB_ENV *dbenv, int id, jobject jtransport)
+{
+ int err;
+
+ if (dbjie->rep_transport != NULL)
+  DELETE_GLOBAL_REF(jnienv, dbjie->rep_transport);
+
+ err = dbenv->set_rep_transport(dbenv, id,
+           DbEnv_rep_transport_callback);
+ verify_return(jnienv, err, 0);
+
+ dbjie->rep_transport = NEW_GLOBAL_REF(jnienv, jtransport);
+}
+
+/*
+ * Dispatch the replication "send" callback into Java.
+ * NOTE(review): the parameter list here is (..., int flags, int envid),
+ * but the sole caller, DbEnv_rep_transport_callback above, passes
+ * (envid, flags) in that position -- so the names here look swapped
+ * relative to the values received.  Confirm the intended argument order
+ * of the Java send() method before changing anything.
+ */
+int dbjie_call_rep_transport(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv,
+        jobject jenv, const DBT *control,
+        const DBT *rec, int flags, int envid)
+{
+ JNIEnv *jnienv;
+ jclass rep_transport_class;
+ jmethodID jid;
+ jobject jcdbt, jrdbt;
+
+ COMPQUIET(dbenv, NULL);
+ jnienv = dbjie_get_jnienv(dbjie);
+ if (jnienv == NULL) {
+  fprintf(stderr, "Cannot attach to current thread!\n");
+  return (0);
+ }
+
+ if ((rep_transport_class =
+     get_class(jnienv, name_DbRepTransport)) == NULL) {
+  fprintf(stderr, "Cannot find callback class %s\n",
+      name_DbRepTransport);
+  return (0); /* An exception has been posted. */
+ }
+ jid = (*jnienv)->GetMethodID(jnienv, rep_transport_class,
+         "send",
+         "(Lcom/sleepycat/db/DbEnv;"
+         "Lcom/sleepycat/db/Dbt;"
+         "Lcom/sleepycat/db/Dbt;II)I");
+
+ if (!jid) {
+  fprintf(stderr, "Cannot find callback method send\n");
+  return (0);
+ }
+
+ jcdbt = get_const_Dbt(jnienv, control, NULL);
+ jrdbt = get_const_Dbt(jnienv, rec, NULL);
+
+ return (*jnienv)->CallIntMethod(jnienv, dbjie->rep_transport, jid, jenv,
+     jcdbt, jrdbt, flags, envid);
+}
+
+/*
+ * Install (or clear, when japp_dispatch is NULL) the transaction-recovery
+ * dispatch callback, keeping a global ref to the Java object.
+ */
+void dbjie_set_app_dispatch_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+       DB_ENV *dbenv, jobject japp_dispatch)
+{
+ int err;
+
+ if (dbjie->app_dispatch != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbjie->app_dispatch);
+ }
+ if (japp_dispatch == NULL) {
+  if ((err = dbenv->set_app_dispatch(dbenv, NULL)) != 0)
+   report_exception(jnienv, "set_app_dispatch failed",
+        err, 0);
+ }
+ else {
+  if ((err = dbenv->set_app_dispatch(dbenv,
+      DbEnv_app_dispatch_callback)) != 0)
+   report_exception(jnienv, "set_app_dispatch failed",
+        err, 0);
+ }
+
+ dbjie->app_dispatch = NEW_GLOBAL_REF(jnienv, japp_dispatch);
+}
+
+/*
+ * Dispatch a recovery log-record into Java:
+ * DbTxnRecover.app_dispatch(DbEnv, Dbt, DbLsn, int).
+ * Returns whatever the Java method returns, or 0 on dispatch failure.
+ */
+int dbjie_call_app_dispatch(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
+       DBT *dbt, DB_LSN *lsn, int recops)
+{
+ JNIEnv *jnienv;
+ jclass app_dispatch_class;
+ jmethodID id;
+ jobject jdbt;
+ jobject jlsn;
+
+ COMPQUIET(dbenv, NULL);
+ jnienv = dbjie_get_jnienv(dbjie);
+ if (jnienv == NULL) {
+  fprintf(stderr, "Cannot attach to current thread!\n");
+  return (0);
+ }
+
+ if ((app_dispatch_class =
+     get_class(jnienv, name_DbTxnRecover)) == NULL) {
+  fprintf(stderr, "Cannot find callback class %s\n",
+      name_DbTxnRecover);
+  return (0); /* An exception has been posted. */
+ }
+ id = (*jnienv)->GetMethodID(jnienv, app_dispatch_class,
+        "app_dispatch",
+        "(Lcom/sleepycat/db/DbEnv;"
+        "Lcom/sleepycat/db/Dbt;"
+        "Lcom/sleepycat/db/DbLsn;"
+        "I)I");
+ if (!id) {
+  fprintf(stderr, "Cannot find callback method app_dispatch\n");
+  return (0);
+ }
+
+ jdbt = get_Dbt(jnienv, dbt, NULL);
+
+ if (lsn == NULL)
+  jlsn = NULL;
+ else
+  jlsn = get_DbLsn(jnienv, *lsn);
+
+ return (*jnienv)->CallIntMethod(jnienv, dbjie->app_dispatch, id, jenv,
+     jdbt, jlsn, recops);
+}
+
+/* Accessor: the currently installed error-callback global ref. */
+jobject dbjie_get_errcall(DB_ENV_JAVAINFO *dbjie)
+{
+ return (dbjie->errcall);
+}
+
+/* Accessor: nonzero when this info belongs to a Db.open, not a DbEnv. */
+jint dbjie_is_dbopen(DB_ENV_JAVAINFO *dbjie)
+{
+ return (dbjie->is_dbopen);
+}
+
+/****************************************************************
+ *
+ * Implementation of class DB_JAVAINFO
+ */
+
+/*
+ * Allocate the per-DB native info: caches the JavaVM for callback thread
+ * attachment and takes a global ref on the Java Db.  Returns NULL on
+ * failure (exception posted when the VM lookup fails).
+ */
+DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jobject jdb, jint flags)
+{
+ DB_JAVAINFO *dbji;
+ int err;
+
+ /*XXX should return err*/
+ if ((err = __os_malloc(NULL, sizeof(DB_JAVAINFO), &dbji)) != 0)
+  return (NULL);
+
+ memset(dbji, 0, sizeof(DB_JAVAINFO));
+
+ if ((*jnienv)->GetJavaVM(jnienv, &dbji->javavm) != 0) {
+  report_exception(jnienv, "cannot get Java VM", 0, 0);
+  (void)__os_free(NULL, dbji);
+  return (NULL);
+ }
+ dbji->jdbref = NEW_GLOBAL_REF(jnienv, jdb);
+ dbji->construct_flags = flags;
+ return (dbji);
+}
+
+/* Release every callback global ref this DB_JAVAINFO holds. */
+void
+dbji_dealloc(DB_JAVAINFO *dbji, JNIEnv *jnienv)
+{
+ if (dbji->append_recno != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->append_recno);
+  dbji->append_recno = NULL;
+ }
+ if (dbji->assoc != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->assoc);
+  dbji->assoc = NULL;
+ }
+ if (dbji->bt_compare != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->bt_compare);
+  dbji->bt_compare = NULL;
+ }
+ if (dbji->bt_prefix != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix);
+  dbji->bt_prefix = NULL;
+ }
+ if (dbji->dup_compare != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->dup_compare);
+  dbji->dup_compare = NULL;
+ }
+ if (dbji->feedback != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->feedback);
+  dbji->feedback = NULL;
+ }
+ if (dbji->h_hash != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->h_hash);
+  dbji->h_hash = NULL;
+ }
+ if (dbji->jdbref != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->jdbref);
+  dbji->jdbref = NULL;
+ }
+}
+
+/* Release refs, then free the struct itself. */
+void
+dbji_destroy(DB_JAVAINFO *dbji, JNIEnv *jnienv)
+{
+ dbji_dealloc(dbji, jnienv);
+ __os_free(NULL, dbji);
+}
+
+/* Attach the current thread to the saved JavaVM and return its JNIEnv. */
+JNIEnv *dbji_get_jnienv(DB_JAVAINFO *dbji)
+{
+ /*
+ * Note:
+ * Different versions of the JNI disagree on the signature
+ * for AttachCurrentThread. The most recent documentation
+ * seems to say that (JNIEnv **) is correct, but newer
+ * JNIs seem to use (void **), oddly enough.
+ */
+#ifdef JNI_VERSION_1_2
+ void *attachret = 0;
+#else
+ JNIEnv *attachret = 0;
+#endif
+
+ /*
+ * This should always succeed, as we are called via
+ * some Java activity. I think therefore I am (a thread).
+ */
+ if ((*dbji->javavm)->AttachCurrentThread(dbji->javavm, &attachret, 0)
+     != 0)
+  return (0);
+
+ return ((JNIEnv *)attachret);
+}
+
+/* Accessor: flags the Java Db was constructed with. */
+jint dbji_get_flags(DB_JAVAINFO *dbji)
+{
+ return (dbji->construct_flags);
+}
+
+/*
+ * Install (or clear) the per-DB feedback callback; caches the Java
+ * method ID on first use and keeps a global ref to the callback object.
+ */
+void dbji_set_feedback_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+         DB *db, jobject jfeedback)
+{
+ jclass feedback_class;
+
+ if (dbji->feedback_method_id == NULL) {
+  if ((feedback_class =
+      get_class(jnienv, name_DbFeedback)) == NULL)
+   return; /* An exception has been posted. */
+  dbji->feedback_method_id =
+      (*jnienv)->GetMethodID(jnienv, feedback_class,
+        "feedback",
+        "(Lcom/sleepycat/db/Db;II)V");
+  if (dbji->feedback_method_id == NULL) {
+   /*
+   * XXX
+   * We should really have a better way
+   * to translate this to a Java exception class.
+   * In theory, it shouldn't happen.
+   */
+   report_exception(jnienv, "Cannot find callback method",
+        EFAULT, 0);
+   return;
+  }
+ }
+
+ if (dbji->feedback != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->feedback);
+ }
+ if (jfeedback == NULL) {
+  db->set_feedback(db, NULL);
+ }
+ else {
+  db->set_feedback(db, Db_feedback_callback);
+ }
+
+ dbji->feedback = NEW_GLOBAL_REF(jnienv, jfeedback);
+
+}
+
+/* Dispatch a per-DB feedback callback into Java (method ID pre-cached). */
+void dbji_call_feedback(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+   int opcode, int percent)
+{
+ JNIEnv *jnienv;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+  fprintf(stderr, "Cannot attach to current thread!\n");
+  return;
+ }
+
+ DB_ASSERT(dbji->feedback_method_id != NULL);
+ (*jnienv)->CallVoidMethod(jnienv, dbji->feedback,
+      dbji->feedback_method_id,
+      jdb, (jint)opcode, (jint)percent);
+}
+
+/*
+ * Install (or clear) the recno-append callback; caches the Java method ID
+ * on first use and keeps a global ref to the callback object.
+ */
+void dbji_set_append_recno_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+      DB *db, jobject jcallback)
+{
+ jclass append_recno_class;
+
+ if (dbji->append_recno_method_id == NULL) {
+  if ((append_recno_class =
+      get_class(jnienv, name_DbAppendRecno)) == NULL)
+   return; /* An exception has been posted. */
+  dbji->append_recno_method_id =
+      (*jnienv)->GetMethodID(jnienv, append_recno_class,
+        "db_append_recno",
+        "(Lcom/sleepycat/db/Db;"
+        "Lcom/sleepycat/db/Dbt;I)V");
+  if (dbji->append_recno_method_id == NULL) {
+   /*
+   * XXX
+   * We should really have a better way
+   * to translate this to a Java exception class.
+   * In theory, it shouldn't happen.
+   */
+   report_exception(jnienv, "Cannot find callback method",
+        EFAULT, 0);
+   return;
+  }
+ }
+
+ if (dbji->append_recno != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->append_recno);
+ }
+ if (jcallback == NULL) {
+  db->set_append_recno(db, NULL);
+ }
+ else {
+  db->set_append_recno(db, Db_append_recno_callback);
+ }
+
+ dbji->append_recno = NEW_GLOBAL_REF(jnienv, jcallback);
+}
+
+/*
+ * Dispatch the recno-append callback into Java, then copy the (possibly
+ * user-modified) DBT contents back into a C-owned buffer that DB will
+ * free via DB_DBT_APPMALLOC.
+ */
+extern int dbji_call_append_recno(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+      DBT *dbt, jint recno)
+{
+ JNIEnv *jnienv;
+ jobject jresult;
+ DBT_JAVAINFO *dbtji;
+ LOCKED_DBT lresult;
+ DB_ENV *dbenv;
+ u_char *bytearray;
+ int err;
+
+ jnienv = dbji_get_jnienv(dbji);
+ dbenv = db->dbenv;
+ if (jnienv == NULL) {
+  fprintf(stderr, "Cannot attach to current thread!\n");
+  return (0);
+ }
+
+ jresult = get_Dbt(jnienv, dbt, &dbtji);
+
+ DB_ASSERT(dbji->append_recno_method_id != NULL);
+ (*jnienv)->CallVoidMethod(jnienv, dbji->append_recno,
+      dbji->append_recno_method_id,
+      jdb, jresult, recno);
+
+ /*
+ * The underlying C API requires that an errno be returned
+ * on error. Java users know nothing of errnos, so we
+ * allow them to throw exceptions instead. We leave the
+ * exception in place and return DB_JAVA_CALLBACK to the C API
+ * that called us. Eventually the DB->get will fail and
+ * when java prepares to throw an exception in
+ * report_exception(), this will be spotted as a special case,
+ * and the original exception will be preserved.
+ *
+ * Note: we have sometimes noticed strange behavior with
+ * exceptions under Linux 1.1.7 JVM. (i.e. multiple calls
+ * to ExceptionOccurred() may report different results).
+ * Currently we don't know of any problems related to this
+ * in our code, but if it pops up in the future, users are
+ * encouraged to get a more recent JVM.
+ */
+ if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
+  return (DB_JAVA_CALLBACK);
+
+ /*
+ * Now get the DBT back from java, because the user probably
+ * changed it. We'll have to copy back the array too and let
+ * our caller free it.
+ *
+ * We expect that the user *has* changed the DBT (why else would
+ * they set up an append_recno callback?) so we don't
+ * worry about optimizing the unchanged case.
+ */
+ if ((err = locked_dbt_get(&lresult, jnienv, dbenv, jresult, inOp)) != 0)
+  return (err);
+
+ memcpy(dbt, &lresult.javainfo->dbt, sizeof(DBT));
+ if ((err = __os_malloc(dbenv, dbt->size, &bytearray)) != 0)
+  goto out;
+
+ memcpy(bytearray, dbt->data, dbt->size);
+ dbt->data = bytearray;
+ dbt->flags |= DB_DBT_APPMALLOC;
+
+ out:
+ locked_dbt_put(&lresult, jnienv, dbenv);
+ return (err);
+}
+
+/*
+ * Install (or clear) the secondary-key-create callback via DB->associate;
+ * caches the Java method ID on first use and keeps a global ref to the
+ * callback object only when associate() succeeded.
+ */
+void dbji_set_assoc_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+      DB *db, DB_TXN *txn, DB *second,
+      jobject jcallback, int flags)
+{
+ jclass assoc_class;
+ int err;
+
+ if (dbji->assoc_method_id == NULL) {
+  if ((assoc_class =
+      get_class(jnienv, name_DbSecondaryKeyCreate)) == NULL)
+   return; /* An exception has been posted. */
+  dbji->assoc_method_id =
+      (*jnienv)->GetMethodID(jnienv, assoc_class,
+        "secondary_key_create",
+        "(Lcom/sleepycat/db/Db;"
+        "Lcom/sleepycat/db/Dbt;"
+        "Lcom/sleepycat/db/Dbt;"
+        "Lcom/sleepycat/db/Dbt;)I");
+  if (dbji->assoc_method_id == NULL) {
+   /*
+   * XXX
+   * We should really have a better way
+   * to translate this to a Java exception class.
+   * In theory, it shouldn't happen.
+   */
+   report_exception(jnienv, "Cannot find callback method",
+        EFAULT, 0);
+   return;
+  }
+ }
+
+ if (dbji->assoc != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->assoc);
+  dbji->assoc = NULL;
+ }
+
+ if (jcallback == NULL)
+  err = db->associate(db, txn, second, NULL, flags);
+ else
+  err = db->associate(db, txn, second, Db_assoc_callback, flags);
+
+ if (verify_return(jnienv, err, 0))
+  dbji->assoc = NEW_GLOBAL_REF(jnienv, jcallback);
+}
+
+/*
+ * Dispatch the secondary-key-create callback into Java and copy the
+ * resulting Dbt into `result`.  If the returned bytes do not alias the
+ * key/value buffers we were given, a C-owned copy is made and flagged
+ * DB_DBT_APPMALLOC so DB frees it.
+ */
+extern int dbji_call_assoc(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+      const DBT *key, const DBT *value, DBT *result)
+{
+ JNIEnv *jnienv;
+ jobject jresult;
+ LOCKED_DBT lresult;
+ DB_ENV *dbenv;
+ int err;
+ int sz;
+ u_char *bytearray;
+ jint retval;
+
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+  fprintf(stderr, "Cannot attach to current thread!\n");
+  return (0);
+ }
+
+ DB_ASSERT(dbji->assoc_method_id != NULL);
+
+ dbenv = db->dbenv;
+ jresult = create_default_object(jnienv, name_DBT);
+
+ retval = (*jnienv)->CallIntMethod(jnienv, dbji->assoc,
+       dbji->assoc_method_id, jdb,
+       get_const_Dbt(jnienv, key, NULL),
+       get_const_Dbt(jnienv, value, NULL),
+       jresult);
+ if (retval != 0)
+  return (retval);
+
+ if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
+  return (DB_JAVA_CALLBACK);
+
+ if ((err = locked_dbt_get(&lresult, jnienv, dbenv, jresult, inOp)) != 0)
+  return (err);
+
+ sz = lresult.javainfo->dbt.size;
+ if (sz > 0) {
+  bytearray = (u_char *)lresult.javainfo->dbt.data;
+
+  /*
+  * If the byte array is in the range of one of the
+  * arrays passed to us we can use it directly.
+  * If not, we must create our own array and
+  * fill it in with the java array. Since
+  * the java array may disappear and we don't
+  * want to keep its memory locked indefinitely,
+  * we cannot just pin the array.
+  *
+  * XXX consider pinning the array, and having
+  * some way for the C layer to notify the java
+  * layer when it can be unpinned.
+  */
+  if ((bytearray < (u_char *)key->data ||
+       bytearray + sz > (u_char *)key->data + key->size) &&
+      (bytearray < (u_char *)value->data ||
+       bytearray + sz > (u_char *)value->data + value->size)) {
+
+   result->flags |= DB_DBT_APPMALLOC;
+   if ((err = __os_malloc(dbenv, sz, &bytearray)) != 0)
+    goto out;
+   memcpy(bytearray, lresult.javainfo->dbt.data, sz);
+  }
+  result->data = bytearray;
+  result->size = sz;
+ }
+ out:
+ locked_dbt_put(&lresult, jnienv, dbenv);
+ return (err);
+}
+
+/*
+ * Install (or clear) the btree comparison callback; caches the Java
+ * method ID on first use and keeps a global ref to the callback object.
+ */
+void dbji_set_bt_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+    DB *db, jobject jcompare)
+{
+ jclass bt_compare_class;
+
+ if (dbji->bt_compare_method_id == NULL) {
+  if ((bt_compare_class =
+      get_class(jnienv, name_DbBtreeCompare)) == NULL)
+   return; /* An exception has been posted. */
+  dbji->bt_compare_method_id =
+      (*jnienv)->GetMethodID(jnienv, bt_compare_class,
+        "bt_compare",
+        "(Lcom/sleepycat/db/Db;"
+        "Lcom/sleepycat/db/Dbt;"
+        "Lcom/sleepycat/db/Dbt;)I");
+  if (dbji->bt_compare_method_id == NULL) {
+   /*
+   * XXX
+   * We should really have a better way
+   * to translate this to a Java exception class.
+   * In theory, it shouldn't happen.
+   */
+   report_exception(jnienv, "Cannot find callback method",
+        EFAULT, 0);
+   return;
+  }
+ }
+
+ if (dbji->bt_compare != NULL) {
+  DELETE_GLOBAL_REF(jnienv, dbji->bt_compare);
+ }
+ if (jcompare == NULL) {
+  db->set_bt_compare(db, NULL);
+ }
+ else {
+  db->set_bt_compare(db, Db_bt_compare_callback);
+ }
+
+ dbji->bt_compare = NEW_GLOBAL_REF(jnienv, jcompare);
+}
+
+/* Dispatch the btree comparison callback into Java and return its result. */
+int dbji_call_bt_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+    const DBT *dbt1, const DBT *dbt2)
+{
+ JNIEnv *jnienv;
+ jobject jdbt1, jdbt2;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+  fprintf(stderr, "Cannot attach to current thread!\n");
+  return (0);
+ }
+
+ jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
+ jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
+
+ DB_ASSERT(dbji->bt_compare_method_id != NULL);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->bt_compare,
+     dbji->bt_compare_method_id,
+     jdb, jdbt1, jdbt2);
+}
+
+void dbji_set_bt_prefix_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jprefix)
+{
+ jclass bt_prefix_class;
+
+ if (dbji->bt_prefix_method_id == NULL) {
+ if ((bt_prefix_class =
+ get_class(jnienv, name_DbBtreePrefix)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->bt_prefix_method_id =
+ (*jnienv)->GetMethodID(jnienv, bt_prefix_class,
+ "bt_prefix",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->bt_prefix_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->bt_prefix != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix);
+ }
+ if (jprefix == NULL) {
+ db->set_bt_prefix(db, NULL);
+ }
+ else {
+ db->set_bt_prefix(db, Db_bt_prefix_callback);
+ }
+
+ dbji->bt_prefix = NEW_GLOBAL_REF(jnienv, jprefix);
+}
+
+size_t dbji_call_bt_prefix(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2)
+{
+ JNIEnv *jnienv;
+ jobject jdbt1, jdbt2;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
+ jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
+
+ DB_ASSERT(dbji->bt_prefix_method_id != NULL);
+ return (size_t)(*jnienv)->CallIntMethod(jnienv, dbji->bt_prefix,
+ dbji->bt_prefix_method_id,
+ jdb, jdbt1, jdbt2);
+}
+
+void dbji_set_dup_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jcompare)
+{
+ jclass dup_compare_class;
+
+ if (dbji->dup_compare_method_id == NULL) {
+ if ((dup_compare_class =
+ get_class(jnienv, name_DbDupCompare)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->dup_compare_method_id =
+ (*jnienv)->GetMethodID(jnienv, dup_compare_class,
+ "dup_compare",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->dup_compare_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->dup_compare != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbji->dup_compare);
+
+ if (jcompare == NULL)
+ db->set_dup_compare(db, NULL);
+ else
+ db->set_dup_compare(db, Db_dup_compare_callback);
+
+ dbji->dup_compare = NEW_GLOBAL_REF(jnienv, jcompare);
+}
+
+int dbji_call_dup_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2)
+{
+ JNIEnv *jnienv;
+ jobject jdbt1, jdbt2;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
+ jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
+
+ DB_ASSERT(dbji->dup_compare_method_id != NULL);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->dup_compare,
+ dbji->dup_compare_method_id,
+ jdb, jdbt1, jdbt2);
+}
+
+void dbji_set_h_hash_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, jobject jhash)
+{
+ jclass h_hash_class;
+
+ if (dbji->h_hash_method_id == NULL) {
+ if ((h_hash_class =
+ get_class(jnienv, name_DbHash)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->h_hash_method_id =
+ (*jnienv)->GetMethodID(jnienv, h_hash_class,
+ "hash",
+ "(Lcom/sleepycat/db/Db;"
+ "[BI)I");
+ if (dbji->h_hash_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
+ }
+
+ if (dbji->h_hash != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbji->h_hash);
+
+ if (jhash == NULL)
+ db->set_h_hash(db, NULL);
+ else
+ db->set_h_hash(db, Db_h_hash_callback);
+
+ dbji->h_hash = NEW_GLOBAL_REF(jnienv, jhash);
+}
+
+int dbji_call_h_hash(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const void *data, int len)
+{
+ JNIEnv *jnienv;
+ jbyteArray jdata;
+
+ COMPQUIET(db, NULL);
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
+ }
+
+ DB_ASSERT(dbji->h_hash_method_id != NULL);
+
+ if ((jdata = (*jnienv)->NewByteArray(jnienv, len)) == NULL)
+ return (0); /* An exception has been posted by the JVM */
+ (*jnienv)->SetByteArrayRegion(jnienv, jdata, 0, len, (void *)data);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->h_hash,
+ dbji->h_hash_method_id,
+ jdb, jdata, len);
+}
diff --git a/storage/bdb/libdb_java/java_info.h b/storage/bdb/libdb_java/java_info.h
new file mode 100644
index 00000000000..bda83db420e
--- /dev/null
+++ b/storage/bdb/libdb_java/java_info.h
@@ -0,0 +1,221 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: java_info.h,v 11.35 2002/08/29 14:22:23 margo Exp $
+ */
+
+#ifndef _JAVA_INFO_H_
+#define _JAVA_INFO_H_
+
+/*
+ * "Info" classes for Java implementation of Berkeley DB API.
+ * These classes hold extra information for which there is
+ * no room or counterpart in the base classes used in the C API.
+ * In the case of a DBT, the DBT_javainfo class is stored in the
+ * 'private' variable of the java Dbt, and the DBT_javainfo is subclassed
+ * from a DBT. In the case of DB and DB_ENV, the appropriate
+ * info objects are pointed to by the DB and DB_ENV objects.
+ * This is convenient to implement callbacks.
+ */
+
+/****************************************************************
+ *
+ * Declaration of class DBT_javainfo
+ *
+ * A DBT_javainfo is created whenever a Dbt (java) object is created,
+ * and a pointer to it is stored in its private info storage.
+ * It is subclassed from DBT, because we must retain some extra
+ * information in it while it is in use. In particular, when
+ * a java array is associated with it, we need to keep a Globally
+ * Locked reference to it so it is not GC'd. This reference is
+ * destroyed when the Dbt is GC'd.
+ */
typedef struct _dbt_javainfo
{
	DBT dbt;		/* underlying C DBT; this struct "is-a" DBT */
	DB *db;			/* associated DB */
	jobject dbtref;		/* the java Dbt object */
	jbyteArray array;	/* the java array object -
				   this is only valid during the API call */
	int offset;		/* offset into the Java array */

/* Set while a LOCKED_DBT has pinned this Dbt; guards against reentrant use. */
#define DBT_JAVAINFO_LOCKED	0x01	/* a LOCKED_DBT has been created */
	u_int32_t flags;
}
DBT_JAVAINFO;	/* used with all 'dbtji' functions */
+
+/* create/initialize a DBT_JAVAINFO object */
+extern DBT_JAVAINFO *dbjit_construct();
+
+/* free this DBT_JAVAINFO, releasing anything allocated on its behalf */
+extern void dbjit_destroy(DBT_JAVAINFO *dbjit);
+
+/****************************************************************
+ *
+ * Declaration of class DB_ENV_JAVAINFO
+ *
+ * A DB_ENV_JAVAINFO is allocated and stuffed into the cj_internal
+ * and the db_errpfx for every DB_ENV created. It holds a
+ * little extra info that is needed to support callbacks.
+ *
+ * There's a bit of trickery here, because we have built this
+ * above a layer that has a C function callback that gets
+ * invoked when an error occurs. One of the C callback's arguments
+ * is the prefix from the DB_ENV, but since we stuffed a pointer
+ * to our own DB_ENV_JAVAINFO into the prefix, we get that object as an
+ * argument to the C callback. Thus, the C callback can have
+ * access to much more than just the prefix, and it needs that
+ * to call back into the Java enviroment.
+ *
+ * The DB_ENV_JAVAINFO object holds a copy of the Java Virtual Machine,
+ * which is needed to attach to the current running thread
+ * whenever we need to make a callback. (This is more reliable
+ * than our previous approach, which was to save the thread
+ * that created the DbEnv). It also has the Java callback object,
+ * as well as a 'default' callback object that is used when the
+ * caller sets the callback to null. It also has the original
+ * error prefix, since we overwrote the one in the DB_ENV.
+ * There are also fields that are unrelated to the handling
+ * of callbacks, but are convenient to attach to a DB_ENV.
+ *
+ * Note: We assume that the Java layer is the only one
+ * fiddling with the contents of db_errpfx, db_errcall, cj_internal
+ * for a DB_ENV that was created via Java. Since the Java layer should
+ * have the only pointer to such a DB_ENV, this should be true.
+ */
typedef struct _db_env_javainfo
{
	JavaVM *javavm;		/* JVM handle, used to attach callback threads */
	int is_dbopen;		/* nonzero if this env was created for Db.open */
	char *errpfx;		/* saved error prefix (DB_ENV's is repurposed) */
	jobject jenvref;	/* global reference */
	jobject default_errcall;/* global reference */
	jobject errcall;	/* global reference */
	jobject feedback;	/* global reference */
	jobject rep_transport;	/* global reference */
	jobject app_dispatch;	/* global reference */
	jobject recovery_init;	/* global reference */
	u_char *conflict;	/* copy of the lock-conflict matrix */
	size_t conflict_size;	/* byte size of 'conflict' */
	jint construct_flags;	/* flags passed at construction time */
}
DB_ENV_JAVAINFO;	/* used with all 'dbjie' functions */
+
+/* create/initialize an object */
+extern DB_ENV_JAVAINFO *dbjie_construct(JNIEnv *jnienv,
+ jobject jenv,
+ jobject default_errcall,
+ int is_dbopen);
+
+/* release all objects held by this this one */
+extern void dbjie_dealloc(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
+
+/* free this object, releasing anything allocated on its behalf */
+extern void dbjie_destroy(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
+
+/* This gets the environment for the current thread */
+extern JNIEnv *dbjie_get_jnienv(DB_ENV_JAVAINFO *);
+
+extern void dbjie_set_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ jstring errpfx);
+extern jstring dbjie_get_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
+extern void dbjie_set_errcall(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ jobject new_errcall);
+extern void dbjie_set_conflict(DB_ENV_JAVAINFO *, u_char *v, size_t sz);
+extern void dbjie_set_feedback_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject value);
+extern void dbjie_call_feedback(DB_ENV_JAVAINFO *, DB_ENV *dbenv, jobject jenv,
+ int opcode, int percent);
+extern void dbjie_set_recovery_init_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject value);
+extern int dbjie_call_recovery_init(DB_ENV_JAVAINFO *, DB_ENV *dbenv,
+ jobject jenv);
+extern void dbjie_set_rep_transport_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, int id, jobject obj);
+extern int dbjie_call_rep_transport(DB_ENV_JAVAINFO *, DB_ENV *dbenv,
+ jobject jenv, const DBT *control,
+ const DBT *rec, int envid, int flags);
+extern void dbjie_set_app_dispatch_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject value);
+extern int dbjie_call_app_dispatch(DB_ENV_JAVAINFO *,
+ DB_ENV *dbenv, jobject jenv,
+ DBT *dbt, DB_LSN *lsn, int recops);
+extern jobject dbjie_get_errcall(DB_ENV_JAVAINFO *) ;
+extern jint dbjie_is_dbopen(DB_ENV_JAVAINFO *);
+
+/****************************************************************
+ *
+ * Declaration of class DB_JAVAINFO
+ *
+ * A DB_JAVAINFO is allocated and stuffed into the cj_internal field
+ * for every DB created. It holds a little extra info that is needed
+ * to support callbacks.
+ *
+ * Note: We assume that the Java layer is the only one
+ * fiddling with the contents of cj_internal
+ * for a DB that was created via Java. Since the Java layer should
+ * have the only pointer to such a DB, this should be true.
+ */
typedef struct _db_javainfo
{
	JavaVM *javavm;		/* JVM handle, used to attach callback threads */
	jobject jdbref;		/* global reference */
	jobject append_recno;	/* global reference */
	jobject assoc;		/* global reference */
	jobject bt_compare;	/* global reference */
	jobject bt_prefix;	/* global reference */
	jobject dup_compare;	/* global reference */
	jobject feedback;	/* global reference */
	jobject h_hash;		/* global reference */
	/* jmethodIDs below are resolved lazily and cached on first use. */
	jmethodID append_recno_method_id;
	jmethodID assoc_method_id;
	jmethodID bt_compare_method_id;
	jmethodID bt_prefix_method_id;
	jmethodID dup_compare_method_id;
	jmethodID feedback_method_id;
	jmethodID h_hash_method_id;
	jint construct_flags;	/* flags passed at construction time */
} DB_JAVAINFO;
+
+/* create/initialize an object */
+extern DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jobject jdb, jint flags);
+
+/* release all objects held by this this one */
+extern void dbji_dealloc(DB_JAVAINFO *, JNIEnv *jnienv);
+
+/* free this object, releasing anything allocated on its behalf */
+extern void dbji_destroy(DB_JAVAINFO *, JNIEnv *jnienv);
+
+/* This gets the environment for the current thread */
+extern JNIEnv *dbji_get_jnienv();
+extern jint dbji_get_flags();
+
+extern void dbji_set_feedback_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern void dbji_call_feedback(DB_JAVAINFO *, DB *db, jobject jdb,
+ int opcode, int percent);
+
+extern void dbji_set_append_recno_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_append_recno(DB_JAVAINFO *, DB *db, jobject jdb,
+ DBT *dbt, jint recno);
+extern void dbji_set_assoc_object(DB_JAVAINFO *, JNIEnv *jnienv,
+ DB *db, DB_TXN *txn, DB *second,
+ jobject value, int flags);
+extern int dbji_call_assoc(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *key, const DBT* data, DBT *result);
+extern void dbji_set_bt_compare_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_bt_compare(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2);
+extern void dbji_set_bt_prefix_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern size_t dbji_call_bt_prefix(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2);
+extern void dbji_set_dup_compare_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_dup_compare(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *dbt1, const DBT *dbt2);
+extern void dbji_set_h_hash_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
+extern int dbji_call_h_hash(DB_JAVAINFO *, DB *db, jobject jdb,
+ const void *data, int len);
+
+#endif /* !_JAVA_INFO_H_ */
diff --git a/storage/bdb/libdb_java/java_locked.c b/storage/bdb/libdb_java/java_locked.c
new file mode 100644
index 00000000000..9534a387b40
--- /dev/null
+++ b/storage/bdb/libdb_java/java_locked.c
@@ -0,0 +1,321 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_locked.c,v 11.32 2002/08/06 05:19:07 bostic Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "java_util.h"
+
+/****************************************************************
+ *
+ * Implementation of functions to manipulate LOCKED_DBT.
+ */
/*
 * locked_dbt_get --
 *	Fill in a LOCKED_DBT from a java Dbt object and, when the Dbt
 *	carries a java byte array, pin that array in memory so the C DB
 *	API can read/write it directly.  Must be paired with
 *	locked_dbt_put().  Returns 0 on success or EINVAL (with a java
 *	exception posted) on misuse.
 */
int
locked_dbt_get(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv,
    jobject jdbt, OpKind kind)
{
	DBT *dbt;

	COMPQUIET(dbenv, NULL);
	/* Reset all LOCKED_DBT state before inspecting the java object. */
	ldbt->jdbt = jdbt;
	ldbt->java_array_len = 0;
	ldbt->flags = 0;
	ldbt->kind = kind;
	ldbt->java_data = 0;
	ldbt->before_data = 0;
	ldbt->javainfo =
	    (DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, jdbt);

	if (!verify_non_null(jnienv, ldbt->javainfo)) {
		report_exception(jnienv, "Dbt is gc'ed?", 0, 0);
		F_SET(ldbt, LOCKED_ERROR);
		return (EINVAL);
	}
	/* Reject reentrant use: a Dbt may back only one operation at a time. */
	if (F_ISSET(ldbt->javainfo, DBT_JAVAINFO_LOCKED)) {
		report_exception(jnienv, "Dbt is already in use", 0, 0);
		F_SET(ldbt, LOCKED_ERROR);
		return (EINVAL);
	}
	dbt = &ldbt->javainfo->dbt;

	/* Either data will be created on the fly, or fetch the java array. */
	if ((*jnienv)->GetBooleanField(jnienv,
	    jdbt, fid_Dbt_must_create_data) != 0)
		F_SET(ldbt, LOCKED_CREATE_DATA);
	else
		ldbt->javainfo->array =
		    (*jnienv)->GetObjectField(jnienv, jdbt, fid_Dbt_data);

	/* Mirror the java Dbt's scalar fields into the C DBT. */
	dbt->size = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_size);
	dbt->ulen = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_ulen);
	dbt->dlen = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_dlen);
	dbt->doff = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_doff);
	dbt->flags = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_flags);
	ldbt->javainfo->offset = (*jnienv)->GetIntField(jnienv, jdbt,
	    fid_Dbt_offset);

	/*
	 * If no flags are set, use default behavior of DB_DBT_MALLOC.
	 * We can safely set dbt->flags because flags will never be copied
	 * back to the Java Dbt.
	 */
	if (kind != inOp &&
	    !F_ISSET(dbt, DB_DBT_USERMEM | DB_DBT_MALLOC | DB_DBT_REALLOC))
		F_SET(dbt, DB_DBT_MALLOC);

	/*
	 * If this is requested to be realloc with an existing array,
	 * we cannot use the underlying realloc, because the array we
	 * will pass in is allocated by the Java VM, not us, so it
	 * cannot be realloced.  We simulate the reallocation by using
	 * USERMEM and reallocating the java array when a ENOMEM error
	 * occurs.  We change the flags during the operation, and they
	 * are reset when the operation completes (in locked_dbt_put).
	 */
	if (F_ISSET(dbt, DB_DBT_REALLOC) && ldbt->javainfo->array != NULL) {
		F_CLR(dbt, DB_DBT_REALLOC);
		F_SET(dbt, DB_DBT_USERMEM);
		F_SET(ldbt, LOCKED_REALLOC_NONNULL);
	}

	if ((F_ISSET(dbt, DB_DBT_USERMEM) || kind != outOp) &&
	    !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {

		/*
		 * If writing with DB_DBT_USERMEM
		 * or it's a set (or get/set) operation,
		 * then the data should point to a java array.
		 * Note that outOp means data is coming out of the database
		 * (it's a get).  inOp means data is going into the database
		 * (either a put, or a key input).
		 */
		if (!ldbt->javainfo->array) {
			report_exception(jnienv, "Dbt.data is null", 0, 0);
			F_SET(ldbt, LOCKED_ERROR);
			return (EINVAL);
		}

		/* Verify other parameters */
		ldbt->java_array_len = (*jnienv)->GetArrayLength(jnienv,
		    ldbt->javainfo->array);
		if (ldbt->javainfo->offset < 0 ) {
			report_exception(jnienv, "Dbt.offset illegal", 0, 0);
			F_SET(ldbt, LOCKED_ERROR);
			return (EINVAL);
		}
		if (dbt->size + ldbt->javainfo->offset > ldbt->java_array_len) {
			report_exception(jnienv,
			    "Dbt.size + Dbt.offset greater than array length",
			    0, 0);
			F_SET(ldbt, LOCKED_ERROR);
			return (EINVAL);
		}

		/* Pin the array; unpinned (and copied back) in put/realloc. */
		ldbt->java_data = (*jnienv)->GetByteArrayElements(jnienv,
		    ldbt->javainfo->array,
		    (jboolean *)0);

		dbt->data = ldbt->before_data = ldbt->java_data +
		    ldbt->javainfo->offset;
	}
	else if (!F_ISSET(ldbt, LOCKED_CREATE_DATA)) {

		/*
		 * If writing with DB_DBT_MALLOC or DB_DBT_REALLOC with
		 * a null array, then the data is allocated by DB.
		 */
		dbt->data = ldbt->before_data = 0;
	}

	/*
	 * RPC makes the assumption that if dbt->size is non-zero, there
	 * is data to copy from dbt->data.  We may have set dbt->size
	 * to a non-zero integer above but decided not to point
	 * dbt->data at anything.  (One example is if we're doing an outOp
	 * with an already-used Dbt whose values we expect to just
	 * overwrite.)
	 *
	 * Clean up the dbt fields so we don't run into trouble.
	 * (Note that doff, dlen, and flags all may contain meaningful
	 * values.)
	 */
	if (dbt->data == NULL)
		dbt->size = dbt->ulen = 0;

	F_SET(ldbt->javainfo, DBT_JAVAINFO_LOCKED);
	return (0);
}
+
+/*
+ * locked_dbt_put must be called for any LOCKED_DBT struct before a
+ * java handler returns to the user. It can be thought of as the
+ * LOCKED_DBT destructor. It copies any information from temporary
+ * structures back to user accessible arrays, and of course must free
+ * memory and remove references. The LOCKED_DBT itself is not freed,
+ * as it is expected to be a stack variable.
+ *
+ * Note that after this call, the LOCKED_DBT can still be used in
+ * limited ways, e.g. to look at values in the C DBT.
+ */
void
locked_dbt_put(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv)
{
	DBT *dbt;

	dbt = &ldbt->javainfo->dbt;

	/*
	 * If the error flag was set, we never succeeded
	 * in allocating storage.
	 */
	if (F_ISSET(ldbt, LOCKED_ERROR))
		return;

	if (((F_ISSET(dbt, DB_DBT_USERMEM) ||
	      F_ISSET(ldbt, LOCKED_REALLOC_NONNULL)) ||
	     ldbt->kind == inOp) && !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {

		/*
		 * If writing with DB_DBT_USERMEM or it's a set
		 * (or get/set) operation, then the data may be already in
		 * the java array, in which case, we just need to release it.
		 * If DB didn't put it in the array (indicated by the
		 * dbt->data changing), we need to do that
		 *
		 * NOTE(review): the test below compares before_data with
		 * java_data, which also differ whenever Dbt.offset != 0
		 * (before_data = java_data + offset at lock time), and the
		 * copy length is dbt->ulen rather than dbt->size — confirm
		 * both are intended.
		 */
		if (ldbt->before_data != ldbt->java_data) {
			(*jnienv)->SetByteArrayRegion(jnienv,
						      ldbt->javainfo->array,
						      ldbt->javainfo->offset,
						      dbt->ulen,
						      ldbt->before_data);
		}
		/* Unpin the array (mode 0 copies changes back and frees). */
		(*jnienv)->ReleaseByteArrayElements(jnienv,
						    ldbt->javainfo->array,
						    ldbt->java_data, 0);
		dbt->data = 0;
	}
	else if (F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC) &&
		 ldbt->kind != inOp && !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {

		/*
		 * If writing with DB_DBT_MALLOC, or DB_DBT_REALLOC
		 * with a zero buffer, then the data was allocated by
		 * DB.  If dbt->data is zero, it means an error
		 * occurred (and should have been already reported).
		 */
		if (dbt->data) {

			/*
			 * In the case of SET_RANGE, the key is inOutOp
			 * and when not found, its data will be left as
			 * its original value.  Only copy and free it
			 * here if it has been allocated by DB
			 * (dbt->data has changed).
			 */
			if (dbt->data != ldbt->before_data) {
				jbyteArray newarr;

				if ((newarr = (*jnienv)->NewByteArray(jnienv,
				    dbt->size)) == NULL) {
					/* The JVM has posted an exception. */
					F_SET(ldbt, LOCKED_ERROR);
					return;
				}
				/* Hand the fresh array to the java Dbt. */
				(*jnienv)->SetObjectField(jnienv, ldbt->jdbt,
							  fid_Dbt_data,
							  newarr);
				ldbt->javainfo->offset = 0;
				(*jnienv)->SetByteArrayRegion(jnienv,
				    newarr, 0, dbt->size,
				    (jbyte *)dbt->data);
				/* DB allocated it; free with DB's allocator. */
				(void)__os_ufree(dbenv, dbt->data);
				dbt->data = 0;
			}
		}
	}

	/*
	 * The size field may have changed after a DB API call,
	 * so we set that back too.
	 */
	(*jnienv)->SetIntField(jnienv, ldbt->jdbt, fid_Dbt_size, dbt->size);
	ldbt->javainfo->array = NULL;
	F_CLR(ldbt->javainfo, DBT_JAVAINFO_LOCKED);
}
+
+/*
+ * Realloc the java array to receive data if the DBT used
+ * DB_DBT_REALLOC flag with a non-null data array, and the last
+ * operation set the size field to an amount greater than ulen.
+ * Return 1 if these conditions are met, otherwise 0. This is used
+ * internally to simulate the operations needed for DB_DBT_REALLOC.
+ */
+int locked_dbt_realloc(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv)
+{
+ DBT *dbt;
+
+ COMPQUIET(dbenv, NULL);
+ dbt = &ldbt->javainfo->dbt;
+
+ if (!F_ISSET(ldbt, LOCKED_REALLOC_NONNULL) ||
+ F_ISSET(ldbt, LOCKED_ERROR) || dbt->size <= dbt->ulen)
+ return (0);
+
+ (*jnienv)->ReleaseByteArrayElements(jnienv, ldbt->javainfo->array,
+ ldbt->java_data, 0);
+
+ /*
+ * We allocate a new array of the needed size.
+ * We'll set the offset to 0, as the old offset
+ * really doesn't make any sense.
+ */
+ if ((ldbt->javainfo->array = (*jnienv)->NewByteArray(jnienv,
+ dbt->size)) == NULL) {
+ F_SET(ldbt, LOCKED_ERROR);
+ return (0);
+ }
+
+ ldbt->java_array_len = dbt->ulen = dbt->size;
+ ldbt->javainfo->offset = 0;
+ (*jnienv)->SetObjectField(jnienv, ldbt->jdbt, fid_Dbt_data,
+ ldbt->javainfo->array);
+ ldbt->java_data = (*jnienv)->GetByteArrayElements(jnienv,
+ ldbt->javainfo->array, (jboolean *)0);
+ memcpy(ldbt->java_data, ldbt->before_data, dbt->ulen);
+ dbt->data = ldbt->before_data = ldbt->java_data;
+ return (1);
+}
+
+/****************************************************************
+ *
+ * Implementation of functions to manipulate LOCKED_STRING.
+ */
+int
+locked_string_get(LOCKED_STRING *ls, JNIEnv *jnienv, jstring jstr)
+{
+ ls->jstr = jstr;
+
+ if (jstr == 0)
+ ls->string = 0;
+ else
+ ls->string = (*jnienv)->GetStringUTFChars(jnienv, jstr,
+ (jboolean *)0);
+ return (0);
+}
+
+void locked_string_put(LOCKED_STRING *ls, JNIEnv *jnienv)
+{
+ if (ls->jstr)
+ (*jnienv)->ReleaseStringUTFChars(jnienv, ls->jstr, ls->string);
+}
diff --git a/storage/bdb/libdb_java/java_locked.h b/storage/bdb/libdb_java/java_locked.h
new file mode 100644
index 00000000000..a79d929abee
--- /dev/null
+++ b/storage/bdb/libdb_java/java_locked.h
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: java_locked.h,v 11.18 2002/05/07 16:12:42 dda Exp $
+ */
+
+#ifndef _JAVA_LOCKED_H_
+#define _JAVA_LOCKED_H_
+
+/*
+ * Used as argument to locked_dbt_get().
+ */
/* Direction of data flow for a Dbt relative to the database. */
typedef enum _OpKind {
	inOp,	/* setting data in database (passing data in) */
	outOp,	/* getting data from database to user memory */
	inOutOp	/* both getting/setting data */
} OpKind;
+
+/*
+ * LOCKED_DBT
+ *
+ * A stack variable LOCKED_DBT should be declared for each Dbt used in a
+ * native call to the DB API. Before the DBT can be used, locked_dbt_get()
+ * must be called to temporarily convert any java array found in the
+ * Dbt (which has a pointer to a DBT_JAVAINFO struct) to actual bytes
+ * in memory that remain locked in place. These bytes are used during
+ * the call to the DB C API, and are released and/or copied back when
+ * locked_dbt_put is called.
+ */
typedef struct _locked_dbt
{
	/* these are accessed externally to locked_dbt_ functions */
	DBT_JAVAINFO *javainfo;		/* C-side info for the java Dbt */
	unsigned int java_array_len;	/* length of the pinned java array */
	jobject jdbt;			/* the java Dbt object */

	/* these are for used internally by locked_dbt_ functions */
	jbyte *java_data;		/* pinned array elements, or 0 */
	jbyte *before_data;		/* java_data + offset at lock time */
	OpKind kind;			/* data direction for this call */

#define LOCKED_ERROR		0x01	/* error occurred */
#define LOCKED_CREATE_DATA	0x02	/* must create data on the fly */
#define LOCKED_REALLOC_NONNULL	0x04	/* DB_DBT_REALLOC flag, nonnull data */
	u_int32_t flags;
} LOCKED_DBT;
+
+/* Fill the LOCKED_DBT struct and lock the Java byte array */
+extern int locked_dbt_get(LOCKED_DBT *, JNIEnv *, DB_ENV *, jobject, OpKind);
+
+/* unlock the Java byte array */
+extern void locked_dbt_put(LOCKED_DBT *, JNIEnv *, DB_ENV *);
+
+/* realloc the Java byte array */
+extern int locked_dbt_realloc(LOCKED_DBT *, JNIEnv *, DB_ENV *);
+
+/*
+ * LOCKED_STRING
+ *
+ * A LOCKED_STRING exists temporarily to convert a java jstring object
+ * to a char *. Because the memory for the char * string is
+ * managed by the JVM, it must be released when we are done
+ * looking at it. Typically, locked_string_get() is called at the
+ * beginning of a function for each jstring object, and locked_string_put
+ * is called at the end of each function for each LOCKED_STRING.
+ */
typedef struct _locked_string
{
	/* this accessed externally to locked_string_ functions */
	const char *string;	/* UTF-8 chars owned by the JVM */

	/* this is used internally by locked_string_ functions */
	jstring jstr;		/* source jstring; needed to release above */
} LOCKED_STRING;
+
+extern int locked_string_get(LOCKED_STRING *, JNIEnv *jnienv, jstring jstr);
+extern void locked_string_put(LOCKED_STRING *, JNIEnv *jnienv); /* this unlocks and frees mem */
+
+#endif /* !_JAVA_LOCKED_H_ */
diff --git a/storage/bdb/libdb_java/java_util.c b/storage/bdb/libdb_java/java_util.c
new file mode 100644
index 00000000000..5a538ee0785
--- /dev/null
+++ b/storage/bdb/libdb_java/java_util.c
@@ -0,0 +1,890 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_util.c,v 11.49 2002/09/13 03:09:30 mjc Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+
+#include "db_int.h"
+#include "java_util.h"
+
+#ifdef DB_WIN32
+#define sys_errlist _sys_errlist
+#define sys_nerr _sys_nerr
+#endif
+
+/*
+ * Simple class names of the wrapped com.sleepycat.db classes; they are
+ * resolved against DB_PACKAGE_NAME by get_class().
+ */
+const char * const name_DB = "Db";
+const char * const name_DB_BTREE_STAT = "DbBtreeStat";
+const char * const name_DBC = "Dbc";
+const char * const name_DB_DEADLOCK_EX = "DbDeadlockException";
+const char * const name_DB_ENV = "DbEnv";
+const char * const name_DB_EXCEPTION = "DbException";
+const char * const name_DB_HASH_STAT = "DbHashStat";
+const char * const name_DB_LOCK = "DbLock";
+const char * const name_DB_LOCK_STAT = "DbLockStat";
+const char * const name_DB_LOCKNOTGRANTED_EX = "DbLockNotGrantedException";
+const char * const name_DB_LOGC = "DbLogc";
+const char * const name_DB_LOG_STAT = "DbLogStat";
+const char * const name_DB_LSN = "DbLsn";
+const char * const name_DB_MEMORY_EX = "DbMemoryException";
+const char * const name_DB_MPOOL_FSTAT = "DbMpoolFStat";
+const char * const name_DB_MPOOL_STAT = "DbMpoolStat";
+const char * const name_DB_PREPLIST = "DbPreplist";
+const char * const name_DB_QUEUE_STAT = "DbQueueStat";
+const char * const name_DB_REP_STAT = "DbRepStat";
+const char * const name_DB_RUNRECOVERY_EX = "DbRunRecoveryException";
+const char * const name_DBT = "Dbt";
+const char * const name_DB_TXN = "DbTxn";
+const char * const name_DB_TXN_STAT = "DbTxnStat";
+const char * const name_DB_TXN_STAT_ACTIVE = "DbTxnStat$Active";
+const char * const name_DB_UTIL = "DbUtil";
+const char * const name_DbAppendRecno = "DbAppendRecno";
+const char * const name_DbBtreeCompare = "DbBtreeCompare";
+const char * const name_DbBtreePrefix = "DbBtreePrefix";
+const char * const name_DbDupCompare = "DbDupCompare";
+const char * const name_DbEnvFeedback = "DbEnvFeedback";
+const char * const name_DbErrcall = "DbErrcall";
+const char * const name_DbHash = "DbHash";
+const char * const name_DbLockRequest = "DbLockRequest";
+const char * const name_DbFeedback = "DbFeedback";
+const char * const name_DbRecoveryInit = "DbRecoveryInit";
+const char * const name_DbRepTransport = "DbRepTransport";
+const char * const name_DbSecondaryKeyCreate = "DbSecondaryKeyCreate";
+const char * const name_DbTxnRecover = "DbTxnRecover";
+const char * const name_RepElectResult = "DbEnv$RepElectResult";
+const char * const name_RepProcessMessage = "DbEnv$RepProcessMessage";
+
+const char * const string_signature = "Ljava/lang/String;";
+
+/* Field IDs cached once by one_time_init(). */
+jfieldID fid_Dbt_data;
+jfieldID fid_Dbt_offset;
+jfieldID fid_Dbt_size;
+jfieldID fid_Dbt_ulen;
+jfieldID fid_Dbt_dlen;
+jfieldID fid_Dbt_doff;
+jfieldID fid_Dbt_flags;
+jfieldID fid_Dbt_private_dbobj_;
+jfieldID fid_Dbt_must_create_data;
+jfieldID fid_DbLockRequest_op;
+jfieldID fid_DbLockRequest_mode;
+jfieldID fid_DbLockRequest_timeout;
+jfieldID fid_DbLockRequest_obj;
+jfieldID fid_DbLockRequest_lock;
+jfieldID fid_RepProcessMessage_envid;
+
+/****************************************************************
+ *
+ * Utility functions used by "glue" functions.
+ */
+
+/*
+ * Do any one time initialization, especially initializing any
+ * unchanging methodIds, fieldIds, etc.
+ *
+ * Caches the jfieldIDs of the Dbt, DbLockRequest and
+ * DbEnv$RepProcessMessage classes.  If a class lookup fails we return
+ * immediately with the Java exception still posted, leaving the
+ * remaining field IDs uninitialized.
+ */
+void one_time_init(JNIEnv *jnienv)
+{
+ jclass cl;
+
+ if ((cl = get_class(jnienv, name_DBT)) == NULL)
+ return; /* An exception has been posted. */
+ fid_Dbt_data = (*jnienv)->GetFieldID(jnienv, cl, "data", "[B");
+ fid_Dbt_offset = (*jnienv)->GetFieldID(jnienv, cl, "offset", "I");
+ fid_Dbt_size = (*jnienv)->GetFieldID(jnienv, cl, "size", "I");
+ fid_Dbt_ulen = (*jnienv)->GetFieldID(jnienv, cl, "ulen", "I");
+ fid_Dbt_dlen = (*jnienv)->GetFieldID(jnienv, cl, "dlen", "I");
+ fid_Dbt_doff = (*jnienv)->GetFieldID(jnienv, cl, "doff", "I");
+ fid_Dbt_flags = (*jnienv)->GetFieldID(jnienv, cl, "flags", "I");
+ fid_Dbt_must_create_data = (*jnienv)->GetFieldID(jnienv, cl,
+ "must_create_data", "Z");
+ fid_Dbt_private_dbobj_ =
+ (*jnienv)->GetFieldID(jnienv, cl, "private_dbobj_", "J");
+
+ if ((cl = get_class(jnienv, name_DbLockRequest)) == NULL)
+ return; /* An exception has been posted. */
+ fid_DbLockRequest_op = (*jnienv)->GetFieldID(jnienv, cl, "op", "I");
+ fid_DbLockRequest_mode = (*jnienv)->GetFieldID(jnienv, cl, "mode", "I");
+ fid_DbLockRequest_timeout =
+ (*jnienv)->GetFieldID(jnienv, cl, "timeout", "I");
+ fid_DbLockRequest_obj = (*jnienv)->GetFieldID(jnienv, cl, "obj",
+ "Lcom/sleepycat/db/Dbt;");
+ fid_DbLockRequest_lock = (*jnienv)->GetFieldID(jnienv, cl, "lock",
+ "Lcom/sleepycat/db/DbLock;");
+
+ if ((cl = get_class(jnienv, name_RepProcessMessage)) == NULL)
+ return; /* An exception has been posted. */
+ fid_RepProcessMessage_envid =
+ (*jnienv)->GetFieldID(jnienv, cl, "envid", "I");
+}
+
+/*
+ * Get the private data from a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ *
+ * Returns NULL for a null Java object, or if the class lookup fails
+ * (in which case an exception has been posted).
+ */
+void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
+    jobject obj)
+{
+ jclass dbClass;
+ jfieldID id;
+ long_to_ptr lp;
+
+ if (!obj)
+ return (0);
+
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return (NULL); /* An exception has been posted. */
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
+ lp.java_long = (*jnienv)->GetLongField(jnienv, obj, id);
+ return (lp.ptr);
+}
+
+/*
+ * Set the private data in a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ *
+ * Silently does nothing if the class lookup fails (an exception is
+ * posted in that case).
+ */
+void set_private_dbobj(JNIEnv *jnienv, const char *classname,
+    jobject obj, void *value)
+{
+ long_to_ptr lp;
+ jclass dbClass;
+ jfieldID id;
+
+ lp.java_long = 0; /* no junk in case sizes mismatch */
+ lp.ptr = value;
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return; /* An exception has been posted. */
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
+ (*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
+}
+
+/*
+ * Fetch the 'side data' pointer stored in a Db/DbEnv object's
+ * private_info_ field.  The pointer travels through a Java long
+ * (64 bits), wide enough for a native pointer on supported platforms.
+ * Returns NULL for a null Java object or a failed class lookup
+ * (exception posted).
+ */
+void *get_private_info(JNIEnv *jnienv, const char *classname,
+    jobject obj)
+{
+ jclass cl;
+ jfieldID fid;
+ long_to_ptr conv;
+
+ if (obj == NULL)
+ return (NULL);
+
+ cl = get_class(jnienv, classname);
+ if (cl == NULL)
+ return (NULL); /* An exception has been posted. */
+
+ fid = (*jnienv)->GetFieldID(jnienv, cl, "private_info_", "J");
+ conv.java_long = (*jnienv)->GetLongField(jnienv, obj, fid);
+ return (conv.ptr);
+}
+
+/*
+ * Set the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ *
+ * Silently does nothing if the class lookup fails (an exception is
+ * posted in that case).
+ */
+void set_private_info(JNIEnv *jnienv, const char *classname,
+    jobject obj, void *value)
+{
+ long_to_ptr lp;
+ jclass dbClass;
+ jfieldID id;
+
+ lp.java_long = 0; /* no junk in case sizes mismatch */
+ lp.ptr = value;
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return; /* An exception has been posted. */
+ id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_info_", "J");
+ (*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
+}
+
+/*
+ * Given a non-qualified name (e.g. "foo"), get the class handle
+ * for the fully qualified name (e.g. "com.sleepycat.db.foo").
+ * Returns NULL with an exception posted if FindClass fails.
+ */
+jclass get_class(JNIEnv *jnienv, const char *classname)
+{
+ /*
+ * Note: PERFORMANCE: It should be possible to cache jclass's.
+ * If we do a NewGlobalRef on each one, we can keep them
+ * around in a table. A jclass is a jobject, and
+ * since NewGlobalRef returns a jobject, it isn't
+ * technically right, but it would likely work with
+ * most implementations. Possibly make it configurable.
+ */
+ char fullname[128];
+
+ /* fullname = "com/sleepycat/db/" + classname */
+ (void)snprintf(fullname, sizeof(fullname),
+ "%s%s", DB_PACKAGE_NAME, classname);
+ return ((*jnienv)->FindClass(jnienv, fullname));
+}
+
+/*
+ * Given a fully qualified name (e.g. "java.util.Hashtable")
+ * return the jclass object.  If it can't be found, a
+ * ClassNotFoundException is raised and NULL is returned.
+ * This is appropriate to be used for classes that may
+ * not be present.
+ */
+jclass get_fully_qualified_class(JNIEnv *jnienv, const char *classname)
+{
+ jclass result;
+
+ result = ((*jnienv)->FindClass(jnienv, classname));
+ if (result == NULL) {
+ jclass cnfe;
+ char message[1024];
+
+ cnfe = (*jnienv)->FindClass(jnienv,
+ "java/lang/ClassNotFoundException");
+ /*
+ * Use snprintf: the previous strncpy/strncat pair could
+ * leave the buffer unterminated and passed sizeof(message)
+ * as strncat's append limit, allowing an overflow.
+ * snprintf always bounds and NUL-terminates.
+ */
+ (void)snprintf(message, sizeof(message),
+ "%s: class not found", classname);
+ (*jnienv)->ThrowNew(jnienv, cnfe, message);
+ }
+ return (result);
+}
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be a DB object type; its JNI signature
+ * "Lcom/sleepycat/db/<object_classname>;" is built on the fly.
+ */
+void set_object_field(JNIEnv *jnienv, jclass class_of_this,
+    jobject jthis, const char *object_classname,
+    const char *name_of_field, jobject obj)
+{
+ char signature[512];
+ jfieldID id;
+
+ (void)snprintf(signature, sizeof(signature),
+ "L%s%s;", DB_PACKAGE_NAME, object_classname);
+ id = (*jnienv)->GetFieldID(
+ jnienv, class_of_this, name_of_field, signature);
+ (*jnienv)->SetObjectField(jnienv, jthis, id, obj);
+}
+
+/*
+ * Store an int value into the named "I" field of a Db* object.
+ */
+void set_int_field(JNIEnv *jnienv, jclass class_of_this,
+    jobject jthis, const char *name_of_field, jint value)
+{
+ jfieldID fid;
+
+ fid = (*jnienv)->GetFieldID(jnienv, class_of_this,
+ name_of_field, "I");
+ (*jnienv)->SetIntField(jnienv, jthis, fid, value);
+}
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be a Java long ("J") type.
+ */
+void set_long_field(JNIEnv *jnienv, jclass class_of_this,
+    jobject jthis, const char *name_of_field, jlong value)
+{
+ jfieldID id = (*jnienv)->GetFieldID(jnienv, class_of_this,
+ name_of_field, "J");
+ (*jnienv)->SetLongField(jnienv, jthis, id, value);
+}
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be a DbLsn object type; a new DbLsn wrapping a
+ * freshly allocated copy of 'value' is created via get_DbLsn().
+ */
+void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
+    jobject jthis, const char *name_of_field, DB_LSN value)
+{
+ set_object_field(jnienv, class_of_this, jthis, name_DB_LSN,
+ name_of_field, get_DbLsn(jnienv, value));
+}
+
+/*
+ * Report an exception back to the java side.
+ *
+ * err is the DB or errno error code; expect_mask is a bitmask of
+ * EXCEPTION_* values naming the checked Java exceptions the calling
+ * method's signature declares.  Chooses the matching Db*Exception
+ * subclass (or java.io.FileNotFoundException for an expected ENOENT)
+ * and throws it; DB_JAVA_CALLBACK means an exception is already
+ * pending and nothing new is thrown.
+ */
+void report_exception(JNIEnv *jnienv, const char *text,
+    int err, unsigned long expect_mask)
+{
+ jstring textString;
+ jclass dbexcept;
+ jclass javaexcept;
+ jthrowable obj;
+
+ textString = NULL;
+ dbexcept = NULL;
+ javaexcept = NULL;
+
+ switch (err) {
+ /*
+ * DB_JAVA_CALLBACK is returned by
+ * dbji_call_append_recno() (the append_recno callback)
+ * when the Java version of the callback has thrown
+ * an exception, and we want to pass the exception on.
+ * The exception has already been thrown, we
+ * don't want to throw a new one.
+ */
+ case DB_JAVA_CALLBACK:
+ break;
+ case ENOENT:
+ /*
+ * In this case there is a corresponding
+ * standard java exception type that we'll use.
+ * First we make sure that the calling function
+ * expected this kind of error, if not we give
+ * an 'internal error' DbException, since
+ * we must not throw an exception type that isn't
+ * declared in the signature.
+ *
+ * We'll make this a little more general if/when
+ * we add more java standard exceptions.
+ */
+ if ((expect_mask & EXCEPTION_FILE_NOT_FOUND) != 0) {
+ javaexcept = (*jnienv)->FindClass(jnienv,
+ "java/io/FileNotFoundException");
+ }
+ else {
+ char errstr[1024];
+
+ snprintf(errstr, sizeof(errstr),
+ "internal error: unexpected errno: %s",
+ text);
+ textString = get_java_string(jnienv,
+ errstr);
+ dbexcept = get_class(jnienv,
+ name_DB_EXCEPTION);
+ }
+ break;
+ case DB_RUNRECOVERY:
+ dbexcept = get_class(jnienv,
+ name_DB_RUNRECOVERY_EX);
+ break;
+ case DB_LOCK_DEADLOCK:
+ dbexcept = get_class(jnienv, name_DB_DEADLOCK_EX);
+ break;
+ default:
+ dbexcept = get_class(jnienv, name_DB_EXCEPTION);
+ break;
+ }
+ if (dbexcept != NULL) {
+ if (textString == NULL)
+ textString = get_java_string(jnienv, text);
+ if ((obj = create_exception(jnienv, textString, err, dbexcept))
+ != NULL)
+ (*jnienv)->Throw(jnienv, obj);
+ /* Otherwise, an exception has been posted. */
+ }
+ else if (javaexcept != NULL)
+ (*jnienv)->ThrowNew(jnienv, javaexcept, text);
+ else
+ fprintf(stderr,
+ "report_exception: failed to create an exception\n");
+}
+
+/*
+ * Report an exception back to the java side, for the specific
+ * case of DB_LOCK_NOTGRANTED, as more things are added to the
+ * constructor of this type of exception.
+ *
+ * Invokes DbLockNotGrantedException(String, int op, int mode,
+ * Dbt obj, DbLock lock, int index) and throws the result.
+ */
+void report_notgranted_exception(JNIEnv *jnienv, const char *text,
+    db_lockop_t op, db_lockmode_t mode,
+    jobject jdbt, jobject jlock, int index)
+{
+ jstring textString;
+ jclass dbexcept;
+ jthrowable obj;
+ jmethodID mid;
+
+ if ((dbexcept = get_class(jnienv, name_DB_LOCKNOTGRANTED_EX)) == NULL)
+ return; /* An exception has been posted. */
+ textString = get_java_string(jnienv, text);
+
+ mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "<init>",
+ "(Ljava/lang/String;II"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/DbLock;I)V");
+ if ((obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept,
+ mid, textString, op, mode, jdbt, jlock, index)) != NULL)
+ (*jnienv)->Throw(jnienv, obj);
+ else
+ fprintf(stderr,
+ "report_notgranted_exception: failed to create an exception\n");
+}
+
+/*
+ * Create an exception object and return it.
+ * The given class must have a constructor that has a
+ * constructor with args (java.lang.String text, int errno);
+ * DbException and its subclasses fit this bill.
+ *
+ * Returns NULL (with a JNI error printed to stderr) when the
+ * constructor cannot be resolved; the caller must check.
+ */
+jobject create_exception(JNIEnv *jnienv, jstring text,
+    int err, jclass dbexcept)
+{
+ jthrowable obj;
+ jmethodID mid;
+
+ mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "<init>",
+ "(Ljava/lang/String;I)V");
+ if (mid != NULL)
+ obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept, mid,
+ text, err);
+ else {
+ fprintf(stderr, "Cannot get exception init method ID!\n");
+ obj = NULL;
+ }
+
+ return (obj);
+}
+
+/*
+ * Report an error via the errcall mechanism.
+ *
+ * Calls errcall.errcall(prefix, message) on the user-supplied
+ * DbErrcall object; falls back to stderr if the method cannot
+ * be resolved.
+ */
+void report_errcall(JNIEnv *jnienv, jobject errcall,
+    jstring prefix, const char *message)
+{
+ jmethodID id;
+ jclass errcall_class;
+ jstring msg;
+
+ if ((errcall_class = get_class(jnienv, name_DbErrcall)) == NULL)
+ return; /* An exception has been posted. */
+ msg = get_java_string(jnienv, message);
+
+ id = (*jnienv)->GetMethodID(jnienv, errcall_class,
+ "errcall",
+ "(Ljava/lang/String;Ljava/lang/String;)V");
+ if (id == NULL) {
+ fprintf(stderr, "Cannot get errcall methodID!\n");
+ fprintf(stderr, "error: %s\n", message);
+ return;
+ }
+
+ (*jnienv)->CallVoidMethod(jnienv, errcall, id, prefix, msg);
+}
+
+/*
+ * Guard against a null pointer: returns 1 when obj is non-NULL,
+ * otherwise throws a DbException (EINVAL, "null object") and
+ * returns 0.
+ */
+int verify_non_null(JNIEnv *jnienv, void *obj)
+{
+ if (obj != NULL)
+ return (1);
+
+ report_exception(jnienv, "null object", EINVAL, 0);
+ return (0);
+}
+
+/*
+ * Translate a DB return code: 0 yields 1 (success); any other code
+ * throws the matching exception via report_exception() and yields 0.
+ */
+int verify_return(JNIEnv *jnienv, int err, unsigned long expect_mask)
+{
+ if (err != 0) {
+ report_exception(jnienv, db_strerror(err), err, expect_mask);
+ return (0);
+ }
+
+ return (1);
+}
+
+/*
+ * Verify that there was no memory error due to undersized Dbt.
+ * If there is report a DbMemoryException, with the Dbt attached
+ * and return false (0), otherwise return true (1).
+ *
+ * Only ENOMEM on a DB_DBT_USERMEM Dbt whose size exceeds ulen is
+ * treated as "buffer too small"; any other case passes.
+ */
+int verify_dbt(JNIEnv *jnienv, int err, LOCKED_DBT *ldbt)
+{
+ DBT *dbt;
+ jobject exception;
+ jstring text;
+ jclass dbexcept;
+ jmethodID mid;
+
+ if (err != ENOMEM)
+ return (1);
+
+ dbt = &ldbt->javainfo->dbt;
+ if (!F_ISSET(dbt, DB_DBT_USERMEM) || dbt->size <= dbt->ulen)
+ return (1);
+
+ /* Create/throw an exception of type DbMemoryException */
+ if ((dbexcept = get_class(jnienv, name_DB_MEMORY_EX)) == NULL)
+ return (1); /* An exception has been posted. */
+ text = get_java_string(jnienv,
+ "Dbt not large enough for available data");
+ exception = create_exception(jnienv, text, ENOMEM, dbexcept);
+
+ /* Attach the dbt to the exception */
+ mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "set_dbt",
+ "(L" DB_PACKAGE_NAME "Dbt;)V");
+ (*jnienv)->CallVoidMethod(jnienv, exception, mid, ldbt->jdbt);
+ (*jnienv)->Throw(jnienv, exception);
+ return (0);
+}
+
+/*
+ * Create an object of the given class, calling its default constructor.
+ * Returns NULL if the class cannot be found (exception posted).
+ */
+jobject create_default_object(JNIEnv *jnienv, const char *class_name)
+{
+ jmethodID id;
+ jclass dbclass;
+
+ if ((dbclass = get_class(jnienv, class_name)) == NULL)
+ return (NULL); /* An exception has been posted. */
+ id = (*jnienv)->GetMethodID(jnienv, dbclass, "<init>", "()V");
+ return ((*jnienv)->NewObject(jnienv, dbclass, id));
+}
+
+/*
+ * Wrap a C DB object in a freshly constructed Java object of the
+ * given class, stashing the C pointer in its private_dbobj_ field.
+ * A new wrapper is created on every call, so this is intended for
+ * newly created DB objects.  Returns NULL for a NULL C pointer.
+ */
+jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj)
+{
+ jobject wrapper;
+
+ if (dbobj == NULL)
+ return (NULL);
+
+ wrapper = create_default_object(jnienv, class_name);
+ set_private_dbobj(jnienv, class_name, wrapper, dbobj);
+ return (wrapper);
+}
+
+/*
+ * Create a copy of the string, allocated with __os_malloc.
+ * The caller owns (and must free) the result.  Returns NULL on
+ * allocation failure.
+ */
+char *dup_string(const char *str)
+{
+ size_t len; /* size_t: an int could overflow on huge strings */
+ char *retval;
+ int err;
+
+ len = strlen(str) + 1;
+ if ((err = __os_malloc(NULL, len, &retval)) != 0)
+ return (NULL);
+ /* len is the exact byte count including the NUL, so memcpy is right */
+ memcpy(retval, str, len);
+ return (retval);
+}
+
+/*
+ * Build a java.lang.String from a C string; a NULL C string maps to
+ * a Java null.
+ */
+jstring get_java_string(JNIEnv *jnienv, const char* string)
+{
+ if (string == NULL)
+ return (NULL);
+
+ return ((*jnienv)->NewStringUTF(jnienv, string));
+}
+
+/*
+ * Create a copy of the java string using __os_malloc.
+ * Caller must free it.  Returns NULL if the UTF chars cannot be
+ * obtained (an OutOfMemoryError has been posted) or if the copy
+ * fails.
+ */
+char *get_c_string(JNIEnv *jnienv, jstring jstr)
+{
+ const char *utf;
+ char *retval;
+
+ utf = (*jnienv)->GetStringUTFChars(jnienv, jstr, NULL);
+ /*
+ * GetStringUTFChars returns NULL on failure; the old code fed
+ * that NULL to dup_string -> strlen(NULL), which is undefined
+ * behavior.
+ */
+ if (utf == NULL)
+ return (NULL);
+ retval = dup_string(utf);
+ (*jnienv)->ReleaseStringUTFChars(jnienv, jstr, utf);
+ return (retval);
+}
+
+/*
+ * Convert a java object to the various C pointers they represent.
+ * Each accessor returns NULL when the Java object is null (or, for
+ * the get_private_* based ones, when the class lookup fails).
+ */
+DB *get_DB(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB *)get_private_dbobj(jnienv, name_DB, obj));
+}
+
+DB_BTREE_STAT *get_DB_BTREE_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_BTREE_STAT *)
+ get_private_dbobj(jnienv, name_DB_BTREE_STAT, obj));
+}
+
+DBC *get_DBC(JNIEnv *jnienv, jobject obj)
+{
+ return ((DBC *)get_private_dbobj(jnienv, name_DBC, obj));
+}
+
+DB_ENV *get_DB_ENV(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_ENV *)get_private_dbobj(jnienv, name_DB_ENV, obj));
+}
+
+/* The *_JAVAINFO accessors read the private_info_ 'side data' field. */
+DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_ENV_JAVAINFO *)get_private_info(jnienv, name_DB_ENV, obj));
+}
+
+DB_HASH_STAT *get_DB_HASH_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_HASH_STAT *)
+ get_private_dbobj(jnienv, name_DB_HASH_STAT, obj));
+}
+
+DB_JAVAINFO *get_DB_JAVAINFO(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_JAVAINFO *)get_private_info(jnienv, name_DB, obj));
+}
+
+DB_LOCK *get_DB_LOCK(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_LOCK *)get_private_dbobj(jnienv, name_DB_LOCK, obj));
+}
+
+DB_LOGC *get_DB_LOGC(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_LOGC *)get_private_dbobj(jnienv, name_DB_LOGC, obj));
+}
+
+DB_LOG_STAT *get_DB_LOG_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_LOG_STAT *)
+ get_private_dbobj(jnienv, name_DB_LOG_STAT, obj));
+}
+
+DB_LSN *get_DB_LSN(JNIEnv *jnienv, /* DbLsn */ jobject obj) {
+ /*
+ * DbLsns that are created from within java (new DbLsn()) rather
+ * than from within C (get_DbLsn()) may not have a "private" DB_LSN
+ * structure allocated for them yet. We can't do this in the
+ * actual constructor (init_lsn()), because there's no way to pass
+ * in an initializing value in, and because the get_DbLsn()/
+ * convert_object() code path needs a copy of the pointer before
+ * the constructor gets called. Thus, get_DbLsn() allocates and
+ * fills a DB_LSN for the object it's about to create.
+ *
+ * Since "new DbLsn()" may reasonably be passed as an argument to
+ * functions such as DbEnv.log_put(), though, we need to make sure
+ * that DB_LSN's get allocated when the object was created from
+ * Java, too. Here, we lazily allocate a new private DB_LSN if
+ * and only if it turns out that we don't already have one.
+ *
+ * The only exception is if the DbLsn object is a Java null
+ * (in which case the jobject will also be NULL). Then a NULL
+ * DB_LSN is legitimate.
+ */
+ DB_LSN *lsnp;
+ int err;
+
+ if (obj == NULL)
+ return (NULL);
+
+ lsnp = (DB_LSN *)get_private_dbobj(jnienv, name_DB_LSN, obj);
+ if (lsnp == NULL) {
+ /* lazily allocate: allocation failure maps to NULL result */
+ if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsnp)) != 0)
+ return (NULL);
+ memset(lsnp, 0, sizeof(DB_LSN));
+ set_private_dbobj(jnienv, name_DB_LSN, obj, lsnp);
+ }
+
+ return (lsnp);
+}
+
+DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_MPOOL_FSTAT *)
+ get_private_dbobj(jnienv, name_DB_MPOOL_FSTAT, obj));
+}
+
+DB_MPOOL_STAT *get_DB_MPOOL_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_MPOOL_STAT *)
+ get_private_dbobj(jnienv, name_DB_MPOOL_STAT, obj));
+}
+
+DB_QUEUE_STAT *get_DB_QUEUE_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_QUEUE_STAT *)
+ get_private_dbobj(jnienv, name_DB_QUEUE_STAT, obj));
+}
+
+DB_TXN *get_DB_TXN(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_TXN *)get_private_dbobj(jnienv, name_DB_TXN, obj));
+}
+
+DB_TXN_STAT *get_DB_TXN_STAT(JNIEnv *jnienv, jobject obj)
+{
+ return ((DB_TXN_STAT *)
+ get_private_dbobj(jnienv, name_DB_TXN_STAT, obj));
+}
+
+/*
+ * A Dbt's private object is a DBT_JAVAINFO; the DBT callers want is
+ * embedded inside it, so return a pointer into that structure.
+ */
+DBT *get_DBT(JNIEnv *jnienv, jobject obj)
+{
+ DBT_JAVAINFO *ji;
+
+ ji = (DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj);
+ if (ji == NULL)
+ return (NULL);
+ else
+ return (&ji->dbt);
+}
+
+DBT_JAVAINFO *get_DBT_JAVAINFO(JNIEnv *jnienv, jobject obj)
+{
+ return ((DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj));
+}
+
+/*
+ * Convert a C pointer to the various Java objects they represent.
+ * Each builds a new wrapper via convert_object(); NULL maps to a
+ * Java null.
+ */
+jobject get_DbBtreeStat(JNIEnv *jnienv, DB_BTREE_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_BTREE_STAT, dbobj));
+}
+
+jobject get_Dbc(JNIEnv *jnienv, DBC *dbobj)
+{
+ return (convert_object(jnienv, name_DBC, dbobj));
+}
+
+jobject get_DbHashStat(JNIEnv *jnienv, DB_HASH_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_HASH_STAT, dbobj));
+}
+
+jobject get_DbLogc(JNIEnv *jnienv, DB_LOGC *dbobj)
+{
+ return (convert_object(jnienv, name_DB_LOGC, dbobj));
+}
+
+jobject get_DbLogStat(JNIEnv *jnienv, DB_LOG_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_LOG_STAT, dbobj));
+}
+
+/*
+ * LSNs are different since they are really normally
+ * treated as by-value objects. We actually create
+ * a pointer to the LSN and store that, deleting it
+ * when the LSN is GC'd.
+ *
+ * Returns NULL on allocation failure.
+ */
+jobject get_DbLsn(JNIEnv *jnienv, DB_LSN dbobj)
+{
+ DB_LSN *lsnp;
+ int err;
+
+ if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsnp)) != 0)
+ return (NULL);
+
+ /* copy the by-value LSN into the heap copy owned by the DbLsn */
+ memset(lsnp, 0, sizeof(DB_LSN));
+ *lsnp = dbobj;
+ return (convert_object(jnienv, name_DB_LSN, lsnp));
+}
+
+/*
+ * Shared code for get_Dbt and get_const_Dbt.
+ *
+ * XXX
+ * Currently we make no distinction in implementation of these
+ * two kinds of Dbts, although in the future we may want to.
+ * (It's probably easier to make the optimizations listed below
+ * with readonly Dbts).
+ *
+ * Dbt's created via this function are only used for a short lifetime,
+ * during callback functions. In the future, we should consider taking
+ * advantage of this by having a pool of Dbt objects instead of creating
+ * new ones each time. Because of multithreading, we may need an
+ * arbitrary number. We might also have sharing of the byte arrays
+ * used by the Dbts.
+ *
+ * If ret_info is non-NULL it receives the DBT_JAVAINFO attached to
+ * the new Dbt.  The 'readonly' flag is accepted but currently unused
+ * (see COMPQUIET below).
+ */
+static jobject get_Dbt_shared(JNIEnv *jnienv, const DBT *dbt, int readonly,
+    DBT_JAVAINFO **ret_info)
+{
+ jobject jdbt;
+ DBT_JAVAINFO *dbtji;
+
+ COMPQUIET(readonly, 0);
+
+ /* A NULL DBT should become a null Dbt. */
+ if (dbt == NULL)
+ return (NULL);
+
+ /*
+ * Note that a side effect of creating a Dbt object
+ * is the creation of the attached DBT_JAVAINFO object
+ * (see the native implementation of Dbt.init())
+ * A DBT_JAVAINFO object contains its own DBT.
+ */
+ jdbt = create_default_object(jnienv, name_DBT);
+ dbtji = get_DBT_JAVAINFO(jnienv, jdbt);
+ memcpy(&dbtji->dbt, dbt, sizeof(DBT));
+
+ /*
+ * Set the boolean indicator so that the Java side knows to
+ * call back when it wants to look at the array. This avoids
+ * needlessly creating/copying arrays that may never be looked at.
+ */
+ (*jnienv)->SetBooleanField(jnienv, jdbt, fid_Dbt_must_create_data, 1);
+ (*jnienv)->SetIntField(jnienv, jdbt, fid_Dbt_size, dbt->size);
+
+ if (ret_info != NULL)
+ *ret_info = dbtji;
+ return (jdbt);
+}
+
+/*
+ * Get a writeable Dbt.
+ *
+ * Currently we're sharing code with get_const_Dbt.
+ * It really shouldn't be this way, we have a DBT that we can
+ * change, and have some mechanism for copying back
+ * any changes to the original DBT.
+ */
+jobject get_Dbt(JNIEnv *jnienv, DBT *dbt,
+    DBT_JAVAINFO **ret_info)
+{
+ return (get_Dbt_shared(jnienv, dbt, 0, ret_info));
+}
+
+/*
+ * Get a Dbt that we promise not to change, or at least
+ * if there are changes, they don't matter and won't get
+ * seen by anyone.
+ */
+jobject get_const_Dbt(JNIEnv *jnienv, const DBT *dbt,
+    DBT_JAVAINFO **ret_info)
+{
+ /* readonly = 1, though the flag is not yet acted upon */
+ return (get_Dbt_shared(jnienv, dbt, 1, ret_info));
+}
+
+/* Remaining C-to-Java wrapper constructors; NULL maps to a Java null. */
+jobject get_DbMpoolFStat(JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_MPOOL_FSTAT, dbobj));
+}
+
+jobject get_DbMpoolStat(JNIEnv *jnienv, DB_MPOOL_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_MPOOL_STAT, dbobj));
+}
+
+jobject get_DbQueueStat(JNIEnv *jnienv, DB_QUEUE_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_QUEUE_STAT, dbobj));
+}
+
+jobject get_DbTxn(JNIEnv *jnienv, DB_TXN *dbobj)
+{
+ return (convert_object(jnienv, name_DB_TXN, dbobj));
+}
+
+jobject get_DbTxnStat(JNIEnv *jnienv, DB_TXN_STAT *dbobj)
+{
+ return (convert_object(jnienv, name_DB_TXN_STAT, dbobj));
+}
diff --git a/storage/bdb/libdb_java/java_util.h b/storage/bdb/libdb_java/java_util.h
new file mode 100644
index 00000000000..08187f6b51f
--- /dev/null
+++ b/storage/bdb/libdb_java/java_util.h
@@ -0,0 +1,441 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: java_util.h,v 11.44 2002/08/29 14:22:24 margo Exp $
+ */
+
+#ifndef _JAVA_UTIL_H_
+#define _JAVA_UTIL_H_
+
+#ifdef _MSC_VER
+
+/*
+ * These are level 4 warnings that are explicitly disabled.
+ * With Visual C++, by default you do not see above level 3 unless
+ * you use /W4. But we like to compile with the highest level
+ * warnings to catch other errors.
+ *
+ * 4201: nameless struct/union
+ * triggered by standard include file <winnt.h>
+ *
+ * 4244: '=' : convert from '__int64' to 'unsigned int', possible loss of data
+ * results from making size_t data members correspond to jlongs
+ *
+ * 4514: unreferenced inline function has been removed
+ * jni.h defines methods that are not called
+ *
+ * 4127: conditional expression is constant
+ * occurs because of arg in JAVADB_RW_ACCESS_STRING macro
+ */
+#pragma warning(disable: 4244 4201 4514 4127)
+
+#endif
+
+#include "db_config.h"
+#include "db.h"
+#include "db_int.h"
+#include <jni.h>
+#include "java_info.h"
+#include "java_locked.h"
+#include <string.h> /* needed for memset */
+
+#define DB_PACKAGE_NAME "com/sleepycat/db/"
+
+/* Union to convert longs to pointers (see {get,set}_private_dbobj). */
+typedef union {
+ jlong java_long;
+ void *ptr;
+} long_to_ptr;
+
+/****************************************************************
+ *
+ * Utility functions and definitions used by "glue" functions.
+ */
+
+#define NOT_IMPLEMENTED(str) \
+ report_exception(jnienv, str /*concatenate*/ ": not implemented", 0)
+
+/*
+ * Get, delete a global reference.
+ * Making this operation a function call allows for
+ * easier tracking for debugging. Global references
+ * are mostly grabbed at 'open' and 'close' points,
+ * so there shouldn't be a big performance hit.
+ *
+ * Macro-izing this makes it easier to add debugging code
+ * to track unreleased references.
+ */
+#ifdef DBJAVA_DEBUG
+#include <unistd.h>
+/* Emit a debug message directly on fd 2 (stderr), bypassing stdio. */
+static void wrdebug(const char *str)
+{
+	write(2, str, strlen(str));
+	write(2, "\n", 1);
+}
+
+/* Log the creation of a global ref, then create it (debug builds only). */
+static jobject debug_new_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
+{
+	wrdebug(s);
+	return ((*jnienv)->NewGlobalRef(jnienv, obj));
+}
+
+/* Log the deletion of a global ref, then delete it (debug builds only). */
+static void debug_delete_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
+{
+	wrdebug(s);
+	(*jnienv)->DeleteGlobalRef(jnienv, obj);
+}
+
+#define NEW_GLOBAL_REF(jnienv, obj) \
+ debug_new_global_ref(jnienv, obj, "+Ref: " #obj)
+#define DELETE_GLOBAL_REF(jnienv, obj) \
+ debug_delete_global_ref(jnienv, obj, "-Ref: " #obj)
+#else
+#define NEW_GLOBAL_REF(jnienv, obj) (*jnienv)->NewGlobalRef(jnienv, obj)
+#define DELETE_GLOBAL_REF(jnienv, obj) (*jnienv)->DeleteGlobalRef(jnienv, obj)
+#define wrdebug(x)
+#endif
+
+/*
+ * Do any one time initialization, especially initializing any
+ * unchanging methodIds, fieldIds, etc.
+ */
+void one_time_init(JNIEnv *jnienv);
+
+/*
+ * Get the current JNIEnv from the java VM.
+ * If the jvm argument is null, uses the default
+ * jvm stored during the first invocation.
+ */
+JNIEnv *get_jnienv(JavaVM *jvm);
+
+/*
+ * Get the private data from a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj);
+
+/*
+ * Set the private data in a Db* object that points back to a C DB_* object.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_dbobj(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value);
+
+/*
+ * Get the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void *get_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj);
+
+/*
+ * Set the private data in a Db/DbEnv object that holds additional 'side data'.
+ * The private data is stored in the object as a Java long (64 bits),
+ * which is long enough to store a pointer on current architectures.
+ */
+void set_private_info(JNIEnv *jnienv, const char *classname,
+ jobject obj, void *value);
+
+/*
+ * Given a non-qualified name (e.g. "foo"), get the class handle
+ * for the fully qualified name (e.g. "com.sleepycat.db.foo")
+ */
+jclass get_class(JNIEnv *jnienv, const char *classname);
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be a DB object type.
+ */
+void set_object_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *object_classname,
+ const char *name_of_field, jobject obj);
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_int_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jint value);
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be an integer type.
+ */
+void set_long_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, jlong value);
+
+/*
+ * Set an individual field in a Db* object.
+ * The field must be an DbLsn type.
+ */
+void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
+ jobject jthis, const char *name_of_field, DB_LSN value);
+
+/*
+ * Values of flags for verify_return() and report_exception().
+ * These indicate what sort of exceptions the method may throw
+ * (in addition to DbException).
+ */
+static const u_int32_t EXCEPTION_FILE_NOT_FOUND = 0x0001; /*FileNotFound*/
+
+/*
+ * Report an exception back to the java side.
+ */
+void report_exception(JNIEnv *jnienv, const char *text,
+ int err, unsigned long expect_mask);
+
+/*
+ * Report an exception back to the java side, for the specific
+ * case of DB_LOCK_NOTGRANTED, as more things are added to the
+ * constructor of this type of exception.
+ */
+void report_notgranted_exception(JNIEnv *jnienv, const char *text,
+ db_lockop_t op, db_lockmode_t mode,
+ jobject jdbt, jobject jlock, int index);
+
+/*
+ * Create an exception object and return it.
+ * The given class must have a constructor that has a
+ * constructor with args (java.lang.String text, int errno);
+ * DbException and its subclasses fit this bill.
+ */
+jobject create_exception(JNIEnv *jnienv, jstring text,
+ int err, jclass dbexcept);
+
+/*
+ * Report an error via the errcall mechanism.
+ */
+void report_errcall(JNIEnv *jnienv, jobject errcall,
+ jstring prefix, const char *message);
+
+/*
+ * If the object is null, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_non_null(JNIEnv *jnienv, void *obj);
+
+/*
+ * If the error code is non-zero, report an exception and return false (0),
+ * otherwise return true (1).
+ */
+int verify_return(JNIEnv *jnienv, int err, unsigned long flags);
+
+/*
+ * Verify that there was no memory error due to undersized Dbt.
+ * If there is report a DbMemoryException, with the Dbt attached
+ * and return false (0), otherwise return true (1).
+ */
+int verify_dbt(JNIEnv *jnienv, int err, LOCKED_DBT *locked_dbt);
+
+/*
+ * Create an object of the given class, calling its default constructor.
+ */
+jobject create_default_object(JNIEnv *jnienv, const char *class_name);
+
+/*
+ * Create a Dbt object, calling its default constructor.
+ */
+jobject create_dbt(JNIEnv *jnienv, const char *class_name);
+
+/*
+ * Convert an DB object to a Java encapsulation of that object.
+ * Note: This implementation creates a new Java object on each call,
+ * so it is generally useful when a new DB object has just been created.
+ */
+jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj);
+
+/*
+ * Create a copy of the java string using __os_malloc.
+ * Caller must free it.
+ */
+char *get_c_string(JNIEnv *jnienv, jstring jstr);
+
+/*
+ * Create a java string from the given string
+ */
+jstring get_java_string(JNIEnv *jnienv, const char* string);
+
+/*
+ * Convert a java object to the various C pointers they represent.
+ */
+DB *get_DB (JNIEnv *jnienv, jobject obj);
+DB_BTREE_STAT *get_DB_BTREE_STAT (JNIEnv *jnienv, jobject obj);
+DBC *get_DBC (JNIEnv *jnienv, jobject obj);
+DB_ENV *get_DB_ENV (JNIEnv *jnienv, jobject obj);
+DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO (JNIEnv *jnienv, jobject obj);
+DB_HASH_STAT *get_DB_HASH_STAT (JNIEnv *jnienv, jobject obj);
+DB_JAVAINFO *get_DB_JAVAINFO (JNIEnv *jnienv, jobject obj);
+DB_LOCK *get_DB_LOCK (JNIEnv *jnienv, jobject obj);
+DB_LOGC *get_DB_LOGC (JNIEnv *jnienv, jobject obj);
+DB_LOG_STAT *get_DB_LOG_STAT (JNIEnv *jnienv, jobject obj);
+DB_LSN *get_DB_LSN (JNIEnv *jnienv, jobject obj);
+DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj);
+DB_MPOOL_STAT *get_DB_MPOOL_STAT (JNIEnv *jnienv, jobject obj);
+DB_QUEUE_STAT *get_DB_QUEUE_STAT (JNIEnv *jnienv, jobject obj);
+DB_TXN *get_DB_TXN (JNIEnv *jnienv, jobject obj);
+DB_TXN_STAT *get_DB_TXN_STAT (JNIEnv *jnienv, jobject obj);
+DBT *get_DBT (JNIEnv *jnienv, jobject obj);
+DBT_JAVAINFO *get_DBT_JAVAINFO (JNIEnv *jnienv, jobject obj);
+
+/*
+ * From a C object, create a Java object.
+ */
+jobject get_DbBtreeStat (JNIEnv *jnienv, DB_BTREE_STAT *dbobj);
+jobject get_Dbc (JNIEnv *jnienv, DBC *dbobj);
+jobject get_DbHashStat (JNIEnv *jnienv, DB_HASH_STAT *dbobj);
+jobject get_DbLogc (JNIEnv *jnienv, DB_LOGC *dbobj);
+jobject get_DbLogStat (JNIEnv *jnienv, DB_LOG_STAT *dbobj);
+jobject get_DbLsn (JNIEnv *jnienv, DB_LSN dbobj);
+jobject get_DbMpoolStat (JNIEnv *jnienv, DB_MPOOL_STAT *dbobj);
+jobject get_DbMpoolFStat (JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj);
+jobject get_DbQueueStat (JNIEnv *jnienv, DB_QUEUE_STAT *dbobj);
+jobject get_const_Dbt (JNIEnv *jnienv, const DBT *dbt, DBT_JAVAINFO **retp);
+jobject get_Dbt (JNIEnv *jnienv, DBT *dbt, DBT_JAVAINFO **retp);
+jobject get_DbTxn (JNIEnv *jnienv, DB_TXN *dbobj);
+jobject get_DbTxnStat (JNIEnv *jnienv, DB_TXN_STAT *dbobj);
+
+/* The java names of DB classes */
+extern const char * const name_DB;
+extern const char * const name_DB_BTREE_STAT;
+extern const char * const name_DBC;
+extern const char * const name_DB_DEADLOCK_EX;
+extern const char * const name_DB_ENV;
+extern const char * const name_DB_EXCEPTION;
+extern const char * const name_DB_HASH_STAT;
+extern const char * const name_DB_LOCK;
+extern const char * const name_DB_LOCK_STAT;
+extern const char * const name_DB_LOGC;
+extern const char * const name_DB_LOG_STAT;
+extern const char * const name_DB_LSN;
+extern const char * const name_DB_MEMORY_EX;
+extern const char * const name_DB_MPOOL_FSTAT;
+extern const char * const name_DB_MPOOL_STAT;
+extern const char * const name_DB_LOCKNOTGRANTED_EX;
+extern const char * const name_DB_PREPLIST;
+extern const char * const name_DB_QUEUE_STAT;
+extern const char * const name_DB_REP_STAT;
+extern const char * const name_DB_RUNRECOVERY_EX;
+extern const char * const name_DBT;
+extern const char * const name_DB_TXN;
+extern const char * const name_DB_TXN_STAT;
+extern const char * const name_DB_TXN_STAT_ACTIVE;
+extern const char * const name_DB_UTIL;
+extern const char * const name_DbAppendRecno;
+extern const char * const name_DbBtreeCompare;
+extern const char * const name_DbBtreePrefix;
+extern const char * const name_DbDupCompare;
+extern const char * const name_DbEnvFeedback;
+extern const char * const name_DbErrcall;
+extern const char * const name_DbFeedback;
+extern const char * const name_DbHash;
+extern const char * const name_DbRecoveryInit;
+extern const char * const name_DbRepTransport;
+extern const char * const name_DbSecondaryKeyCreate;
+extern const char * const name_DbTxnRecover;
+extern const char * const name_RepElectResult;
+extern const char * const name_RepProcessMessage;
+
+extern const char * const string_signature;
+
+extern jfieldID fid_Dbt_data;
+extern jfieldID fid_Dbt_offset;
+extern jfieldID fid_Dbt_size;
+extern jfieldID fid_Dbt_ulen;
+extern jfieldID fid_Dbt_dlen;
+extern jfieldID fid_Dbt_doff;
+extern jfieldID fid_Dbt_flags;
+extern jfieldID fid_Dbt_must_create_data;
+extern jfieldID fid_DbLockRequest_op;
+extern jfieldID fid_DbLockRequest_mode;
+extern jfieldID fid_DbLockRequest_timeout;
+extern jfieldID fid_DbLockRequest_obj;
+extern jfieldID fid_DbLockRequest_lock;
+extern jfieldID fid_RepProcessMessage_envid;
+
+/* Common JNI parameter list shared by every generated native method. */
+#define JAVADB_ARGS JNIEnv *jnienv, jobject jthis
+
+/*
+ * Expand to a JNI native getter Java_com_sleepycat_db_<class>_get_<field>
+ * that returns c_field from the underlying C struct, or 0 if the
+ * handle is NULL (in which case an exception has been reported).
+ */
+#define JAVADB_GET_FLD(j_class, j_fieldtype, j_field, c_type, c_field) \
+JNIEXPORT j_fieldtype JNICALL \
+  Java_com_sleepycat_db_##j_class##_get_1##j_field \
+  (JAVADB_ARGS) \
+{ \
+	c_type *db= get_##c_type(jnienv, jthis); \
+ \
+	if (verify_non_null(jnienv, db)) \
+		return (db->c_field); \
+	return (0); \
+}
+
+/*
+ * Expand to a JNI native setter Java_com_sleepycat_db_<class>_set_<field>
+ * that stores value into c_field of the underlying C struct, doing
+ * nothing (beyond reporting an exception) if the handle is NULL.
+ */
+#define JAVADB_SET_FLD(j_class, j_fieldtype, j_field, c_type, c_field) \
+JNIEXPORT void JNICALL \
+  Java_com_sleepycat_db_##j_class##_set_1##j_field \
+  (JAVADB_ARGS, j_fieldtype value) \
+{ \
+	c_type *db= get_##c_type(jnienv, jthis); \
+ \
+	if (verify_non_null(jnienv, db)) \
+		db->c_field = value; \
+}
+
+/*
+ * Expand to a void JNI native method that forwards to the C method
+ * c_meth on the underlying handle, throwing DbException on any
+ * non-"standard OK" return code.
+ */
+#define JAVADB_METHOD(_meth, _argspec, c_type, c_meth, _args) \
+JNIEXPORT void JNICALL Java_com_sleepycat_db_##_meth _argspec \
+{ \
+	c_type *c_this = get_##c_type(jnienv, jthis); \
+	int ret; \
+ \
+	if (!verify_non_null(jnienv, c_this)) \
+		return; \
+	ret = c_this->c_meth _args; \
+	if (!DB_RETOK_STD(ret)) \
+		report_exception(jnienv, db_strerror(ret), ret, 0); \
+}
+
+/*
+ * As JAVADB_METHOD, but returns the C method's int result as a jint;
+ * _retok is the predicate deciding which return codes are not errors.
+ */
+#define JAVADB_METHOD_INT(_meth, _argspec, c_type, c_meth, _args, _retok) \
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_##_meth _argspec \
+{ \
+	c_type *c_this = get_##c_type(jnienv, jthis); \
+	int ret; \
+ \
+	if (!verify_non_null(jnienv, c_this)) \
+		return (0); \
+	ret = c_this->c_meth _args; \
+	if (!_retok(ret)) \
+		report_exception(jnienv, db_strerror(ret), ret, 0); \
+	return ((jint)ret); \
+}
+
+/* Shorthand: generate a setter method wrapping the C set_<field> call. */
+#define JAVADB_SET_METH(j_class, j_type, j_fld, c_type, c_field) \
+	JAVADB_METHOD(j_class##_set_1##j_fld, (JAVADB_ARGS, j_type val), c_type, \
+	    set_##c_field, (c_this, val))
+
+/*
+ * String variant of JAVADB_SET_METH.
+ * NOTE(review): the GetStringUTFChars result is apparently never
+ * released with ReleaseStringUTFChars here -- presumably the C
+ * set_ function keeps the pointer; confirm against the callees.
+ */
+#define JAVADB_SET_METH_STR(j_class, j_fld, c_type, c_field) \
+	JAVADB_METHOD(j_class##_set_1##j_fld, (JAVADB_ARGS, jstring val), c_type, \
+	    set_##c_field, (c_this, (*jnienv)->GetStringUTFChars(jnienv, val, NULL)))
+
+
+/*
+ * These macros are used by code generated by the s_java script.
+ */
+#define JAVADB_STAT_INT(env, cl, jobj, statp, name) \
+ set_int_field(jnienv, cl, jobj, #name, statp->name)
+
+#define JAVADB_STAT_LSN(env, cl, jobj, statp, name) \
+ set_lsn_field(jnienv, cl, jobj, #name, statp->name)
+
+#define JAVADB_STAT_LONG(env, cl, jobj, statp, name) \
+ set_long_field(jnienv, cl, jobj, #name, statp->name)
+
+/*
+ * We build the active list separately.
+ */
+#define JAVADB_STAT_ACTIVE(env, cl, jobj, statp, name) \
+ do {} while(0)
+
+#endif /* !_JAVA_UTIL_H_ */
diff --git a/storage/bdb/lock/Design b/storage/bdb/lock/Design
new file mode 100644
index 00000000000..f0bb5c6e99c
--- /dev/null
+++ b/storage/bdb/lock/Design
@@ -0,0 +1,301 @@
+# $Id: Design,v 11.5 2002/02/01 19:07:18 bostic Exp $
+
+Synchronization in the Locking Subsystem
+
+This is a document that describes how we implemented fine-grain locking
+in the lock manager (that is, locking on a hash bucket level instead of
+locking the entire region). We found that the increase in concurrency
+was not sufficient to warrant the increase in complexity or the additional
+cost of performing each lock operation. Therefore, we don't use this
+any more. Should we have to do fine-grain locking in a future release,
+this would be a reasonable starting point.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+1. Data structures
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+
+The lock manager maintains 3 different structures:
+
+Objects (__db_lockobj):
+ Describes an object that is locked. When used with DB, this consists
+ of a __db_ilock (a file identifier and a page number).
+
+Lockers (__db_locker):
+ Identifies a specific locker ID and maintains the head of a list of
+ locks held by a locker (for using during transaction commit/abort).
+
+Locks (__db_lock):
+ Describes a particular object lock held on behalf of a particular
+ locker id.
+
+Objects and Lockers reference Locks.
+
+These structures are organized via two synchronized hash tables. Each
+hash table consists of two physical arrays: the array of actual hash
+buckets and an array of mutexes so we can lock individual buckets, rather
+than the whole table.
+
+One hash table contains Objects and the other hash table contains Lockers.
+Objects contain two lists of locks, waiters and holders: holders currently
+hold a lock on the Object, waiters are lock waiting to be granted.
+Lockers are a single linked list that connects the Locks held on behalf
+of the specific locker ID.
+
+In the diagram below:
+
+Locker ID #1 holds a lock on Object #1 (L1) and Object #2 (L5), and is
+waiting on a lock on Object #1 (L3).
+
+Locker ID #2 holds a lock on Object #1 (L2) and is waiting on a lock for
+Object #2 (L7).
+
+Locker ID #3 is waiting for a lock on Object #2 (L6).
+
+ OBJECT -----------------------
+ HASH | |
+ ----|------------- |
+ ________ _______ | | ________ | |
+ | |-->| O1 |--|---|-->| O2 | | |
+ |_______| |_____| | | |______| V |
+ | | W H--->L1->L2 W H--->L5 | holders
+ |_______| | | | | V
+ | | ------->L3 \ ------->L6------>L7 waiters
+ |_______| / \ \
+ . . / \ \
+ . . | \ \
+ . . | \ -----------
+ |_______| | -------------- |
+ | | ____|____ ___|_____ _|______
+ |_______| | | | | | |
+ | | | LID1 | | LID2 | | LID3 |
+ |_______| |_______| |_______| |______|
+ ^ ^ ^
+ | | |
+ ___|________________________|________|___
+ LOCKER | | | | | | | | |
+ HASH | | | | | | | | |
+ | | | | | | | | |
+ |____|____|____|____|____|____|____|____|
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+2. Synchronization
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+
+There are four types of mutexes in the subsystem.
+
+Object mutexes;
+ These map one-to-one to each bucket in the Object hash table.
+ Holding a mutex on an Object bucket secures all the Objects in
+ that bucket as well as the Lock structures linked from those
+ Objects. All fields in the Locks EXCEPT the Locker links (the
+ links that attach Locks by Locker ID) are protected by these
+ mutexes.
+
+Locker mutexes:
+ These map one-to-one to each bucket in the Locker hash table.
+ Holding a mutex on a Locker bucket secures the Locker structures
+ and the Locker links in the Locks.
+
+Memory mutex:
+ This mutex allows calls to allocate/free memory, i.e. calls to
+ __db_shalloc and __db_shalloc_free, as well as manipulation of
+ the Object, Locker and Lock free lists.
+
+Region mutex:
+ This mutex is currently only used to protect the locker ids.
+ It may also be needed later to provide exclusive access to
+ the region for deadlock detection.
+
+Creating or removing a Lock requires locking both the Object lock and the
+Locker lock (and eventually the shalloc lock to return the item to the
+free list).
+
+The locking hierarchy is as follows:
+
+ The Region mutex may never be acquired after any other mutex.
+
+ The Object mutex may be acquired after the Region mutex.
+
+ The Locker mutex may be acquired after the Region and Object
+ mutexes.
+
+ The Memory mutex may be acquired after any mutex.
+
+So, if both an Object mutex and a Locker mutex are going to be acquired,
+the Object mutex must be acquired first.
+
+The Memory mutex may be acquired after any other mutex, but no other mutexes
+can be acquired once the Memory mutex is held.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+3. The algorithms:
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+The locking subsystem supports four basic operations:
+ Get a Lock (lock_get)
+
+ Release a Lock (lock_put)
+
+ Release all the Locks on a specific Object (lock_vec)
+
+ Release all the Locks for a specific Locker (lock_vec)
+
+Get a lock:
+ Acquire Object bucket mutex.
+ Acquire Locker bucket mutex.
+
+ Acquire Memory mutex.
+ If the Object does not exist
+ Take an Object off the freelist.
+ If the Locker doesn't exist
+ Take a Locker off the freelist.
+ Take a Lock off the free list.
+ Release Memory mutex.
+
+ Add Lock to the Object list.
+ Add Lock to the Locker list.
+ Release Locker bucket mutex
+
+ If the lock cannot be granted
+ Release Object bucket mutex
+ Acquire lock mutex (blocks)
+
+ Acquire Object bucket mutex
+ If lock acquisition did not succeed (e.g, deadlock)
+ Acquire Locker bucket mutex
+ If locker should be destroyed
+ Remove locker from hash table
+ Acquire Memory mutex
+ Return locker to free list
+ Release Memory mutex
+ Release Locker bucket mutex
+
+ If object should be released
+ Acquire Memory mutex
+ Return object to free list
+ Release Memory mutex
+
+ Release Object bucket mutex
+
+Release a lock:
+ Acquire Object bucket mutex.
+ (Requires that we be able to find the Object hash bucket
+ without looking inside the Lock itself.)
+
+ If releasing a single lock and the user provided generation number
+ doesn't match the Lock's generation number, the Lock has been reused
+ and we return failure.
+
+ Enter lock_put_internal:
+ if the Lock is still on the Object's lists:
+ Increment Lock's generation number.
+ Remove Lock from the Object's list (NULL link fields).
+ Promote locks for the Object.
+
+ Enter locker_list_removal
+ Acquire Locker bucket mutex.
+ If Locker doesn't exist:
+ Release Locker bucket mutex
+ Release Object bucket mutex
+ Return error.
+ Else if Locker marked as deleted:
+ dont_release = TRUE
+ Else
+ Remove Lock from Locker list.
+ If Locker has no more locks
+ Remove Locker from table.
+ Acquire Memory mutex.
+ Return Locker to free list
+ Release Memory mutex
+ Release Locker bucket mutex.
+ Exit locker_list_removal
+
+ If (!dont_release)
+ Acquire Memory mutex
+ Return Lock to free list
+ Release Memory mutex
+
+ Exit lock_put_internal
+
+ Release Object bucket mutex
+
+Release all the Locks on a specific Object (lock_vec, DB_PUT_ALL_OBJ):
+
+ Acquire Object bucket mutex.
+
+ For each lock on the waiter list:
+ lock_put_internal
+ For each lock on the holder list:
+ lock_put_internal
+
+ Release Object bucket mutex.
+
+Release all the Locks for a specific Locker (lock_vec, DB_PUT_ALL):
+
+ Acquire Locker bucket mutex.
+ Mark Locker deleted.
+ Release Locker mutex.
+
+ For each lock on the Locker's list:
+ Remove from locker's list
+ (The lock could get put back on the free list in
+ lock_put and then could get reallocated and the
+ act of setting its locker links could clobber us.)
+ Perform "Release a Lock" above: skip locker_list_removal.
+
+ Acquire Locker bucket mutex.
+ Remove Locker
+ Release Locker mutex.
+
+ Acquire Memory mutex
+ Return Locker to free list
+ Release Memory mutex
+
+Deadlock detection (lock_detect):
+
+ For each bucket in Object table
+ Acquire the Object bucket mutex.
+ create waitsfor
+
+ For each bucket in Object table
+ Release the Object mutex.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+FAQ:
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Q: Why do you need generation numbers?
+A: If a lock has been released due to a transaction abort (potentially in a
+   different process), and then the lock is released by a thread of control
+ unaware of the abort, the lock might have potentially been re-allocated
+ to a different object. The generation numbers detect this problem.
+
+ Note, we assume that reads/writes of lock generation numbers are atomic,
+ if they are not, it is theoretically possible that a re-allocated lock
+ could be mistaken for another lock.
+
+Q: Why is it safe to walk the Locker list without holding any mutexes at
+ all?
+A: Locks are created with both the Object and Locker bucket mutexes held.
+   Once created, they are removed in two ways:
+
+ a) when a specific Lock is released, in which case, the Object and
+ Locker bucket mutexes are again held, and
+
+   b) when all Locks for a specific Locker Id are released.
+
+ In case b), the Locker bucket mutex is held while the Locker chain is
+ marked as "destroyed", which blocks any further access to the Locker
+ chain. Then, each individual Object bucket mutex is acquired when each
+ individual Lock is removed.
+
+Q: What are the implications of doing fine grain locking?
+
+A: Since we no longer globally lock the entire region, lock_vec will no
+ longer be atomic. We still execute the items in a lock_vec in order,
+ so things like lock-coupling still work, but you can't make any
+ guarantees about atomicity.
+
+Q: How do I configure for FINE_GRAIN locking?
+
+A: We currently do not support any automatic configuration for FINE_GRAIN
+   locking.  When we do, we will need to document the atomicity discussion
+ listed above (it is bug-report #553).
diff --git a/storage/bdb/lock/lock.c b/storage/bdb/lock/lock.c
new file mode 100644
index 00000000000..8eda155b822
--- /dev/null
+++ b/storage/bdb/lock/lock.c
@@ -0,0 +1,1874 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock.c,v 11.108 2002/08/06 06:11:34 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __lock_checklocker __P((DB_LOCKTAB *,
+ struct __db_lock *, u_int32_t, u_int32_t));
+static void __lock_expires __P((DB_ENV *, db_timeval_t *, db_timeout_t));
+static void __lock_freelocker
+ __P((DB_LOCKTAB *, DB_LOCKREGION *, DB_LOCKER *, u_int32_t));
+static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t,
+ const DBT *, db_lockmode_t, db_timeout_t, DB_LOCK *));
+static int __lock_getobj
+ __P((DB_LOCKTAB *, const DBT *, u_int32_t, int, DB_LOCKOBJ **));
+static int __lock_is_parent __P((DB_LOCKTAB *, u_int32_t, DB_LOCKER *));
+static int __lock_put_internal __P((DB_LOCKTAB *,
+ struct __db_lock *, u_int32_t, u_int32_t));
+static int __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, u_int32_t));
+static void __lock_remove_waiter __P((DB_LOCKTAB *,
+ DB_LOCKOBJ *, struct __db_lock *, db_status_t));
+static int __lock_trade __P((DB_ENV *, DB_LOCK *, u_int32_t));
+
+static const char __db_lock_err[] = "Lock table is out of available %s";
+static const char __db_lock_invalid[] = "%s: Lock is no longer valid";
+static const char __db_locker_invalid[] = "Locker is not valid";
+
+/*
+ * __lock_id --
+ * Generate a unique locker id.
+ *
+ * PUBLIC: int __lock_id __P((DB_ENV *, u_int32_t *));
+ */
+int
+__lock_id(dbenv, idp)
+	DB_ENV *dbenv;
+	u_int32_t *idp;
+{
+	DB_LOCKER *lk;
+	DB_LOCKTAB *lt;
+	DB_LOCKREGION *region;
+	u_int32_t *ids, locker_ndx;
+	int nids, ret;
+
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv,
+	    dbenv->lk_handle, "DB_ENV->lock_id", DB_INIT_LOCK);
+
+	lt = dbenv->lk_handle;
+	region = lt->reginfo.primary;
+	ret = 0;
+
+	/*
+	 * Allocate a new lock id. If we wrap around then we
+	 * find the minimum currently in use and make sure we
+	 * can stay below that. This code is similar to code
+	 * in __txn_begin_int for recovering txn ids.
+	 */
+	LOCKREGION(dbenv, lt);
+	/*
+	 * Our current valid range can span the maximum valid value, so check
+	 * for it and wrap manually.
+	 */
+	if (region->stat.st_id == DB_LOCK_MAXID &&
+	    region->stat.st_cur_maxid != DB_LOCK_MAXID)
+		region->stat.st_id = DB_LOCK_INVALIDID;
+	/*
+	 * Id space exhausted: collect the ids of all live lockers and
+	 * let __db_idspace recompute a free [st_id, st_cur_maxid) range.
+	 */
+	if (region->stat.st_id == region->stat.st_cur_maxid) {
+		if ((ret = __os_malloc(dbenv,
+		    sizeof(u_int32_t) * region->stat.st_nlockers, &ids)) != 0)
+			goto err;
+		nids = 0;
+		for (lk = SH_TAILQ_FIRST(&region->lockers, __db_locker);
+		    lk != NULL;
+		    lk = SH_TAILQ_NEXT(lk, ulinks, __db_locker))
+			ids[nids++] = lk->id;
+		region->stat.st_id = DB_LOCK_INVALIDID;
+		region->stat.st_cur_maxid = DB_LOCK_MAXID;
+		if (nids != 0)
+			__db_idspace(ids, nids,
+			    &region->stat.st_id, &region->stat.st_cur_maxid);
+		__os_free(dbenv, ids);
+	}
+	/* Hand out the next id from the (possibly recomputed) range. */
+	*idp = ++region->stat.st_id;
+
+	/* Allocate a locker for this id. */
+	LOCKER_LOCK(lt, region, *idp, locker_ndx);
+	ret = __lock_getlocker(lt, *idp, locker_ndx, 1, &lk);
+
+err:	UNLOCKREGION(dbenv, lt);
+
+	return (ret);
+}
+
+/*
+ * __lock_id_free --
+ * Free a locker id.
+ *
+ * PUBLIC: int __lock_id_free __P((DB_ENV *, u_int32_t));
+ */
+int
+__lock_id_free(dbenv, id)
+	DB_ENV *dbenv;
+	u_int32_t id;
+{
+	DB_LOCKER *sh_locker;
+	DB_LOCKTAB *lt;
+	DB_LOCKREGION *region;
+	u_int32_t locker_ndx;
+	int ret;
+
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv,
+	    dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK);
+
+	lt = dbenv->lk_handle;
+	region = lt->reginfo.primary;
+
+	LOCKREGION(dbenv, lt);
+	LOCKER_LOCK(lt, region, id, locker_ndx);
+	/* Look up the locker; do not create it if it does not exist. */
+	if ((ret =
+	    __lock_getlocker(lt, id, locker_ndx, 0, &sh_locker)) != 0)
+		goto err;
+	if (sh_locker == NULL) {
+		ret = EINVAL;
+		goto err;
+	}
+
+	/* Refuse to free a locker that still holds locks. */
+	if (sh_locker->nlocks != 0) {
+		__db_err(dbenv, "Locker still has locks");
+		ret = EINVAL;
+		goto err;
+	}
+
+	__lock_freelocker(lt, region, sh_locker, locker_ndx);
+
+err:	UNLOCKREGION(dbenv, lt);
+	return (ret);
+}
+
+/*
+ * __lock_vec --
+ * Vector lock routine. This function takes a set of operations
+ * and performs them all at once. In addition, lock_vec provides
+ * functionality for lock inheritance, releasing all locks for a
+ * given locker (used during transaction commit/abort), releasing
+ * all locks on a given object, and generating debugging information.
+ *
+ * PUBLIC: int __lock_vec __P((DB_ENV *,
+ * PUBLIC: u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+ */
+int
+__lock_vec(dbenv, locker, flags, list, nlist, elistp)
+ DB_ENV *dbenv;
+ u_int32_t locker, flags;
+ int nlist;
+ DB_LOCKREQ *list, **elistp;
+{
+ struct __db_lock *lp, *next_lock;
+ DB_LOCK lock;
+ DB_LOCKER *sh_locker, *sh_parent;
+ DB_LOCKOBJ *obj, *sh_obj;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t lndx, ndx;
+ int did_abort, i, ret, run_dd, upgrade, writes;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_vec", DB_INIT_LOCK);
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->lock_vec",
+ flags, DB_LOCK_FREE_LOCKER | DB_LOCK_NOWAIT)) != 0)
+ return (ret);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ run_dd = 0;
+ LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ for (i = 0, ret = 0; i < nlist && ret == 0; i++)
+ switch (list[i].op) {
+ case DB_LOCK_GET_TIMEOUT:
+ LF_SET(DB_LOCK_SET_TIMEOUT);
+ case DB_LOCK_GET:
+ ret = __lock_get_internal(dbenv->lk_handle,
+ locker, flags, list[i].obj,
+ list[i].mode, list[i].timeout, &list[i].lock);
+ break;
+ case DB_LOCK_INHERIT:
+ /*
+ * Get the committing locker and mark it as deleted.
+ * This allows us to traverse the locker links without
+ * worrying that someone else is deleting locks out
+ * from under us. However, if the locker doesn't
+ * exist, that just means that the child holds no
+ * locks, so inheritance is easy!
+ */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, ndx, 0, &sh_locker)) != 0 ||
+ sh_locker == NULL ||
+ F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
+ if (ret == 0 && sh_locker != NULL)
+ ret = EINVAL;
+ __db_err(dbenv, __db_locker_invalid);
+ break;
+ }
+
+ /* Make sure we are a child transaction. */
+ if (sh_locker->parent_locker == INVALID_ROFF) {
+ __db_err(dbenv, "Not a child transaction");
+ ret = EINVAL;
+ break;
+ }
+ sh_parent = (DB_LOCKER *)
+ R_ADDR(&lt->reginfo, sh_locker->parent_locker);
+ F_SET(sh_locker, DB_LOCKER_DELETED);
+
+ /*
+ * Now, lock the parent locker; move locks from
+ * the committing list to the parent's list.
+ */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if (F_ISSET(sh_parent, DB_LOCKER_DELETED)) {
+ if (ret == 0) {
+ __db_err(dbenv,
+ "Parent locker is not valid");
+ ret = EINVAL;
+ }
+ break;
+ }
+
+ for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
+ SH_LIST_REMOVE(lp, locker_links, __db_lock);
+ SH_LIST_INSERT_HEAD(&sh_parent->heldby, lp,
+ locker_links, __db_lock);
+ lp->holder = sh_parent->id;
+
+ /* Get the object associated with this lock. */
+ obj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+
+ (void)__lock_promote(lt, obj,
+ LF_ISSET(DB_LOCK_NOWAITERS));
+ }
+
+ /* Transfer child counts to parent. */
+ sh_parent->nlocks += sh_locker->nlocks;
+ sh_parent->nwrites += sh_locker->nwrites;
+
+ /* Now free the original locker. */
+ ret = __lock_checklocker(lt,
+ NULL, locker, DB_LOCK_IGNOREDEL);
+ break;
+ case DB_LOCK_PUT:
+ ret = __lock_put_nolock(dbenv,
+ &list[i].lock, &run_dd, flags);
+ break;
+ case DB_LOCK_PUT_ALL:
+ case DB_LOCK_PUT_READ:
+ case DB_LOCK_UPGRADE_WRITE:
+ /*
+ * Get the locker and mark it as deleted. This
+ * allows us to traverse the locker links without
+ * worrying that someone else is deleting locks out
+ * from under us. Since the locker may hold no
+ * locks (i.e., you could call abort before you've
+ * done any work), it's perfectly reasonable for there
+ * to be no locker; this is not an error.
+ */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, ndx, 0, &sh_locker)) != 0 ||
+ sh_locker == NULL ||
+ F_ISSET(sh_locker, DB_LOCKER_DELETED))
+ /*
+ * If ret is set, then we'll generate an
+ * error. If it's not set, we have nothing
+ * to do.
+ */
+ break;
+ upgrade = 0;
+ writes = 1;
+ if (list[i].op == DB_LOCK_PUT_READ)
+ writes = 0;
+ else if (list[i].op == DB_LOCK_UPGRADE_WRITE) {
+ if (F_ISSET(sh_locker, DB_LOCKER_DIRTY))
+ upgrade = 1;
+ writes = 0;
+ }
+
+ F_SET(sh_locker, DB_LOCKER_DELETED);
+
+ /* Now traverse the locks, releasing each one. */
+ for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+ lp != NULL;) {
+ sh_obj = (DB_LOCKOBJ *)
+ ((u_int8_t *)lp + lp->obj);
+ if (writes == 1 || lp->mode == DB_LOCK_READ) {
+ SH_LIST_REMOVE(lp,
+ locker_links, __db_lock);
+ sh_obj = (DB_LOCKOBJ *)
+ ((u_int8_t *)lp + lp->obj);
+ SHOBJECT_LOCK(lt, region, sh_obj, lndx);
+ /*
+ * We are not letting lock_put_internal
+ * unlink the lock, so we'll have to
+ * update counts here.
+ */
+ sh_locker->nlocks--;
+ if (IS_WRITELOCK(lp->mode))
+ sh_locker->nwrites--;
+ ret = __lock_put_internal(lt, lp,
+ lndx, DB_LOCK_FREE | DB_LOCK_DOALL);
+ if (ret != 0)
+ break;
+ lp = SH_LIST_FIRST(
+ &sh_locker->heldby, __db_lock);
+ } else
+ lp = SH_LIST_NEXT(lp,
+ locker_links, __db_lock);
+ }
+ switch (list[i].op) {
+ case DB_LOCK_UPGRADE_WRITE:
+ if (upgrade != 1)
+ goto up_done;
+ for (lp = SH_LIST_FIRST(
+ &sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_NEXT(lp,
+ locker_links, __db_lock)) {
+ if (ret != 0)
+ break;
+ lock.off = R_OFFSET(&lt->reginfo, lp);
+ lock.gen = lp->gen;
+ F_SET(sh_locker, DB_LOCKER_INABORT);
+ ret = __lock_get_internal(lt,
+ locker, DB_LOCK_UPGRADE,
+ NULL, DB_LOCK_WRITE, 0, &lock);
+ }
+ up_done:
+ /* FALL THROUGH */
+ case DB_LOCK_PUT_READ:
+ F_CLR(sh_locker, DB_LOCKER_DELETED);
+ break;
+
+ case DB_LOCK_PUT_ALL:
+ if (ret == 0)
+ ret = __lock_checklocker(lt,
+ NULL, locker, DB_LOCK_IGNOREDEL);
+ break;
+ default:
+ break;
+ }
+ break;
+ case DB_LOCK_PUT_OBJ:
+ /* Remove all the locks associated with an object. */
+ OBJECT_LOCK(lt, region, list[i].obj, ndx);
+ if ((ret = __lock_getobj(lt, list[i].obj,
+ ndx, 0, &sh_obj)) != 0 || sh_obj == NULL) {
+ if (ret == 0)
+ ret = EINVAL;
+ break;
+ }
+
+ /*
+ * Go through both waiters and holders. Don't bother
+ * to run promotion, because everyone is getting
+ * released. The processes waiting will still get
+ * awakened as their waiters are released.
+ */
+ for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
+ ret == 0 && lp != NULL;
+ lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock))
+ ret = __lock_put_internal(lt, lp, ndx,
+ DB_LOCK_UNLINK |
+ DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
+
+ /*
+ * On the last time around, the object will get
+ * reclaimed by __lock_put_internal, structure the
+ * loop carefully so we do not get bitten.
+ */
+ for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
+ ret == 0 && lp != NULL;
+ lp = next_lock) {
+ next_lock = SH_TAILQ_NEXT(lp, links, __db_lock);
+ ret = __lock_put_internal(lt, lp, ndx,
+ DB_LOCK_UNLINK |
+ DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
+ }
+ break;
+
+ case DB_LOCK_TIMEOUT:
+ ret = __lock_set_timeout(dbenv,
+ locker, 0, DB_SET_TXN_NOW);
+ region->need_dd = 1;
+ break;
+
+ case DB_LOCK_TRADE:
+ /*
+ * INTERNAL USE ONLY.
+ * Change the holder of the lock described in
+ * list[i].lock to the locker-id specified by
+ * the locker parameter.
+ */
+ /*
+ * You had better know what you're doing here.
+ * We are trading locker-id's on a lock to
+ * facilitate file locking on open DB handles.
+ * We do not do any conflict checking on this,
+ * so heaven help you if you use this flag under
+ * any other circumstances.
+ */
+ ret = __lock_trade(dbenv, &list[i].lock, locker);
+ break;
+#ifdef DEBUG
+ case DB_LOCK_DUMP:
+ /* Find the locker. */
+ LOCKER_LOCK(lt, region, locker, ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, ndx, 0, &sh_locker)) != 0 ||
+ sh_locker == NULL ||
+ F_ISSET(sh_locker, DB_LOCKER_DELETED))
+ break;
+
+ for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) {
+ __lock_printlock(lt, lp, 1);
+ }
+ break;
+#endif
+ default:
+ __db_err(dbenv,
+ "Invalid lock operation: %d", list[i].op);
+ ret = EINVAL;
+ break;
+ }
+
+ if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN)
+ run_dd = 1;
+ UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+ if (run_dd)
+ (void)dbenv->lock_detect(dbenv, 0, region->detect, &did_abort);
+
+ if (ret != 0 && elistp != NULL)
+ *elistp = &list[i - 1];
+
+ return (ret);
+}
+
+/*
+ * Lock acquisition routines. There are two library interfaces:
+ *
+ * __lock_get --
+ * original lock get interface that takes a locker id.
+ *
+ * All the work for lock_get (and for the GET option of lock_vec) is done
+ * inside of lock_get_internal.
+ *
+ * PUBLIC: int __lock_get __P((DB_ENV *,
+ * PUBLIC: u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+ */
+/*
+ * __lock_get --
+ *	Public entry point for acquiring a single lock.  Validates the
+ *	environment and flags, then does the real work in
+ *	__lock_get_internal with the lock region locked.  During recovery
+ *	locking is disabled, so the caller gets back an initialized
+ *	(invalid) lock and a 0 return.
+ */
+int
+__lock_get(dbenv, locker, flags, obj, lock_mode, lock)
+ DB_ENV *dbenv;
+ u_int32_t locker, flags;
+ const DBT *obj;
+ db_lockmode_t lock_mode;
+ DB_LOCK *lock;
+{
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_get", DB_INIT_LOCK);
+
+ /* While recovering, hand back an empty lock rather than locking. */
+ if (IS_RECOVERING(dbenv)) {
+ LOCK_INIT(*lock);
+ return (0);
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->lock_get", flags,
+ DB_LOCK_NOWAIT | DB_LOCK_UPGRADE | DB_LOCK_SWITCH)) != 0)
+ return (ret);
+
+ /* Serialize on the lock region for the duration of the request. */
+ LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ ret = __lock_get_internal(dbenv->lk_handle,
+ locker, flags, obj, lock_mode, 0, lock);
+ UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+ return (ret);
+}
+
+/*
+ * __lock_get_internal --
+ *	Acquire (or upgrade/switch) a lock for "locker" on "obj", waiting
+ *	with an optional timeout when the request conflicts.  When obj is
+ *	NULL the object is taken from the existing lock in *lock.  Called
+ *	with the lock region locked; the region mutex is dropped and
+ *	re-acquired around the blocking wait on the lock's mutex.
+ *
+ *	Returns 0 on success, DB_LOCK_NOTGRANTED/DB_LOCK_DEADLOCK/
+ *	DB_LOCK_NOTEXIST for lock-protocol outcomes, or a system error.
+ */
+static int
+__lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
+ DB_LOCKTAB *lt;
+ u_int32_t locker, flags;
+ const DBT *obj;
+ db_lockmode_t lock_mode;
+ db_timeout_t timeout;
+ DB_LOCK *lock;
+{
+ struct __db_lock *newl, *lp, *wwrite;
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ u_int32_t locker_ndx, obj_ndx;
+ int did_abort, ihold, on_locker_list, no_dd, ret;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+ on_locker_list = no_dd = ret = 0;
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ /*
+ * If we are not going to reuse this lock, initialize the offset to
+ * invalid so that if we fail it will not look like a valid lock.
+ */
+ if (!LF_ISSET(DB_LOCK_UPGRADE | DB_LOCK_SWITCH))
+ LOCK_INIT(*lock);
+
+ /* Check that the lock mode is valid. */
+ if ((u_int32_t)lock_mode >= region->stat.st_nmodes) {
+ __db_err(dbenv, "DB_ENV->lock_get: invalid lock mode %lu",
+ (u_long)lock_mode);
+ return (EINVAL);
+ }
+
+ /* Allocate a new lock. Optimize for the common case of a grant. */
+ region->stat.st_nrequests++;
+ if ((newl = SH_TAILQ_FIRST(&region->free_locks, __db_lock)) != NULL)
+ SH_TAILQ_REMOVE(&region->free_locks, newl, links, __db_lock);
+ if (newl == NULL) {
+ __db_err(dbenv, __db_lock_err, "locks");
+ return (ENOMEM);
+ }
+ if (++region->stat.st_nlocks > region->stat.st_maxnlocks)
+ region->stat.st_maxnlocks = region->stat.st_nlocks;
+
+ if (obj == NULL) {
+ DB_ASSERT(LOCK_ISSET(*lock));
+ lp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ sh_obj = (DB_LOCKOBJ *) ((u_int8_t *)lp + lp->obj);
+ } else {
+ /* Allocate a shared memory new object. */
+ OBJECT_LOCK(lt, region, obj, lock->ndx);
+ if ((ret = __lock_getobj(lt, obj, lock->ndx, 1, &sh_obj)) != 0)
+ goto err;
+ }
+
+ /* Get the locker, we may need it to find our parent. */
+ LOCKER_LOCK(lt, region, locker, locker_ndx);
+ if ((ret = __lock_getlocker(lt, locker,
+ locker_ndx, locker > DB_LOCK_MAXID ? 1 : 0, &sh_locker)) != 0) {
+ /*
+ * XXX We cannot tell if we created the object or not,
+ * so we don't know if we should free it or not.
+ */
+ goto err;
+ }
+
+ if (sh_locker == NULL) {
+ __db_err(dbenv, "Locker does not exist");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Now we have a lock and an object and we need to see if we should
+ * grant the lock. We use a FIFO ordering so we can only grant a
+ * new lock if it does not conflict with anyone on the holders list
+ * OR anyone on the waiters list. The reason that we don't grant if
+ * there's a conflict is that this can lead to starvation (a writer
+ * waiting on a popularly read item will never be granted). The
+ * downside of this is that a waiting reader can prevent an upgrade
+ * from reader to writer, which is not uncommon.
+ *
+ * There is one exception to the no-conflict rule. If a lock is held
+ * by the requesting locker AND the new lock does not conflict with
+ * any other holders, then we grant the lock. The most common place
+ * this happens is when the holder has a WRITE lock and a READ lock
+ * request comes in for the same locker. If we do not grant the read
+ * lock, then we guarantee deadlock.
+ *
+ * In case of conflict, we put the new lock on the end of the waiters
+ * list, unless we are upgrading in which case the locker goes on the
+ * front of the list.
+ */
+ ihold = 0;
+ lp = NULL;
+ if (LF_ISSET(DB_LOCK_SWITCH))
+ goto put_lock;
+
+ wwrite = NULL;
+ for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ if (locker == lp->holder) {
+ if (lp->mode == lock_mode &&
+ lp->status == DB_LSTAT_HELD) {
+ if (LF_ISSET(DB_LOCK_UPGRADE))
+ goto upgrade;
+
+ /*
+ * Lock is held, so we can increment the
+ * reference count and return this lock.
+ * We do not count reference increments
+ * towards the locks held by the locker.
+ */
+ lp->refcount++;
+ lock->off = R_OFFSET(&lt->reginfo, lp);
+ lock->gen = lp->gen;
+ lock->mode = lp->mode;
+
+ ret = 0;
+ goto done;
+ } else {
+ ihold = 1;
+ if (lock_mode == DB_LOCK_WRITE &&
+ lp->mode == DB_LOCK_WWRITE)
+ wwrite = lp;
+ }
+ } else if (__lock_is_parent(lt, lp->holder, sh_locker))
+ ihold = 1;
+ else if (CONFLICTS(lt, region, lp->mode, lock_mode))
+ break;
+ }
+
+ /*
+ * If we are looking to upgrade a WWRITE to a WRITE lock
+ * and there were no conflicting locks then we can just
+ * upgrade this lock to the one we want.
+ */
+ if (wwrite != NULL && lp == NULL) {
+ lp = wwrite;
+ lp->mode = lock_mode;
+ lp->refcount++;
+ lock->off = R_OFFSET(&lt->reginfo, lp);
+ lock->gen = lp->gen;
+ lock->mode = lp->mode;
+
+ ret = 0;
+ goto done;
+ }
+
+ /*
+ * Make the new lock point to the new object, initialize fields.
+ *
+ * This lock is not linked in anywhere, so we can muck with it
+ * without holding any mutexes.
+ */
+put_lock:
+ newl->holder = locker;
+ newl->refcount = 1;
+ newl->mode = lock_mode;
+ newl->obj = SH_PTR_TO_OFF(newl, sh_obj);
+ newl->status = DB_LSTAT_HELD;
+
+ /*
+ * If we are upgrading, then there are two scenarios. Either
+ * we had no conflicts, so we can do the upgrade. Or, there
+ * is a conflict and we should wait at the HEAD of the waiters
+ * list.
+ */
+ if (LF_ISSET(DB_LOCK_UPGRADE)) {
+ if (lp == NULL)
+ goto upgrade;
+
+ /*
+ * There was a conflict, wait. If this is the first waiter,
+ * add the object to the deadlock detector's list.
+ */
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_INSERT_HEAD(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+
+ SH_TAILQ_INSERT_HEAD(&sh_obj->waiters, newl, links, __db_lock);
+ goto llist;
+ }
+
+ if (lp == NULL && !ihold)
+ for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ if (CONFLICTS(lt, region, lp->mode, lock_mode) &&
+ locker != lp->holder)
+ break;
+ }
+ if (!LF_ISSET(DB_LOCK_SWITCH) && lp == NULL)
+ SH_TAILQ_INSERT_TAIL(&sh_obj->holders, newl, links);
+ else if (!LF_ISSET(DB_LOCK_NOWAIT)) {
+ /*
+ * If this is the first waiter, add the object to the
+ * deadlock detector's list.
+ */
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_INSERT_HEAD(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+ SH_TAILQ_INSERT_TAIL(&sh_obj->waiters, newl, links);
+ } else {
+ ret = DB_LOCK_NOTGRANTED;
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL &&
+ LF_ISSET(DB_LOCK_FREE_LOCKER))
+ __lock_freelocker(lt, region, sh_locker, locker_ndx);
+ region->stat.st_nnowaits++;
+ goto err;
+ }
+
+llist:
+ /*
+ * Now, insert the lock onto its locker's list. If the locker does
+ * not currently hold any locks, there's no reason to run a deadlock
+ * detector, save that information.
+ */
+ on_locker_list = 1;
+ no_dd = sh_locker->master_locker == INVALID_ROFF &&
+ SH_LIST_FIRST(&sh_locker->child_locker, __db_locker) == NULL &&
+ SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL;
+
+ SH_LIST_INSERT_HEAD(&sh_locker->heldby, newl, locker_links, __db_lock);
+
+ if (LF_ISSET(DB_LOCK_SWITCH) || lp != NULL) {
+ if (LF_ISSET(DB_LOCK_SWITCH) &&
+ (ret = __lock_put_nolock(dbenv,
+ lock, &ihold, DB_LOCK_NOWAITERS)) != 0)
+ goto err;
+ /*
+ * This is really a blocker for the thread. It should be
+ * initialized locked, so that when we try to acquire it, we
+ * block.
+ */
+ newl->status = DB_LSTAT_WAITING;
+ region->stat.st_nconflicts++;
+ region->need_dd = 1;
+ /*
+ * First check to see if this txn has expired.
+ * If not then see if the lock timeout is past
+ * the expiration of the txn, if it is, use
+ * the txn expiration time. lk_expire is passed
+ * to avoid an extra call to get the time.
+ */
+ if (__lock_expired(dbenv,
+ &sh_locker->lk_expire, &sh_locker->tx_expire)) {
+ newl->status = DB_LSTAT_ABORTED;
+ region->stat.st_ndeadlocks++;
+ region->stat.st_ntxntimeouts++;
+
+ /*
+ * Remove the lock from the wait queue and if
+ * this was the only lock on the wait queue remove
+ * this object from the deadlock detector object
+ * list.
+ */
+ SH_LIST_REMOVE(newl, locker_links, __db_lock);
+ SH_TAILQ_REMOVE(
+ &sh_obj->waiters, newl, links, __db_lock);
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+
+ /* Clear the timeout, we are done. */
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ goto expired;
+ }
+
+ /*
+ * If a timeout was specified in this call then it
+ * takes priority. If a lock timeout has been specified
+ * for this transaction then use that, otherwise use
+ * the global timeout value.
+ */
+ if (!LF_ISSET(DB_LOCK_SET_TIMEOUT)) {
+ if (F_ISSET(sh_locker, DB_LOCKER_TIMEOUT))
+ timeout = sh_locker->lk_timeout;
+ else
+ timeout = region->lk_timeout;
+ }
+ if (timeout != 0)
+ __lock_expires(dbenv, &sh_locker->lk_expire, timeout);
+ else
+ LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
+
+ if (LOCK_TIME_ISVALID(&sh_locker->tx_expire) &&
+ (timeout == 0 || __lock_expired(dbenv,
+ &sh_locker->lk_expire, &sh_locker->tx_expire)))
+ sh_locker->lk_expire = sh_locker->tx_expire;
+ UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+ /*
+ * We are about to wait; before waiting, see if the deadlock
+ * detector should be run.
+ */
+ if (region->detect != DB_LOCK_NORUN && !no_dd)
+ (void)dbenv->lock_detect(
+ dbenv, 0, region->detect, &did_abort);
+
+ MUTEX_LOCK(dbenv, &newl->mutex);
+ LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+
+expired: /* Turn off lock timeout. */
+ LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
+
+ if (newl->status != DB_LSTAT_PENDING) {
+ (void)__lock_checklocker(lt, newl, newl->holder, 0);
+ switch (newl->status) {
+ case DB_LSTAT_ABORTED:
+ on_locker_list = 0;
+ ret = DB_LOCK_DEADLOCK;
+ break;
+ case DB_LSTAT_NOTEXIST:
+ ret = DB_LOCK_NOTEXIST;
+ break;
+ case DB_LSTAT_EXPIRED:
+ SHOBJECT_LOCK(lt,
+ region, sh_obj, obj_ndx);
+ /*
+ * Bug fix: the close paren was misplaced, so the
+ * old code assigned the boolean result of the
+ * "!= 0" comparison to ret instead of the error
+ * code returned by __lock_put_internal.
+ */
+ if ((ret = __lock_put_internal(
+ lt, newl, obj_ndx, 0)) != 0)
+ goto err;
+ if (LOCK_TIME_EQUAL(
+ &sh_locker->lk_expire,
+ &sh_locker->tx_expire)) {
+ region->stat.st_ndeadlocks++;
+ region->stat.st_ntxntimeouts++;
+ return (DB_LOCK_DEADLOCK);
+ } else {
+ region->stat.st_nlocktimeouts++;
+ return (DB_LOCK_NOTGRANTED);
+ }
+ default:
+ ret = EINVAL;
+ break;
+ }
+ goto err;
+ } else if (LF_ISSET(DB_LOCK_UPGRADE)) {
+ /*
+ * The lock that was just granted got put on the
+ * holders list. Since we're upgrading some other
+ * lock, we've got to remove it here.
+ */
+ SH_TAILQ_REMOVE(
+ &sh_obj->holders, newl, links, __db_lock);
+ /*
+ * Ensure that the object is not believed to be on
+ * the object's lists, if we're traversing by locker.
+ */
+ newl->links.stqe_prev = -1;
+ goto upgrade;
+ } else
+ newl->status = DB_LSTAT_HELD;
+ }
+
+ lock->off = R_OFFSET(&lt->reginfo, newl);
+ lock->gen = newl->gen;
+ lock->mode = newl->mode;
+ sh_locker->nlocks++;
+ if (IS_WRITELOCK(newl->mode))
+ sh_locker->nwrites++;
+
+ return (0);
+
+upgrade:/*
+ * This was an upgrade, so return the new lock to the free list and
+ * upgrade the mode of the original lock.
+ */
+ lp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ if (IS_WRITELOCK(lock_mode) && !IS_WRITELOCK(lp->mode))
+ sh_locker->nwrites++;
+ lp->mode = lock_mode;
+
+ ret = 0;
+ /* FALLTHROUGH */
+
+/*
+ * done/err: in every path reaching here the freshly allocated "newl"
+ * was not consumed (the request was satisfied by an existing lock, was
+ * an upgrade, or failed), so recycle it onto the region free list.
+ */
+done:
+err: newl->status = DB_LSTAT_FREE;
+ region->stat.st_nlocks--;
+ if (on_locker_list) {
+ SH_LIST_REMOVE(newl, locker_links, __db_lock);
+ }
+ SH_TAILQ_INSERT_HEAD(&region->free_locks, newl, links, __db_lock);
+ return (ret);
+}
+
+/*
+ * Lock release routines.
+ *
+ * The user callable one is lock_put and the three we use internally are
+ * __lock_put_nolock, __lock_put_internal and __lock_downgrade.
+ *
+ * PUBLIC: int __lock_put __P((DB_ENV *, DB_LOCK *));
+ */
+/*
+ * __lock_put --
+ *	Public entry point for releasing a single lock.  Does the release
+ *	under the region mutex, then (outside the mutex) kicks the deadlock
+ *	detector if the release indicated it is needed.  A no-op during
+ *	recovery.
+ */
+int
+__lock_put(dbenv, lock)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+{
+ DB_LOCKTAB *lt;
+ int ret, run_dd;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_LOCK->lock_put", DB_INIT_LOCK);
+
+ if (IS_RECOVERING(dbenv))
+ return (0);
+
+ lt = dbenv->lk_handle;
+
+ LOCKREGION(dbenv, lt);
+ ret = __lock_put_nolock(dbenv, lock, &run_dd, 0);
+ UNLOCKREGION(dbenv, lt);
+
+ /*
+ * Only run the lock detector if put told us to AND we are running
+ * in auto-detect mode. If we are not running in auto-detect, then
+ * a call to lock_detect here will 0 the need_dd bit, but will not
+ * actually abort anything.
+ */
+ if (ret == 0 && run_dd)
+ (void)dbenv->lock_detect(dbenv, 0,
+ ((DB_LOCKREGION *)lt->reginfo.primary)->detect, NULL);
+ return (ret);
+}
+
+/*
+ * __lock_put_nolock --
+ *	Release the lock referenced by *lock; the caller holds the region
+ *	mutex.  *runp is set to 1 when the caller should run the deadlock
+ *	detector afterwards (need_dd set and detection not DB_LOCK_NORUN).
+ */
+static int
+__lock_put_nolock(dbenv, lock, runp, flags)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+ int *runp;
+ u_int32_t flags;
+{
+ struct __db_lock *lockp;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ int ret;
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ /*
+ * NOTE(review): lock->gen and lock->ndx are read AFTER LOCK_INIT;
+ * this is only correct if LOCK_INIT invalidates just the offset
+ * field and leaves gen/ndx intact -- confirm against the LOCK_INIT
+ * macro definition.
+ */
+ lockp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ LOCK_INIT(*lock);
+ if (lock->gen != lockp->gen) {
+ __db_err(dbenv, __db_lock_invalid, "DB_LOCK->lock_put");
+ return (EINVAL);
+ }
+
+ ret = __lock_put_internal(lt,
+ lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE);
+
+ *runp = 0;
+ if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN)
+ *runp = 1;
+
+ return (ret);
+}
+
+/*
+ * __lock_downgrade --
+ * Used to downgrade locks. Currently this is used in two places,
+ * 1) by the concurrent access product to downgrade write locks
+ * back to iwrite locks and 2) to downgrade write-handle locks to read-handle
+ * locks at the end of an open/create.
+ *
+ * PUBLIC: int __lock_downgrade __P((DB_ENV *,
+ * PUBLIC: DB_LOCK *, db_lockmode_t, u_int32_t));
+ */
+int
+__lock_downgrade(dbenv, lock, new_mode, flags)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+ db_lockmode_t new_mode;
+ u_int32_t flags;
+{
+ struct __db_lock *lockp;
+ DB_LOCKER *sh_locker;
+ DB_LOCKOBJ *obj;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t indx;
+ int ret;
+
+ /*
+ * NOTE(review): COMPQUIET conventionally assigns its second argument
+ * to silence "unused parameter" warnings, which would zero flags here
+ * and make the LF_ISSET(DB_LOCK_NOWAITERS) below always false --
+ * confirm the flags parameter is intentionally unused.
+ */
+ COMPQUIET(flags, 0);
+
+ PANIC_CHECK(dbenv);
+ ret = 0;
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+
+ /* Validate the lock handle against the shared lock's generation. */
+ lockp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ if (lock->gen != lockp->gen) {
+ __db_err(dbenv, __db_lock_invalid, "lock_downgrade");
+ ret = EINVAL;
+ goto out;
+ }
+
+ LOCKER_LOCK(lt, region, lockp->holder, indx);
+
+ if ((ret = __lock_getlocker(lt, lockp->holder,
+ indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
+ if (ret == 0)
+ ret = EINVAL;
+ __db_err(dbenv, __db_locker_invalid);
+ goto out;
+ }
+ /* A write lock leaving write mode no longer counts as a write. */
+ if (IS_WRITELOCK(lockp->mode) && !IS_WRITELOCK(new_mode))
+ sh_locker->nwrites--;
+
+ if (new_mode == DB_LOCK_WWRITE)
+ F_SET(sh_locker, DB_LOCKER_DIRTY);
+
+ lockp->mode = new_mode;
+
+ /* Get the object associated with this lock. */
+ obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+ /* The weaker mode may unblock waiters; try to promote them. */
+ (void)__lock_promote(lt, obj, LF_ISSET(DB_LOCK_NOWAITERS));
+
+out: UNLOCKREGION(dbenv, lt);
+
+ return (ret);
+}
+
+/*
+ * __lock_put_internal --
+ *	Core lock-release logic: drop one reference (or all, with
+ *	DB_LOCK_DOALL), unlink the lock from its object, promote any
+ *	newly-grantable waiters, reclaim the object when it becomes empty
+ *	and, per the flags, unlink from the locker (DB_LOCK_UNLINK) and/or
+ *	return the lock structure to the free list (DB_LOCK_FREE).
+ *	Caller holds the region mutex; obj_ndx is the object's hash index.
+ */
+static int
+__lock_put_internal(lt, lockp, obj_ndx, flags)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lockp;
+ u_int32_t obj_ndx, flags;
+{
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ int ret, state_changed;
+
+ region = lt->reginfo.primary;
+ ret = state_changed = 0;
+
+ if (!OBJ_LINKS_VALID(lockp)) {
+ /*
+ * Someone removed this lock while we were doing a release
+ * by locker id. We are trying to free this lock, but it's
+ * already been done; all we need to do is return it to the
+ * free list.
+ */
+ lockp->status = DB_LSTAT_FREE;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_locks, lockp, links, __db_lock);
+ region->stat.st_nlocks--;
+ return (0);
+ }
+
+ if (LF_ISSET(DB_LOCK_DOALL))
+ region->stat.st_nreleases += lockp->refcount;
+ else
+ region->stat.st_nreleases++;
+
+ /* Reference-counted: a partial release just decrements. */
+ if (!LF_ISSET(DB_LOCK_DOALL) && lockp->refcount > 1) {
+ lockp->refcount--;
+ return (0);
+ }
+
+ /* Increment generation number. */
+ lockp->gen++;
+
+ /* Get the object associated with this lock. */
+ sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+
+ /* Remove this lock from its holders/waitlist. */
+ if (lockp->status != DB_LSTAT_HELD && lockp->status != DB_LSTAT_PENDING)
+ __lock_remove_waiter(lt, sh_obj, lockp, DB_LSTAT_FREE);
+ else {
+ SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock);
+ /* Mark the lock as no longer on the object's lists. */
+ lockp->links.stqe_prev = -1;
+ }
+
+ if (LF_ISSET(DB_LOCK_NOPROMOTE))
+ state_changed = 0;
+ else
+ state_changed = __lock_promote(lt,
+ sh_obj, LF_ISSET(DB_LOCK_REMOVE | DB_LOCK_NOWAITERS));
+
+ if (LF_ISSET(DB_LOCK_UNLINK))
+ ret = __lock_checklocker(lt, lockp, lockp->holder, flags);
+
+ /* Check if object should be reclaimed. */
+ if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL &&
+ SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL) {
+ HASHREMOVE_EL(lt->obj_tab,
+ obj_ndx, __db_lockobj, links, sh_obj);
+ /* Large object data was allocated separately; free it. */
+ if (sh_obj->lockobj.size > sizeof(sh_obj->objdata))
+ __db_shalloc_free(lt->reginfo.addr,
+ SH_DBT_PTR(&sh_obj->lockobj));
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_objs, sh_obj, links, __db_lockobj);
+ region->stat.st_nobjects--;
+ state_changed = 1;
+ }
+
+ /* Free lock. */
+ if (!LF_ISSET(DB_LOCK_UNLINK) && LF_ISSET(DB_LOCK_FREE)) {
+ lockp->status = DB_LSTAT_FREE;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_locks, lockp, links, __db_lock);
+ region->stat.st_nlocks--;
+ }
+
+ /*
+ * If we did not promote anyone; we need to run the deadlock
+ * detector again.
+ */
+ if (state_changed == 0)
+ region->need_dd = 1;
+
+ return (ret);
+}
+
+/*
+ * Utility functions; listed alphabetically.
+ */
+
+/*
+ * __lock_checklocker --
+ * If a locker has no more locks, then we can free the object.
+ * Return a boolean indicating whether we freed the object or not.
+ *
+ * Must be called without the locker's lock set.
+ */
+/*
+ * Flags: DB_LOCK_UNLINK removes lockp from the locker's held-by list and
+ * adjusts lock/write counts; DB_LOCK_FREE returns lockp to the region's
+ * free list; DB_LOCK_FREE_LOCKER frees the locker once it holds no locks;
+ * DB_LOCK_IGNOREDEL proceeds even when the locker is marked deleted.
+ */
+static int
+__lock_checklocker(lt, lockp, locker, flags)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lockp;
+ u_int32_t locker, flags;
+{
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ u_int32_t indx;
+ int ret;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+ ret = 0;
+
+ LOCKER_LOCK(lt, region, locker, indx);
+
+ /* If the locker's list is NULL, free up the locker. */
+ if ((ret = __lock_getlocker(lt,
+ locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
+ if (ret == 0)
+ ret = EINVAL;
+ __db_err(dbenv, __db_locker_invalid);
+ goto freelock;
+ }
+
+ /* A deleted locker's locks are being handled elsewhere; don't free. */
+ if (F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
+ LF_CLR(DB_LOCK_FREE);
+ if (!LF_ISSET(DB_LOCK_IGNOREDEL))
+ goto freelock;
+ }
+
+ if (LF_ISSET(DB_LOCK_UNLINK)) {
+ SH_LIST_REMOVE(lockp, locker_links, __db_lock);
+ /* Only held locks are counted in nlocks/nwrites. */
+ if (lockp->status == DB_LSTAT_HELD) {
+ sh_locker->nlocks--;
+ if (IS_WRITELOCK(lockp->mode))
+ sh_locker->nwrites--;
+ }
+ }
+
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL &&
+ LF_ISSET(DB_LOCK_FREE_LOCKER))
+ __lock_freelocker( lt, region, sh_locker, indx);
+
+freelock:
+ if (LF_ISSET(DB_LOCK_FREE)) {
+ lockp->status = DB_LSTAT_FREE;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_locks, lockp, links, __db_lock);
+ region->stat.st_nlocks--;
+ }
+
+ return (ret);
+}
+
+/*
+ * __lock_addfamilylocker
+ * Put a locker entry in for a child transaction.
+ *
+ * PUBLIC: int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+/*
+ * pid is the parent locker id; id is the child locker id being created.
+ * Returns 0 on success or the error from locker creation.
+ */
+int
+__lock_addfamilylocker(dbenv, pid, id)
+ DB_ENV *dbenv;
+ u_int32_t pid, id;
+{
+ DB_LOCKER *lockerp, *mlockerp;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ LOCKREGION(dbenv, lt);
+
+ /* get/create the parent locker info */
+ LOCKER_LOCK(lt, region, pid, ndx);
+ if ((ret = __lock_getlocker(dbenv->lk_handle,
+ pid, ndx, 1, &mlockerp)) != 0)
+ goto err;
+
+ /*
+ * We assume that only one thread can manipulate
+ * a single transaction family.
+ * Therefore the master locker cannot go away while
+ * we manipulate it, nor can another child in the
+ * family be created at the same time.
+ */
+ LOCKER_LOCK(lt, region, id, ndx);
+ if ((ret = __lock_getlocker(dbenv->lk_handle,
+ id, ndx, 1, &lockerp)) != 0)
+ goto err;
+
+ /* Point to our parent. */
+ lockerp->parent_locker = R_OFFSET(&lt->reginfo, mlockerp);
+
+ /* See if this locker is the family master. */
+ if (mlockerp->master_locker == INVALID_ROFF)
+ lockerp->master_locker = R_OFFSET(&lt->reginfo, mlockerp);
+ else {
+ /* The parent is itself a child; inherit its master. */
+ lockerp->master_locker = mlockerp->master_locker;
+ mlockerp = R_ADDR(&lt->reginfo, mlockerp->master_locker);
+ }
+
+ /*
+ * Link the child at the head of the master's list.
+ * The guess is when looking for deadlock that
+ * the most recent child is the one that's blocked.
+ */
+ SH_LIST_INSERT_HEAD(
+ &mlockerp->child_locker, lockerp, child_link, __db_locker);
+
+err:
+ UNLOCKREGION(dbenv, lt);
+
+ return (ret);
+}
+
+/*
+ * __lock_freefamilylocker
+ * Remove a locker from the hash table and its family.
+ *
+ * This must be called without the locker bucket locked.
+ *
+ * PUBLIC: int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t));
+ */
+int
+__lock_freefamilylocker(lt, locker)
+ DB_LOCKTAB *lt;
+ u_int32_t locker;
+{
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ u_int32_t indx;
+ int ret;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+ LOCKER_LOCK(lt, region, locker, indx);
+
+ /* A missing locker means there is nothing to free. */
+ if ((ret = __lock_getlocker(lt,
+ locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL)
+ goto freelock;
+
+ /* Refuse to free a locker that still holds locks. */
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) != NULL) {
+ ret = EINVAL;
+ __db_err(dbenv, "Freeing locker with locks");
+ goto freelock;
+ }
+
+ /* If this is part of a family, we must fix up its links. */
+ if (sh_locker->master_locker != INVALID_ROFF)
+ SH_LIST_REMOVE(sh_locker, child_link, __db_locker);
+
+ __lock_freelocker(lt, region, sh_locker, indx);
+
+freelock:
+ UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
+/*
+ * __lock_freelocker
+ * common code for deleting a locker.
+ *
+ * This must be called with the locker bucket locked.
+ */
+/*
+ * Unhook sh_locker (hash index indx) from the hash table and the region's
+ * in-use locker list, then put it on the free list and update the count.
+ */
+static void
+__lock_freelocker(lt, region, sh_locker, indx)
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
+ DB_LOCKER *sh_locker;
+ u_int32_t indx;
+
+{
+ HASHREMOVE_EL(
+ lt->locker_tab, indx, __db_locker, links, sh_locker);
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_lockers, sh_locker, links, __db_locker);
+ SH_TAILQ_REMOVE(&region->lockers, sh_locker, ulinks, __db_locker);
+ region->stat.st_nlockers--;
+}
+
+/*
+ * __lock_set_timeout
+ * -- set timeout values in shared memory.
+ * This is called from the transaction system.
+ * We either set the time that this transaction expires or the
+ * amount of time that a lock for this transaction is permitted
+ * to wait.
+ *
+ * PUBLIC: int __lock_set_timeout __P(( DB_ENV *,
+ * PUBLIC: u_int32_t, db_timeout_t, u_int32_t));
+ */
+int
+__lock_set_timeout(dbenv, locker, timeout, op)
+ DB_ENV *dbenv;
+ u_int32_t locker;
+ db_timeout_t timeout;
+ u_int32_t op;
+{
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t locker_ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ LOCKREGION(dbenv, lt);
+
+ /* Find or create the locker (create flag is 1). */
+ LOCKER_LOCK(lt, region, locker, locker_ndx);
+ ret = __lock_getlocker(lt, locker, locker_ndx, 1, &sh_locker);
+ UNLOCKREGION(dbenv, lt);
+ if (ret != 0)
+ return (ret);
+
+ /*
+ * NOTE(review): sh_locker is written below AFTER the region has been
+ * unlocked -- presumably safe because the transaction system
+ * serializes access to this locker; confirm.
+ */
+ if (op == DB_SET_TXN_TIMEOUT) {
+ /* A zero timeout clears the transaction expiration. */
+ if (timeout == 0)
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ else
+ __lock_expires(dbenv, &sh_locker->tx_expire, timeout);
+ } else if (op == DB_SET_LOCK_TIMEOUT) {
+ sh_locker->lk_timeout = timeout;
+ F_SET(sh_locker, DB_LOCKER_TIMEOUT);
+ } else if (op == DB_SET_TXN_NOW) {
+ /* Expire the transaction immediately (timeout of 0 = now). */
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ __lock_expires(dbenv, &sh_locker->tx_expire, 0);
+ sh_locker->lk_expire = sh_locker->tx_expire;
+ } else
+ return (EINVAL);
+
+ return (0);
+}
+
+/*
+ * __lock_inherit_timeout
+ * -- inherit timeout values from parent locker.
+ * This is called from the transaction system. This will
+ * return EINVAL if the parent does not exist or did not
+ * have a current txn timeout set.
+ *
+ * PUBLIC: int __lock_inherit_timeout __P(( DB_ENV *, u_int32_t, u_int32_t));
+ */
+/*
+ * Copy the parent locker's transaction-expiration time and lock timeout
+ * into the child locker, creating the child if needed.  Per the contract
+ * above, returns EINVAL when the parent is absent, has no timeouts at
+ * all, or has a lock timeout but no current txn timeout.
+ */
+int
+__lock_inherit_timeout(dbenv, parent, locker)
+ DB_ENV *dbenv;
+ u_int32_t parent, locker;
+{
+ DB_LOCKER *parent_locker, *sh_locker;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t locker_ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ ret = 0;
+ LOCKREGION(dbenv, lt);
+
+ /* If the parent does not exist, we are done. */
+ LOCKER_LOCK(lt, region, parent, locker_ndx);
+ if ((ret = __lock_getlocker(lt,
+ parent, locker_ndx, 0, &parent_locker)) != 0)
+ goto err;
+
+ /*
+ * If the parent is not there yet, that's ok. If it
+ * does not have any timeouts set, then avoid creating
+ * the child locker at this point.
+ *
+ * Bug fix: the LOCK_TIME_ISVALID test was not negated, which
+ * rejected a parent that had a valid txn timeout (but no lock
+ * timeout) and accepted a parent with no timeouts at all --
+ * the opposite of the documented behavior.
+ */
+ if (parent_locker == NULL ||
+ (!LOCK_TIME_ISVALID(&parent_locker->tx_expire) &&
+ !F_ISSET(parent_locker, DB_LOCKER_TIMEOUT))) {
+ ret = EINVAL;
+ goto done;
+ }
+
+ LOCKER_LOCK(lt, region, locker, locker_ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, locker_ndx, 1, &sh_locker)) != 0)
+ goto err;
+
+ sh_locker->tx_expire = parent_locker->tx_expire;
+
+ if (F_ISSET(parent_locker, DB_LOCKER_TIMEOUT)) {
+ sh_locker->lk_timeout = parent_locker->lk_timeout;
+ F_SET(sh_locker, DB_LOCKER_TIMEOUT);
+ /* Lock timeout but no txn timeout: report EINVAL per contract. */
+ if (!LOCK_TIME_ISVALID(&parent_locker->tx_expire))
+ ret = EINVAL;
+ }
+
+done:
+err:
+ UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
+/*
+ * __lock_getlocker --
+ * Get a locker in the locker hash table. The create parameter
+ * indicates if the locker should be created if it doesn't exist in
+ * the table.
+ *
+ * This must be called with the locker bucket locked.
+ *
+ * PUBLIC: int __lock_getlocker __P((DB_LOCKTAB *,
+ * PUBLIC: u_int32_t, u_int32_t, int, DB_LOCKER **));
+ */
+int
+__lock_getlocker(lt, locker, indx, create, retp)
+ DB_LOCKTAB *lt;
+ u_int32_t locker, indx;
+ int create;
+ DB_LOCKER **retp;
+{
+ DB_ENV *dbenv;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+
+ HASHLOOKUP(lt->locker_tab,
+ indx, __db_locker, links, locker, sh_locker, __lock_locker_cmp);
+
+ /*
+ * If we found the locker, then we can just return it. If
+ * we didn't find the locker, then we need to create it.
+ */
+ if (sh_locker == NULL && create) {
+ /* Create new locker and then insert it into hash table. */
+ if ((sh_locker = SH_TAILQ_FIRST(
+ &region->free_lockers, __db_locker)) == NULL) {
+ __db_err(dbenv, __db_lock_err, "locker entries");
+ return (ENOMEM);
+ }
+ SH_TAILQ_REMOVE(
+ &region->free_lockers, sh_locker, links, __db_locker);
+ if (++region->stat.st_nlockers > region->stat.st_maxnlockers)
+ region->stat.st_maxnlockers = region->stat.st_nlockers;
+
+ sh_locker->id = locker;
+ sh_locker->dd_id = 0;
+ sh_locker->master_locker = INVALID_ROFF;
+ sh_locker->parent_locker = INVALID_ROFF;
+ SH_LIST_INIT(&sh_locker->child_locker);
+ sh_locker->flags = 0;
+ SH_LIST_INIT(&sh_locker->heldby);
+ sh_locker->nlocks = 0;
+ sh_locker->nwrites = 0;
+ sh_locker->lk_timeout = 0;
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ if (locker < TXN_MINIMUM && region->tx_timeout != 0)
+ __lock_expires(dbenv,
+ &sh_locker->tx_expire, region->tx_timeout);
+ LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
+
+ HASHINSERT(lt->locker_tab, indx, __db_locker, links, sh_locker);
+ SH_TAILQ_INSERT_HEAD(&region->lockers,
+ sh_locker, ulinks, __db_locker);
+ }
+
+ *retp = sh_locker;
+ return (0);
+}
+
+/*
+ * __lock_getobj --
+ * Get an object in the object hash table. The create parameter
+ * indicates if the object should be created if it doesn't exist in
+ * the table.
+ *
+ * This must be called with the object bucket locked.
+ */
+static int
+__lock_getobj(lt, obj, ndx, create, retp)
+ DB_LOCKTAB *lt;
+ const DBT *obj;
+ u_int32_t ndx;
+ int create;
+ DB_LOCKOBJ **retp;
+{
+ DB_ENV *dbenv;
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ int ret;
+ void *p;
+
+ dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+
+ /* Look up the object in the hash table. */
+ HASHLOOKUP(lt->obj_tab,
+ ndx, __db_lockobj, links, obj, sh_obj, __lock_cmp);
+
+ /*
+ * If we found the object, then we can just return it. If
+ * we didn't find the object, then we need to create it.
+ */
+ if (sh_obj == NULL && create) {
+ /* Create new object and then insert it into hash table. */
+ if ((sh_obj =
+ SH_TAILQ_FIRST(&region->free_objs, __db_lockobj)) == NULL) {
+ __db_err(lt->dbenv, __db_lock_err, "object entries");
+ ret = ENOMEM;
+ goto err;
+ }
+
+ /*
+ * If we can fit this object in the structure, do so instead
+ * of shalloc-ing space for it.
+ */
+ if (obj->size <= sizeof(sh_obj->objdata))
+ p = sh_obj->objdata;
+ else if ((ret = __db_shalloc(
+ lt->reginfo.addr, obj->size, 0, &p)) != 0) {
+ __db_err(dbenv, "No space for lock object storage");
+ goto err;
+ }
+
+ memcpy(p, obj->data, obj->size);
+
+ SH_TAILQ_REMOVE(
+ &region->free_objs, sh_obj, links, __db_lockobj);
+ if (++region->stat.st_nobjects > region->stat.st_maxnobjects)
+ region->stat.st_maxnobjects = region->stat.st_nobjects;
+
+ SH_TAILQ_INIT(&sh_obj->waiters);
+ SH_TAILQ_INIT(&sh_obj->holders);
+ sh_obj->lockobj.size = obj->size;
+ sh_obj->lockobj.off = SH_PTR_TO_OFF(&sh_obj->lockobj, p);
+
+ HASHINSERT(lt->obj_tab, ndx, __db_lockobj, links, sh_obj);
+ }
+
+ *retp = sh_obj;
+ return (0);
+
+err: return (ret);
+}
+
+/*
+ * __lock_is_parent --
+ * Given a locker and a transaction, return 1 if the locker is
+ *	an ancestor of the designated transaction.  This is used to determine
+ * if we should grant locks that appear to conflict, but don't because
+ * the lock is already held by an ancestor.
+ */
+static int
+__lock_is_parent(lt, locker, sh_locker)
+ DB_LOCKTAB *lt;
+ u_int32_t locker;
+ DB_LOCKER *sh_locker;
+{
+ DB_LOCKER *parent;
+
+ parent = sh_locker;
+ while (parent->parent_locker != INVALID_ROFF) {
+ parent = (DB_LOCKER *)
+ R_ADDR(&lt->reginfo, parent->parent_locker);
+ if (parent->id == locker)
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * __lock_promote --
+ *
+ * Look through the waiters and holders lists and decide which (if any)
+ * locks can be promoted. Promote any that are eligible.
+ *
+ * PUBLIC: int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, u_int32_t));
+ */
+int
+__lock_promote(lt, obj, flags)
+ DB_LOCKTAB *lt;
+ DB_LOCKOBJ *obj;
+ u_int32_t flags;
+{
+ struct __db_lock *lp_w, *lp_h, *next_waiter;
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ u_int32_t locker_ndx;
+ int had_waiters, state_changed;
+
+ region = lt->reginfo.primary;
+ had_waiters = 0;
+
+ /*
+ * We need to do lock promotion. We also need to determine if we're
+ * going to need to run the deadlock detector again. If we release
+ * locks, and there are waiters, but no one gets promoted, then we
+ * haven't fundamentally changed the lockmgr state, so we may still
+ * have a deadlock and we have to run again. However, if there were
+ * no waiters, or we actually promoted someone, then we are OK and we
+ * don't have to run it immediately.
+ *
+ * During promotion, we look for state changes so we can return this
+ * information to the caller.
+ */
+
+ for (lp_w = SH_TAILQ_FIRST(&obj->waiters, __db_lock),
+ state_changed = lp_w == NULL;
+ lp_w != NULL;
+ lp_w = next_waiter) {
+ had_waiters = 1;
+ next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock);
+
+ /* Waiter may have aborted or expired. */
+ if (lp_w->status != DB_LSTAT_WAITING)
+ continue;
+ /* Are we switching locks? */
+ if (LF_ISSET(DB_LOCK_NOWAITERS) && lp_w->mode == DB_LOCK_WAIT)
+ continue;
+
+ if (LF_ISSET(DB_LOCK_REMOVE)) {
+ __lock_remove_waiter(lt, obj, lp_w, DB_LSTAT_NOTEXIST);
+ continue;
+ }
+ for (lp_h = SH_TAILQ_FIRST(&obj->holders, __db_lock);
+ lp_h != NULL;
+ lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
+ if (lp_h->holder != lp_w->holder &&
+ CONFLICTS(lt, region, lp_h->mode, lp_w->mode)) {
+ LOCKER_LOCK(lt,
+ region, lp_w->holder, locker_ndx);
+ if ((__lock_getlocker(lt, lp_w->holder,
+ locker_ndx, 0, &sh_locker)) != 0) {
+ DB_ASSERT(0);
+ break;
+ }
+ if (!__lock_is_parent(lt,
+ lp_h->holder, sh_locker))
+ break;
+ }
+ }
+ if (lp_h != NULL) /* Found a conflict. */
+ break;
+
+ /* No conflict, promote the waiting lock. */
+ SH_TAILQ_REMOVE(&obj->waiters, lp_w, links, __db_lock);
+ lp_w->status = DB_LSTAT_PENDING;
+ SH_TAILQ_INSERT_TAIL(&obj->holders, lp_w, links);
+
+ /* Wake up waiter. */
+ MUTEX_UNLOCK(lt->dbenv, &lp_w->mutex);
+ state_changed = 1;
+ }
+
+ /*
+	 * If this object had waiters and no longer does, then we need
+ * to remove it from the dd_obj list.
+ */
+ if (had_waiters && SH_TAILQ_FIRST(&obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(&region->dd_objs, obj, dd_links, __db_lockobj);
+ return (state_changed);
+}
+
+/*
+ * __lock_remove_waiter --
+ * Any lock on the waitlist has a process waiting for it. Therefore,
+ * we can't return the lock to the freelist immediately. Instead, we can
+ * remove the lock from the list of waiters, set the status field of the
+ * lock, and then let the process waking up return the lock to the
+ * free list.
+ *
+ * This must be called with the Object bucket locked.
+ */
+static void
+__lock_remove_waiter(lt, sh_obj, lockp, status)
+ DB_LOCKTAB *lt;
+ DB_LOCKOBJ *sh_obj;
+ struct __db_lock *lockp;
+ db_status_t status;
+{
+ DB_LOCKREGION *region;
+ int do_wakeup;
+
+ region = lt->reginfo.primary;
+
+ do_wakeup = lockp->status == DB_LSTAT_WAITING;
+
+ SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
+ lockp->links.stqe_prev = -1;
+ lockp->status = status;
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(
+ &region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+
+ /*
+ * Wake whoever is waiting on this lock.
+ *
+ * The MUTEX_UNLOCK macro normally resolves to a single argument,
+ * keep the compiler quiet.
+ */
+ if (do_wakeup)
+ MUTEX_UNLOCK(lt->dbenv, &lockp->mutex);
+}
+
+/*
+ * __lock_expires -- set the expire time given the time to live.
+ * We assume that if timevalp is set then it contains "now".
+ * This avoids repeated system calls to get the time.
+ */
+static void
+__lock_expires(dbenv, timevalp, timeout)
+ DB_ENV *dbenv;
+ db_timeval_t *timevalp;
+ db_timeout_t timeout;
+{
+ if (!LOCK_TIME_ISVALID(timevalp))
+ __os_clock(dbenv, &timevalp->tv_sec, &timevalp->tv_usec);
+ if (timeout > 1000000) {
+ timevalp->tv_sec += timeout / 1000000;
+ timevalp->tv_usec += timeout % 1000000;
+ } else
+ timevalp->tv_usec += timeout;
+
+ if (timevalp->tv_usec > 1000000) {
+ timevalp->tv_sec++;
+ timevalp->tv_usec -= 1000000;
+ }
+}
+
+/*
+ * __lock_expired -- determine if a lock has expired.
+ *
+ * PUBLIC: int __lock_expired __P((DB_ENV *, db_timeval_t *, db_timeval_t *));
+ */
+int
+__lock_expired(dbenv, now, timevalp)
+ DB_ENV *dbenv;
+ db_timeval_t *now, *timevalp;
+{
+ if (!LOCK_TIME_ISVALID(timevalp))
+ return (0);
+
+ if (!LOCK_TIME_ISVALID(now))
+ __os_clock(dbenv, &now->tv_sec, &now->tv_usec);
+
+ return (now->tv_sec > timevalp->tv_sec ||
+ (now->tv_sec == timevalp->tv_sec &&
+ now->tv_usec >= timevalp->tv_usec));
+}
+
+/*
+ * __lock_trade --
+ *
+ * Trade locker ids on a lock. This is used to reassign file locks from
+ * a transactional locker id to a long-lived locker id. This should be
+ * called with the region mutex held.
+ */
+static int
+__lock_trade(dbenv, lock, new_locker)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+ u_int32_t new_locker;
+{
+ struct __db_lock *lp;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ DB_LOCKER *sh_locker;
+ int ret;
+ u_int32_t locker_ndx;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ lp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+
+ /* If the lock is already released, simply return. */
+ if (lp->gen != lock->gen)
+ return (DB_NOTFOUND);
+
+ /* Make sure that we can get new locker and add this lock to it. */
+ LOCKER_LOCK(lt, region, new_locker, locker_ndx);
+ if ((ret =
+ __lock_getlocker(lt, new_locker, locker_ndx, 0, &sh_locker)) != 0)
+ return (ret);
+
+ if (sh_locker == NULL) {
+ __db_err(dbenv, "Locker does not exist");
+ return (EINVAL);
+ }
+
+ /* Remove the lock from its current locker. */
+ if ((ret = __lock_checklocker(lt, lp, lp->holder, DB_LOCK_UNLINK)) != 0)
+ return (ret);
+
+ /* Add lock to its new locker. */
+ SH_LIST_INSERT_HEAD(&sh_locker->heldby, lp, locker_links, __db_lock);
+ sh_locker->nlocks++;
+ if (IS_WRITELOCK(lp->mode))
+ sh_locker->nwrites++;
+ lp->holder = new_locker;
+
+ return (0);
+}
diff --git a/storage/bdb/lock/lock_deadlock.c b/storage/bdb/lock/lock_deadlock.c
new file mode 100644
index 00000000000..d1461b89a4f
--- /dev/null
+++ b/storage/bdb/lock/lock_deadlock.c
@@ -0,0 +1,886 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_deadlock.c,v 11.54 2002/08/06 05:05:21 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+#include "dbinc/rep.h"
+
+#define ISSET_MAP(M, N) ((M)[(N) / 32] & (1 << (N) % 32))
+
+#define CLEAR_MAP(M, N) { \
+ u_int32_t __i; \
+ for (__i = 0; __i < (N); __i++) \
+ (M)[__i] = 0; \
+}
+
+#define SET_MAP(M, B) ((M)[(B) / 32] |= (1 << ((B) % 32)))
+#define CLR_MAP(M, B) ((M)[(B) / 32] &= ~(1 << ((B) % 32)))
+
+#define OR_MAP(D, S, N) { \
+ u_int32_t __i; \
+ for (__i = 0; __i < (N); __i++) \
+ D[__i] |= S[__i]; \
+}
+#define BAD_KILLID 0xffffffff
+
+typedef struct {
+ int valid;
+ int self_wait;
+ u_int32_t count;
+ u_int32_t id;
+ u_int32_t last_lock;
+ u_int32_t last_locker_id;
+ db_pgno_t pgno;
+} locker_info;
+
+static int __dd_abort __P((DB_ENV *, locker_info *));
+static int __dd_build __P((DB_ENV *,
+ u_int32_t, u_int32_t **, u_int32_t *, u_int32_t *, locker_info **));
+static int __dd_find __P((DB_ENV *,
+ u_int32_t *, locker_info *, u_int32_t, u_int32_t, u_int32_t ***));
+static int __dd_isolder __P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
+static int __dd_verify __P((locker_info *, u_int32_t *, u_int32_t *,
+ u_int32_t *, u_int32_t, u_int32_t, u_int32_t));
+
+#ifdef DIAGNOSTIC
+static void __dd_debug
+ __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t, u_int32_t));
+#endif
+
+/*
+ * lock_detect --
+ *
+ * PUBLIC: int __lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+ */
+int
+__lock_detect(dbenv, flags, atype, abortp)
+ DB_ENV *dbenv;
+ u_int32_t flags, atype;
+ int *abortp;
+{
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ DB_TXNMGR *tmgr;
+ locker_info *idmap;
+ u_int32_t *bitmap, *copymap, **deadp, **free_me, *tmpmap;
+ u_int32_t i, keeper, killid, limit, nalloc, nlockers;
+ u_int32_t lock_max, txn_max;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_detect", DB_INIT_LOCK);
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->lock_detect", flags, 0)) != 0)
+ return (ret);
+ switch (atype) {
+ case DB_LOCK_DEFAULT:
+ case DB_LOCK_EXPIRE:
+ case DB_LOCK_MAXLOCKS:
+ case DB_LOCK_MINLOCKS:
+ case DB_LOCK_MINWRITE:
+ case DB_LOCK_OLDEST:
+ case DB_LOCK_RANDOM:
+ case DB_LOCK_YOUNGEST:
+ break;
+ default:
+ __db_err(dbenv,
+ "DB_ENV->lock_detect: unknown deadlock detection mode specified");
+ return (EINVAL);
+ }
+
+ /*
+ * If this environment is a replication client, then we must use the
+ * MINWRITE detection discipline.
+ */
+ if (__rep_is_client(dbenv))
+ atype = DB_LOCK_MINWRITE;
+
+ free_me = NULL;
+
+ lt = dbenv->lk_handle;
+ if (abortp != NULL)
+ *abortp = 0;
+
+ /* Check if a detector run is necessary. */
+ LOCKREGION(dbenv, lt);
+
+ /* Make a pass only if auto-detect would run. */
+ region = lt->reginfo.primary;
+
+ if (region->need_dd == 0) {
+ UNLOCKREGION(dbenv, lt);
+ return (0);
+ }
+
+ /* Reset need_dd, so we know we've run the detector. */
+ region->need_dd = 0;
+
+ /* Build the waits-for bitmap. */
+ ret = __dd_build(dbenv, atype, &bitmap, &nlockers, &nalloc, &idmap);
+ lock_max = region->stat.st_cur_maxid;
+ UNLOCKREGION(dbenv, lt);
+
+ /*
+ * We need the cur_maxid from the txn region as well. In order
+ * to avoid tricky synchronization between the lock and txn
+ * regions, we simply unlock the lock region and then lock the
+ * txn region. This introduces a small window during which the
+ * transaction system could then wrap. We're willing to return
+ * the wrong answer for "oldest" or "youngest" in those rare
+ * circumstances.
+ */
+ tmgr = dbenv->tx_handle;
+ if (tmgr != NULL) {
+ R_LOCK(dbenv, &tmgr->reginfo);
+ txn_max = ((DB_TXNREGION *)tmgr->reginfo.primary)->cur_maxid;
+ R_UNLOCK(dbenv, &tmgr->reginfo);
+ } else
+ txn_max = TXN_MAXIMUM;
+ if (ret != 0 || atype == DB_LOCK_EXPIRE)
+ return (ret);
+
+ if (nlockers == 0)
+ return (0);
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_WAITSFOR))
+ __dd_debug(dbenv, idmap, bitmap, nlockers, nalloc);
+#endif
+ /* Now duplicate the bitmaps so we can verify deadlock participants. */
+ if ((ret = __os_calloc(dbenv, (size_t)nlockers,
+ sizeof(u_int32_t) * nalloc, &copymap)) != 0)
+ goto err;
+ memcpy(copymap, bitmap, nlockers * sizeof(u_int32_t) * nalloc);
+
+ if ((ret = __os_calloc(dbenv, sizeof(u_int32_t), nalloc, &tmpmap)) != 0)
+ goto err1;
+
+ /* Find a deadlock. */
+ if ((ret =
+ __dd_find(dbenv, bitmap, idmap, nlockers, nalloc, &deadp)) != 0)
+ return (ret);
+
+ killid = BAD_KILLID;
+ free_me = deadp;
+ for (; *deadp != NULL; deadp++) {
+ if (abortp != NULL)
+ ++*abortp;
+ killid = (u_int32_t)((*deadp - bitmap) / nalloc);
+ limit = killid;
+ keeper = BAD_KILLID;
+
+ if (atype == DB_LOCK_DEFAULT || atype == DB_LOCK_RANDOM)
+ goto dokill;
+ /*
+ * It's conceivable that under XA, the locker could
+ * have gone away.
+ */
+ if (killid == BAD_KILLID)
+ break;
+
+ /*
+ * Start with the id that we know is deadlocked
+ * and then examine all other set bits and see
+ * if any are a better candidate for abortion
+ * and that they are genuinely part of the
+ * deadlock. The definition of "best":
+ * OLDEST: smallest id
+ * YOUNGEST: largest id
+ * MAXLOCKS: maximum count
+ * MINLOCKS: minimum count
+ * MINWRITE: minimum count
+ */
+
+ for (i = (killid + 1) % nlockers;
+ i != limit;
+ i = (i + 1) % nlockers) {
+ if (!ISSET_MAP(*deadp, i))
+ continue;
+ switch (atype) {
+ case DB_LOCK_OLDEST:
+ if (__dd_isolder(idmap[killid].id,
+ idmap[i].id, lock_max, txn_max))
+ continue;
+ keeper = i;
+ break;
+ case DB_LOCK_YOUNGEST:
+ if (__dd_isolder(idmap[i].id,
+ idmap[killid].id, lock_max, txn_max))
+ continue;
+ keeper = i;
+ break;
+ case DB_LOCK_MAXLOCKS:
+ if (idmap[i].count < idmap[killid].count)
+ continue;
+ keeper = i;
+ break;
+ case DB_LOCK_MINLOCKS:
+ case DB_LOCK_MINWRITE:
+ if (idmap[i].count > idmap[killid].count)
+ continue;
+ keeper = i;
+ break;
+ default:
+ killid = BAD_KILLID;
+ ret = EINVAL;
+ goto dokill;
+ }
+ if (__dd_verify(idmap, *deadp,
+ tmpmap, copymap, nlockers, nalloc, i))
+ killid = i;
+ }
+
+dokill: if (killid == BAD_KILLID)
+ continue;
+
+ /*
+ * There are cases in which our general algorithm will
+ * fail. Returning 1 from verify indicates that the
+ * particular locker is not only involved in a deadlock,
+ * but that killing him will allow others to make forward
+ * progress. Unfortunately, there are cases where we need
+ * to abort someone, but killing them will not necessarily
+ * ensure forward progress (imagine N readers all trying to
+ * acquire a write lock). In such a scenario, we'll have
+ * gotten all the way through the loop, we will have found
+ * someone to keep (keeper will be valid), but killid will
+ * still be the initial deadlocker. In this case, if the
+ * initial killid satisfies __dd_verify, kill it, else abort
+ * keeper and indicate that we need to run deadlock detection
+ * again.
+ */
+
+ if (keeper != BAD_KILLID && killid == limit &&
+ __dd_verify(idmap, *deadp,
+ tmpmap, copymap, nlockers, nalloc, killid) == 0) {
+ LOCKREGION(dbenv, lt);
+ region->need_dd = 1;
+ UNLOCKREGION(dbenv, lt);
+ killid = keeper;
+ }
+
+ /* Kill the locker with lockid idmap[killid]. */
+ if ((ret = __dd_abort(dbenv, &idmap[killid])) != 0) {
+ /*
+ * It's possible that the lock was already aborted;
+ * this isn't necessarily a problem, so do not treat
+ * it as an error.
+ */
+ if (ret == DB_ALREADY_ABORTED)
+ ret = 0;
+ else
+ __db_err(dbenv,
+ "warning: unable to abort locker %lx",
+ (u_long)idmap[killid].id);
+ } else if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
+ __db_err(dbenv,
+ "Aborting locker %lx", (u_long)idmap[killid].id);
+ }
+ __os_free(dbenv, tmpmap);
+err1: __os_free(dbenv, copymap);
+
+err: if (free_me != NULL)
+ __os_free(dbenv, free_me);
+ __os_free(dbenv, bitmap);
+ __os_free(dbenv, idmap);
+
+ return (ret);
+}
+
+/*
+ * ========================================================================
+ * Utilities
+ */
+
+# define DD_INVALID_ID ((u_int32_t) -1)
+
+static int
+__dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
+ DB_ENV *dbenv;
+ u_int32_t atype, **bmp, *nlockers, *allocp;
+ locker_info **idmap;
+{
+ struct __db_lock *lp;
+ DB_LOCKER *lip, *lockerp, *child;
+ DB_LOCKOBJ *op, *lo;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ locker_info *id_array;
+ db_timeval_t now;
+ u_int32_t *bitmap, count, dd, *entryp, id, ndx, nentries, *tmpmap;
+ u_int8_t *pptr;
+ int expire_only, is_first, need_timeout, ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ LOCK_SET_TIME_INVALID(&now);
+ need_timeout = 0;
+ expire_only = atype == DB_LOCK_EXPIRE;
+
+ /*
+ * While we always check for expired timeouts, if we are called
+ * with DB_LOCK_EXPIRE, then we are only checking for timeouts
+ * (i.e., not doing deadlock detection at all). If we aren't
+	 * doing real deadlock detection, then we can skip a significant
+ * amount of the processing. In particular we do not build
+ * the conflict array and our caller needs to expect this.
+ */
+ if (expire_only) {
+ count = 0;
+ nentries = 0;
+ goto obj_loop;
+ }
+
+ /*
+ * We'll check how many lockers there are, add a few more in for
+ * good measure and then allocate all the structures. Then we'll
+ * verify that we have enough room when we go back in and get the
+ * mutex the second time.
+ */
+retry: count = region->stat.st_nlockers;
+
+ if (count == 0) {
+ *nlockers = 0;
+ return (0);
+ }
+
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
+ __db_err(dbenv, "%lu lockers", (u_long)count);
+
+ count += 20;
+ nentries = ALIGN(count, 32) / 32;
+
+ /*
+ * Allocate enough space for a count by count bitmap matrix.
+ *
+ * XXX
+ * We can probably save the malloc's between iterations just
+ * reallocing if necessary because count grew by too much.
+ */
+ if ((ret = __os_calloc(dbenv, (size_t)count,
+ sizeof(u_int32_t) * nentries, &bitmap)) != 0)
+ return (ret);
+
+ if ((ret = __os_calloc(dbenv,
+ sizeof(u_int32_t), nentries, &tmpmap)) != 0) {
+ __os_free(dbenv, bitmap);
+ return (ret);
+ }
+
+ if ((ret = __os_calloc(dbenv,
+ (size_t)count, sizeof(locker_info), &id_array)) != 0) {
+ __os_free(dbenv, bitmap);
+ __os_free(dbenv, tmpmap);
+ return (ret);
+ }
+
+ /*
+ * Now go back in and actually fill in the matrix.
+ */
+ if (region->stat.st_nlockers > count) {
+ __os_free(dbenv, bitmap);
+ __os_free(dbenv, tmpmap);
+ __os_free(dbenv, id_array);
+ goto retry;
+ }
+
+ /*
+ * First we go through and assign each locker a deadlock detector id.
+ */
+ for (id = 0, lip = SH_TAILQ_FIRST(&region->lockers, __db_locker);
+ lip != NULL;
+ lip = SH_TAILQ_NEXT(lip, ulinks, __db_locker)) {
+ if (F_ISSET(lip, DB_LOCKER_INABORT))
+ continue;
+ if (lip->master_locker == INVALID_ROFF) {
+ lip->dd_id = id++;
+ id_array[lip->dd_id].id = lip->id;
+ if (atype == DB_LOCK_MINLOCKS ||
+ atype == DB_LOCK_MAXLOCKS)
+ id_array[lip->dd_id].count = lip->nlocks;
+ if (atype == DB_LOCK_MINWRITE)
+ id_array[lip->dd_id].count = lip->nwrites;
+ } else
+ lip->dd_id = DD_INVALID_ID;
+
+ }
+
+ /*
+ * We only need consider objects that have waiters, so we use
+ * the list of objects with waiters (dd_objs) instead of traversing
+ * the entire hash table. For each object, we traverse the waiters
+ * list and add an entry in the waitsfor matrix for each waiter/holder
+ * combination.
+ */
+obj_loop:
+ for (op = SH_TAILQ_FIRST(&region->dd_objs, __db_lockobj);
+ op != NULL; op = SH_TAILQ_NEXT(op, dd_links, __db_lockobj)) {
+ if (expire_only)
+ goto look_waiters;
+ CLEAR_MAP(tmpmap, nentries);
+
+ /*
+ * First we go through and create a bit map that
+ * represents all the holders of this object.
+ */
+ for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ LOCKER_LOCK(lt, region, lp->holder, ndx);
+ if ((ret = __lock_getlocker(lt,
+ lp->holder, ndx, 0, &lockerp)) != 0)
+ continue;
+ if (F_ISSET(lockerp, DB_LOCKER_INABORT))
+ continue;
+
+ if (lockerp->dd_id == DD_INVALID_ID) {
+ dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
+ lockerp->master_locker))->dd_id;
+ lockerp->dd_id = dd;
+ if (atype == DB_LOCK_MINLOCKS ||
+ atype == DB_LOCK_MAXLOCKS)
+ id_array[dd].count += lockerp->nlocks;
+ if (atype == DB_LOCK_MINWRITE)
+ id_array[dd].count += lockerp->nwrites;
+
+ } else
+ dd = lockerp->dd_id;
+ id_array[dd].valid = 1;
+
+ /*
+ * If the holder has already been aborted, then
+ * we should ignore it for now.
+ */
+ if (lp->status == DB_LSTAT_HELD)
+ SET_MAP(tmpmap, dd);
+ }
+
+ /*
+ * Next, for each waiter, we set its row in the matrix
+ * equal to the map of holders we set up above.
+ */
+look_waiters:
+ for (is_first = 1,
+ lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
+ lp != NULL;
+ is_first = 0,
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
+ LOCKER_LOCK(lt, region, lp->holder, ndx);
+ if ((ret = __lock_getlocker(lt,
+ lp->holder, ndx, 0, &lockerp)) != 0)
+ continue;
+ if (lp->status == DB_LSTAT_WAITING) {
+ if (__lock_expired(dbenv,
+ &now, &lockerp->lk_expire)) {
+ lp->status = DB_LSTAT_EXPIRED;
+ MUTEX_UNLOCK(dbenv, &lp->mutex);
+ continue;
+ }
+ need_timeout =
+ LOCK_TIME_ISVALID(&lockerp->lk_expire);
+ }
+
+ if (expire_only)
+ continue;
+
+ if (lockerp->dd_id == DD_INVALID_ID) {
+ dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
+ lockerp->master_locker))->dd_id;
+ lockerp->dd_id = dd;
+ if (atype == DB_LOCK_MINLOCKS ||
+ atype == DB_LOCK_MAXLOCKS)
+ id_array[dd].count += lockerp->nlocks;
+ if (atype == DB_LOCK_MINWRITE)
+ id_array[dd].count += lockerp->nwrites;
+ } else
+ dd = lockerp->dd_id;
+ id_array[dd].valid = 1;
+
+ /*
+ * If the transaction is pending abortion, then
+ * ignore it on this iteration.
+ */
+ if (lp->status != DB_LSTAT_WAITING)
+ continue;
+
+ entryp = bitmap + (nentries * dd);
+ OR_MAP(entryp, tmpmap, nentries);
+ /*
+ * If this is the first waiter on the queue,
+ * then we remove the waitsfor relationship
+ * with oneself. However, if it's anywhere
+ * else on the queue, then we have to keep
+ * it and we have an automatic deadlock.
+ */
+ if (is_first) {
+ if (ISSET_MAP(entryp, dd))
+ id_array[dd].self_wait = 1;
+ CLR_MAP(entryp, dd);
+ }
+ }
+ }
+
+ if (expire_only) {
+ region->need_dd = need_timeout;
+ return (0);
+ }
+
+ /* Now for each locker; record its last lock. */
+ for (id = 0; id < count; id++) {
+ if (!id_array[id].valid)
+ continue;
+ LOCKER_LOCK(lt, region, id_array[id].id, ndx);
+ if ((ret = __lock_getlocker(lt,
+ id_array[id].id, ndx, 0, &lockerp)) != 0) {
+ __db_err(dbenv,
+ "No locks for locker %lu", (u_long)id_array[id].id);
+ continue;
+ }
+
+ /*
+ * If this is a master transaction, try to
+ * find one of its children's locks first,
+ * as they are probably more recent.
+ */
+ child = SH_LIST_FIRST(&lockerp->child_locker, __db_locker);
+ if (child != NULL) {
+ do {
+ lp = SH_LIST_FIRST(&child->heldby, __db_lock);
+ if (lp != NULL &&
+ lp->status == DB_LSTAT_WAITING) {
+ id_array[id].last_locker_id = child->id;
+ goto get_lock;
+ }
+ child = SH_LIST_NEXT(
+ child, child_link, __db_locker);
+ } while (child != NULL);
+ }
+ lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
+ if (lp != NULL) {
+ id_array[id].last_locker_id = lockerp->id;
+ get_lock: id_array[id].last_lock = R_OFFSET(&lt->reginfo, lp);
+ lo = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+ pptr = SH_DBT_PTR(&lo->lockobj);
+ if (lo->lockobj.size >= sizeof(db_pgno_t))
+ memcpy(&id_array[id].pgno,
+ pptr, sizeof(db_pgno_t));
+ else
+ id_array[id].pgno = 0;
+ }
+ }
+
+ /*
+ * Pass complete, reset the deadlock detector bit,
+ * unless we have pending timeouts.
+ */
+ region->need_dd = need_timeout;
+
+ /*
+ * Now we can release everything except the bitmap matrix that we
+ * created.
+ */
+ *nlockers = id;
+ *idmap = id_array;
+ *bmp = bitmap;
+ *allocp = nentries;
+ __os_free(dbenv, tmpmap);
+ return (0);
+}
+
+static int
+__dd_find(dbenv, bmp, idmap, nlockers, nalloc, deadp)
+ DB_ENV *dbenv;
+ u_int32_t *bmp, nlockers, nalloc;
+ locker_info *idmap;
+ u_int32_t ***deadp;
+{
+ u_int32_t i, j, k, *mymap, *tmpmap;
+ u_int32_t **retp;
+ int ndead, ndeadalloc, ret;
+
+#undef INITIAL_DEAD_ALLOC
+#define INITIAL_DEAD_ALLOC 8
+
+ ndeadalloc = INITIAL_DEAD_ALLOC;
+ ndead = 0;
+ if ((ret = __os_malloc(dbenv,
+ ndeadalloc * sizeof(u_int32_t *), &retp)) != 0)
+ return (ret);
+
+ /*
+ * For each locker, OR in the bits from the lockers on which that
+ * locker is waiting.
+ */
+ for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nalloc) {
+ if (!idmap[i].valid)
+ continue;
+ for (j = 0; j < nlockers; j++) {
+ if (!ISSET_MAP(mymap, j))
+ continue;
+
+ /* Find the map for this bit. */
+ tmpmap = bmp + (nalloc * j);
+ OR_MAP(mymap, tmpmap, nalloc);
+ if (!ISSET_MAP(mymap, i))
+ continue;
+
+ /* Make sure we leave room for NULL. */
+ if (ndead + 2 >= ndeadalloc) {
+ ndeadalloc <<= 1;
+ /*
+ * If the alloc fails, then simply return the
+ * deadlocks that we already have.
+ */
+ if (__os_realloc(dbenv,
+ ndeadalloc * sizeof(u_int32_t),
+ &retp) != 0) {
+ retp[ndead] = NULL;
+ *deadp = retp;
+ return (0);
+ }
+ }
+ retp[ndead++] = mymap;
+
+ /* Mark all participants in this deadlock invalid. */
+ for (k = 0; k < nlockers; k++)
+ if (ISSET_MAP(mymap, k))
+ idmap[k].valid = 0;
+ break;
+ }
+ }
+ retp[ndead] = NULL;
+ *deadp = retp;
+ return (0);
+}
+
+static int
+__dd_abort(dbenv, info)
+ DB_ENV *dbenv;
+ locker_info *info;
+{
+ struct __db_lock *lockp;
+ DB_LOCKER *lockerp;
+ DB_LOCKOBJ *sh_obj;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+
+ /* Find the locker's last lock. */
+ LOCKER_LOCK(lt, region, info->last_locker_id, ndx);
+ if ((ret = __lock_getlocker(lt,
+ info->last_locker_id, ndx, 0, &lockerp)) != 0 || lockerp == NULL) {
+ if (ret == 0)
+ ret = DB_ALREADY_ABORTED;
+ goto out;
+ }
+
+ /* It's possible that this locker was already aborted. */
+ if ((lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock)) == NULL) {
+ ret = DB_ALREADY_ABORTED;
+ goto out;
+ }
+ if (R_OFFSET(&lt->reginfo, lockp) != info->last_lock ||
+ lockp->status != DB_LSTAT_WAITING) {
+ ret = DB_ALREADY_ABORTED;
+ goto out;
+ }
+
+ sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
+ SH_LIST_REMOVE(lockp, locker_links, __db_lock);
+
+ /* Abort lock, take it off list, and wake up this lock. */
+ SHOBJECT_LOCK(lt, region, sh_obj, ndx);
+ lockp->status = DB_LSTAT_ABORTED;
+ SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
+
+ /*
+ * Either the waiters list is now empty, in which case we remove
+ * it from dd_objs, or it is not empty, in which case we need to
+ * do promotion.
+ */
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+ else
+ ret = __lock_promote(lt, sh_obj, 0);
+ MUTEX_UNLOCK(dbenv, &lockp->mutex);
+
+ region->stat.st_ndeadlocks++;
+ UNLOCKREGION(dbenv, lt);
+
+ return (0);
+
+out: UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
+#ifdef DIAGNOSTIC
+static void
+__dd_debug(dbenv, idmap, bitmap, nlockers, nalloc)
+ DB_ENV *dbenv;
+ locker_info *idmap;
+ u_int32_t *bitmap, nlockers, nalloc;
+{
+ u_int32_t i, j, *mymap;
+ char *msgbuf;
+
+ __db_err(dbenv, "Waitsfor array\nWaiter:\tWaiting on:");
+
+ /* Allocate space to print 10 bytes per item waited on. */
+#undef MSGBUF_LEN
+#define MSGBUF_LEN ((nlockers + 1) * 10 + 64)
+ if (__os_malloc(dbenv, MSGBUF_LEN, &msgbuf) != 0)
+ return;
+
+ for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nalloc) {
+ if (!idmap[i].valid)
+ continue;
+ sprintf(msgbuf, /* Waiter. */
+ "%lx/%lu:\t", (u_long)idmap[i].id, (u_long)idmap[i].pgno);
+ for (j = 0; j < nlockers; j++)
+ if (ISSET_MAP(mymap, j))
+ sprintf(msgbuf, "%s %lx", msgbuf,
+ (u_long)idmap[j].id);
+ (void)sprintf(msgbuf,
+ "%s %lu", msgbuf, (u_long)idmap[i].last_lock);
+ __db_err(dbenv, msgbuf);
+ }
+
+ __os_free(dbenv, msgbuf);
+}
+#endif
+
+/*
+ * Given a bitmap that contains a deadlock, verify that the bit
+ * specified in the which parameter indicates a transaction that
+ * is actually deadlocked. Return 1 if really deadlocked, 0 otherwise.
+ * deadmap is the array that identified the deadlock.
+ * tmpmap is a copy of the initial bitmaps from the dd_build phase
+ * origmap is a temporary bit map into which we can OR things
+ * nlockers is the number of actual lockers under consideration
+ * nalloc is the number of words allocated for the bitmap
+ * which is the locker in question
+ */
+static int
+__dd_verify(idmap, deadmap, tmpmap, origmap, nlockers, nalloc, which)
+ locker_info *idmap;
+ u_int32_t *deadmap, *tmpmap, *origmap;
+ u_int32_t nlockers, nalloc, which;
+{
+ u_int32_t *tmap;
+ u_int32_t j;
+ int count;
+
+ memset(tmpmap, 0, sizeof(u_int32_t) * nalloc);
+
+ /*
+ * In order for "which" to be actively involved in
+ * the deadlock, removing him from the evaluation
+ * must remove the deadlock. So, we OR together everyone
+ * except which; if all the participants still have their
+ * bits set, then the deadlock persists and which does
+ * not participate. If the deadlock does not persist
+ * then "which" does participate.
+ */
+ count = 0;
+ for (j = 0; j < nlockers; j++) {
+ if (!ISSET_MAP(deadmap, j) || j == which)
+ continue;
+
+ /* Find the map for this bit. */
+ tmap = origmap + (nalloc * j);
+
+ /*
+ * We special case the first waiter who is also a holder, so
+ * we don't automatically call that a deadlock. However, if
+ * it really is a deadlock, we need the bit set now so that
+ * we treat the first waiter like other waiters.
+ */
+ if (idmap[j].self_wait)
+ SET_MAP(tmap, j);
+ OR_MAP(tmpmap, tmap, nalloc);
+ count++;
+ }
+
+ if (count == 1)
+ return (1);
+
+ /*
+ * Now check the resulting map and see whether
+ * all participants still have their bit set.
+ */
+ for (j = 0; j < nlockers; j++) {
+ if (!ISSET_MAP(deadmap, j) || j == which)
+ continue;
+ if (!ISSET_MAP(tmpmap, j))
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * __dd_isolder --
+ *
+ * Figure out the relative age of two lockers. We make all lockers
+ * older than all transactions, because that's how it's worked
+ * historically (because lockers are lower ids).
+ */
+static int
+__dd_isolder(a, b, lock_max, txn_max)
+ u_int32_t a, b;
+ u_int32_t lock_max, txn_max;
+{
+ u_int32_t max;
+
+ /* Check for comparing lock-id and txnid. */
+ if (a <= DB_LOCK_MAXID && b > DB_LOCK_MAXID)
+ return (1);
+ if (b <= DB_LOCK_MAXID && a > DB_LOCK_MAXID)
+ return (0);
+
+ /* In the same space; figure out which one. */
+ max = txn_max;
+ if (a <= DB_LOCK_MAXID)
+ max = lock_max;
+
+ /*
+ * We can't get a 100% correct ordering, because we don't know
+ * where the current interval started and if there were older
+ * lockers outside the interval. We do the best we can.
+ */
+
+ /*
+ * Check for a wrapped case with ids above max.
+ */
+ if (a > max && b < max)
+ return (1);
+ if (b > max && a < max)
+ return (0);
+
+ return (a < b);
+}
diff --git a/storage/bdb/lock/lock_method.c b/storage/bdb/lock/lock_method.c
new file mode 100644
index 00000000000..72703e253bc
--- /dev/null
+++ b/storage/bdb/lock/lock_method.c
@@ -0,0 +1,275 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_method.c,v 11.30 2002/03/27 04:32:20 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
+static int __lock_set_lk_detect __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max_locks __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max_objects __P((DB_ENV *, u_int32_t));
+static int __lock_set_env_timeout __P((DB_ENV *, db_timeout_t, u_int32_t));
+
+/*
+ * __lock_dbenv_create --
+ * Lock specific creation of the DB_ENV structure: install default
+ * limits and wire up the lock-subsystem method table, choosing the
+ * RPC client stubs when the environment is an RPC client.
+ *
+ * PUBLIC: void __lock_dbenv_create __P((DB_ENV *));
+ */
+void
+__lock_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ */
+
+ dbenv->lk_max = DB_LOCK_DEFAULT_N;
+ dbenv->lk_max_lockers = DB_LOCK_DEFAULT_N;
+ dbenv->lk_max_objects = DB_LOCK_DEFAULT_N;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_lk_conflicts = __dbcl_set_lk_conflict;
+ dbenv->set_lk_detect = __dbcl_set_lk_detect;
+ dbenv->set_lk_max = __dbcl_set_lk_max;
+ dbenv->set_lk_max_lockers = __dbcl_set_lk_max_lockers;
+ dbenv->set_lk_max_locks = __dbcl_set_lk_max_locks;
+ dbenv->set_lk_max_objects = __dbcl_set_lk_max_objects;
+ dbenv->lock_detect = __dbcl_lock_detect;
+ dbenv->lock_dump_region = NULL;
+ dbenv->lock_get = __dbcl_lock_get;
+ dbenv->lock_id = __dbcl_lock_id;
+ dbenv->lock_id_free = __dbcl_lock_id_free;
+ dbenv->lock_put = __dbcl_lock_put;
+ dbenv->lock_stat = __dbcl_lock_stat;
+ dbenv->lock_vec = __dbcl_lock_vec;
+ /*
+ * NOTE(review): this branch leaves set_timeout and
+ * lock_downgrade unset -- confirm that is intentional
+ * for RPC clients.
+ */
+ } else
+#endif
+ {
+ dbenv->set_lk_conflicts = __lock_set_lk_conflicts;
+ dbenv->set_lk_detect = __lock_set_lk_detect;
+ dbenv->set_lk_max = __lock_set_lk_max;
+ dbenv->set_lk_max_lockers = __lock_set_lk_max_lockers;
+ dbenv->set_lk_max_locks = __lock_set_lk_max_locks;
+ dbenv->set_lk_max_objects = __lock_set_lk_max_objects;
+ dbenv->set_timeout = __lock_set_env_timeout;
+ dbenv->lock_detect = __lock_detect;
+ dbenv->lock_dump_region = __lock_dump_region;
+ dbenv->lock_get = __lock_get;
+ dbenv->lock_id = __lock_id;
+ dbenv->lock_id_free = __lock_id_free;
+#ifdef CONFIG_TEST
+ dbenv->lock_id_set = __lock_id_set;
+#endif
+ dbenv->lock_put = __lock_put;
+ dbenv->lock_stat = __lock_stat;
+ dbenv->lock_vec = __lock_vec;
+ dbenv->lock_downgrade = __lock_downgrade;
+ }
+}
+
+/*
+ * __lock_dbenv_close --
+ * Lock specific destruction of the DB_ENV structure.
+ *
+ * PUBLIC: void __lock_dbenv_close __P((DB_ENV *));
+ */
+void
+__lock_dbenv_close(dbenv)
+ DB_ENV *dbenv;
+{
+ /* Discard the conflict matrix allocated by set_lk_conflicts. */
+ if (dbenv->lk_conflicts != NULL) {
+ __os_free(dbenv, dbenv->lk_conflicts);
+ dbenv->lk_conflicts = NULL;
+ }
+}
+
+/*
+ * __lock_set_lk_conflicts
+ * Set the conflicts matrix: a caller-supplied lk_modes x lk_modes
+ * byte array, copied into memory owned by the DB_ENV. Must be
+ * called before the environment is opened.
+ */
+static int
+__lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
+ DB_ENV *dbenv;
+ u_int8_t *lk_conflicts;
+ int lk_modes;
+{
+ int ret;
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_conflicts");
+
+ /* Discard any matrix configured by a previous call. */
+ if (dbenv->lk_conflicts != NULL) {
+ __os_free(dbenv, dbenv->lk_conflicts);
+ dbenv->lk_conflicts = NULL;
+ }
+ if ((ret = __os_malloc(dbenv,
+ lk_modes * lk_modes, &dbenv->lk_conflicts)) != 0)
+ return (ret);
+ memcpy(dbenv->lk_conflicts, lk_conflicts, lk_modes * lk_modes);
+ dbenv->lk_modes = lk_modes;
+
+ return (0);
+}
+
+/*
+ * __lock_set_lk_detect
+ * Set the automatic deadlock detection policy; rejects any value
+ * other than the known DB_LOCK_* detector modes.
+ */
+static int
+__lock_set_lk_detect(dbenv, lk_detect)
+ DB_ENV *dbenv;
+ u_int32_t lk_detect;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_detect");
+
+ switch (lk_detect) {
+ case DB_LOCK_DEFAULT:
+ case DB_LOCK_EXPIRE:
+ case DB_LOCK_MAXLOCKS:
+ case DB_LOCK_MINLOCKS:
+ case DB_LOCK_MINWRITE:
+ case DB_LOCK_OLDEST:
+ case DB_LOCK_RANDOM:
+ case DB_LOCK_YOUNGEST:
+ break;
+ default:
+ __db_err(dbenv,
+ "DB_ENV->set_lk_detect: unknown deadlock detection mode specified");
+ return (EINVAL);
+ }
+ dbenv->lk_detect = lk_detect;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max
+ * Set the lock table size. Sets the lock, object and locker
+ * maximums to the same value in one call.
+ */
+static int
+__lock_set_lk_max(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max");
+
+ dbenv->lk_max = lk_max;
+ dbenv->lk_max_objects = lk_max;
+ dbenv->lk_max_lockers = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max_locks
+ * Set the maximum number of locks in the lock table.
+ */
+static int
+__lock_set_lk_max_locks(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max_locks");
+
+ dbenv->lk_max = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max_lockers
+ * Set the maximum number of lockers in the lock table.
+ */
+static int
+__lock_set_lk_max_lockers(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max_lockers");
+
+ dbenv->lk_max_lockers = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_lk_max_objects
+ * Set the maximum number of locked objects in the lock table.
+ */
+static int
+__lock_set_lk_max_objects(dbenv, lk_max)
+ DB_ENV *dbenv;
+ u_int32_t lk_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_max_objects");
+
+ dbenv->lk_max_objects = lk_max;
+ return (0);
+}
+
+/*
+ * __lock_set_env_timeout
+ * Set the lock or transaction timeout for the environment. If the
+ * environment is already open, the shared region's copy is updated
+ * as well as the DB_ENV's local copy.
+ */
+static int
+__lock_set_env_timeout(dbenv, timeout, flags)
+ DB_ENV *dbenv;
+ db_timeout_t timeout;
+ u_int32_t flags;
+{
+ DB_LOCKREGION *region;
+
+ /* region stays NULL until the environment has been opened. */
+ region = NULL;
+ if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ if (!LOCKING_ON(dbenv))
+ return (__db_env_config(
+ dbenv, "set_timeout", DB_INIT_LOCK));
+ region = ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary;
+ }
+
+ switch (flags) {
+ case DB_SET_LOCK_TIMEOUT:
+ dbenv->lk_timeout = timeout;
+ if (region != NULL)
+ region->lk_timeout = timeout;
+ break;
+ case DB_SET_TXN_TIMEOUT:
+ dbenv->tx_timeout = timeout;
+ if (region != NULL)
+ region->tx_timeout = timeout;
+ break;
+ default:
+ return (__db_ferr(dbenv, "DB_ENV->set_timeout", 0));
+ /* NOTREACHED */
+ }
+
+ return (0);
+}
diff --git a/storage/bdb/lock/lock_region.c b/storage/bdb/lock/lock_region.c
new file mode 100644
index 00000000000..6df6937e873
--- /dev/null
+++ b/storage/bdb/lock/lock_region.c
@@ -0,0 +1,417 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_region.c,v 11.69 2002/08/06 05:05:22 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+static int __lock_init __P((DB_ENV *, DB_LOCKTAB *));
+static size_t
+ __lock_region_size __P((DB_ENV *));
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+static size_t __lock_region_maint __P((DB_ENV *));
+#endif
+
+/*
+ * The conflict arrays are set up such that the row is the lock you are
+ * holding and the column is the lock that is desired.
+ */
+#define DB_LOCK_RIW_N 9
+static const u_int8_t db_riw_conflicts[] = {
+/* N R W WT IW IR RIW DR WW */
+/* N */ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+/* R */ 0, 0, 1, 0, 1, 0, 1, 0, 1,
+/* W */ 0, 1, 1, 1, 1, 1, 1, 1, 1,
+/* WT */ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+/* IW */ 0, 1, 1, 0, 0, 0, 0, 1, 1,
+/* IR */ 0, 0, 1, 0, 0, 0, 0, 0, 1,
+/* RIW */ 0, 1, 1, 0, 0, 0, 0, 1, 1,
+/* DR */ 0, 0, 1, 0, 1, 0, 1, 0, 0,
+/* WW */ 0, 1, 1, 0, 1, 1, 1, 0, 1
+};
+
+/*
+ * This conflict array is used for concurrent db access (CDB). It uses
+ * the same locks as the db_riw_conflicts array, but adds an IW mode to
+ * be used for write cursors.
+ */
+#define DB_LOCK_CDB_N 5
+static const u_int8_t db_cdb_conflicts[] = {
+ /* N R W WT IW */
+ /* N */ 0, 0, 0, 0, 0,
+ /* R */ 0, 0, 1, 0, 0,
+ /* W */ 0, 1, 1, 1, 1,
+ /* WT */ 0, 0, 0, 0, 0,
+ /* IW */ 0, 0, 1, 0, 1
+};
+
+/*
+ * __lock_open --
+ * Internal version of lock_open: only called from DB_ENV->open.
+ * Joins or creates the shared lock region and hooks the resulting
+ * DB_LOCKTAB into dbenv->lk_handle.
+ *
+ * PUBLIC: int __lock_open __P((DB_ENV *));
+ */
+int
+__lock_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ size_t size;
+ int ret;
+
+ /* Create the lock table structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOCKTAB), &lt)) != 0)
+ return (ret);
+ lt->dbenv = dbenv;
+
+ /* Join/create the lock region. */
+ lt->reginfo.type = REGION_TYPE_LOCK;
+ lt->reginfo.id = INVALID_REGION_ID;
+ lt->reginfo.mode = dbenv->db_mode;
+ lt->reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&lt->reginfo, REGION_CREATE_OK);
+ size = __lock_region_size(dbenv);
+ if ((ret = __db_r_attach(dbenv, &lt->reginfo, size)) != 0)
+ goto err;
+
+ /* If we created the region, initialize it. */
+ if (F_ISSET(&lt->reginfo, REGION_CREATE))
+ if ((ret = __lock_init(dbenv, lt)) != 0)
+ goto err;
+
+ /* Set the local addresses. */
+ region = lt->reginfo.primary =
+ R_ADDR(&lt->reginfo, lt->reginfo.rp->primary);
+
+ /* Check for incompatible automatic deadlock detection requests. */
+ if (dbenv->lk_detect != DB_LOCK_NORUN) {
+ if (region->detect != DB_LOCK_NORUN &&
+ dbenv->lk_detect != DB_LOCK_DEFAULT &&
+ region->detect != dbenv->lk_detect) {
+ __db_err(dbenv,
+ "lock_open: incompatible deadlock detector mode");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Upgrade if our caller wants automatic detection, and it
+ * was not currently being done, whether or not we created
+ * the region.
+ */
+ if (region->detect == DB_LOCK_NORUN)
+ region->detect = dbenv->lk_detect;
+ }
+
+ /*
+ * A process joining the region may have reset the lock and transaction
+ * timeouts.
+ */
+ if (dbenv->lk_timeout != 0)
+ region->lk_timeout = dbenv->lk_timeout;
+ if (dbenv->tx_timeout != 0)
+ region->tx_timeout = dbenv->tx_timeout;
+
+ /* Set remaining pointers into region. */
+ lt->conflicts = (u_int8_t *)R_ADDR(&lt->reginfo, region->conf_off);
+ lt->obj_tab = (DB_HASHTAB *)R_ADDR(&lt->reginfo, region->obj_off);
+ lt->locker_tab = (DB_HASHTAB *)R_ADDR(&lt->reginfo, region->locker_off);
+
+ R_UNLOCK(dbenv, &lt->reginfo);
+
+ dbenv->lk_handle = lt;
+ return (0);
+
+ /*
+ * Error cleanup: if we created the region and then failed, the
+ * partially built region is unusable, so panic the environment;
+ * otherwise just detach and free the local table.
+ */
+err: if (lt->reginfo.addr != NULL) {
+ if (F_ISSET(&lt->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+ R_UNLOCK(dbenv, &lt->reginfo);
+ (void)__db_r_detach(dbenv, &lt->reginfo, 0);
+ }
+ __os_free(dbenv, lt);
+ return (ret);
+}
+
+/*
+ * __lock_init --
+ * Initialize the lock region: allocate the primary region struct,
+ * the conflict matrix, the object and locker hash tables, and the
+ * free lists of locks, objects and lockers.
+ */
+static int
+__lock_init(dbenv, lt)
+ DB_ENV *dbenv;
+ DB_LOCKTAB *lt;
+{
+ const u_int8_t *lk_conflicts;
+ struct __db_lock *lp;
+ DB_LOCKER *lidp;
+ DB_LOCKOBJ *op;
+ DB_LOCKREGION *region;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ size_t maint_size;
+#endif
+ u_int32_t i, lk_modes;
+ u_int8_t *addr;
+ int ret;
+
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(DB_LOCKREGION), 0, &lt->reginfo.primary)) != 0)
+ goto mem_err;
+ lt->reginfo.rp->primary = R_OFFSET(&lt->reginfo, lt->reginfo.primary);
+ region = lt->reginfo.primary;
+ memset(region, 0, sizeof(*region));
+
+ /* Select a conflict matrix if none specified. */
+ if (dbenv->lk_modes == 0)
+ if (CDB_LOCKING(dbenv)) {
+ lk_modes = DB_LOCK_CDB_N;
+ lk_conflicts = db_cdb_conflicts;
+ } else {
+ lk_modes = DB_LOCK_RIW_N;
+ lk_conflicts = db_riw_conflicts;
+ }
+ else {
+ lk_modes = dbenv->lk_modes;
+ lk_conflicts = dbenv->lk_conflicts;
+ }
+
+ region->need_dd = 0;
+ region->detect = DB_LOCK_NORUN;
+ region->lk_timeout = dbenv->lk_timeout;
+ region->tx_timeout = dbenv->tx_timeout;
+ region->locker_t_size = __db_tablesize(dbenv->lk_max_lockers);
+ region->object_t_size = __db_tablesize(dbenv->lk_max_objects);
+ memset(&region->stat, 0, sizeof(region->stat));
+ region->stat.st_id = 0;
+ region->stat.st_cur_maxid = DB_LOCK_MAXID;
+ region->stat.st_maxlocks = dbenv->lk_max;
+ region->stat.st_maxlockers = dbenv->lk_max_lockers;
+ region->stat.st_maxobjects = dbenv->lk_max_objects;
+ region->stat.st_nmodes = lk_modes;
+
+ /* Allocate room for the conflict matrix and initialize it. */
+ if ((ret =
+ __db_shalloc(lt->reginfo.addr, lk_modes * lk_modes, 0, &addr)) != 0)
+ goto mem_err;
+ memcpy(addr, lk_conflicts, lk_modes * lk_modes);
+ region->conf_off = R_OFFSET(&lt->reginfo, addr);
+
+ /* Allocate room for the object hash table and initialize it. */
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ region->object_t_size * sizeof(DB_HASHTAB), 0, &addr)) != 0)
+ goto mem_err;
+ __db_hashinit(addr, region->object_t_size);
+ region->obj_off = R_OFFSET(&lt->reginfo, addr);
+
+ /* Allocate room for the locker hash table and initialize it. */
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ region->locker_t_size * sizeof(DB_HASHTAB), 0, &addr)) != 0)
+ goto mem_err;
+ __db_hashinit(addr, region->locker_t_size);
+ region->locker_off = R_OFFSET(&lt->reginfo, addr);
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ maint_size = __lock_region_maint(dbenv);
+ /* Allocate room for the locker maintenance info and initialize it. */
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(REGMAINT) + maint_size, 0, &addr)) != 0)
+ goto mem_err;
+ __db_maintinit(&lt->reginfo, addr, maint_size);
+ region->maint_off = R_OFFSET(&lt->reginfo, addr);
+#endif
+
+ /*
+ * Initialize locks onto a free list. Initialize and lock the mutex
+ * so that when we need to block, all we need do is try to acquire
+ * the mutex.
+ */
+ SH_TAILQ_INIT(&region->free_locks);
+ for (i = 0; i < region->stat.st_maxlocks; ++i) {
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(struct __db_lock), MUTEX_ALIGN, &lp)) != 0)
+ goto mem_err;
+ lp->status = DB_LSTAT_FREE;
+ lp->gen = 0;
+ /*
+ * NOTE(review): a mutex setup failure returns directly,
+ * without the mem_err message; the caller's error path
+ * tears the region down -- confirm.
+ */
+ if ((ret = __db_mutex_setup(dbenv, &lt->reginfo, &lp->mutex,
+ MUTEX_NO_RLOCK | MUTEX_SELF_BLOCK)) != 0)
+ return (ret);
+ MUTEX_LOCK(dbenv, &lp->mutex);
+ SH_TAILQ_INSERT_HEAD(&region->free_locks, lp, links, __db_lock);
+ }
+
+ /* Initialize objects onto a free list. */
+ SH_TAILQ_INIT(&region->dd_objs);
+ SH_TAILQ_INIT(&region->free_objs);
+ for (i = 0; i < region->stat.st_maxobjects; ++i) {
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(DB_LOCKOBJ), 0, &op)) != 0)
+ goto mem_err;
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_objs, op, links, __db_lockobj);
+ }
+
+ /* Initialize lockers onto a free list. */
+ SH_TAILQ_INIT(&region->lockers);
+ SH_TAILQ_INIT(&region->free_lockers);
+ for (i = 0; i < region->stat.st_maxlockers; ++i) {
+ if ((ret = __db_shalloc(lt->reginfo.addr,
+ sizeof(DB_LOCKER), 0, &lidp)) != 0) {
+mem_err: __db_err(dbenv,
+ "Unable to allocate memory for the lock table");
+ return (ret);
+ }
+ SH_TAILQ_INSERT_HEAD(
+ &region->free_lockers, lidp, links, __db_locker);
+ }
+
+ return (0);
+}
+
+/*
+ * __lock_dbenv_refresh --
+ * Clean up after the lock system on a close or failed open. Called
+ * only from __dbenv_refresh. (Formerly called __lock_close.)
+ *
+ * PUBLIC: int __lock_dbenv_refresh __P((DB_ENV *));
+ */
+int
+__lock_dbenv_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOCKTAB *lt;
+ int ret;
+
+ lt = dbenv->lk_handle;
+
+ /* Detach from the region. */
+ ret = __db_r_detach(dbenv, &lt->reginfo, 0);
+
+ /* Free the local table even if the detach failed. */
+ __os_free(dbenv, lt);
+
+ dbenv->lk_handle = NULL;
+ return (ret);
+}
+
+/*
+ * __lock_region_size --
+ * Return the region size.
+ */
+static size_t
+__lock_region_size(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t retval;
+
+ /*
+ * Figure out how much space we're going to need. This list should
+ * map one-to-one with the __db_shalloc calls in __lock_init.
+ */
+ retval = 0;
+ retval += __db_shalloc_size(sizeof(DB_LOCKREGION), 1);
+ /*
+ * NOTE(review): when no conflict matrix was configured, lk_modes
+ * is 0 here although __lock_init allocates the default matrix;
+ * the 25% slop added below appears to cover that -- confirm.
+ */
+ retval += __db_shalloc_size(dbenv->lk_modes * dbenv->lk_modes, 1);
+ retval += __db_shalloc_size(
+ __db_tablesize(dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 1);
+ retval += __db_shalloc_size(
+ __db_tablesize(dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 1);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ retval +=
+ __db_shalloc_size(sizeof(REGMAINT) + __lock_region_maint(dbenv), 1);
+#endif
+ retval += __db_shalloc_size(
+ sizeof(struct __db_lock), MUTEX_ALIGN) * dbenv->lk_max;
+ retval += __db_shalloc_size(
+ sizeof(DB_LOCKOBJ), 1) * dbenv->lk_max_objects;
+ retval += __db_shalloc_size(
+ sizeof(DB_LOCKER), 1) * dbenv->lk_max_lockers;
+
+ /*
+ * Include 16 bytes of string space per lock. DB doesn't use it
+ * because we pre-allocate lock space for DBTs in the structure.
+ */
+ retval += __db_shalloc_size(dbenv->lk_max * 16, sizeof(size_t));
+
+ /* And we keep getting this wrong, let's be generous. */
+ retval += retval / 4;
+
+ return (retval);
+}
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+/*
+ * __lock_region_maint --
+ * Return the amount of space needed for region maintenance info:
+ * one mutex pointer per lock in the table.
+ */
+static size_t
+__lock_region_maint(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t s;
+
+ s = sizeof(DB_MUTEX *) * dbenv->lk_max;
+ return (s);
+}
+#endif
+
+/*
+ * __lock_region_destroy
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __lock_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__lock_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((DB_LOCKREGION *)R_ADDR(infop, infop->rp->primary))->maint_off));
+
+ /* Silence unused-argument warnings when the above compiles away. */
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+}
+
+#ifdef CONFIG_TEST
+/*
+ * __lock_id_set --
+ * Set the current locker ID and current maximum unused ID (for
+ * testing purposes only).
+ *
+ * PUBLIC: int __lock_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__lock_id_set(dbenv, cur_id, max_id)
+ DB_ENV *dbenv;
+ u_int32_t cur_id, max_id;
+{
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
+
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "lock_id_set", DB_INIT_LOCK);
+
+ /* Overwrite the id counters in the shared region directly. */
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ region->stat.st_id = cur_id;
+ region->stat.st_cur_maxid = max_id;
+
+ return (0);
+}
+#endif
diff --git a/storage/bdb/lock/lock_stat.c b/storage/bdb/lock/lock_stat.c
new file mode 100644
index 00000000000..0bef3e18021
--- /dev/null
+++ b/storage/bdb/lock/lock_stat.c
@@ -0,0 +1,398 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_stat.c,v 11.32 2002/08/14 20:08:51 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <ctype.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/db_am.h"
+
+static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKER *, FILE *));
+static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
+static void __lock_printheader __P((void));
+
+/*
+ * __lock_stat --
+ * Return LOCK statistics. Allocates a DB_LOCK_STAT via __os_umalloc
+ * (the caller frees it) and copies the region's statistics out under
+ * the region lock.
+ *
+ * PUBLIC: int __lock_stat __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+ */
+int
+__lock_stat(dbenv, statp, flags)
+ DB_ENV *dbenv;
+ DB_LOCK_STAT **statp;
+ u_int32_t flags;
+{
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ DB_LOCK_STAT *stats, tmp;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_stat", DB_INIT_LOCK);
+
+ *statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->lock_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ lt = dbenv->lk_handle;
+
+ if ((ret = __os_umalloc(dbenv, sizeof(*stats), &stats)) != 0)
+ return (ret);
+
+ /* Copy out the global statistics. */
+ R_LOCK(dbenv, &lt->reginfo);
+
+ region = lt->reginfo.primary;
+ memcpy(stats, &region->stat, sizeof(*stats));
+ stats->st_locktimeout = region->lk_timeout;
+ stats->st_txntimeout = region->tx_timeout;
+
+ stats->st_region_wait = lt->reginfo.rp->mutex.mutex_set_wait;
+ stats->st_region_nowait = lt->reginfo.rp->mutex.mutex_set_nowait;
+ stats->st_regsize = lt->reginfo.rp->size;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ /*
+ * Reset the counters, but preserve configuration and
+ * current-state fields (ids, maxima, current counts).
+ */
+ tmp = region->stat;
+ memset(&region->stat, 0, sizeof(region->stat));
+ lt->reginfo.rp->mutex.mutex_set_wait = 0;
+ lt->reginfo.rp->mutex.mutex_set_nowait = 0;
+
+ region->stat.st_id = tmp.st_id;
+ region->stat.st_cur_maxid = tmp.st_cur_maxid;
+ region->stat.st_maxlocks = tmp.st_maxlocks;
+ region->stat.st_maxlockers = tmp.st_maxlockers;
+ region->stat.st_maxobjects = tmp.st_maxobjects;
+ region->stat.st_nlocks =
+ region->stat.st_maxnlocks = tmp.st_nlocks;
+ region->stat.st_nlockers =
+ region->stat.st_maxnlockers = tmp.st_nlockers;
+ region->stat.st_nobjects =
+ region->stat.st_maxnobjects = tmp.st_nobjects;
+ region->stat.st_nmodes = tmp.st_nmodes;
+ }
+
+ R_UNLOCK(dbenv, &lt->reginfo);
+
+ *statp = stats;
+ return (0);
+}
+
+#define LOCK_DUMP_CONF 0x001 /* Conflict matrix. */
+#define LOCK_DUMP_LOCKERS 0x002 /* Display lockers. */
+#define LOCK_DUMP_MEM 0x004 /* Display region memory. */
+#define LOCK_DUMP_OBJECTS 0x008 /* Display objects. */
+#define LOCK_DUMP_PARAMS 0x010 /* Display params. */
+#define LOCK_DUMP_ALL /* All */ \
+ (LOCK_DUMP_CONF | LOCK_DUMP_LOCKERS | LOCK_DUMP_MEM | \
+ LOCK_DUMP_OBJECTS | LOCK_DUMP_PARAMS)
+
+/*
+ * __lock_dump_region --
+ * Debugging dump of the lock region. "area" is a string of option
+ * characters selecting what to print: A(ll), c(onflict matrix),
+ * l(ockers), m(emory), o(bjects), p(arams).
+ *
+ * PUBLIC: int __lock_dump_region __P((DB_ENV *, char *, FILE *));
+ */
+int
+__lock_dump_region(dbenv, area, fp)
+ DB_ENV *dbenv;
+ char *area;
+ FILE *fp;
+{
+ DB_LOCKER *lip;
+ DB_LOCKOBJ *op;
+ DB_LOCKREGION *lrp;
+ DB_LOCKTAB *lt;
+ u_int32_t flags, i, j;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "lock_dump_region", DB_INIT_LOCK);
+
+ /* Make it easy to call from the debugger. */
+ if (fp == NULL)
+ fp = stderr;
+
+ /* Translate the option characters into dump flags. */
+ for (flags = 0; *area != '\0'; ++area)
+ switch (*area) {
+ case 'A':
+ LF_SET(LOCK_DUMP_ALL);
+ break;
+ case 'c':
+ LF_SET(LOCK_DUMP_CONF);
+ break;
+ case 'l':
+ LF_SET(LOCK_DUMP_LOCKERS);
+ break;
+ case 'm':
+ LF_SET(LOCK_DUMP_MEM);
+ break;
+ case 'o':
+ LF_SET(LOCK_DUMP_OBJECTS);
+ break;
+ case 'p':
+ LF_SET(LOCK_DUMP_PARAMS);
+ break;
+ }
+
+ lt = dbenv->lk_handle;
+ lrp = lt->reginfo.primary;
+ LOCKREGION(dbenv, lt);
+
+ if (LF_ISSET(LOCK_DUMP_PARAMS)) {
+ fprintf(fp, "%s\nLock region parameters\n", DB_LINE);
+ fprintf(fp,
+ "%s: %lu, %s: %lu, %s: %lu,\n%s: %lu, %s: %lu, %s: %lu, %s: %lu\n",
+ "locker table size", (u_long)lrp->locker_t_size,
+ "object table size", (u_long)lrp->object_t_size,
+ "obj_off", (u_long)lrp->obj_off,
+ "osynch_off", (u_long)lrp->osynch_off,
+ "locker_off", (u_long)lrp->locker_off,
+ "lsynch_off", (u_long)lrp->lsynch_off,
+ "need_dd", (u_long)lrp->need_dd);
+ }
+
+ if (LF_ISSET(LOCK_DUMP_CONF)) {
+ fprintf(fp, "\n%s\nConflict matrix\n", DB_LINE);
+ for (i = 0; i < lrp->stat.st_nmodes; i++) {
+ for (j = 0; j < lrp->stat.st_nmodes; j++)
+ fprintf(fp, "%lu\t", (u_long)
+ lt->conflicts[i * lrp->stat.st_nmodes + j]);
+ fprintf(fp, "\n");
+ }
+ }
+
+ if (LF_ISSET(LOCK_DUMP_LOCKERS)) {
+ fprintf(fp, "%s\nLocks grouped by lockers\n", DB_LINE);
+ __lock_printheader();
+ for (i = 0; i < lrp->locker_t_size; i++)
+ for (lip =
+ SH_TAILQ_FIRST(&lt->locker_tab[i], __db_locker);
+ lip != NULL;
+ lip = SH_TAILQ_NEXT(lip, links, __db_locker)) {
+ __lock_dump_locker(lt, lip, fp);
+ }
+ }
+
+ if (LF_ISSET(LOCK_DUMP_OBJECTS)) {
+ fprintf(fp, "%s\nLocks grouped by object\n", DB_LINE);
+ __lock_printheader();
+ for (i = 0; i < lrp->object_t_size; i++) {
+ for (op = SH_TAILQ_FIRST(&lt->obj_tab[i], __db_lockobj);
+ op != NULL;
+ op = SH_TAILQ_NEXT(op, links, __db_lockobj))
+ __lock_dump_object(lt, op, fp);
+ }
+ }
+
+ if (LF_ISSET(LOCK_DUMP_MEM))
+ __db_shalloc_dump(lt->reginfo.addr, fp);
+
+ UNLOCKREGION(dbenv, lt);
+
+ return (0);
+}
+
+/*
+ * __lock_dump_locker --
+ * Print one locker's id, lock counts, timeouts/expirations, and the
+ * locks it holds.
+ *
+ * NOTE(review): the summary line goes to fp, but __lock_printlock
+ * writes to stdout -- confirm the mixed streams are intentional.
+ */
+static void
+__lock_dump_locker(lt, lip, fp)
+ DB_LOCKTAB *lt;
+ DB_LOCKER *lip;
+ FILE *fp;
+{
+ struct __db_lock *lp;
+ time_t s;
+ char buf[64];
+
+ fprintf(fp, "%8lx dd=%2ld locks held %-4d write locks %-4d",
+ (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites);
+ fprintf(fp, " %s ", F_ISSET(lip, DB_LOCKER_DELETED) ? "(D)" : " ");
+ if (LOCK_TIME_ISVALID(&lip->tx_expire)) {
+ s = lip->tx_expire.tv_sec;
+ strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s));
+ fprintf(fp,
+ " expires %s.%lu", buf, (u_long)lip->tx_expire.tv_usec);
+ }
+ if (F_ISSET(lip, DB_LOCKER_TIMEOUT))
+ fprintf(fp, " lk timeout %u", lip->lk_timeout);
+ if (LOCK_TIME_ISVALID(&lip->lk_expire)) {
+ s = lip->lk_expire.tv_sec;
+ strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s));
+ fprintf(fp,
+ " lk expires %s.%lu", buf, (u_long)lip->lk_expire.tv_usec);
+ }
+ fprintf(fp, "\n");
+
+ /* Walk the list of locks held by this locker. */
+ lp = SH_LIST_FIRST(&lip->heldby, __db_lock);
+ if (lp != NULL) {
+ for (; lp != NULL;
+ lp = SH_LIST_NEXT(lp, locker_links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+ fprintf(fp, "\n");
+ }
+}
+
+/*
+ * __lock_dump_object --
+ * Print every lock on one object: first the holders, then the
+ * waiters.
+ */
+static void
+__lock_dump_object(lt, op, fp)
+ DB_LOCKTAB *lt;
+ DB_LOCKOBJ *op;
+ FILE *fp;
+{
+ struct __db_lock *lp;
+
+ for (lp =
+ SH_TAILQ_FIRST(&op->holders, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+ for (lp =
+ SH_TAILQ_FIRST(&op->waiters, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+
+ fprintf(fp, "\n");
+}
+
+/*
+ * __lock_printheader --
+ * Print the column header for lock listings. Note this writes to
+ * stdout, matching __lock_printlock, not the caller's FILE.
+ */
+static void
+__lock_printheader()
+{
+ printf("%-8s %-6s %-6s %-10s %s\n",
+ "Locker", "Mode",
+ "Count", "Status", "----------- Object ----------");
+}
+
+/*
+ * __lock_printlock --
+ * Print a single lock: holder, mode, refcount, status and the locked
+ * object. If ispgno is set and the object is the size of a DB page
+ * lock, decode it as page/record/handle plus file id; otherwise dump
+ * the raw object bytes. Output goes to stdout.
+ *
+ * PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
+ */
+void
+__lock_printlock(lt, lp, ispgno)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lp;
+ int ispgno;
+{
+ DB_LOCKOBJ *lockobj;
+ db_pgno_t pgno;
+ u_int32_t *fidp, type;
+ u_int8_t *ptr;
+ char *namep;
+ const char *mode, *status;
+
+ /* Decode the lock mode for display. */
+ switch (lp->mode) {
+ case DB_LOCK_DIRTY:
+ mode = "DIRTY_READ";
+ break;
+ case DB_LOCK_IREAD:
+ mode = "IREAD";
+ break;
+ case DB_LOCK_IWR:
+ mode = "IWR";
+ break;
+ case DB_LOCK_IWRITE:
+ mode = "IWRITE";
+ break;
+ case DB_LOCK_NG:
+ mode = "NG";
+ break;
+ case DB_LOCK_READ:
+ mode = "READ";
+ break;
+ case DB_LOCK_WRITE:
+ mode = "WRITE";
+ break;
+ case DB_LOCK_WWRITE:
+ mode = "WAS_WRITE";
+ break;
+ case DB_LOCK_WAIT:
+ mode = "WAIT";
+ break;
+ default:
+ mode = "UNKNOWN";
+ break;
+ }
+ /* Decode the lock status for display. */
+ switch (lp->status) {
+ case DB_LSTAT_ABORTED:
+ status = "ABORT";
+ break;
+ case DB_LSTAT_ERR:
+ status = "ERROR";
+ break;
+ case DB_LSTAT_FREE:
+ status = "FREE";
+ break;
+ case DB_LSTAT_HELD:
+ status = "HELD";
+ break;
+ case DB_LSTAT_WAITING:
+ status = "WAIT";
+ break;
+ case DB_LSTAT_PENDING:
+ status = "PENDING";
+ break;
+ case DB_LSTAT_EXPIRED:
+ status = "EXPIRED";
+ break;
+ default:
+ status = "UNKNOWN";
+ break;
+ }
+ printf("%8lx %-6s %6lu %-10s ",
+ (u_long)lp->holder, mode, (u_long)lp->refcount, status);
+
+ /* The object lives at a self-relative offset from the lock. */
+ lockobj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+ ptr = SH_DBT_PTR(&lockobj->lockobj);
+ if (ispgno && lockobj->lockobj.size == sizeof(struct __db_ilock)) {
+ /* Assume this is a DBT lock. */
+ memcpy(&pgno, ptr, sizeof(db_pgno_t));
+ fidp = (u_int32_t *)(ptr + sizeof(db_pgno_t));
+ type = *(u_int32_t *)(ptr + sizeof(db_pgno_t) + DB_FILE_ID_LEN);
+ if (__dbreg_get_name(lt->dbenv, (u_int8_t *)fidp, &namep) != 0)
+ namep = NULL;
+ if (namep == NULL)
+ printf("(%lx %lx %lx %lx %lx)",
+ (u_long)fidp[0], (u_long)fidp[1], (u_long)fidp[2],
+ (u_long)fidp[3], (u_long)fidp[4]);
+ else
+ printf("%-20s", namep);
+ printf("%-7s %lu\n",
+ type == DB_PAGE_LOCK ? "page" :
+ type == DB_RECORD_LOCK ? "record" : "handle",
+ (u_long)pgno);
+ } else {
+ /* Not a page lock: print the region offset and raw bytes. */
+ printf("0x%lx ", (u_long)R_OFFSET(&lt->reginfo, lockobj));
+ __db_pr(ptr, lockobj->lockobj.size, stdout);
+ printf("\n");
+ }
+}
diff --git a/storage/bdb/lock/lock_util.c b/storage/bdb/lock/lock_util.c
new file mode 100644
index 00000000000..260f021b1ee
--- /dev/null
+++ b/storage/bdb/lock/lock_util.c
@@ -0,0 +1,138 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: lock_util.c,v 11.8 2002/03/27 04:32:20 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+
+/*
+ * __lock_cmp --
+ * This function is used to compare a DBT that is about to be entered
+ * into a hash table with an object already in the hash table. Note
+ * that it just returns true on equal and 0 on not-equal. Therefore
+ * this function cannot be used as a sort function; its purpose is to
+ * be used as a hash comparison function.
+ *
+ * PUBLIC: int __lock_cmp __P((const DBT *, DB_LOCKOBJ *));
+ */
+int
+__lock_cmp(dbt, lock_obj)
+ const DBT *dbt;
+ DB_LOCKOBJ *lock_obj;
+{
+ void *obj_data;
+
+ /* Sizes must match before the bytes are worth comparing. */
+ obj_data = SH_DBT_PTR(&lock_obj->lockobj);
+ return (dbt->size == lock_obj->lockobj.size &&
+ memcmp(dbt->data, obj_data, dbt->size) == 0);
+}
+
+/*
+ * __lock_locker_cmp --
+ * Hash comparison function for the locker table: a locker matches
+ * when its 32-bit id equals the probe value.
+ *
+ * PUBLIC: int __lock_locker_cmp __P((u_int32_t, DB_LOCKER *));
+ */
+int
+__lock_locker_cmp(locker, sh_locker)
+ u_int32_t locker;
+ DB_LOCKER *sh_locker;
+{
+ return (locker == sh_locker->id);
+}
+
+/*
+ * The next two functions are the hash functions used to store objects in the
+ * lock hash tables. They are hashing the same items, but one (__lock_ohash)
+ * takes a DBT (used for hashing a parameter passed from the user) and the
+ * other (__lock_lhash) takes a DB_LOCKOBJ (used for hashing something that is
+ * already in the lock manager). In both cases, we have a special check to
+ * fast path the case where we think we are doing a hash on a DB page/fileid
+ * pair. If the size is right, then we do the fast hash.
+ *
+ * We know that DB uses DB_LOCK_ILOCK types for its lock objects. The first
+ * four bytes are the 4-byte page number and the next DB_FILE_ID_LEN bytes
+ * are a unique file id, where the first 4 bytes on UNIX systems are the file
+ * inode number, and the first 4 bytes on Windows systems are the FileIndexLow
+ * bytes. So, we use the XOR of the page number and the first four bytes of
+ * the file id to produce a 32-bit hash value.
+ *
+ * We have no particular reason to believe that this algorithm will produce
+ * a good hash, but we want a fast hash more than we want a good one, when
+ * we're coming through this code path.
+ */
+/*
+ * FAST_HASH reads 8 bytes starting at P; callers check the object
+ * size against sizeof(DB_LOCK_ILOCK) first, which is assumed to be
+ * at least 8 bytes.
+ */
+#define FAST_HASH(P) { \
+ u_int32_t __h; \
+ u_int8_t *__cp, *__hp; \
+ __hp = (u_int8_t *)&__h; \
+ __cp = (u_int8_t *)(P); \
+ __hp[0] = __cp[0] ^ __cp[4]; \
+ __hp[1] = __cp[1] ^ __cp[5]; \
+ __hp[2] = __cp[2] ^ __cp[6]; \
+ __hp[3] = __cp[3] ^ __cp[7]; \
+ return (__h); \
+}
+
+/*
+ * __lock_ohash --
+ * Hash a user-supplied DBT lock object. Fast path for page-lock
+ * sized objects; otherwise fall back to the general hash.
+ *
+ * PUBLIC: u_int32_t __lock_ohash __P((const DBT *));
+ */
+u_int32_t
+__lock_ohash(dbt)
+ const DBT *dbt;
+{
+ if (dbt->size == sizeof(DB_LOCK_ILOCK))
+ FAST_HASH(dbt->data);
+
+ return (__ham_func5(NULL, dbt->data, dbt->size));
+}
+
+/*
+ * __lock_lhash --
+ * Hash a lock object already stored in the lock manager; must
+ * produce the same value as __lock_ohash for the same bytes.
+ *
+ * PUBLIC: u_int32_t __lock_lhash __P((DB_LOCKOBJ *));
+ */
+u_int32_t
+__lock_lhash(lock_obj)
+ DB_LOCKOBJ *lock_obj;
+{
+ void *obj_data;
+
+ obj_data = SH_DBT_PTR(&lock_obj->lockobj);
+
+ if (lock_obj->lockobj.size == sizeof(DB_LOCK_ILOCK))
+ FAST_HASH(obj_data);
+
+ return (__ham_func5(NULL, obj_data, lock_obj->lockobj.size));
+}
+
+/*
+ * __lock_locker_hash --
+ * Hash function for entering lockers into the locker hash table.
+ * Since these are simply 32-bit unsigned integers, just return
+ * the locker value.
+ *
+ * PUBLIC: u_int32_t __lock_locker_hash __P((u_int32_t));
+ */
+u_int32_t
+__lock_locker_hash(locker)
+ u_int32_t locker;
+{
+ return (locker);
+}
diff --git a/storage/bdb/log/log.c b/storage/bdb/log/log.c
new file mode 100644
index 00000000000..f57caeccb95
--- /dev/null
+++ b/storage/bdb/log/log.c
@@ -0,0 +1,1084 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log.c,v 11.111 2002/08/16 00:27:44 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __log_init __P((DB_ENV *, DB_LOG *));
+static int __log_recover __P((DB_LOG *));
+static size_t __log_region_size __P((DB_ENV *));
+static int __log_zero __P((DB_ENV *, DB_LSN *, DB_LSN *));
+
/*
 * __log_open --
 *	Internal version of log_open: only called from DB_ENV->open.
 *
 *	Allocates the per-process DB_LOG handle, creates or joins the
 *	shared log region, runs log recovery if this process created the
 *	region, and leaves dbenv->lg_handle set on success.
 *
 * PUBLIC: int __log_open __P((DB_ENV *));
 */
int
__log_open(dbenv)
	DB_ENV *dbenv;
{
	DB_LOG *dblp;
	LOG *lp;
	int ret;

	/* Create/initialize the DB_LOG structure. */
	if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOG), &dblp)) != 0)
		return (ret);
	dblp->dbenv = dbenv;

	/* Join/create the log region. */
	dblp->reginfo.type = REGION_TYPE_LOG;
	dblp->reginfo.id = INVALID_REGION_ID;
	dblp->reginfo.mode = dbenv->db_mode;
	dblp->reginfo.flags = REGION_JOIN_OK;
	if (F_ISSET(dbenv, DB_ENV_CREATE))
		F_SET(&dblp->reginfo, REGION_CREATE_OK);
	/* On success the region is attached and locked until R_UNLOCK. */
	if ((ret = __db_r_attach(
	    dbenv, &dblp->reginfo, __log_region_size(dbenv))) != 0)
		goto err;

	/* If we created the region, initialize it. */
	if (F_ISSET(&dblp->reginfo, REGION_CREATE))
		if ((ret = __log_init(dbenv, dblp)) != 0)
			goto err;

	/* Set the local addresses. */
	lp = dblp->reginfo.primary =
	    R_ADDR(&dblp->reginfo, dblp->reginfo.rp->primary);

	/*
	 * If the region is threaded, then we have to lock both the handles
	 * and the region, and we need to allocate a mutex for that purpose.
	 */
	if (F_ISSET(dbenv, DB_ENV_THREAD) &&
	    (ret = __db_mutex_setup(dbenv, &dblp->reginfo, &dblp->mutexp,
	    MUTEX_ALLOC | MUTEX_NO_RLOCK)) != 0)
		goto err;

	/* Initialize the rest of the structure. */
	dblp->bufp = R_ADDR(&dblp->reginfo, lp->buffer_off);

	/*
	 * Set the handle -- we may be about to run recovery, which allocates
	 * log cursors.  Log cursors require logging be already configured,
	 * and the handle being set is what demonstrates that.
	 *
	 * If we created the region, run recovery.  If that fails, make sure
	 * we reset the log handle before cleaning up, otherwise we will try
	 * and clean up again in the mainline DB_ENV initialization code.
	 */
	dbenv->lg_handle = dblp;

	if (F_ISSET(&dblp->reginfo, REGION_CREATE)) {
		if ((ret = __log_recover(dblp)) != 0) {
			dbenv->lg_handle = NULL;
			goto err;
		}

		/*
		 * We first take the log file size from the environment, if
		 * specified.  If that wasn't set, recovery may have set it
		 * from the persistent information in a log file header.  If
		 * that didn't set it either, we default.
		 */
		if (lp->log_size == 0)
			lp->log_size = lp->log_nsize = LG_MAX_DEFAULT;
	} else {
		/*
		 * A process joining the region may have reset the log file
		 * size, too.  If so, it only affects the next log file we
		 * create.
		 */
		if (dbenv->lg_size != 0)
			lp->log_nsize = dbenv->lg_size;
	}

	R_UNLOCK(dbenv, &dblp->reginfo);
	return (0);

	/*
	 * Error cleanup: if we created the region and then failed, the
	 * environment is unusable, so panic it before detaching.
	 */
err:	if (dblp->reginfo.addr != NULL) {
		if (F_ISSET(&dblp->reginfo, REGION_CREATE))
			ret = __db_panic(dbenv, ret);
		R_UNLOCK(dbenv, &dblp->reginfo);
		(void)__db_r_detach(dbenv, &dblp->reginfo, 0);
	}

	if (dblp->mutexp != NULL)
		__db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp);

	__os_free(dbenv, dblp);

	return (ret);
}
+
/*
 * __log_init --
 *	Initialize a log region in shared memory.
 *
 *	Allocates and zeroes the primary LOG structure, sets up the file
 *	queue, LSNs, mutexes, the in-memory log buffer, and the persistent
 *	log-file header template.  Called only by the region creator.
 */
static int
__log_init(dbenv, dblp)
	DB_ENV *dbenv;
	DB_LOG *dblp;
{
	DB_MUTEX *flush_mutexp;
	LOG *region;
	int ret;
	void *p;
#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
	u_int8_t *addr;
#endif

	/*
	 * NB: the mem_err label below is shared by all of the shared-memory
	 * allocation sites in this function.
	 */
	if ((ret = __db_shalloc(dblp->reginfo.addr,
	    sizeof(*region), 0, &dblp->reginfo.primary)) != 0)
		goto mem_err;
	dblp->reginfo.rp->primary =
	    R_OFFSET(&dblp->reginfo, dblp->reginfo.primary);
	region = dblp->reginfo.primary;
	memset(region, 0, sizeof(*region));

	region->fid_max = 0;
	SH_TAILQ_INIT(&region->fq);
	region->free_fid_stack = INVALID_ROFF;
	region->free_fids = region->free_fids_alloced = 0;

	/* Initialize LOG LSNs. */
	INIT_LSN(region->lsn);
	INIT_LSN(region->ready_lsn);
	INIT_LSN(region->t_lsn);

	/*
	 * It's possible to be waiting for an LSN of [1][0], if a replication
	 * client gets the first log record out of order.  An LSN of [0][0]
	 * signifies that we're not waiting.
	 */
	ZERO_LSN(region->waiting_lsn);

	/*
	 * Log makes note of the fact that it ran into a checkpoint on
	 * startup if it did so, as a recovery optimization.  A zero
	 * LSN signifies that it hasn't found one [yet].
	 */
	ZERO_LSN(region->cached_ckp_lsn);

#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
	/* Allocate room for the log maintenance info and initialize it. */
	if ((ret = __db_shalloc(dblp->reginfo.addr,
	    sizeof(REGMAINT) + LG_MAINT_SIZE, 0, &addr)) != 0)
		goto mem_err;
	__db_maintinit(&dblp->reginfo, addr, LG_MAINT_SIZE);
	region->maint_off = R_OFFSET(&dblp->reginfo, addr);
#endif

	if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, &region->fq_mutex,
	    MUTEX_NO_RLOCK)) != 0)
		return (ret);

	/*
	 * We must create a place for the flush mutex separately; mutexes have
	 * to be aligned to MUTEX_ALIGN, and the only way to guarantee that is
	 * to make sure they're at the beginning of a shalloc'ed chunk.
	 */
	if ((ret = __db_shalloc(dblp->reginfo.addr,
	    sizeof(DB_MUTEX), MUTEX_ALIGN, &flush_mutexp)) != 0)
		goto mem_err;
	if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, flush_mutexp,
	    MUTEX_NO_RLOCK)) != 0)
		return (ret);
	region->flush_mutex_off = R_OFFSET(&dblp->reginfo, flush_mutexp);

	/* Initialize the buffer. */
	if ((ret =
	    __db_shalloc(dblp->reginfo.addr, dbenv->lg_bsize, 0, &p)) != 0) {
mem_err:	__db_err(dbenv, "Unable to allocate memory for the log buffer");
		return (ret);
	}
	region->buffer_size = dbenv->lg_bsize;
	region->buffer_off = R_OFFSET(&dblp->reginfo, p);
	region->log_size = region->log_nsize = dbenv->lg_size;

	/* Initialize the commit Queue. */
	SH_TAILQ_INIT(&region->free_commits);
	SH_TAILQ_INIT(&region->commits);
	region->ncommit = 0;

	/*
	 * Fill in the log's persistent header.  Don't fill in the log file
	 * sizes, as they may change at any time and so have to be filled in
	 * as each log file is created.
	 */
	region->persist.magic = DB_LOGMAGIC;
	region->persist.version = DB_LOGVERSION;
	region->persist.mode = (u_int32_t)dbenv->db_mode;

	return (0);
}
+
/*
 * __log_recover --
 *	Recover a log.
 *
 *	Finds the last log file on disk, walks it to its end to locate the
 *	end-of-log LSN (caching any checkpoint LSN seen along the way), and
 *	initializes the region's LSN and buffer bookkeeping accordingly.
 */
static int
__log_recover(dblp)
	DB_LOG *dblp;
{
	DBT dbt;
	DB_ENV *dbenv;
	DB_LOGC *logc;
	DB_LSN lsn;
	LOG *lp;
	u_int32_t cnt, rectype;
	int ret;
	logfile_validity status;

	logc = NULL;
	dbenv = dblp->dbenv;
	lp = dblp->reginfo.primary;

	/*
	 * Find a log file.  If none exist, we simply return, leaving
	 * everything initialized to a new log.
	 */
	if ((ret = __log_find(dblp, 0, &cnt, &status)) != 0)
		return (ret);
	if (cnt == 0)
		return (0);

	/*
	 * If the last file is an old version, readable or no, start a new
	 * file.  Don't bother finding the end of the last log file;
	 * we assume that it's valid in its entirety, since the user
	 * should have shut down cleanly or run recovery before upgrading.
	 */
	if (status == DB_LV_OLD_READABLE || status == DB_LV_OLD_UNREADABLE) {
		lp->lsn.file = lp->s_lsn.file = cnt + 1;
		lp->lsn.offset = lp->s_lsn.offset = 0;
		goto skipsearch;
	}
	DB_ASSERT(status == DB_LV_NORMAL);

	/*
	 * We have the last useful log file and we've loaded any persistent
	 * information.  Set the end point of the log past the end of the last
	 * file. Read the last file, looking for the last checkpoint and
	 * the log's end.
	 */
	lp->lsn.file = cnt + 1;
	lp->lsn.offset = 0;
	lsn.file = cnt;
	lsn.offset = 0;

	/*
	 * Allocate a cursor and set it to the first record.  This shouldn't
	 * fail, leave error messages on.
	 */
	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
		return (ret);
	F_SET(logc, DB_LOG_LOCKED);
	memset(&dbt, 0, sizeof(dbt));
	/* DB_SET positions at the first record of the last log file. */
	if ((ret = logc->get(logc, &lsn, &dbt, DB_SET)) != 0)
		goto err;

	/*
	 * Read to the end of the file.  This may fail at some point, so
	 * turn off error messages.
	 */
	F_SET(logc, DB_LOG_SILENT_ERR);
	while (logc->get(logc, &lsn, &dbt, DB_NEXT) == 0) {
		if (dbt.size < sizeof(u_int32_t))
			continue;
		memcpy(&rectype, dbt.data, sizeof(u_int32_t));
		if (rectype == DB___txn_ckp)
			/*
			 * If we happen to run into a checkpoint, cache its
			 * LSN so that the transaction system doesn't have
			 * to walk this log file again looking for it.
			 */
			lp->cached_ckp_lsn = lsn;
	}
	F_CLR(logc, DB_LOG_SILENT_ERR);

	/*
	 * We now know where the end of the log is.  Set the first LSN that
	 * we want to return to an application and the LSN of the last known
	 * record on disk.  logc->c_len is the length of the final record.
	 */
	lp->lsn = lsn;
	lp->s_lsn = lsn;
	lp->lsn.offset += logc->c_len;
	lp->s_lsn.offset += logc->c_len;

	/* Set up the current buffer information, too. */
	lp->len = logc->c_len;
	lp->b_off = 0;
	lp->w_off = lp->lsn.offset;

skipsearch:
	if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY))
		__db_err(dbenv,
		    "Finding last valid log LSN: file: %lu offset %lu",
		    (u_long)lp->lsn.file, (u_long)lp->lsn.offset);

err:	if (logc != NULL)
		(void)logc->close(logc, 0);

	return (ret);
}
+
+/*
+ * __log_find --
+ * Try to find a log file. If find_first is set, valp will contain
+ * the number of the first readable log file, else it will contain the number
+ * of the last log file (which may be too old to read).
+ *
+ * PUBLIC: int __log_find __P((DB_LOG *, int, u_int32_t *, logfile_validity *));
+ */
+int
+__log_find(dblp, find_first, valp, statusp)
+ DB_LOG *dblp;
+ int find_first;
+ u_int32_t *valp;
+ logfile_validity *statusp;
+{
+ DB_ENV *dbenv;
+ logfile_validity logval_status, status;
+ u_int32_t clv, logval;
+ int cnt, fcnt, ret;
+ const char *dir;
+ char *c, **names, *p, *q, savech;
+
+ dbenv = dblp->dbenv;
+ logval_status = status = DB_LV_NONEXISTENT;
+
+ /* Return a value of 0 as the log file number on failure. */
+ *valp = 0;
+
+ /* Find the directory name. */
+ if ((ret = __log_name(dblp, 1, &p, NULL, 0)) != 0)
+ return (ret);
+ if ((q = __db_rpath(p)) == NULL) {
+ COMPQUIET(savech, 0);
+ dir = PATH_DOT;
+ } else {
+ savech = *q;
+ *q = '\0';
+ dir = p;
+ }
+
+ /* Get the list of file names. */
+ ret = __os_dirlist(dbenv, dir, &names, &fcnt);
+
+ /*
+ * !!!
+ * We overwrote a byte in the string with a nul. Restore the string
+ * so that the diagnostic checks in the memory allocation code work
+ * and any error messages display the right file name.
+ */
+ if (q != NULL)
+ *q = savech;
+
+ if (ret != 0) {
+ __db_err(dbenv, "%s: %s", dir, db_strerror(ret));
+ __os_free(dbenv, p);
+ return (ret);
+ }
+
+ /* Search for a valid log file name. */
+ for (cnt = fcnt, clv = logval = 0; --cnt >= 0;) {
+ if (strncmp(names[cnt], LFPREFIX, sizeof(LFPREFIX) - 1) != 0)
+ continue;
+
+ /*
+ * Names of the form log\.[0-9]* are reserved for DB. Other
+ * names sharing LFPREFIX, such as "log.db", are legal.
+ */
+ for (c = names[cnt] + sizeof(LFPREFIX) - 1; *c != '\0'; c++)
+ if (!isdigit((int)*c))
+ break;
+ if (*c != '\0')
+ continue;
+
+ /*
+ * Use atol, not atoi; if an "int" is 16-bits, the largest
+ * log file name won't fit.
+ */
+ clv = atol(names[cnt] + (sizeof(LFPREFIX) - 1));
+
+ /*
+ * If searching for the first log file, we want to return the
+ * oldest log file we can read, or, if no readable log files
+ * exist, the newest log file we can't read (the crossover
+ * point between the old and new versions of the log file).
+ *
+ * If we're searching for the last log file, we want to return
+ * the newest log file, period.
+ *
+ * Readable log files should never preceede unreadable log
+ * files, that would mean the admin seriously screwed up.
+ */
+ if (find_first) {
+ if (logval != 0 &&
+ status != DB_LV_OLD_UNREADABLE && clv > logval)
+ continue;
+ } else
+ if (logval != 0 && clv < logval)
+ continue;
+
+ if ((ret = __log_valid(dblp, clv, 1, &status)) != 0) {
+ __db_err(dbenv, "Invalid log file: %s: %s",
+ names[cnt], db_strerror(ret));
+ goto err;
+ }
+ switch (status) {
+ case DB_LV_NONEXISTENT:
+ /* __log_valid never returns DB_LV_NONEXISTENT. */
+ DB_ASSERT(0);
+ break;
+ case DB_LV_INCOMPLETE:
+ /*
+ * The last log file may not have been initialized --
+ * it's possible to create a log file but not write
+ * anything to it. If performing recovery (that is,
+ * if find_first isn't set), ignore the file, it's
+ * not interesting. If we're searching for the first
+ * log record, return the file (assuming we don't find
+ * something better), as the "real" first log record
+ * is likely to be in the log buffer, and we want to
+ * set the file LSN for our return.
+ */
+ if (find_first)
+ goto found;
+ break;
+ case DB_LV_OLD_UNREADABLE:
+ /*
+ * If we're searching for the first log file, then we
+ * only want this file if we don't yet have a file or
+ * already have an unreadable file and this one is
+ * newer than that one. If we're searching for the
+ * last log file, we always want this file because we
+ * wouldn't be here if it wasn't newer than our current
+ * choice.
+ */
+ if (!find_first || logval == 0 ||
+ (status == DB_LV_OLD_UNREADABLE && clv > logval))
+ goto found;
+ break;
+ case DB_LV_NORMAL:
+ case DB_LV_OLD_READABLE:
+found: logval = clv;
+ logval_status = status;
+ break;
+ }
+ }
+
+ *valp = logval;
+
+err: __os_dirfree(dbenv, names, fcnt);
+ __os_free(dbenv, p);
+ *statusp = logval_status;
+
+ return (ret);
+}
+
+/*
+ * log_valid --
+ * Validate a log file. Returns an error code in the event of
+ * a fatal flaw in a the specified log file; returns success with
+ * a code indicating the currentness and completeness of the specified
+ * log file if it is not unexpectedly flawed (that is, if it's perfectly
+ * normal, if it's zero-length, or if it's an old version).
+ *
+ * PUBLIC: int __log_valid __P((DB_LOG *, u_int32_t, int, logfile_validity *));
+ */
+int
+__log_valid(dblp, number, set_persist, statusp)
+ DB_LOG *dblp;
+ u_int32_t number;
+ int set_persist;
+ logfile_validity *statusp;
+{
+ DB_CIPHER *db_cipher;
+ DB_ENV *dbenv;
+ DB_FH fh;
+ HDR *hdr;
+ LOG *region;
+ LOGP *persist;
+ logfile_validity status;
+ size_t hdrsize, nw, recsize;
+ int is_hmac, need_free, ret;
+ u_int8_t *tmp;
+ char *fname;
+
+ dbenv = dblp->dbenv;
+ db_cipher = dbenv->crypto_handle;
+ persist = NULL;
+ status = DB_LV_NORMAL;
+
+ /* Try to open the log file. */
+ if ((ret = __log_name(dblp,
+ number, &fname, &fh, DB_OSO_RDONLY | DB_OSO_SEQ)) != 0) {
+ __os_free(dbenv, fname);
+ return (ret);
+ }
+
+ need_free = 0;
+ hdrsize = HDR_NORMAL_SZ;
+ is_hmac = 0;
+ recsize = sizeof(LOGP);
+ if (CRYPTO_ON(dbenv)) {
+ hdrsize = HDR_CRYPTO_SZ;
+ recsize = sizeof(LOGP);
+ recsize += db_cipher->adj_size(recsize);
+ is_hmac = 1;
+ }
+ if ((ret = __os_calloc(dbenv, 1, recsize + hdrsize, &tmp)) != 0)
+ return (ret);
+ need_free = 1;
+ hdr = (HDR *)tmp;
+ persist = (LOGP *)(tmp + hdrsize);
+ /* Try to read the header. */
+ if ((ret = __os_read(dbenv, &fh, tmp, recsize + hdrsize, &nw)) != 0 ||
+ nw != recsize + hdrsize) {
+ if (ret == 0)
+ status = DB_LV_INCOMPLETE;
+ else
+ /*
+ * The error was a fatal read error, not just an
+ * incompletely initialized log file.
+ */
+ __db_err(dbenv, "Ignoring log file: %s: %s",
+ fname, db_strerror(ret));
+
+ (void)__os_closehandle(dbenv, &fh);
+ goto err;
+ }
+ (void)__os_closehandle(dbenv, &fh);
+
+ /*
+ * Now we have to validate the persistent record. We have
+ * several scenarios we have to deal with:
+ *
+ * 1. User has crypto turned on:
+ * - They're reading an old, unencrypted log file
+ * . We will fail the record size match check below.
+ * - They're reading a current, unencrypted log file
+ * . We will fail the record size match check below.
+ * - They're reading an old, encrypted log file [NOT YET]
+ * . After decryption we'll fail the version check. [NOT YET]
+ * - They're reading a current, encrypted log file
+ * . We should proceed as usual.
+ * 2. User has crypto turned off:
+ * - They're reading an old, unencrypted log file
+ * . We will fail the version check.
+ * - They're reading a current, unencrypted log file
+ * . We should proceed as usual.
+ * - They're reading an old, encrypted log file [NOT YET]
+ * . We'll fail the magic number check (it is encrypted).
+ * - They're reading a current, encrypted log file
+ * . We'll fail the magic number check (it is encrypted).
+ */
+ if (CRYPTO_ON(dbenv)) {
+ /*
+ * If we are trying to decrypt an unencrypted log
+ * we can only detect that by having an unreasonable
+ * data length for our persistent data.
+ */
+ if ((hdr->len - hdrsize) != sizeof(LOGP)) {
+ __db_err(dbenv, "log record size mismatch");
+ goto err;
+ }
+ /* Check the checksum and decrypt. */
+ if ((ret = __db_check_chksum(dbenv, db_cipher, &hdr->chksum[0],
+ (u_int8_t *)persist, hdr->len - hdrsize, is_hmac)) != 0) {
+ __db_err(dbenv, "log record checksum mismatch");
+ goto err;
+ }
+ if ((ret = db_cipher->decrypt(dbenv, db_cipher->data,
+ &hdr->iv[0], (u_int8_t *)persist, hdr->len - hdrsize)) != 0)
+ goto err;
+ }
+
+ /* Validate the header. */
+ if (persist->magic != DB_LOGMAGIC) {
+ __db_err(dbenv,
+ "Ignoring log file: %s: magic number %lx, not %lx",
+ fname, (u_long)persist->magic, (u_long)DB_LOGMAGIC);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Set our status code to indicate whether the log file
+ * belongs to an unreadable or readable old version; leave it
+ * alone if and only if the log file version is the current one.
+ */
+ if (persist->version > DB_LOGVERSION) {
+ /* This is a fatal error--the log file is newer than DB. */
+ __db_err(dbenv,
+ "Ignoring log file: %s: unsupported log version %lu",
+ fname, (u_long)persist->version);
+ ret = EINVAL;
+ goto err;
+ } else if (persist->version < DB_LOGOLDVER) {
+ status = DB_LV_OLD_UNREADABLE;
+ /*
+ * We don't want to set persistent info based on an
+ * unreadable region, so jump to "err".
+ */
+ goto err;
+ } else if (persist->version < DB_LOGVERSION)
+ status = DB_LV_OLD_READABLE;
+
+ /*
+ * Only if we have a current log do we verify the checksum.
+ * We could not check the checksum before checking the magic
+ * and version because old log hdrs have the length and checksum
+ * in a different location.
+ */
+ if (!CRYPTO_ON(dbenv) && ((ret = __db_check_chksum(dbenv,
+ db_cipher, &hdr->chksum[0], (u_int8_t *)persist,
+ hdr->len - hdrsize, is_hmac)) != 0)) {
+ __db_err(dbenv, "log record checksum mismatch");
+ goto err;
+ }
+
+ /*
+ * If the log is readable so far and we're doing system initialization,
+ * set the region's persistent information based on the headers.
+ *
+ * Always set the current log file size. Only set the next log file's
+ * size if the application hasn't set it already.
+ *
+ * XXX
+ * Always use the persistent header's mode, regardless of what was set
+ * in the current environment. We've always done it this way, but it's
+ * probably a bug -- I can't think of a way not-changing the mode would
+ * be a problem, though.
+ */
+ if (set_persist) {
+ region = dblp->reginfo.primary;
+ region->log_size = persist->log_size;
+ if (region->log_nsize == 0)
+ region->log_nsize = persist->log_size;
+ region->persist.mode = persist->mode;
+ }
+
+err: __os_free(dbenv, fname);
+ if (need_free)
+ __os_free(dbenv, tmp);
+ *statusp = status;
+ return (ret);
+}
+
/*
 * __log_dbenv_refresh --
 *	Clean up after the log system on a close or failed open.  Called only
 *	from __dbenv_refresh.  (Formerly called __log_close.)
 *
 *	Returns the first error encountered, but continues tearing down the
 *	remaining resources regardless.
 *
 * PUBLIC: int __log_dbenv_refresh __P((DB_ENV *));
 */
int
__log_dbenv_refresh(dbenv)
	DB_ENV *dbenv;
{
	DB_LOG *dblp;
	int ret, t_ret;

	dblp = dbenv->lg_handle;

	/* We may have opened files as part of XA; if so, close them. */
	F_SET(dblp, DBLOG_RECOVER);
	ret = __dbreg_close_files(dbenv);

	/* Discard the per-thread lock. */
	if (dblp->mutexp != NULL)
		__db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp);

	/* Detach from the region. */
	if ((t_ret =
	    __db_r_detach(dbenv, &dblp->reginfo, 0)) != 0 && ret == 0)
		ret = t_ret;

	/* Close open files, release allocated memory. */
	if (F_ISSET(&dblp->lfh, DB_FH_VALID) &&
	    (t_ret = __os_closehandle(dbenv, &dblp->lfh)) != 0 && ret == 0)
		ret = t_ret;
	if (dblp->dbentry != NULL)
		__os_free(dbenv, dblp->dbentry);

	__os_free(dbenv, dblp);

	/* The handle is gone; clear it so the caller doesn't reuse it. */
	dbenv->lg_handle = NULL;
	return (ret);
}
+
/*
 * __log_stat --
 *	Return log statistics.
 *
 *	Allocates a DB_LOG_STAT structure with __os_umalloc (the caller is
 *	responsible for freeing it), fills it in under the region lock, and
 *	optionally clears the region's counters if DB_STAT_CLEAR is set.
 *
 * PUBLIC: int __log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
 */
int
__log_stat(dbenv, statp, flags)
	DB_ENV *dbenv;
	DB_LOG_STAT **statp;
	u_int32_t flags;
{
	DB_LOG *dblp;
	DB_LOG_STAT *stats;
	LOG *region;
	int ret;

	PANIC_CHECK(dbenv);
	ENV_REQUIRES_CONFIG(dbenv,
	    dbenv->lg_handle, "DB_ENV->log_stat", DB_INIT_LOG);

	*statp = NULL;
	/* DB_STAT_CLEAR is the only legal flag. */
	if ((ret = __db_fchk(dbenv,
	    "DB_ENV->log_stat", flags, DB_STAT_CLEAR)) != 0)
		return (ret);

	dblp = dbenv->lg_handle;
	region = dblp->reginfo.primary;

	if ((ret = __os_umalloc(dbenv, sizeof(DB_LOG_STAT), &stats)) != 0)
		return (ret);

	/* Copy out the global statistics. */
	R_LOCK(dbenv, &dblp->reginfo);
	*stats = region->stat;
	if (LF_ISSET(DB_STAT_CLEAR))
		memset(&region->stat, 0, sizeof(region->stat));

	stats->st_magic = region->persist.magic;
	stats->st_version = region->persist.version;
	stats->st_mode = region->persist.mode;
	stats->st_lg_bsize = region->buffer_size;
	stats->st_lg_size = region->log_nsize;

	/* Region mutex contention counters live in the region descriptor. */
	stats->st_region_wait = dblp->reginfo.rp->mutex.mutex_set_wait;
	stats->st_region_nowait = dblp->reginfo.rp->mutex.mutex_set_nowait;
	if (LF_ISSET(DB_STAT_CLEAR)) {
		dblp->reginfo.rp->mutex.mutex_set_wait = 0;
		dblp->reginfo.rp->mutex.mutex_set_nowait = 0;
	}
	stats->st_regsize = dblp->reginfo.rp->size;

	stats->st_cur_file = region->lsn.file;
	stats->st_cur_offset = region->lsn.offset;
	stats->st_disk_file = region->s_lsn.file;
	stats->st_disk_offset = region->s_lsn.offset;

	R_UNLOCK(dbenv, &dblp->reginfo);

	*statp = stats;
	return (0);
}
+
+/*
+ * __log_get_cached_ckp_lsn --
+ * Retrieve any last checkpoint LSN that we may have found on startup.
+ *
+ * PUBLIC: void __log_get_cached_ckp_lsn __P((DB_ENV *, DB_LSN *));
+ */
+void
+__log_get_cached_ckp_lsn(dbenv, ckp_lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *ckp_lsnp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+
+ dblp = (DB_LOG *)dbenv->lg_handle;
+ lp = (LOG *)dblp->reginfo.primary;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ *ckp_lsnp = lp->cached_ckp_lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+}
+
+/*
+ * __log_region_size --
+ * Return the amount of space needed for the log region.
+ * Make the region large enough to hold txn_max transaction
+ * detail structures plus some space to hold thread handles
+ * and the beginning of the shalloc region and anything we
+ * need for mutex system resource recording.
+ */
+static size_t
+__log_region_size(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t s;
+
+ s = dbenv->lg_regionmax + dbenv->lg_bsize;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ s += sizeof(REGMAINT) + LG_MAINT_SIZE;
+#endif
+ return (s);
+}
+
/*
 * __log_region_destroy
 *	Destroy any region maintenance info.
 *
 * PUBLIC: void __log_region_destroy __P((DB_ENV *, REGINFO *));
 */
void
__log_region_destroy(dbenv, infop)
	DB_ENV *dbenv;
	REGINFO *infop;
{
	/* Tear down the shared locks recorded in the maintenance area. */
	__db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
	    ((LOG *)R_ADDR(infop, infop->rp->primary))->maint_off));

	/*
	 * NOTE(review): both parameters are used unconditionally above, so
	 * these COMPQUIET calls appear redundant -- presumably kept for
	 * builds where the macros above compile away; harmless either way.
	 */
	COMPQUIET(dbenv, NULL);
	COMPQUIET(infop, NULL);
}
+
/*
 * __log_vtruncate
 *	This is a virtual truncate.  We set up the log indicators to
 *	make everyone believe that the given record is the last one in the
 *	log.  Returns with the next valid LSN (i.e., the LSN of the next
 *	record to be written).  This is used in replication to discard records
 *	in the log file that do not agree with the master.
 *
 * PUBLIC: int __log_vtruncate __P((DB_ENV *, DB_LSN *, DB_LSN *));
 */
int
__log_vtruncate(dbenv, lsn, ckplsn)
	DB_ENV *dbenv;
	DB_LSN *lsn, *ckplsn;
{
	DBT log_dbt;
	DB_FH fh;
	DB_LOG *dblp;
	DB_LOGC *logc;
	DB_LSN end_lsn;
	LOG *lp;
	u_int32_t bytes, c_len;
	int fn, ret, t_ret;
	char *fname;

	/* Need to find out the length of this soon-to-be-last record. */
	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
		return (ret);
	memset(&log_dbt, 0, sizeof(log_dbt));
	ret = logc->get(logc, lsn, &log_dbt, DB_SET);
	c_len = logc->c_len;
	if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
		ret = t_ret;
	if (ret != 0)
		return (ret);

	/* Now do the truncate.  The region lock is held until we're done. */
	dblp = (DB_LOG *)dbenv->lg_handle;
	lp = (LOG *)dblp->reginfo.primary;

	R_LOCK(dbenv, &dblp->reginfo);
	end_lsn = lp->lsn;
	lp->lsn = *lsn;
	lp->len = c_len;
	/* lp->lsn becomes the LSN of the next record to be written. */
	lp->lsn.offset += lp->len;

	/*
	 * I am going to assume that the number of bytes written since
	 * the last checkpoint doesn't exceed a 32-bit number.
	 */
	DB_ASSERT(lp->lsn.file >= ckplsn->file);
	bytes = 0;
	if (ckplsn->file != lp->lsn.file) {
		bytes = lp->log_size - ckplsn->offset;
		if (lp->lsn.file > ckplsn->file + 1)
			bytes += lp->log_size *
			    (lp->lsn.file - ckplsn->file - 1);
		bytes += lp->lsn.offset;
	} else
		bytes = lp->lsn.offset - ckplsn->offset;

	lp->stat.st_wc_mbytes += bytes / MEGABYTE;
	lp->stat.st_wc_bytes += bytes % MEGABYTE;

	/*
	 * If the saved lsn is greater than our new end of log, reset it
	 * to our current end of log.
	 */
	if (log_compare(&lp->s_lsn, lsn) > 0)
		lp->s_lsn = lp->lsn;

	/*
	 * If the new end of log is in the middle of the buffer,
	 * don't change the w_off or f_lsn.  If the new end is
	 * before the w_off then reset w_off and f_lsn to the new
	 * end of log.
	 */
	if (lp->w_off >= lp->lsn.offset) {
		lp->f_lsn = lp->lsn;
		lp->w_off = lp->lsn.offset;
		lp->b_off = 0;
	} else
		lp->b_off = lp->lsn.offset - lp->w_off;

	/* Reset the replication bookkeeping to the truncated position. */
	ZERO_LSN(lp->waiting_lsn);
	lp->ready_lsn = lp->lsn;
	lp->wait_recs = 0;
	lp->rcvd_recs = 0;

	/*
	 * Now throw away any extra log files that we have around.  The loop
	 * ends at the first file number that can't be opened.
	 */
	for (fn = lp->lsn.file + 1;; fn++) {
		if (__log_name(dblp, fn, &fname, &fh, DB_OSO_RDONLY) != 0) {
			__os_free(dbenv, fname);
			break;
		}
		(void)__os_closehandle(dbenv, &fh);
		ret = __os_unlink(dbenv, fname);
		__os_free(dbenv, fname);
		if (ret != 0)
			goto err;
	}

	/* Truncate the log to the new point. */
	if ((ret = __log_zero(dbenv, &lp->lsn, &end_lsn)) != 0)
		goto err;

err:	R_UNLOCK(dbenv, &dblp->reginfo);
	return (ret);
}
+
+/*
+ * __log_is_outdated --
+ * Used by the replication system to identify if a client's logs
+ * are too old. The log represented by dbenv is compared to the file
+ * number passed in fnum. If the log file fnum does not exist and is
+ * lower-numbered than the current logs, the we return *outdatedp non
+ * zero, else we return it 0.
+ *
+ * PUBLIC: int __log_is_outdated __P((DB_ENV *dbenv,
+ * PUBLIC: u_int32_t fnum, int *outdatedp));
+ */
+int
+__log_is_outdated(dbenv, fnum, outdatedp)
+ DB_ENV *dbenv;
+ u_int32_t fnum;
+ int *outdatedp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ char *name;
+ int ret;
+ u_int32_t cfile;
+
+ dblp = dbenv->lg_handle;
+ *outdatedp = 0;
+
+ if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0)
+ return (ret);
+
+ /* If the file exists, we're just fine. */
+ if (__os_exists(name, NULL) == 0)
+ goto out;
+
+ /*
+ * It didn't exist, decide if the file number is too big or
+ * too little. If it's too little, then we need to indicate
+ * that the LSN is outdated.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp = (LOG *)dblp->reginfo.primary;
+ cfile = lp->lsn.file;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ if (cfile > fnum)
+ *outdatedp = 1;
+out: __os_free(dbenv, name);
+ return (ret);
+}
+
+/*
+ * __log_zero --
+ * Zero out the tail of a log after a truncate.
+ */
+static int
+__log_zero(dbenv, from_lsn, to_lsn)
+ DB_ENV *dbenv;
+ DB_LSN *from_lsn, *to_lsn;
+{
+ char *lname;
+ DB_LOG *dblp;
+ LOG *lp;
+ int ret;
+ size_t nbytes, len, nw;
+ u_int8_t buf[4096];
+ u_int32_t mbytes, bytes;
+
+ dblp = dbenv->lg_handle;
+ lp = (LOG *)dblp->reginfo.primary;
+ lname = NULL;
+
+ if (dblp->lfname != lp->lsn.file) {
+ if (F_ISSET(&dblp->lfh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, &dblp->lfh);
+ dblp->lfname = lp->lsn.file;
+ }
+
+ if (from_lsn->file != to_lsn->file) {
+ /* We removed some log files; have to 0 to end of file. */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) && (ret =
+ __log_name(dblp, dblp->lfname, &lname, &dblp->lfh, 0)) != 0)
+ return (ret);
+ if ((ret = __os_ioinfo(dbenv,
+ NULL, &dblp->lfh, &mbytes, &bytes, NULL)) != 0)
+ goto err;
+ len = mbytes * MEGABYTE + bytes - from_lsn->offset;
+ } else if (to_lsn->offset <= from_lsn->offset)
+ return (0);
+ else
+ len = to_lsn->offset = from_lsn->offset;
+
+ memset(buf, 0, sizeof(buf));
+
+ /* Initialize the write position. */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) &&
+ (ret = __log_name(dblp, dblp->lfname, &lname, &dblp->lfh, 0)) != 0)
+ goto err;
+
+ if ((ret = __os_seek(dbenv,
+ &dblp->lfh, 0, 0, from_lsn->offset, 0, DB_OS_SEEK_SET)) != 0)
+ return (ret);
+
+ while (len > 0) {
+ nbytes = len > sizeof(buf) ? sizeof(buf) : len;
+ if ((ret =
+ __os_write(dbenv, &dblp->lfh, buf, nbytes, &nw)) != 0)
+ return (ret);
+ len -= nbytes;
+ }
+err: if (lname != NULL)
+ __os_free(dbenv, lname);
+
+ return (0);
+}
diff --git a/storage/bdb/log/log_archive.c b/storage/bdb/log/log_archive.c
new file mode 100644
index 00000000000..19e1af5a93e
--- /dev/null
+++ b/storage/bdb/log/log_archive.c
@@ -0,0 +1,486 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_archive.c,v 11.39 2002/08/06 05:00:31 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __absname __P((DB_ENV *, char *, char *, char **));
+static int __build_data __P((DB_ENV *, char *, char ***));
+static int __cmpfunc __P((const void *, const void *));
+static int __usermem __P((DB_ENV *, char ***));
+
+/*
+ * __log_archive --
+ *	Supporting function for db_archive(1).
+ *
+ * Builds a NULL-terminated list of file names in *listp: data files
+ * (DB_ARCH_DATA), all log files (DB_ARCH_LOG), or log files that are
+ * no longer needed (flags == 0).  DB_ARCH_ABS requests absolute paths.
+ *
+ * Review fix: the file-name loop leaked "name" each time it skipped the
+ * current log file via "continue".
+ *
+ * PUBLIC: int __log_archive __P((DB_ENV *, char **[], u_int32_t));
+ */
+int
+__log_archive(dbenv, listp, flags)
+	DB_ENV *dbenv;
+	char ***listp;
+	u_int32_t flags;
+{
+	DBT rec;
+	DB_LOG *dblp;
+	DB_LOGC *logc;
+	DB_LSN stable_lsn;
+	__txn_ckp_args *ckp_args;
+	char **array, **arrayp, *name, *p, *pref, buf[MAXPATHLEN];
+	int array_size, db_arch_abs, n, ret;
+	u_int32_t fnum;
+
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv,
+	    dbenv->lg_handle, "DB_ENV->log_archive", DB_INIT_LOG);
+
+	name = NULL;
+	dblp = dbenv->lg_handle;
+	COMPQUIET(fnum, 0);
+
+#define	OKFLAGS	(DB_ARCH_ABS | DB_ARCH_DATA | DB_ARCH_LOG)
+	if (flags != 0) {
+		if ((ret = __db_fchk(
+		    dbenv, "DB_ENV->log_archive", flags, OKFLAGS)) != 0)
+			return (ret);
+		if ((ret = __db_fcchk(dbenv, "DB_ENV->log_archive",
+		    flags, DB_ARCH_DATA, DB_ARCH_LOG)) != 0)
+			return (ret);
+	}
+
+	/* Remember and strip DB_ARCH_ABS; the remaining bit selects mode. */
+	if (LF_ISSET(DB_ARCH_ABS)) {
+		db_arch_abs = 1;
+		LF_CLR(DB_ARCH_ABS);
+	} else
+		db_arch_abs = 0;
+
+	if (flags == 0 || flags == DB_ARCH_DATA)
+		ENV_REQUIRES_CONFIG(dbenv,
+		    dbenv->tx_handle, "DB_ENV->log_archive", DB_INIT_TXN);
+
+	/*
+	 * Get the absolute pathname of the current directory.  It would
+	 * be nice to get the shortest pathname of the database directory,
+	 * but that's just not possible.
+	 *
+	 * XXX
+	 * Can't trust getcwd(3) to set a valid errno.  If it doesn't, just
+	 * guess that we ran out of memory.
+	 */
+	if (db_arch_abs) {
+		__os_set_errno(0);
+		if ((pref = getcwd(buf, sizeof(buf))) == NULL) {
+			if (__os_get_errno() == 0)
+				__os_set_errno(ENOMEM);
+			return (__os_get_errno());
+		}
+	} else
+		pref = NULL;
+
+	switch (flags) {
+	case DB_ARCH_DATA:
+		return (__build_data(dbenv, pref, listp));
+	case DB_ARCH_LOG:
+		memset(&rec, 0, sizeof(rec));
+		if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+			return (ret);
+#ifdef UMRW
+		ZERO_LSN(stable_lsn);
+#endif
+		/* The last LSN names the newest log file. */
+		ret = logc->get(logc, &stable_lsn, &rec, DB_LAST);
+		(void)logc->close(logc, 0);
+		if (ret != 0)
+			return (ret);
+		fnum = stable_lsn.file;
+		break;
+	case 0:
+		memset(&rec, 0, sizeof(rec));
+		if (__txn_getckp(dbenv, &stable_lsn) != 0) {
+			/*
+			 * A failure return means that there's no checkpoint
+			 * in the log (so we are not going to be deleting
+			 * any log files).
+			 */
+			*listp = NULL;
+			return (0);
+		}
+		if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+			return (ret);
+		if ((ret = logc->get(logc, &stable_lsn, &rec, DB_SET)) != 0 ||
+		    (ret = __txn_ckp_read(dbenv, rec.data, &ckp_args)) != 0) {
+			/*
+			 * A return of DB_NOTFOUND may only mean that the
+			 * checkpoint LSN is before the beginning of the
+			 * log files that we still have.  This is not
+			 * an error; it just means our work is done.
+			 */
+			if (ret == DB_NOTFOUND) {
+				*listp = NULL;
+				ret = 0;
+			}
+			(void)logc->close(logc, 0);
+			return (ret);
+		}
+		if ((ret = logc->close(logc, 0)) != 0)
+			return (ret);
+		stable_lsn = ckp_args->ckp_lsn;
+		__os_free(dbenv, ckp_args);
+
+		/* Remove any log files before the last stable LSN. */
+		fnum = stable_lsn.file - 1;
+		break;
+	}
+
+#define	LIST_INCREMENT	64
+	/* Get some initial space. */
+	array_size = 64;
+	if ((ret = __os_malloc(dbenv,
+	    sizeof(char *) * array_size, &array)) != 0)
+		return (ret);
+	array[0] = NULL;
+
+	/* Build an array of the file names, newest file first. */
+	for (n = 0; fnum > 0; --fnum) {
+		if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0)
+			goto err;
+		if (__os_exists(name, NULL) != 0) {
+			if (LF_ISSET(DB_ARCH_LOG) && fnum == stable_lsn.file) {
+				/*
+				 * Skip the current log file, releasing the
+				 * name first -- the old code leaked it on
+				 * every skipped iteration.
+				 */
+				__os_free(dbenv, name);
+				name = NULL;
+				continue;
+			}
+			__os_free(dbenv, name);
+			name = NULL;
+			break;
+		}
+
+		if (n >= array_size - 2) {
+			array_size += LIST_INCREMENT;
+			if ((ret = __os_realloc(dbenv,
+			    sizeof(char *) * array_size, &array)) != 0)
+				goto err;
+		}
+
+		if (db_arch_abs) {
+			if ((ret = __absname(dbenv,
+			    pref, name, &array[n])) != 0)
+				goto err;
+			__os_free(dbenv, name);
+		} else if ((p = __db_rpath(name)) != NULL) {
+			if ((ret = __os_strdup(dbenv, p + 1, &array[n])) != 0)
+				goto err;
+			__os_free(dbenv, name);
+		} else
+			array[n] = name;
+
+		name = NULL;
+		array[++n] = NULL;
+	}
+
+	/* If there's nothing to return, we're done. */
+	if (n == 0) {
+		*listp = NULL;
+		ret = 0;
+		goto err;
+	}
+
+	/* Sort the list. */
+	qsort(array, (size_t)n, sizeof(char *), __cmpfunc);
+
+	/* Rework the memory. */
+	if ((ret = __usermem(dbenv, &array)) != 0)
+		goto err;
+
+	*listp = array;
+	return (0);
+
+err:	if (array != NULL) {
+		for (arrayp = array; *arrayp != NULL; ++arrayp)
+			__os_free(dbenv, *arrayp);
+		__os_free(dbenv, array);
+	}
+	if (name != NULL)
+		__os_free(dbenv, name);
+	return (ret);
+}
+
+/*
+ * __build_data --
+ *	Build a list of datafiles for return.
+ *
+ * Scans the log backward for __dbreg_register records, collects the
+ * named files (plus queue extents), sorts and de-duplicates them, and
+ * hands the result back through __usermem.
+ *
+ * Review fix: "list" was tested (and possibly freed) at q_err without
+ * having been initialized when __qam_extent_names failed -- an
+ * uninitialized-pointer read.
+ */
+static int
+__build_data(dbenv, pref, listp)
+	DB_ENV *dbenv;
+	char *pref, ***listp;
+{
+	DBT rec;
+	DB_LOGC *logc;
+	DB_LSN lsn;
+	__dbreg_register_args *argp;
+	u_int32_t rectype;
+	int array_size, last, n, nxt, ret, t_ret;
+	char **array, **arrayp, **list, **lp, *p, *real_name;
+
+	/* Get some initial space. */
+	array_size = 64;
+	if ((ret = __os_malloc(dbenv,
+	    sizeof(char *) * array_size, &array)) != 0)
+		return (ret);
+	array[0] = NULL;
+
+	memset(&rec, 0, sizeof(rec));
+	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+		return (ret);
+	for (n = 0; (ret = logc->get(logc, &lsn, &rec, DB_PREV)) == 0;) {
+		if (rec.size < sizeof(rectype)) {
+			ret = EINVAL;
+			__db_err(dbenv, "DB_ENV->log_archive: bad log record");
+			goto free_continue;
+		}
+
+		memcpy(&rectype, rec.data, sizeof(rectype));
+		if (rectype != DB___dbreg_register)
+			continue;
+		if ((ret =
+		    __dbreg_register_read(dbenv, rec.data, &argp)) != 0) {
+			ret = EINVAL;
+			__db_err(dbenv,
+			    "DB_ENV->log_archive: unable to read log record");
+			goto free_continue;
+		}
+
+		if (n >= array_size - 2) {
+			array_size += LIST_INCREMENT;
+			if ((ret = __os_realloc(dbenv,
+			    sizeof(char *) * array_size, &array)) != 0)
+				goto free_continue;
+		}
+
+		if ((ret = __os_strdup(dbenv,
+		    argp->name.data, &array[n++])) != 0)
+			goto free_continue;
+		array[n] = NULL;
+
+		if (argp->ftype == DB_QUEUE) {
+			/*
+			 * NULL the list first: if __qam_extent_names fails
+			 * without setting it, the q_err cleanup would
+			 * otherwise read an uninitialized pointer.
+			 */
+			list = NULL;
+			if ((ret = __qam_extent_names(dbenv,
+			    argp->name.data, &list)) != 0)
+				goto q_err;
+			for (lp = list;
+			    lp != NULL && *lp != NULL; lp++) {
+				if (n >= array_size - 2) {
+					array_size += LIST_INCREMENT;
+					if ((ret = __os_realloc(dbenv,
+					    sizeof(char *) *
+					    array_size, &array)) != 0)
+						goto q_err;
+				}
+				if ((ret =
+				    __os_strdup(dbenv, *lp, &array[n++])) != 0)
+					goto q_err;
+				array[n] = NULL;
+			}
+q_err:			if (list != NULL)
+				__os_free(dbenv, list);
+		}
+free_continue:	__os_free(dbenv, argp);
+		if (ret != 0)
+			break;
+	}
+	if (ret == DB_NOTFOUND)
+		ret = 0;
+	if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+		ret = t_ret;
+	if (ret != 0)
+		goto err1;
+
+	/* If there's nothing to return, we're done. */
+	if (n == 0) {
+		ret = 0;
+		*listp = NULL;
+		goto err1;
+	}
+
+	/* Sort the list. */
+	qsort(array, (size_t)n, sizeof(char *), __cmpfunc);
+
+	/*
+	 * Build the real pathnames, discarding nonexistent files and
+	 * duplicates.
+	 */
+	for (last = nxt = 0; nxt < n;) {
+		/*
+		 * Discard duplicates.  Last is the next slot we're going
+		 * to return to the user, nxt is the next slot that we're
+		 * going to consider.
+		 */
+		if (last != nxt) {
+			array[last] = array[nxt];
+			array[nxt] = NULL;
+		}
+		for (++nxt; nxt < n &&
+		    strcmp(array[last], array[nxt]) == 0; ++nxt) {
+			__os_free(dbenv, array[nxt]);
+			array[nxt] = NULL;
+		}
+
+		/* Get the real name. */
+		if ((ret = __db_appname(dbenv,
+		    DB_APP_DATA, array[last], 0, NULL, &real_name)) != 0)
+			goto err2;
+
+		/* If the file doesn't exist, ignore it. */
+		if (__os_exists(real_name, NULL) != 0) {
+			__os_free(dbenv, real_name);
+			__os_free(dbenv, array[last]);
+			array[last] = NULL;
+			continue;
+		}
+
+		/* Rework the name as requested by the user. */
+		__os_free(dbenv, array[last]);
+		array[last] = NULL;
+		if (pref != NULL) {
+			ret = __absname(dbenv, pref, real_name, &array[last]);
+			__os_free(dbenv, real_name);
+			if (ret != 0)
+				goto err2;
+		} else if ((p = __db_rpath(real_name)) != NULL) {
+			ret = __os_strdup(dbenv, p + 1, &array[last]);
+			__os_free(dbenv, real_name);
+			if (ret != 0)
+				goto err2;
+		} else
+			array[last] = real_name;
+		++last;
+	}
+
+	/* NULL-terminate the list. */
+	array[last] = NULL;
+
+	/* Rework the memory. */
+	if ((ret = __usermem(dbenv, &array)) != 0)
+		goto err1;
+
+	*listp = array;
+	return (0);
+
+err2:	/*
+	 * XXX
+	 * We've possibly inserted NULLs into the array list, so clean up a
+	 * bit so that the other error processing works.
+	 */
+	if (array != NULL)
+		for (; nxt < n; ++nxt)
+			__os_free(dbenv, array[nxt]);
+	/* FALLTHROUGH */
+
+err1:	if (array != NULL) {
+		for (arrayp = array; *arrayp != NULL; ++arrayp)
+			__os_free(dbenv, *arrayp);
+		__os_free(dbenv, array);
+	}
+	return (ret);
+}
+
+/*
+ * __absname --
+ *	Return an absolute path name for the file.
+ *
+ * If "name" is already absolute it is copied verbatim; otherwise it is
+ * joined to "pref" with a path separator.  The result is allocated and
+ * returned through *newnamep; the caller owns it.
+ */
+static int
+__absname(dbenv, pref, name, newnamep)
+	DB_ENV *dbenv;
+	char *pref, *name, **newnamep;
+{
+	size_t prefix_len, name_len;
+	int is_abs, ret;
+	char *abuf;
+
+	name_len = strlen(name);
+	is_abs = __os_abspath(name);
+	prefix_len = is_abs ? 0 : strlen(pref);
+
+	/* Room for prefix, optional separator, name and terminator. */
+	if ((ret = __os_malloc(dbenv, prefix_len + name_len + 2, &abuf)) != 0)
+		return (ret);
+	*newnamep = abuf;
+
+	/* Prepend the prefix (and a separator, if needed) for relative names. */
+	if (!is_abs) {
+		memcpy(abuf, pref, prefix_len);
+		if (strchr(PATH_SEPARATOR, abuf[prefix_len - 1]) == NULL) {
+			abuf[prefix_len] = PATH_SEPARATOR[0];
+			++prefix_len;
+		}
+	}
+	memcpy(abuf + prefix_len, name, name_len + 1);
+
+	return (0);
+}
+
+/*
+ * __usermem --
+ *	Create a single chunk of memory that holds the returned information.
+ *	If the user has their own malloc routine, use it.
+ *
+ * The per-entry allocations in *listp are copied into one block laid out
+ * as [pointer array][NULL][strings]; the originals and the old array are
+ * freed and *listp is replaced.
+ */
+static int
+__usermem(dbenv, listp)
+	DB_ENV *dbenv;
+	char ***listp;
+{
+	size_t bytes, slen;
+	int ret;
+	char **nl, **src, **dst, *cp;
+
+	/* Total space: a pointer plus string bytes per entry, plus NULL. */
+	bytes = sizeof(char *);
+	for (src = *listp; *src != NULL; ++src)
+		bytes += sizeof(char *) + strlen(*src) + 1;
+
+	/* Allocate through the application's allocator, if configured. */
+	if ((ret = __os_umalloc(dbenv, bytes, &nl)) != 0)
+		return (ret);
+
+	/* Strings start immediately after the NULL-terminated array. */
+	cp = (char *)(nl + (src - *listp) + 1);
+
+	/* Move each string, releasing the originals as we go. */
+	for (src = *listp, dst = nl; *src != NULL; ++src, ++dst) {
+		slen = strlen(*src) + 1;
+		memcpy(cp, *src, slen);
+		*dst = cp;
+		cp += slen;
+		__os_free(dbenv, *src);
+	}
+
+	/* NULL-terminate the new list. */
+	*dst = NULL;
+
+	__os_free(dbenv, *listp);
+	*listp = nl;
+
+	return (0);
+}
+
+/*
+ * __cmpfunc --
+ *	qsort comparison function: order two (char *) entries with strcmp.
+ */
+static int
+__cmpfunc(p1, p2)
+	const void *p1, *p2;
+{
+	char * const *s1, * const *s2;
+
+	s1 = (char * const *)p1;
+	s2 = (char * const *)p2;
+	return (strcmp(*s1, *s2));
+}
diff --git a/storage/bdb/log/log_compare.c b/storage/bdb/log/log_compare.c
new file mode 100644
index 00000000000..115f9c21b76
--- /dev/null
+++ b/storage/bdb/log/log_compare.c
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_compare.c,v 11.6 2002/01/11 15:52:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * log_compare --
+ *	Compare two LSN's; return 1, 0, -1 if first is >, == or < second.
+ *
+ * Ordering is by file number first, then by offset within the file.
+ *
+ * EXTERN: int log_compare __P((const DB_LSN *, const DB_LSN *));
+ */
+int
+log_compare(lsn0, lsn1)
+	const DB_LSN *lsn0, *lsn1;
+{
+	if (lsn0->file < lsn1->file)
+		return (-1);
+	if (lsn0->file > lsn1->file)
+		return (1);
+	if (lsn0->offset < lsn1->offset)
+		return (-1);
+	if (lsn0->offset > lsn1->offset)
+		return (1);
+	return (0);
+}
diff --git a/storage/bdb/log/log_get.c b/storage/bdb/log/log_get.c
new file mode 100644
index 00000000000..c8b028da0fb
--- /dev/null
+++ b/storage/bdb/log/log_get.c
@@ -0,0 +1,1058 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_get.c,v 11.81 2002/08/14 20:09:27 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hmac.h"
+#include "dbinc/log.h"
+#include "dbinc/hash.h"
+
+typedef enum { L_ALREADY, L_ACQUIRED, L_NONE } RLOCK;
+
+static int __log_c_close __P((DB_LOGC *, u_int32_t));
+static int __log_c_get __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+static int __log_c_get_int __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+static int __log_c_hdrchk __P((DB_LOGC *, HDR *, int *));
+static int __log_c_incursor __P((DB_LOGC *, DB_LSN *, HDR *, u_int8_t **));
+static int __log_c_inregion __P((DB_LOGC *,
+ DB_LSN *, RLOCK *, DB_LSN *, HDR *, u_int8_t **));
+static int __log_c_io __P((DB_LOGC *,
+ u_int32_t, u_int32_t, void *, size_t *, int *));
+static int __log_c_ondisk __P((DB_LOGC *,
+ DB_LSN *, DB_LSN *, int, HDR *, u_int8_t **, int *));
+static int __log_c_set_maxrec __P((DB_LOGC *, char *));
+static int __log_c_shortread __P((DB_LOGC *, int));
+
+/*
+ * __log_cursor --
+ *	Create a log cursor.
+ *
+ * Allocates a DB_LOGC, its file handle and its read buffer, wires up
+ * the method table, and returns it through *logcp.
+ *
+ * Review fix: "logc" was read at the error label without ever having
+ * been assigned when the first allocation failed -- NULL it up front so
+ * we never free through an indeterminate pointer.  (Whether __os_calloc
+ * clears its out-pointer on failure is not visible here; the
+ * initialization is safe either way.)
+ *
+ * PUBLIC: int __log_cursor __P((DB_ENV *, DB_LOGC **, u_int32_t));
+ */
+int
+__log_cursor(dbenv, logcp, flags)
+	DB_ENV *dbenv;
+	DB_LOGC **logcp;
+	u_int32_t flags;
+{
+	DB_LOGC *logc;
+	int ret;
+
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv,
+	    dbenv->lg_handle, "DB_ENV->log_cursor", DB_INIT_LOG);
+
+	*logcp = NULL;
+	logc = NULL;
+
+	/* Validate arguments. */
+	if ((ret = __db_fchk(dbenv, "DB_ENV->log_cursor", flags, 0)) != 0)
+		return (ret);
+
+	/* Allocate memory for the cursor. */
+	if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOGC), &logc)) != 0)
+		goto err;
+	if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &logc->c_fh)) != 0)
+		goto err;
+
+	logc->bp_size = DB_LOGC_BUF_SIZE;
+	if ((ret = __os_malloc(dbenv, logc->bp_size, &logc->bp)) != 0)
+		goto err;
+
+	logc->dbenv = dbenv;
+	logc->close = __log_c_close;
+	logc->get = __log_c_get;
+
+	*logcp = logc;
+	return (0);
+
+err:	if (logc != NULL) {
+		if (logc->c_fh != NULL)
+			__os_free(dbenv, logc->c_fh);
+		__os_free(dbenv, logc);
+	}
+
+	return (ret);
+}
+
+/*
+ * __log_c_close --
+ *	Close a log cursor.
+ *
+ * Releases the cursor's file handle, cached-record buffer, read buffer,
+ * handle memory and finally the cursor itself.
+ */
+static int
+__log_c_close(logc, flags)
+	DB_LOGC *logc;
+	u_int32_t flags;
+{
+	DB_ENV *env;
+	int ret;
+
+	env = logc->dbenv;
+
+	PANIC_CHECK(env);
+	ret = __db_fchk(env, "DB_LOGC->close", flags, 0);
+	if (ret != 0)
+		return (ret);
+
+	/* Close the underlying file handle, if one is open. */
+	if (F_ISSET(logc->c_fh, DB_FH_VALID))
+		(void)__os_closehandle(env, logc->c_fh);
+
+	/* Release the cached-record buffer, if any was ever allocated. */
+	if (logc->c_dbt.data != NULL)
+		__os_free(env, logc->c_dbt.data);
+
+	__os_free(env, logc->bp);
+	__os_free(env, logc->c_fh);
+	__os_free(env, logc);
+
+	return (0);
+}
+
+/*
+ * __log_c_get --
+ *	Get a log record.
+ *
+ * Validates the flag, delegates to __log_c_get_int, and transparently
+ * skips log-file header records (offset 0) on positioning flags by
+ * re-issuing the get in the same direction.
+ */
+static int
+__log_c_get(logc, alsn, dbt, flags)
+	DB_LOGC *logc;
+	DB_LSN *alsn;
+	DBT *dbt;
+	u_int32_t flags;
+{
+	DB_ENV *env;
+	DB_LSN orig_lsn;
+	int ret;
+
+	env = logc->dbenv;
+
+	PANIC_CHECK(env);
+
+	/* Validate arguments. */
+	switch (flags) {
+	case DB_CURRENT:
+	case DB_FIRST:
+	case DB_LAST:
+	case DB_NEXT:
+	case DB_PREV:
+		break;
+	case DB_SET:
+		if (IS_ZERO_LSN(*alsn)) {
+			__db_err(env, "DB_LOGC->get: invalid LSN");
+			return (EINVAL);
+		}
+		break;
+	default:
+		return (__db_ferr(env, "DB_LOGC->get", 1));
+	}
+
+	/*
+	 * Preserve the caller's LSN across failures: applications loop
+	 * with DB_NEXT and read the last successful LSN back out of the
+	 * structure after DB_NOTFOUND.  (The copy reads *alsn even when
+	 * the application never initialized it -- harmless, and noted by
+	 * memory-checking tools.)
+	 */
+	orig_lsn = *alsn;
+
+	if ((ret = __log_c_get_int(logc, alsn, dbt, flags)) != 0) {
+		*alsn = orig_lsn;
+		return (ret);
+	}
+
+	/*
+	 * Log-file header records (offset 0) aren't useful to callers;
+	 * for positioning flags, step over them once in the same
+	 * direction of travel.
+	 */
+	if (alsn->offset == 0 && (flags == DB_FIRST ||
+	    flags == DB_NEXT || flags == DB_LAST || flags == DB_PREV)) {
+		if (flags == DB_FIRST)
+			flags = DB_NEXT;
+		else if (flags == DB_LAST)
+			flags = DB_PREV;
+
+		/* Drop any user-malloc'd result from the first get. */
+		if (F_ISSET(dbt, DB_DBT_MALLOC)) {
+			__os_free(env, dbt->data);
+			dbt->data = NULL;
+		}
+
+		if ((ret = __log_c_get_int(logc, alsn, dbt, flags)) != 0) {
+			*alsn = orig_lsn;
+			return (ret);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * __log_c_get_int --
+ *	Get a log record; internal version.
+ *
+ * Resolves "flags" to a target LSN, then locates the record in (in
+ * order of preference) the cursor's buffer, the shared region buffer,
+ * or the on-disk log file, verifies its checksum, optionally decrypts
+ * it, and copies it into "dbt".  On success *alsn and the cursor
+ * position (c_lsn/c_len/c_prev) are updated; on failure the caller
+ * (__log_c_get) restores the user's LSN.
+ */
+static int
+__log_c_get_int(logc, alsn, dbt, flags)
+	DB_LOGC *logc;
+	DB_LSN *alsn;
+	DBT *dbt;
+	u_int32_t flags;
+{
+	DB_CIPHER *db_cipher;
+	DB_ENV *dbenv;
+	DB_LOG *dblp;
+	DB_LSN last_lsn, nlsn;
+	HDR hdr;
+	LOG *lp;
+	RLOCK rlock;
+	logfile_validity status;
+	u_int32_t cnt;
+	u_int8_t *rp;
+	int eof, is_hmac, ret;
+
+	dbenv = logc->dbenv;
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+	is_hmac = 0;
+
+	/*
+	 * We don't acquire the log region lock until we need it, and we
+	 * release it as soon as we're done.
+	 */
+	rlock = F_ISSET(logc, DB_LOG_LOCKED) ? L_ALREADY : L_NONE;
+
+	/* Start from the cursor's current position. */
+	nlsn = logc->c_lsn;
+	switch (flags) {
+	case DB_NEXT:				/* Next log record. */
+		if (!IS_ZERO_LSN(nlsn)) {
+			/* Increment the cursor by the cursor record size. */
+			nlsn.offset += logc->c_len;
+			break;
+		}
+		/* No current position: DB_NEXT degenerates to DB_FIRST. */
+		flags = DB_FIRST;
+		/* FALLTHROUGH */
+	case DB_FIRST:				/* First log record. */
+		/* Find the first log file. */
+		if ((ret = __log_find(dblp, 1, &cnt, &status)) != 0)
+			goto err;
+
+		/*
+		 * DB_LV_INCOMPLETE:
+		 *	Theoretically, the log file we want could be created
+		 *	but not yet written, the "first" log record must be
+		 *	in the log buffer.
+		 * DB_LV_NORMAL:
+		 * DB_LV_OLD_READABLE:
+		 *	We found a log file we can read.
+		 * DB_LV_NONEXISTENT:
+		 *	No log files exist, the "first" log record must be in
+		 *	the log buffer.
+		 * DB_LV_OLD_UNREADABLE:
+		 *	No readable log files exist, we're at the cross-over
+		 *	point between two versions.  The "first" log record
+		 *	must be in the log buffer.
+		 */
+		switch (status) {
+		case DB_LV_INCOMPLETE:
+			DB_ASSERT(lp->lsn.file == cnt);
+			/* FALLTHROUGH */
+		case DB_LV_NORMAL:
+		case DB_LV_OLD_READABLE:
+			nlsn.file = cnt;
+			break;
+		case DB_LV_NONEXISTENT:
+			nlsn.file = 1;
+			DB_ASSERT(lp->lsn.file == nlsn.file);
+			break;
+		case DB_LV_OLD_UNREADABLE:
+			nlsn.file = cnt + 1;
+			DB_ASSERT(lp->lsn.file == nlsn.file);
+			break;
+		}
+		nlsn.offset = 0;
+		break;
+	case DB_CURRENT:			/* Current log record. */
+		break;
+	case DB_PREV:				/* Previous log record. */
+		if (!IS_ZERO_LSN(nlsn)) {
+			/* If at start-of-file, move to the previous file. */
+			if (nlsn.offset == 0) {
+				if (nlsn.file == 1 ||
+				    __log_valid(dblp,
+					nlsn.file - 1, 0, &status) != 0) {
+					ret = DB_NOTFOUND;
+					goto err;
+				}
+
+				if (status != DB_LV_NORMAL &&
+				    status != DB_LV_OLD_READABLE) {
+					ret = DB_NOTFOUND;
+					goto err;
+				}
+
+				--nlsn.file;
+			}
+			nlsn.offset = logc->c_prev;
+			break;
+		}
+		/* No current position: DB_PREV degenerates to DB_LAST. */
+		/* FALLTHROUGH */
+	case DB_LAST:				/* Last log record. */
+		if (rlock == L_NONE) {
+			rlock = L_ACQUIRED;
+			R_LOCK(dbenv, &dblp->reginfo);
+		}
+		/* The last record starts lp->len bytes before the end. */
+		nlsn.file = lp->lsn.file;
+		nlsn.offset = lp->lsn.offset - lp->len;
+		break;
+	case DB_SET:				/* Set log record. */
+		nlsn = *alsn;
+		break;
+	}
+
+	/* Entered only via "goto next_file" below, never by fallthrough. */
+	if (0) {				/* Move to the next file. */
+next_file:	++nlsn.file;
+		nlsn.offset = 0;
+	}
+
+	/*
+	 * The above switch statement should have set nlsn to the lsn of
+	 * the requested record.
+	 */
+
+	/* Header size (and HMAC use) depends on whether encryption is on. */
+	if (CRYPTO_ON(dbenv)) {
+		hdr.size = HDR_CRYPTO_SZ;
+		is_hmac = 1;
+	} else {
+		hdr.size = HDR_NORMAL_SZ;
+		is_hmac = 0;
+	}
+	/* Check to see if the record is in the cursor's buffer. */
+	if ((ret = __log_c_incursor(logc, &nlsn, &hdr, &rp)) != 0)
+		goto err;
+	if (rp != NULL)
+		goto cksum;
+
+	/*
+	 * Look to see if we're moving backward in the log with the last record
+	 * coming from the disk -- it means the record can't be in the region's
+	 * buffer.  Else, check the region's buffer.
+	 *
+	 * If the record isn't in the region's buffer, we're going to have to
+	 * read the record from disk.  We want to make a point of not reading
+	 * past the end of the logical log (after recovery, there may be data
+	 * after the end of the logical log, not to mention the log file may
+	 * have been pre-allocated).  So, zero out last_lsn, and initialize it
+	 * inside __log_c_inregion -- if it's still zero when we check it in
+	 * __log_c_ondisk, that's OK, it just means the logical end of the log
+	 * isn't an issue for this request.
+	 */
+	ZERO_LSN(last_lsn);
+	if (!F_ISSET(logc, DB_LOG_DISK) ||
+	    log_compare(&nlsn, &logc->c_lsn) > 0) {
+		F_CLR(logc, DB_LOG_DISK);
+
+		if ((ret = __log_c_inregion(logc,
+		    &nlsn, &rlock, &last_lsn, &hdr, &rp)) != 0)
+			goto err;
+		if (rp != NULL)
+			goto cksum;
+	}
+
+	/*
+	 * We have to read from an on-disk file to retrieve the record.
+	 * If we ever can't retrieve the record at offset 0, we're done,
+	 * return EOF/DB_NOTFOUND.
+	 *
+	 * Discard the region lock if we're still holding it, the on-disk
+	 * reading routines don't need it.
+	 */
+	if (rlock == L_ACQUIRED) {
+		rlock = L_NONE;
+		R_UNLOCK(dbenv, &dblp->reginfo);
+	}
+	if ((ret = __log_c_ondisk(
+	    logc, &nlsn, &last_lsn, flags, &hdr, &rp, &eof)) != 0)
+		goto err;
+	if (eof == 1) {
+		/*
+		 * Only DB_NEXT automatically moves to the next file, and
+		 * it only happens once.
+		 */
+		if (flags != DB_NEXT || nlsn.offset == 0)
+			return (DB_NOTFOUND);
+		goto next_file;
+	}
+	/* Remember that the last record came from disk (see test above). */
+	F_SET(logc, DB_LOG_DISK);
+
+cksum:	/*
+	 * Discard the region lock if we're still holding it.  (The path to
+	 * get here is that we acquired the lock because of the caller's
+	 * flag argument, but we found the record in the cursor's buffer.
+	 * Improbable, but it's easy to avoid.
+	 */
+	if (rlock == L_ACQUIRED) {
+		rlock = L_NONE;
+		R_UNLOCK(dbenv, &dblp->reginfo);
+	}
+
+	/*
+	 * Checksum: there are two types of errors -- a configuration error
+	 * or a checksum mismatch.  The former is always bad.  The latter is
+	 * OK if we're searching for the end of the log, and very, very bad
+	 * if we're reading random log records.
+	 */
+	db_cipher = dbenv->crypto_handle;
+	if ((ret = __db_check_chksum(dbenv, db_cipher,
+	    hdr.chksum, rp + hdr.size, hdr.len - hdr.size, is_hmac)) != 0) {
+		/* -1 indicates a mismatch (see the messages below). */
+		if (F_ISSET(logc, DB_LOG_SILENT_ERR)) {
+			if (ret == 0 || ret == -1)
+				ret = EIO;
+		} else if (ret == -1) {
+			__db_err(dbenv,
+		    "DB_LOGC->get: log record checksum mismatch");
+			__db_err(dbenv,
+		    "DB_LOGC->get: catastrophic recovery may be required");
+			ret = __db_panic(dbenv, DB_RUNRECOVERY);
+		}
+		goto err;
+	}
+
+	/*
+	 * If we got a 0-length record, that means we're in the midst of
+	 * some bytes that got 0'd as the result of a vtruncate.  We're
+	 * going to have to retry.
+	 */
+	if (hdr.len == 0) {
+		switch (flags) {
+		case DB_FIRST:
+		case DB_NEXT:
+			/* Zero'd records always indicate the end of a file. */
+			goto next_file;
+
+		case DB_LAST:
+		case DB_PREV:
+			/*
+			 * We should never get here.  If we recover a log
+			 * file with 0's at the end, we'll treat the 0'd
+			 * headers as the end of log and ignore them.  If
+			 * we're reading backwards from another file, then
+			 * the first record in that new file should have its
+			 * prev field set correctly.
+			 */
+			__db_err(dbenv,
+		"Encountered zero length records while traversing backwards");
+			DB_ASSERT(0);
+		case DB_SET:
+		default:
+			/* Return the 0-length record. */
+			break;
+		}
+	}
+
+	/* Copy the record into the user's DBT. */
+	if ((ret = __db_retcopy(dbenv, dbt, rp + hdr.size,
+	    (u_int32_t)(hdr.len - hdr.size),
+	    &logc->c_dbt.data, &logc->c_dbt.ulen)) != 0)
+		goto err;
+
+	if (CRYPTO_ON(dbenv)) {
+		if ((ret = db_cipher->decrypt(dbenv, db_cipher->data,
+		    hdr.iv, dbt->data, hdr.len - hdr.size)) != 0) {
+			ret = EAGAIN;
+			goto err;
+		}
+		/*
+		 * Return the original log record size to the user,
+		 * even though we've allocated more than that, possibly.
+		 * The log record is decrypted in the user dbt, not in
+		 * the buffer, so we must do this here after decryption,
+		 * not adjust the len passed to the __db_retcopy call.
+		 */
+		dbt->size = hdr.orig_size;
+	}
+
+	/* Update the cursor and the returned LSN. */
+	*alsn = nlsn;
+	logc->c_lsn = nlsn;
+	logc->c_len = hdr.len;
+	logc->c_prev = hdr.prev;
+
+err:	if (rlock == L_ACQUIRED)
+		R_UNLOCK(dbenv, &dblp->reginfo);
+
+	return (ret);
+}
+
+/*
+ * __log_c_incursor --
+ *	Check to see if the requested record is in the cursor's buffer.
+ *
+ * On a hit, fills in *hdr and points *pp at the record; on a clean miss
+ * *pp stays NULL and 0 is returned.  A malformed header for an
+ * apparently in-buffer LSN returns DB_NOTFOUND (the LSN is bogus).
+ */
+static int
+__log_c_incursor(logc, lsn, hdr, pp)
+	DB_LOGC *logc;
+	DB_LSN *lsn;
+	HDR *hdr;
+	u_int8_t **pp;
+{
+	u_int8_t *p;
+	u_int32_t bp_end;
+
+	*pp = NULL;
+
+	/*
+	 * The buffer can only hold the record if it caches the same log
+	 * file, starts at or before the requested offset, and extends far
+	 * enough past it to cover at least the record header.
+	 */
+	if (logc->bp_lsn.file != lsn->file ||
+	    logc->bp_lsn.offset > lsn->offset)
+		return (0);
+	bp_end = logc->bp_lsn.offset + logc->bp_rlen;
+	if (bp_end <= lsn->offset + hdr->size)
+		return (0);
+
+	/*
+	 * Read the header out of the buffer.  If the header check fails
+	 * for any reason, the LSN itself must be bogus -- fail hard.
+	 */
+	p = logc->bp + (lsn->offset - logc->bp_lsn.offset);
+	memcpy(hdr, p, hdr->size);
+	if (__log_c_hdrchk(logc, hdr, NULL))
+		return (DB_NOTFOUND);
+
+	/*
+	 * Only report a hit when the full record is present; a partial
+	 * record is re-fetched by the caller (keeping only part would
+	 * save at most a header-sized read).
+	 */
+	if (bp_end <= lsn->offset + hdr->len)
+		return (0);
+
+	*pp = p;			/* Success. */
+
+	return (0);
+}
+
+/*
+ * __log_c_inregion --
+ *	Check to see if the requested record is in the region's buffer.
+ *
+ * Acquires the region lock if the caller hasn't, sets *last_lsn to the
+ * logical end-of-log for the on-disk reader, and on a hit copies the
+ * record (possibly assembling a disk/buffer split record) into the
+ * cursor's buffer, returning it via *pp.
+ */
+static int
+__log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
+	DB_LOGC *logc;
+	DB_LSN *lsn, *last_lsn;
+	RLOCK *rlockp;
+	HDR *hdr;
+	u_int8_t **pp;
+{
+	DB_ENV *dbenv;
+	DB_LOG *dblp;
+	LOG *lp;
+	size_t len, nr;
+	u_int32_t b_disk, b_region;
+	int ret;
+	u_int8_t *p;
+
+	dbenv = logc->dbenv;
+	dblp = dbenv->lg_handle;
+	lp = ((DB_LOG *)logc->dbenv->lg_handle)->reginfo.primary;
+
+	ret = 0;
+	*pp = NULL;
+
+	/* If we haven't yet acquired the log region lock, do so. */
+	if (*rlockp == L_NONE) {
+		*rlockp = L_ACQUIRED;
+		R_LOCK(dbenv, &dblp->reginfo);
+	}
+
+	/*
+	 * The routines to read from disk must avoid reading past the logical
+	 * end of the log, so pass that information back to it.
+	 *
+	 * Since they're reading directly from the disk, they must also avoid
+	 * reading past the offset we've written out.  If the log was
+	 * truncated, it's possible that there are zeroes or garbage on
+	 * disk after this offset, and the logical end of the log can
+	 * come later than this point if the log buffer isn't empty.
+	 */
+	*last_lsn = lp->lsn;
+	if (last_lsn->offset > lp->w_off)
+		last_lsn->offset = lp->w_off;
+
+	/*
+	 * Test to see if the requested LSN could be part of the region's
+	 * buffer.
+	 *
+	 * During recovery, we read the log files getting the information to
+	 * initialize the region.  In that case, the region's lsn field will
+	 * not yet have been filled in, use only the disk.
+	 *
+	 * The record must not start at a byte offset after the region buffer's
+	 * end, since that means the request is for a record after the end of
+	 * the log.  Do this test even if the region's buffer is empty -- after
+	 * recovery, the log files may continue past the declared end-of-log,
+	 * and the disk reading routine will incorrectly attempt to read the
+	 * remainder of the log.
+	 *
+	 * Otherwise, test to see if the region's buffer actually has what we
+	 * want:
+	 *
+	 * The buffer must have some useful content.
+	 * The record must be in the same file as the region's buffer and must
+	 * start at a byte offset equal to or greater than the region's buffer.
+	 */
+	if (IS_ZERO_LSN(lp->lsn))
+		return (0);
+	if (lsn->file > lp->lsn.file ||
+	    (lsn->file == lp->lsn.file && lsn->offset >= lp->lsn.offset))
+		return (DB_NOTFOUND);
+	if (lp->b_off == 0)
+		return (0);
+	if (lsn->file < lp->f_lsn.file || lsn->offset < lp->f_lsn.offset)
+		return (0);
+
+	/*
+	 * The current contents of the cursor's buffer will be useless for a
+	 * future call -- trash it rather than try and make it look correct.
+	 */
+	ZERO_LSN(logc->bp_lsn);
+
+	/*
+	 * If the requested LSN is greater than the region buffer's first
+	 * byte, we know the entire record is in the buffer.
+	 *
+	 * If the header check fails for any reason, it must be because the
+	 * LSN is bogus.  Fail hard.
+	 */
+	if (lsn->offset > lp->f_lsn.offset) {
+		p = dblp->bufp + (lsn->offset - lp->w_off);
+		memcpy(hdr, p, hdr->size);
+		if (__log_c_hdrchk(logc, hdr, NULL))
+			return (DB_NOTFOUND);
+		/* Grow the cursor buffer if the record won't fit. */
+		if (logc->bp_size <= hdr->len) {
+			len = ALIGN(hdr->len * 2, 128);
+			if ((ret =
+			    __os_realloc(logc->dbenv, len, &logc->bp)) != 0)
+				return (ret);
+			logc->bp_size = (u_int32_t)len;
+		}
+		memcpy(logc->bp, p, hdr->len);
+		*pp = logc->bp;
+		return (0);
+	}
+
+	/*
+	 * There's a partial record, that is, the requested record starts
+	 * in a log file and finishes in the region buffer.  We have to
+	 * find out how many bytes of the record are in the region buffer
+	 * so we can copy them out into the cursor buffer.  First, check
+	 * to see if the requested record is the only record in the region
+	 * buffer, in which case we should copy the entire region buffer.
+	 *
+	 * Else, walk back through the region's buffer to find the first LSN
+	 * after the record that crosses the buffer boundary -- we can detect
+	 * that LSN, because its "prev" field will reference the record we
+	 * want.  The bytes we need to copy from the region buffer are the
+	 * bytes up to the record we find.  The bytes we'll need to allocate
+	 * to hold the log record are the bytes between the two offsets.
+	 */
+	b_disk = lp->w_off - lsn->offset;
+	if (lp->b_off <= lp->len)
+		b_region = (u_int32_t)lp->b_off;
+	else
+		/*
+		 * NOTE(review): this walk relies on some record in the
+		 * region buffer having a "prev" equal to the requested
+		 * offset -- the loop has no other termination condition.
+		 */
+		for (p = dblp->bufp + (lp->b_off - lp->len);;) {
+			memcpy(hdr, p, hdr->size);
+			if (hdr->prev == lsn->offset) {
+				b_region = (u_int32_t)(p - dblp->bufp);
+				break;
+			}
+			p = dblp->bufp + (hdr->prev - lp->w_off);
+		}
+
+	/*
+	 * If we don't have enough room for the record, we have to allocate
+	 * space.  We have to do it while holding the region lock, which is
+	 * truly annoying, but there's no way around it.  This call is why
+	 * we allocate cursor buffer space when allocating the cursor instead
+	 * of waiting.
+	 */
+	if (logc->bp_size <= b_region + b_disk) {
+		len = ALIGN((b_region + b_disk) * 2, 128);
+		if ((ret = __os_realloc(logc->dbenv, len, &logc->bp)) != 0)
+			return (ret);
+		logc->bp_size = (u_int32_t)len;
+	}
+
+	/* Copy the region's bytes to the end of the cursor's buffer. */
+	p = (logc->bp + logc->bp_size) - b_region;
+	memcpy(p, dblp->bufp, b_region);
+
+	/* Release the region lock. */
+	if (*rlockp == L_ACQUIRED) {
+		*rlockp = L_NONE;
+		R_UNLOCK(dbenv, &dblp->reginfo);
+	}
+
+	/*
+	 * Read the rest of the information from disk.  Neither short reads
+	 * or EOF are acceptable, the bytes we want had better be there.
+	 */
+	if (b_disk != 0) {
+		p -= b_disk;
+		nr = b_disk;
+		if ((ret = __log_c_io(
+		    logc, lsn->file, lsn->offset, p, &nr, NULL)) != 0)
+			return (ret);
+		if (nr < b_disk)
+			return (__log_c_shortread(logc, 0));
+	}
+
+	/* Copy the header information into the caller's structure. */
+	memcpy(hdr, p, hdr->size);
+
+	*pp = p;
+	return (0);
+}
+
+/*
+ * __log_c_ondisk --
+ *	Read a record off disk.
+ *
+ * lsn is the LSN of the requested record, last_lsn the current logical
+ * end-of-log.  flags is the DB_LOGC->get flag; DB_FIRST/DB_NEXT mean we
+ * are moving forward, anything else is treated as backward motion.  On
+ * success *pp points at the record's HDR inside the cursor buffer, and
+ * *eofp is set when the LSN references a non-existent record.
+ */
+static int
+__log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp)
+	DB_LOGC *logc;
+	DB_LSN *lsn, *last_lsn;
+	int flags, *eofp;
+	HDR *hdr;
+	u_int8_t **pp;
+{
+	DB_ENV *dbenv;
+	size_t len, nr;
+	u_int32_t offset;
+	int ret;
+
+	dbenv = logc->dbenv;
+	*eofp = 0;
+
+	/* First read just the header, so we learn the record's length. */
+	nr = hdr->size;
+	if ((ret =
+	    __log_c_io(logc, lsn->file, lsn->offset, hdr, &nr, eofp)) != 0)
+		return (ret);
+	if (*eofp)
+		return (0);
+
+	/* If we read 0 bytes, assume we've hit EOF. */
+	if (nr == 0) {
+		*eofp = 1;
+		return (0);
+	}
+
+	/* Check the HDR. */
+	if ((ret = __log_c_hdrchk(logc, hdr, eofp)) != 0)
+		return (ret);
+	if (*eofp)
+		return (0);
+
+	/* Otherwise, we should have gotten the bytes we wanted. */
+	if (nr < hdr->size)
+		return (__log_c_shortread(logc, 0));
+
+	/*
+	 * Regardless of how we return, the previous contents of the cursor's
+	 * buffer are useless -- trash it.
+	 */
+	ZERO_LSN(logc->bp_lsn);
+
+	/*
+	 * Otherwise, we now (finally!) know how big the record is. (Maybe
+	 * we should have just stuck the length of the record into the LSN!?)
+	 * Make sure we have enough space.
+	 */
+	if (logc->bp_size <= hdr->len) {
+		len = ALIGN(hdr->len * 2, 128);
+		if ((ret = __os_realloc(dbenv, len, &logc->bp)) != 0)
+			return (ret);
+		logc->bp_size = (u_int32_t)len;
+	}
+
+	/*
+	 * If we're moving forward in the log file, read this record in at the
+	 * beginning of the buffer. Otherwise, read this record in at the end
+	 * of the buffer, making sure we don't try and read before the start
+	 * of the file. (We prefer positioning at the end because transaction
+	 * aborts use DB_SET to move backward through the log and we might get
+	 * lucky.)
+	 *
+	 * Read a buffer's worth, without reading past the logical EOF. The
+	 * last_lsn may be a zero LSN, but that's OK, the test works anyway.
+	 */
+	if (flags == DB_FIRST || flags == DB_NEXT)
+		offset = lsn->offset;
+	else if (lsn->offset + hdr->len < logc->bp_size)
+		offset = 0;
+	else
+		offset = (lsn->offset + hdr->len) - logc->bp_size;
+
+	nr = logc->bp_size;
+	if (lsn->file == last_lsn->file && offset + nr >= last_lsn->offset)
+		nr = last_lsn->offset - offset;
+
+	if ((ret =
+	    __log_c_io(logc, lsn->file, offset, logc->bp, &nr, eofp)) != 0)
+		return (ret);
+
+	/*
+	 * We should have at least gotten the bytes up-to-and-including the
+	 * record we're reading.
+	 */
+	if (nr < (lsn->offset + hdr->len) - offset)
+		return (__log_c_shortread(logc, 1));
+
+	/* Set up the return information. */
+	logc->bp_rlen = (u_int32_t)nr;
+	logc->bp_lsn.file = lsn->file;
+	logc->bp_lsn.offset = offset;
+
+	*pp = logc->bp + (lsn->offset - offset);
+
+	return (0);
+}
+
+/*
+ * __log_c_hdrchk --
+ *
+ * Check for corrupted HDRs before we use them to allocate memory or find
+ * records.
+ *
+ * If the log files were pre-allocated, a zero-filled HDR structure is the
+ * logical file end. However, we can see buffers filled with 0's during
+ * recovery, too (because multiple log buffers were written asynchronously,
+ * and one made it to disk before a different one that logically precedes
+ * it in the log file.
+ *
+ * XXX
+ * I think there's a potential pre-allocation recovery flaw here -- if we
+ * fail to write a buffer at the end of a log file (by scheduling its
+ * write asynchronously, and it never making it to disk), then succeed in
+ * writing a log file block to a subsequent log file, I don't think we will
+ * detect that the buffer of 0's should have marked the end of the log files
+ * during recovery. I think we may need to always write some garbage after
+ * each block write if we pre-allocate log files. (At the moment, we do not
+ * pre-allocate, so this isn't currently an issue.)
+ *
+ * Check for impossibly large records. The malloc should fail later, but we
+ * have customers that run mallocs that treat all allocation failures as fatal
+ * errors.
+ *
+ * Note that none of this is necessarily something awful happening. We let
+ * the application hand us any LSN they want, and it could be a pointer into
+ * the middle of a log record, there's no way to tell.
+ *
+ * Returns 0 with *eofp set when the HDR is the zero-filled logical EOF,
+ * and EIO when the header is invalid.  eofp may be NULL when the caller
+ * does not need EOF detection.
+ */
+static int
+__log_c_hdrchk(logc, hdr, eofp)
+	DB_LOGC *logc;
+	HDR *hdr;
+	int *eofp;
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	dbenv = logc->dbenv;
+
+	/*
+	 * Sanity check the log record's size: the total length must exceed
+	 * the header size or the record's data portion would be empty.
+	 */
+	if (hdr->len <= hdr->size)
+		goto err;
+	/*
+	 * If the cursor's max-record value isn't yet set, it means we aren't
+	 * reading these records from a log file and no check is necessary.
+	 */
+	if (logc->bp_maxrec != 0 && hdr->len > logc->bp_maxrec) {
+		/*
+		 * If we fail the check, there's the pathological case that
+		 * we're reading the last file, it's growing, and our initial
+		 * check information was wrong. Get it again, to be sure.
+		 */
+		if ((ret = __log_c_set_maxrec(logc, NULL)) != 0) {
+			__db_err(dbenv, "DB_LOGC->get: %s", db_strerror(ret));
+			return (ret);
+		}
+		if (logc->bp_maxrec != 0 && hdr->len > logc->bp_maxrec)
+			goto err;
+	}
+
+	if (eofp != NULL) {
+		/* An all-zeroes header marks the logical end of the log. */
+		if (hdr->prev == 0 && hdr->chksum[0] == 0 && hdr->len == 0) {
+			*eofp = 1;
+			return (0);
+		}
+		*eofp = 0;
+	}
+	return (0);
+
+err:	if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+		__db_err(dbenv, "DB_LOGC->get: invalid log record header");
+	return (EIO);
+}
+
<code_block>
+/*
+ * __log_c_io --
+ *	Read records from a log file.
+ *
+ * fnum/offset address the read within the log; on entry *nrp is the
+ * number of bytes wanted, on return the number actually read.  When
+ * eofp is non-NULL, a missing log file is reported by setting *eofp
+ * and returning 0 instead of returning an error.
+ */
+static int
+__log_c_io(logc, fnum, offset, p, nrp, eofp)
+	DB_LOGC *logc;
+	u_int32_t fnum, offset;
+	void *p;
+	size_t *nrp;
+	int *eofp;
+{
+	DB_ENV *dbenv;
+	DB_LOG *dblp;
+	int ret;
+	char *np;
+
+	dbenv = logc->dbenv;
+	dblp = dbenv->lg_handle;
+
+	/*
+	 * If we've switched files, discard the current file handle and acquire
+	 * a new one.
+	 */
+	if (F_ISSET(logc->c_fh, DB_FH_VALID) && logc->bp_lsn.file != fnum)
+		if ((ret = __os_closehandle(dbenv, logc->c_fh)) != 0)
+			return (ret);
+	if (!F_ISSET(logc->c_fh, DB_FH_VALID)) {
+		/* __log_name allocates np; we must free it on every path. */
+		if ((ret = __log_name(dblp, fnum,
+		    &np, logc->c_fh, DB_OSO_RDONLY | DB_OSO_SEQ)) != 0) {
+			/*
+			 * If we're allowed to return EOF, assume that's the
+			 * problem, set the EOF status flag and return 0.
+			 */
+			if (eofp != NULL) {
+				*eofp = 1;
+				ret = 0;
+			} else if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+				__db_err(dbenv, "DB_LOGC->get: %s: %s",
+				    np, db_strerror(ret));
+			__os_free(dbenv, np);
+			return (ret);
+		}
+
+		/* Re-bound the maximum record size for the new file. */
+		if ((ret = __log_c_set_maxrec(logc, np)) != 0) {
+			__db_err(dbenv,
+			    "DB_LOGC->get: %s: %s", np, db_strerror(ret));
+			__os_free(dbenv, np);
+			return (ret);
+		}
+		__os_free(dbenv, np);
+	}
+
+	/* Seek to the record's offset. */
+	if ((ret = __os_seek(dbenv,
+	    logc->c_fh, 0, 0, offset, 0, DB_OS_SEEK_SET)) != 0) {
+		if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+			__db_err(dbenv,
+			    "DB_LOGC->get: seek: %s", db_strerror(ret));
+		return (ret);
+	}
+
+	/* Read the data. */
+	if ((ret = __os_read(dbenv, logc->c_fh, p, *nrp, nrp)) != 0) {
+		if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+			__db_err(dbenv,
+			    "DB_LOGC->get: read: %s", db_strerror(ret));
+		return (ret);
+	}
+
+	return (0);
+}
</code_block>
+
+/*
+ * __log_c_shortread --
+ *	Read was short -- report it with a consistent message and error.
+ */
+static int
+__log_c_shortread(logc, silent)
+	DB_LOGC *logc;
+	int silent;
+{
+	/*
+	 * Complain unless the caller requested silence AND the cursor is
+	 * configured to suppress error output; always return EIO.
+	 */
+	if (!(silent && F_ISSET(logc, DB_LOG_SILENT_ERR)))
+		__db_err(logc->dbenv, "DB_LOGC->get: short read");
+	return (EIO);
+}
+
+/*
+ * __log_c_set_maxrec --
+ *	Bound the maximum log record size in a log file.
+ *
+ * np is the log file's name, passed on to __os_ioinfo; it is NULL when
+ * re-checking an already-open handle (see __log_c_hdrchk) -- presumably
+ * __os_ioinfo tolerates a NULL name in that case; TODO confirm.
+ */
+static int
+__log_c_set_maxrec(logc, np)
+	DB_LOGC *logc;
+	char *np;
+{
+	DB_ENV *dbenv;
+	DB_LOG *dblp;
+	LOG *lp;
+	u_int32_t mbytes, bytes;
+	int ret;
+
+	dbenv = logc->dbenv;
+	dblp = dbenv->lg_handle;
+
+	/*
+	 * We don't want to try and allocate huge chunks of memory because
+	 * applications with error-checking malloc's often consider that a
+	 * hard failure. If we're about to look at a corrupted record with
+	 * a bizarre size, we need to know before trying to allocate space
+	 * to hold it. We could read the persistent data at the beginning
+	 * of the file but that's hard -- we may have to decrypt it, checksum
+	 * it and so on. Stat the file instead.
+	 */
+	if ((ret =
+	    __os_ioinfo(dbenv, np, logc->c_fh, &mbytes, &bytes, NULL)) != 0)
+		return (ret);
+
+	logc->bp_maxrec = mbytes * MEGABYTE + bytes;
+
+	/*
+	 * If reading from the log file currently being written, we could get
+	 * an incorrect size, that is, if the cursor was opened on the file
+	 * when it had only a few hundred bytes, and then the cursor used to
+	 * move forward in the file, after more log records were written, the
+	 * original stat value would be wrong. Use the maximum of the current
+	 * log file size and the size of the buffer -- that should represent
+	 * the max of any log record currently in the file.
+	 *
+	 * The log buffer size is set when the environment is opened and never
+	 * changed, we don't need a lock on it.
+	 */
+	lp = dblp->reginfo.primary;
+	logc->bp_maxrec += lp->buffer_size;
+
+	return (0);
+}
diff --git a/storage/bdb/log/log_method.c b/storage/bdb/log/log_method.c
new file mode 100644
index 00000000000..42adaf11c6c
--- /dev/null
+++ b/storage/bdb/log/log_method.c
@@ -0,0 +1,188 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_method.c,v 11.32 2002/05/30 22:16:47 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __log_set_lg_bsize __P((DB_ENV *, u_int32_t));
+static int __log_set_lg_dir __P((DB_ENV *, const char *));
+static int __log_set_lg_max __P((DB_ENV *, u_int32_t));
+static int __log_set_lg_regionmax __P((DB_ENV *, u_int32_t));
+
+/*
+ * __log_dbenv_create --
+ *	Log specific initialization of the DB_ENV structure.
+ *
+ * Installs default log sizes and the log method table; under HAVE_RPC
+ * a client environment gets the RPC stubs instead of the local
+ * implementations.
+ *
+ * PUBLIC: void __log_dbenv_create __P((DB_ENV *));
+ */
+void
+__log_dbenv_create(dbenv)
+	DB_ENV *dbenv;
+{
+	/*
+	 * !!!
+	 * Our caller has not yet had the opportunity to reset the panic
+	 * state or turn off mutex locking, and so we can neither check
+	 * the panic state or acquire a mutex in the DB_ENV create path.
+	 */
+
+	dbenv->lg_bsize = LG_BSIZE_DEFAULT;
+	dbenv->lg_regionmax = LG_BASE_REGION_SIZE;
+
+#ifdef HAVE_RPC
+	/*
+	 * If we have a client, overwrite what we just setup to
+	 * point to client functions.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+		dbenv->set_lg_bsize = __dbcl_set_lg_bsize;
+		dbenv->set_lg_dir = __dbcl_set_lg_dir;
+		dbenv->set_lg_max = __dbcl_set_lg_max;
+		dbenv->set_lg_regionmax = __dbcl_set_lg_regionmax;
+		dbenv->log_archive = __dbcl_log_archive;
+		dbenv->log_cursor = __dbcl_log_cursor;
+		dbenv->log_file = __dbcl_log_file;
+		dbenv->log_flush = __dbcl_log_flush;
+		dbenv->log_put = __dbcl_log_put;
+		dbenv->log_stat = __dbcl_log_stat;
+	} else
+#endif
+	{
+		dbenv->set_lg_bsize = __log_set_lg_bsize;
+		dbenv->set_lg_dir = __log_set_lg_dir;
+		dbenv->set_lg_max = __log_set_lg_max;
+		dbenv->set_lg_regionmax = __log_set_lg_regionmax;
+		dbenv->log_archive = __log_archive;
+		dbenv->log_cursor = __log_cursor;
+		dbenv->log_file = __log_file;
+		dbenv->log_flush = __log_flush;
+		dbenv->log_put = __log_put;
+		dbenv->log_stat = __log_stat;
+	}
+}
+
+/*
+ * __log_set_lg_bsize --
+ *	Set the log buffer size.
+ */
+static int
+__log_set_lg_bsize(dbenv, lg_bsize)
+	DB_ENV *dbenv;
+	u_int32_t lg_bsize;
+{
+	u_int32_t max_size;
+
+	ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lg_bsize");
+
+	/* A zero argument selects the default buffer size. */
+	if (lg_bsize == 0)
+		lg_bsize = LG_BSIZE_DEFAULT;
+
+	/*
+	 * Sanity check: the buffer must fit at least four times into the
+	 * configured (or default) log file size.
+	 */
+	if (dbenv->lg_size == 0)
+		max_size = LG_MAX_DEFAULT;
+	else
+		max_size = dbenv->lg_size;
+	if (lg_bsize > max_size / 4) {
+		__db_err(dbenv, "log buffer size must be <= log file size / 4");
+		return (EINVAL);
+	}
+
+	dbenv->lg_bsize = lg_bsize;
+	return (0);
+}
+
+/*
+ * __log_set_lg_max --
+ *	Set the maximum log file size.
+ */
+static int
+__log_set_lg_max(dbenv, lg_max)
+	DB_ENV *dbenv;
+	u_int32_t lg_max;
+{
+	LOG *region;
+	u_int32_t bsize;
+
+	/* A zero argument selects the default file size. */
+	if (lg_max == 0)
+		lg_max = LG_MAX_DEFAULT;
+
+	/*
+	 * After the environment is open, the size lives in the log region;
+	 * before that, it is staged in the DB_ENV handle.
+	 */
+	if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+		if (!LOGGING_ON(dbenv))
+			return (__db_env_config(
+			    dbenv, "set_lg_max", DB_INIT_LOG));
+		region = ((DB_LOG *)dbenv->lg_handle)->reginfo.primary;
+		bsize = region->buffer_size;
+	} else {
+		region = NULL;
+		bsize = dbenv->lg_bsize;
+	}
+
+	/* Let's not be silly: the file must hold four log buffers. */
+	if (lg_max < bsize * 4) {
+		__db_err(dbenv, "log file size must be >= log buffer size * 4");
+		return (EINVAL);
+	}
+
+	if (region != NULL)
+		region->log_nsize = lg_max;
+	else
+		dbenv->lg_size = lg_max;
+
+	return (0);
+}
+
+/*
+ * __log_set_lg_regionmax --
+ *	Set the region size.
+ *
+ * Rejects a non-zero size smaller than the base region size; zero
+ * leaves the default in place.
+ */
+static int
+__log_set_lg_regionmax(dbenv, lg_regionmax)
+	DB_ENV *dbenv;
+	u_int32_t lg_regionmax;
+{
+	ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lg_regionmax");
+
+	/* Let's not be silly. */
+	if (lg_regionmax != 0 && lg_regionmax < LG_BASE_REGION_SIZE) {
+		/*
+		 * Bug fix: the message previously said "log file size",
+		 * but this check constrains the log region size.
+		 */
+		__db_err(dbenv,
+		    "log region size must be >= %d", LG_BASE_REGION_SIZE);
+		return (EINVAL);
+	}
+
+	dbenv->lg_regionmax = lg_regionmax;
+	return (0);
+}
+
+/*
+ * __log_set_lg_dir --
+ *	Set the log file directory.
+ */
+static int
+__log_set_lg_dir(dbenv, dir)
+	DB_ENV *dbenv;
+	const char *dir;
+{
+	/* Discard any previously configured directory string. */
+	if (dbenv->db_log_dir != NULL)
+		__os_free(dbenv, dbenv->db_log_dir);
+
+	/* Keep a private copy of the caller's path. */
+	return (__os_strdup(dbenv, dir, &dbenv->db_log_dir));
+}
diff --git a/storage/bdb/log/log_put.c b/storage/bdb/log/log_put.c
new file mode 100644
index 00000000000..64276fa8315
--- /dev/null
+++ b/storage/bdb/log/log_put.c
@@ -0,0 +1,1250 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: log_put.c,v 11.112 2002/09/10 02:39:26 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __log_encrypt_record __P((DB_ENV *, DBT *, HDR *, u_int32_t));
+static int __log_fill __P((DB_LOG *, DB_LSN *, void *, u_int32_t));
+static int __log_flush_commit __P((DB_ENV *, const DB_LSN *, u_int32_t));
+static int __log_flush_int __P((DB_LOG *, const DB_LSN *, int));
+static int __log_newfh __P((DB_LOG *));
+static int __log_put_next __P((DB_ENV *,
+ DB_LSN *, const DBT *, HDR *, DB_LSN *));
+static int __log_putr __P((DB_LOG *,
+ DB_LSN *, const DBT *, u_int32_t, HDR *));
+static int __log_write __P((DB_LOG *, void *, u_int32_t));
+
+/*
+ * __log_put --
+ *	Write a log record. This is the public interface, DB_ENV->log_put.
+ *
+ * On success *lsnp is set to the LSN of the record written. flags
+ * carries an optional operation (DB_COMMIT) plus the DB_FLUSH,
+ * DB_NOCOPY, DB_PERMANENT and DB_WRNOSYNC bit-flags.
+ *
+ * PUBLIC: int __log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+ */
+int
+__log_put(dbenv, lsnp, udbt, flags)
+	DB_ENV *dbenv;
+	DB_LSN *lsnp;
+	const DBT *udbt;
+	u_int32_t flags;
+{
+	DB_CIPHER *db_cipher;
+	DBT *dbt, t;
+	DB_LOG *dblp;
+	DB_LSN lsn, old_lsn;
+	HDR hdr;
+	LOG *lp;
+	u_int32_t do_flush, op, writeonly;
+	int lock_held, need_free, ret;
+	u_int8_t *key;
+
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv,
+	    dbenv->lg_handle, "DB_ENV->log_put", DB_INIT_LOG);
+
+	/* Validate arguments. */
+	op = DB_OPFLAGS_MASK & flags;
+	if (op != 0 && op != DB_COMMIT)
+		return (__db_ferr(dbenv, "DB_ENV->log_put", 0));
+
+	/* Check for allowed bit-flags. */
+	if (LF_ISSET(~(DB_OPFLAGS_MASK |
+	    DB_FLUSH | DB_NOCOPY | DB_PERMANENT | DB_WRNOSYNC)))
+		return (__db_ferr(dbenv, "DB_ENV->log_put", 0));
+
+	/* DB_WRNOSYNC and DB_FLUSH are mutually exclusive. */
+	if (LF_ISSET(DB_WRNOSYNC) && LF_ISSET(DB_FLUSH))
+		return (__db_ferr(dbenv, "DB_ENV->log_put", 1));
+
+	/* Replication clients should never write log records. */
+	if (F_ISSET(dbenv, DB_ENV_REP_CLIENT) ||
+	    F_ISSET(dbenv, DB_ENV_REP_LOGSONLY)) {
+		__db_err(dbenv,
+		    "DB_ENV->log_put is illegal on replication clients");
+		return (EINVAL);
+	}
+
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+	db_cipher = dbenv->crypto_handle;
+	/* Work on a local DBT so we may substitute our own data buffer. */
+	dbt = &t;
+	t = *udbt;
+	lock_held = need_free = 0;
+	do_flush = LF_ISSET(DB_FLUSH);
+	writeonly = LF_ISSET(DB_WRNOSYNC);
+
+	/*
+	 * If we are coming from the logging code, we use an internal
+	 * flag, DB_NOCOPY, because we know we can overwrite/encrypt
+	 * the log record in place. Otherwise, if a user called log_put
+	 * then we must copy it to new memory so that we know we can
+	 * write it.
+	 *
+	 * We also must copy it to new memory if we are a replication
+	 * master so that we retain an unencrypted copy of the log
+	 * record to send to clients.
+	 */
+	if (!LF_ISSET(DB_NOCOPY) || F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+		if (CRYPTO_ON(dbenv))
+			t.size += db_cipher->adj_size(udbt->size);
+		if ((ret = __os_calloc(dbenv, 1, t.size, &t.data)) != 0)
+			goto err;
+		need_free = 1;
+		memcpy(t.data, udbt->data, udbt->size);
+	}
+	if ((ret = __log_encrypt_record(dbenv, dbt, &hdr, udbt->size)) != 0)
+		goto err;
+	if (CRYPTO_ON(dbenv))
+		key = db_cipher->mac_key;
+	else
+		key = NULL;
+	/* Otherwise, we actually have a record to put. Put it. */
+
+	/* Before we grab the region lock, calculate the record's checksum. */
+	__db_chksum(dbt->data, dbt->size, key, hdr.chksum);
+
+	R_LOCK(dbenv, &dblp->reginfo);
+	lock_held = 1;
+
+	ZERO_LSN(old_lsn);
+	if ((ret = __log_put_next(dbenv, &lsn, dbt, &hdr, &old_lsn)) != 0)
+		goto err;
+
+	if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+		/*
+		 * Replication masters need to drop the lock to send
+		 * messages, but we want to drop and reacquire it a minimal
+		 * number of times.
+		 */
+		R_UNLOCK(dbenv, &dblp->reginfo);
+		lock_held = 0;
+
+		/*
+		 * If we changed files and we're in a replicated
+		 * environment, we need to inform our clients now that
+		 * we've dropped the region lock.
+		 *
+		 * Note that a failed NEWFILE send is a dropped message
+		 * that our client can handle, so we can ignore it. It's
+		 * possible that the record we already put is a commit, so
+		 * we don't just want to return failure.
+		 */
+		if (!IS_ZERO_LSN(old_lsn))
+			(void)__rep_send_message(dbenv,
+			    DB_EID_BROADCAST, REP_NEWFILE, &old_lsn, NULL, 0);
+
+		/*
+		 * Then send the log record itself on to our clients.
+		 *
+		 * If the send fails and we're a commit or checkpoint,
+		 * there's nothing we can do; the record's in the log.
+		 * Flush it, even if we're running with TXN_NOSYNC, on the
+		 * grounds that it should be in durable form somewhere.
+		 */
+		/*
+		 * !!!
+		 * In the crypto case, we MUST send the udbt, not the
+		 * now-encrypted dbt. Clients have no way to decrypt
+		 * without the header.
+		 */
+		if ((__rep_send_message(dbenv,
+		    DB_EID_BROADCAST, REP_LOG, &lsn, udbt, flags) != 0) &&
+		    LF_ISSET(DB_PERMANENT))
+			do_flush |= DB_FLUSH;
+	}
+
+	/*
+	 * If needed, do a flush. Note that failures at this point
+	 * are only permissible if we know we haven't written a commit
+	 * record; __log_flush_commit is responsible for enforcing this.
+	 *
+	 * If a flush is not needed, see if WRITE_NOSYNC was set and we
+	 * need to write out the log buffer.
+	 */
+	if (do_flush || writeonly) {
+		if (!lock_held) {
+			R_LOCK(dbenv, &dblp->reginfo);
+			lock_held = 1;
+		}
+		if (do_flush)
+			ret = __log_flush_commit(dbenv, &lsn, flags);
+		else if (lp->b_off != 0)
+			/*
+			 * writeonly: if there's anything in the current
+			 * log buffer, we need to write it out.
+			 */
+			if ((ret = __log_write(dblp,
+			    dblp->bufp, (u_int32_t)lp->b_off)) == 0)
+				lp->b_off = 0;
+	}
+
+err:	if (lock_held)
+		R_UNLOCK(dbenv, &dblp->reginfo);
+	if (need_free)
+		__os_free(dbenv, dbt->data);
+
+	if (ret == 0)
+		*lsnp = lsn;
+
+	return (ret);
+}
+
+/*
+ * __log_txn_lsn --
+ *	Return the LSN of the last record in the log and, when mbytesp is
+ *	non-NULL, the megabytes/bytes written to the log since the last
+ *	checkpoint (resetting those counters).
+ *
+ * PUBLIC: void __log_txn_lsn
+ * PUBLIC:     __P((DB_ENV *, DB_LSN *, u_int32_t *, u_int32_t *));
+ */
+void
+__log_txn_lsn(dbenv, lsnp, mbytesp, bytesp)
+	DB_ENV *dbenv;
+	DB_LSN *lsnp;
+	u_int32_t *mbytesp, *bytesp;
+{
+	DB_LOG *dblp;
+	LOG *lp;
+
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+
+	R_LOCK(dbenv, &dblp->reginfo);
+
+	/*
+	 * We are trying to get the LSN of the last entry in the log. We use
+	 * this in two places: 1) DB_ENV->txn_checkpoint uses it as a first
+	 * value when trying to compute an LSN such that all transactions begun
+	 * before it are complete. 2) DB_ENV->txn_begin uses it as the
+	 * begin_lsn.
+	 *
+	 * Typically, it's easy to get the last written LSN, you simply look
+	 * at the current log pointer and back up the number of bytes of the
+	 * last log record. However, if the last thing we did was write the
+	 * log header of a new log file, then, this doesn't work, so we return
+	 * the first log record that will be written in this new file.
+	 */
+	*lsnp = lp->lsn;
+	if (lp->lsn.offset > lp->len)
+		lsnp->offset -= lp->len;
+
+	/*
+	 * Since we're holding the log region lock, return the bytes put into
+	 * the log since the last checkpoint, transaction checkpoint needs it.
+	 *
+	 * We add the current buffer offset so as to count bytes that have not
+	 * yet been written, but are sitting in the log buffer.
+	 */
+	if (mbytesp != NULL) {
+		*mbytesp = lp->stat.st_wc_mbytes;
+		*bytesp = (u_int32_t)(lp->stat.st_wc_bytes + lp->b_off);
+
+		lp->stat.st_wc_mbytes = lp->stat.st_wc_bytes = 0;
+	}
+
+	R_UNLOCK(dbenv, &dblp->reginfo);
+}
+
+/*
+ * __log_put_next --
+ *	Put the given record as the next in the log, wherever that may
+ *	turn out to be.
+ *
+ * Called with the log region lock held. *lsn is set to the LSN at which
+ * the record was placed. *old_lsnp is set to the pre-switch LSN when
+ * this put caused a new log file to be created, so a replication master
+ * can later announce the switch to its clients.
+ */
+static int
+__log_put_next(dbenv, lsn, dbt, hdr, old_lsnp)
+	DB_ENV *dbenv;
+	DB_LSN *lsn;
+	const DBT *dbt;
+	HDR *hdr;
+	DB_LSN *old_lsnp;
+{
+	DB_LOG *dblp;
+	DB_LSN old_lsn;
+	LOG *lp;
+	int newfile, ret;
+
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+
+	/*
+	 * Save a copy of lp->lsn before we might decide to switch log
+	 * files and change it. If we do switch log files, and we're
+	 * doing replication, we'll need to tell our clients about the
+	 * switch, and they need to receive a NEWFILE message
+	 * with this "would-be" LSN in order to know they're not
+	 * missing any log records.
+	 */
+	old_lsn = lp->lsn;
+	newfile = 0;
+
+	/*
+	 * If this information won't fit in the file, or if we're a
+	 * replication client environment and have been told to do so,
+	 * swap files.
+	 */
+	if (lp->lsn.offset == 0 ||
+	    lp->lsn.offset + hdr->size + dbt->size > lp->log_size) {
+		/* Even an empty file could never hold this record. */
+		if (hdr->size + sizeof(LOGP) + dbt->size > lp->log_size) {
+			__db_err(dbenv,
+		    "DB_ENV->log_put: record larger than maximum file size");
+			return (EINVAL);
+		}
+
+		if ((ret = __log_newfile(dblp, NULL)) != 0)
+			return (ret);
+
+		/*
+		 * Flag that we switched files, in case we're a master
+		 * and need to send this information to our clients.
+		 * We postpone doing the actual send until we can
+		 * safely release the log region lock and are doing so
+		 * anyway.
+		 */
+		newfile = 1;
+
+		/* Run the application's log-file-changed callback, if any. */
+		if (dbenv->db_noticecall != NULL)
+			dbenv->db_noticecall(dbenv, DB_NOTICE_LOGFILE_CHANGED);
+	}
+
+	/*
+	 * The offset into the log file at this point is the LSN where
+	 * we're about to put this record, and is the LSN the caller wants.
+	 */
+	*lsn = lp->lsn;
+
+	/* If we switched log files, let our caller know where. */
+	if (newfile)
+		*old_lsnp = old_lsn;
+
+	/* Actually put the record. */
+	return (__log_putr(dblp, lsn, dbt, lp->lsn.offset - lp->len, hdr));
+}
+
+/*
+ * __log_flush_commit --
+ *	Flush a record for which the DB_FLUSH flag to log_put has been set.
+ *
+ * Called with the log region lock held; lsnp is the LSN of the record
+ * just written and flags carries the log_put operation (DB_COMMIT).
+ */
+static int
+__log_flush_commit(dbenv, lsnp, flags)
+	DB_ENV *dbenv;
+	const DB_LSN *lsnp;
+	u_int32_t flags;
+{
+	DB_LOG *dblp;
+	DB_LSN flush_lsn;
+	LOG *lp;
+	int ret;
+	u_int32_t op;
+
+	dblp = dbenv->lg_handle;
+	lp = dblp->reginfo.primary;
+	flush_lsn = *lsnp;
+	op = DB_OPFLAGS_MASK & flags;
+
+	if ((ret = __log_flush_int(dblp, &flush_lsn, 1)) == 0)
+		return (0);
+
+	/*
+	 * If a flush supporting a transaction commit fails, we must abort the
+	 * transaction. (If we aren't doing a commit, return the failure;
+	 * if the commit we care about made it to disk successfully, we just
+	 * ignore the failure, because there's no way to undo the commit.)
+	 */
+	if (op != DB_COMMIT)
+		return (ret);
+
+	/* The commit record is no longer in the buffer -- it's durable. */
+	if (flush_lsn.file != lp->lsn.file || flush_lsn.offset < lp->w_off)
+		return (0);
+
+	/*
+	 * Else, make sure that the commit record does not get out after we
+	 * abort the transaction. Do this by overwriting the commit record
+	 * in the buffer. (Note that other commits in this buffer will
+	 * wait until a successful write happens, we do not wake them.) We
+	 * point at the right part of the buffer and write an abort record
+	 * over the commit. We must then try and flush the buffer again,
+	 * since the interesting part of the buffer may have actually made
+	 * it out to disk before there was a failure, we can't know for sure.
+	 */
+	if (__txn_force_abort(dbenv,
+	    dblp->bufp + flush_lsn.offset - lp->w_off) == 0)
+		(void)__log_flush_int(dblp, &flush_lsn, 0);
+
+	return (ret);
+}
+
+/*
+ * __log_newfile --
+ *	Initialize and switch to a new log file. (Note that this is
+ * called both when no log yet exists and when we fill a log file.)
+ *
+ * Called with the log region lock held. When lsnp is non-NULL it is
+ * set to the LSN following the new file's persistent-header record.
+ *
+ * PUBLIC: int __log_newfile __P((DB_LOG *, DB_LSN *));
+ */
+int
+__log_newfile(dblp, lsnp)
+	DB_LOG *dblp;
+	DB_LSN *lsnp;
+{
+	DB_CIPHER *db_cipher;
+	DB_ENV *dbenv;
+	DB_LSN lsn;
+	DBT t;
+	HDR hdr;
+	LOG *lp;
+	int need_free, ret;
+	u_int32_t lastoff;
+	size_t tsize;
+	u_int8_t *tmp;
+
+	dbenv = dblp->dbenv;
+	lp = dblp->reginfo.primary;
+
+	/* If we're not at the beginning of a file already, start a new one. */
+	if (lp->lsn.offset != 0) {
+		/*
+		 * Flush the log so this file is out and can be closed. We
+		 * cannot release the region lock here because we need to
+		 * protect the end of the file while we switch. In
+		 * particular, a thread with a smaller record than ours
+		 * could detect that there is space in the log. Even
+		 * blocking that event by declaring the file full would
+		 * require all threads to wait here so that the lsn.file
+		 * can be moved ahead after the flush completes. This
+		 * probably can be changed if we had an lsn for the
+		 * previous file and one for the current, but it does not
+		 * seem like this would get much more throughput, if any.
+		 */
+		if ((ret = __log_flush_int(dblp, NULL, 0)) != 0)
+			return (ret);
+
+		DB_ASSERT(lp->b_off == 0);
+		/*
+		 * Save the last known offset from the previous file, we'll
+		 * need it to initialize the persistent header information.
+		 */
+		lastoff = lp->lsn.offset;
+
+		/* Point the current LSN to the new file. */
+		++lp->lsn.file;
+		lp->lsn.offset = 0;
+
+		/* Reset the file write offset. */
+		lp->w_off = 0;
+	} else
+		lastoff = 0;
+
+	/*
+	 * Insert persistent information as the first record in every file.
+	 * Note that the previous length is wrong for the very first record
+	 * of the log, but that's okay, we check for it during retrieval.
+	 */
+	DB_ASSERT(lp->b_off == 0);
+
+	memset(&t, 0, sizeof(t));
+	memset(&hdr, 0, sizeof(HDR));
+
+	need_free = 0;
+	tsize = sizeof(LOGP);
+	db_cipher = dbenv->crypto_handle;
+	if (CRYPTO_ON(dbenv))
+		tsize += db_cipher->adj_size(tsize);
+	if ((ret = __os_calloc(dbenv, 1, tsize, &tmp)) != 0)
+		return (ret);
+	lp->persist.log_size = lp->log_size = lp->log_nsize;
+	memcpy(tmp, &lp->persist, sizeof(LOGP));
+	t.data = tmp;
+	t.size = (u_int32_t)tsize;
+	need_free = 1;
+
+	if ((ret =
+	    __log_encrypt_record(dbenv, &t, &hdr, (u_int32_t)tsize)) != 0)
+		goto err;
+	__db_chksum(t.data, t.size,
+	    (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL, hdr.chksum);
+	lsn = lp->lsn;
+	if ((ret = __log_putr(dblp, &lsn,
+	    &t, lastoff == 0 ? 0 : lastoff - lp->len, &hdr)) != 0)
+		goto err;
+
+	/* Update the LSN information returned to the caller. */
+	if (lsnp != NULL)
+		*lsnp = lp->lsn;
+
+err:
+	if (need_free)
+		__os_free(dbenv, tmp);
+	return (ret);
+}
+
+/*
+ * __log_putr --
+ *	Actually put a record into the log.
+ *
+ * Called with the log region lock held. prev is the offset of the
+ * previous record (used for the header's back-pointer); h may be NULL,
+ * in which case a local header is built here. On failure, the log
+ * buffer and write offsets are restored to their pre-call state.
+ */
+static int
+__log_putr(dblp, lsn, dbt, prev, h)
+	DB_LOG *dblp;
+	DB_LSN *lsn;
+	const DBT *dbt;
+	u_int32_t prev;
+	HDR *h;
+{
+	DB_CIPHER *db_cipher;
+	DB_ENV *dbenv;
+	DB_LSN f_lsn;
+	LOG *lp;
+	HDR tmp, *hdr;
+	int ret, t_ret;
+	size_t b_off, nr;
+	u_int32_t w_off;
+
+	dbenv = dblp->dbenv;
+	lp = dblp->reginfo.primary;
+
+	/*
+	 * If we weren't given a header, use a local one.
+	 */
+	db_cipher = dbenv->crypto_handle;
+	if (h == NULL) {
+		hdr = &tmp;
+		memset(hdr, 0, sizeof(HDR));
+		if (CRYPTO_ON(dbenv))
+			hdr->size = HDR_CRYPTO_SZ;
+		else
+			hdr->size = HDR_NORMAL_SZ;
+	} else
+		hdr = h;
+
+	/* Save our position in case we fail. */
+	b_off = lp->b_off;
+	w_off = lp->w_off;
+	f_lsn = lp->f_lsn;
+
+	/*
+	 * Initialize the header. If we just switched files, lsn.offset will
+	 * be 0, and what we really want is the offset of the previous record
+	 * in the previous file. Fortunately, prev holds the value we want.
+	 */
+	hdr->prev = prev;
+	hdr->len = (u_int32_t)hdr->size + dbt->size;
+
+	/*
+	 * If we were passed in a nonzero checksum, our caller calculated
+	 * the checksum before acquiring the log mutex, as an optimization.
+	 *
+	 * If our caller calculated a real checksum of 0, we'll needlessly
+	 * recalculate it. C'est la vie; there's no out-of-bounds value
+	 * here.
+	 */
+	if (hdr->chksum[0] == 0)
+		__db_chksum(dbt->data, dbt->size,
+		    (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL,
+		    hdr->chksum);
+
+	/* Write the header, then the record data, into the log buffer. */
+	if ((ret = __log_fill(dblp, lsn, hdr, (u_int32_t)hdr->size)) != 0)
+		goto err;
+
+	if ((ret = __log_fill(dblp, lsn, dbt->data, dbt->size)) != 0)
+		goto err;
+
+	lp->len = (u_int32_t)(hdr->size + dbt->size);
+	lp->lsn.offset += (u_int32_t)(hdr->size + dbt->size);
+	return (0);
+err:
+	/*
+	 * If we wrote more than one buffer before failing, get the
+	 * first one back. The extra buffers will fail the checksums
+	 * and be ignored.
+	 */
+	if (w_off + lp->buffer_size < lp->w_off) {
+		if ((t_ret =
+		    __os_seek(dbenv,
+		    &dblp->lfh, 0, 0, w_off, 0, DB_OS_SEEK_SET)) != 0 ||
+		    (t_ret = __os_read(dbenv, &dblp->lfh, dblp->bufp,
+		    b_off, &nr)) != 0)
+			return (__db_panic(dbenv, t_ret));
+		if (nr != b_off) {
+			__db_err(dbenv, "Short read while restoring log");
+			return (__db_panic(dbenv, EIO));
+		}
+	}
+
+	/* Reset to where we started. */
+	lp->w_off = w_off;
+	lp->b_off = b_off;
+	lp->f_lsn = f_lsn;
+
+	return (ret);
+}
+
+/*
+ * __log_flush --
+ *	Write all records less than or equal to the specified LSN.
+ *
+ * Public interface (DB_ENV->log_flush). A NULL lsn flushes the entire
+ * log; the real work happens in __log_flush_int under the region lock.
+ *
+ * PUBLIC: int __log_flush __P((DB_ENV *, const DB_LSN *));
+ */
+int
+__log_flush(dbenv, lsn)
+	DB_ENV *dbenv;
+	const DB_LSN *lsn;
+{
+	DB_LOG *dblp;
+	int ret;
+
+	PANIC_CHECK(dbenv);
+	ENV_REQUIRES_CONFIG(dbenv,
+	    dbenv->lg_handle, "DB_ENV->log_flush", DB_INIT_LOG);
+
+	dblp = dbenv->lg_handle;
+	R_LOCK(dbenv, &dblp->reginfo);
+	ret = __log_flush_int(dblp, lsn, 1);
+	R_UNLOCK(dbenv, &dblp->reginfo);
+	return (ret);
+}
+
+/*
+ * __log_flush_int --
+ * Write all records less than or equal to the specified LSN; internal
+ * version.
+ */
+static int
+__log_flush_int(dblp, lsnp, release)
+ DB_LOG *dblp;
+ const DB_LSN *lsnp;
+ int release;
+{
+ DB_ENV *dbenv;
+ DB_LSN flush_lsn, f_lsn;
+ DB_MUTEX *flush_mutexp;
+ LOG *lp;
+ int current, do_flush, first, ret;
+ size_t b_off;
+ struct __db_commit *commit;
+ u_int32_t ncommit, w_off;
+
+ ret = 0;
+ ncommit = 0;
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+ flush_mutexp = R_ADDR(&dblp->reginfo, lp->flush_mutex_off);
+
+ /*
+ * If no LSN specified, flush the entire log by setting the flush LSN
+ * to the last LSN written in the log. Otherwise, check that the LSN
+ * isn't a non-existent record for the log.
+ */
+ if (lsnp == NULL) {
+ flush_lsn.file = lp->lsn.file;
+ flush_lsn.offset = lp->lsn.offset - lp->len;
+ } else if (lsnp->file > lp->lsn.file ||
+ (lsnp->file == lp->lsn.file &&
+ lsnp->offset > lp->lsn.offset - lp->len)) {
+ __db_err(dbenv,
+ "DB_ENV->log_flush: LSN past current end-of-log");
+ return (EINVAL);
+ } else {
+ /*
+ * See if we need to wait. s_lsn is not locked so some
+ * care is needed. The sync point can only move forward.
+ * If the file we want is in the past we are done.
+ * If the file numbers are the same check the offset.
+ * If this fails check the file numbers again since the
+ * offset might have changed while we were looking.
+ * This all assumes we can read an integer in one
+ * state or the other, not in transition.
+ */
+ if (lp->s_lsn.file > lsnp->file)
+ return (0);
+
+ if (lp->s_lsn.file == lsnp->file &&
+ lp->s_lsn.offset > lsnp->offset)
+ return (0);
+
+ /*
+ * Intentional re-check of the file number, not a stray
+ * duplicate: s_lsn is read unlocked and may advance
+ * (including a file switch) between the two tests above.
+ */
+ if (lp->s_lsn.file > lsnp->file)
+ return (0);
+
+ flush_lsn = *lsnp;
+ }
+
+ /*
+ * If a flush is in progress and we're allowed to do so, drop
+ * the region lock and block waiting for the next flush.
+ */
+ if (release && lp->in_flush != 0) {
+ /* Reuse a free commit record, or allocate a fresh one. */
+ if ((commit = SH_TAILQ_FIRST(
+ &lp->free_commits, __db_commit)) == NULL) {
+ if ((ret =
+ __db_shalloc(dblp->reginfo.addr,
+ sizeof(struct __db_commit),
+ MUTEX_ALIGN, &commit)) != 0)
+ goto flush;
+ memset(commit, 0, sizeof(*commit));
+ if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo,
+ &commit->mutex, MUTEX_SELF_BLOCK |
+ MUTEX_NO_RLOCK)) != 0) {
+ __db_shalloc_free(dblp->reginfo.addr, commit);
+ return (ret);
+ }
+ /* Self-blocking mutex: first lock arms the wait below. */
+ MUTEX_LOCK(dbenv, &commit->mutex);
+ } else
+ SH_TAILQ_REMOVE(
+ &lp->free_commits, commit, links, __db_commit);
+
+ lp->ncommit++;
+
+ /*
+ * Flushes may be requested out of LSN order; be
+ * sure we only move lp->t_lsn forward.
+ */
+ if (log_compare(&lp->t_lsn, &flush_lsn) < 0)
+ lp->t_lsn = flush_lsn;
+
+ commit->lsn = flush_lsn;
+ SH_TAILQ_INSERT_HEAD(
+ &lp->commits, commit, links, __db_commit);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ /* Wait here for the in-progress flush to finish. */
+ MUTEX_LOCK(dbenv, &commit->mutex);
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ lp->ncommit--;
+ /*
+ * Grab the flag before freeing the struct to see if
+ * we need to flush the log to commit. If so,
+ * use the maximal lsn for any committing thread.
+ */
+ do_flush = F_ISSET(commit, DB_COMMIT_FLUSH);
+ F_CLR(commit, DB_COMMIT_FLUSH);
+ SH_TAILQ_INSERT_HEAD(
+ &lp->free_commits, commit, links, __db_commit);
+ if (do_flush) {
+ /* We were elected to do the flush; take over. */
+ lp->in_flush--;
+ flush_lsn = lp->t_lsn;
+ } else
+ return (0);
+ }
+
+ /*
+ * Protect flushing with its own mutex so we can release
+ * the region lock except during file switches.
+ */
+flush: MUTEX_LOCK(dbenv, flush_mutexp);
+
+ /*
+ * If the LSN is less than or equal to the last-sync'd LSN, we're done.
+ * Note, the last-sync LSN saved in s_lsn is the LSN of the first byte
+ * after the byte we absolutely know was written to disk, so the test
+ * is <, not <=.
+ */
+ if (flush_lsn.file < lp->s_lsn.file ||
+ (flush_lsn.file == lp->s_lsn.file &&
+ flush_lsn.offset < lp->s_lsn.offset)) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ goto done;
+ }
+
+ /*
+ * We may need to write the current buffer. We have to write the
+ * current buffer if the flush LSN is greater than or equal to the
+ * buffer's starting LSN.
+ */
+ current = 0;
+ if (lp->b_off != 0 && log_compare(&flush_lsn, &lp->f_lsn) >= 0) {
+ if ((ret = __log_write(dblp,
+ dblp->bufp, (u_int32_t)lp->b_off)) != 0) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ goto done;
+ }
+
+ lp->b_off = 0;
+ current = 1;
+ }
+
+ /*
+ * It's possible that this thread may never have written to this log
+ * file. Acquire a file descriptor if we don't already have one.
+ * One last check -- if we're not writing anything from the current
+ * buffer, don't bother. We have nothing to write and nothing to
+ * sync.
+ */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) || dblp->lfname != lp->lsn.file)
+ if (!current || (ret = __log_newfh(dblp)) != 0) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ goto done;
+ }
+
+ /*
+ * We are going to flush, release the region.
+ * First get the current state of the buffer since
+ * another write may come in, but we may not flush it.
+ */
+ b_off = lp->b_off;
+ w_off = lp->w_off;
+ f_lsn = lp->f_lsn;
+ lp->in_flush++;
+ if (release)
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /* Sync all writes to disk. */
+ if ((ret = __os_fsync(dbenv, &dblp->lfh)) != 0) {
+ /* A failed fsync leaves the log unrecoverable: panic. */
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ if (release)
+ R_LOCK(dbenv, &dblp->reginfo);
+ ret = __db_panic(dbenv, ret);
+ return (ret);
+ }
+
+ /*
+ * Set the last-synced LSN.
+ * This value must be set to the LSN past the last complete
+ * record that has been flushed. This is at least the first
+ * lsn, f_lsn. If the buffer is empty, b_off == 0, then
+ * we can move up to write point since the first lsn is not
+ * set for the new buffer.
+ */
+ lp->s_lsn = f_lsn;
+ if (b_off == 0)
+ lp->s_lsn.offset = w_off;
+
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ if (release)
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ lp->in_flush--;
+ ++lp->stat.st_scount;
+
+ /*
+ * How many flush calls (usually commits) did this call actually sync?
+ * At least one, if it got here.
+ */
+ ncommit = 1;
+done:
+ /* Wake any waiters whose LSN we covered; elect one to flush next. */
+ if (lp->ncommit != 0) {
+ first = 1;
+ for (commit = SH_TAILQ_FIRST(&lp->commits, __db_commit);
+ commit != NULL;
+ commit = SH_TAILQ_NEXT(commit, links, __db_commit))
+ if (log_compare(&lp->s_lsn, &commit->lsn) > 0) {
+ MUTEX_UNLOCK(dbenv, &commit->mutex);
+ SH_TAILQ_REMOVE(
+ &lp->commits, commit, links, __db_commit);
+ ncommit++;
+ } else if (first == 1) {
+ F_SET(commit, DB_COMMIT_FLUSH);
+ MUTEX_UNLOCK(dbenv, &commit->mutex);
+ SH_TAILQ_REMOVE(
+ &lp->commits, commit, links, __db_commit);
+ /*
+ * This thread will wake and flush.
+ * If another thread commits and flushes
+ * first we will waste a trip through the
+ * mutex.
+ */
+ lp->in_flush++;
+ first = 0;
+ }
+ }
+ /* Track per-flush commit batching statistics. */
+ if (lp->stat.st_maxcommitperflush < ncommit)
+ lp->stat.st_maxcommitperflush = ncommit;
+ if (lp->stat.st_mincommitperflush > ncommit ||
+ lp->stat.st_mincommitperflush == 0)
+ lp->stat.st_mincommitperflush = ncommit;
+
+ return (ret);
+}
+
+/*
+ * __log_fill --
+ * Write information into the log.
+ */
+static int
+__log_fill(dblp, lsn, addr, len)
+ DB_LOG *dblp;
+ DB_LSN *lsn;
+ void *addr;
+ u_int32_t len;
+{
+ LOG *lp;
+ u_int32_t bsize, nrec;
+ size_t nw, remain;
+ int ret;
+
+ lp = dblp->reginfo.primary;
+ bsize = lp->buffer_size;
+
+ while (len > 0) { /* Copy out the data. */
+ /*
+ * If we're beginning a new buffer, note the user LSN to which
+ * the first byte of the buffer belongs. We have to know this
+ * when flushing the buffer so that we know if the in-memory
+ * buffer needs to be flushed.
+ */
+ if (lp->b_off == 0)
+ lp->f_lsn = *lsn;
+
+ /*
+ * If we're on a buffer boundary and the data is big enough,
+ * copy as many records as we can directly from the data.
+ */
+ if (lp->b_off == 0 && len >= bsize) {
+ /* Bypass the buffer: write whole buffers directly. */
+ nrec = len / bsize;
+ if ((ret = __log_write(dblp, addr, nrec * bsize)) != 0)
+ return (ret);
+ addr = (u_int8_t *)addr + nrec * bsize;
+ len -= nrec * bsize;
+ ++lp->stat.st_wcount_fill;
+ continue;
+ }
+
+ /* Figure out how many bytes we can copy this time. */
+ remain = bsize - lp->b_off;
+ nw = remain > len ? len : remain;
+ memcpy(dblp->bufp + lp->b_off, addr, nw);
+ addr = (u_int8_t *)addr + nw;
+ len -= (u_int32_t)nw;
+ lp->b_off += nw;
+
+ /* If we fill the buffer, flush it. */
+ if (lp->b_off == bsize) {
+ if ((ret = __log_write(dblp, dblp->bufp, bsize)) != 0)
+ return (ret);
+ lp->b_off = 0;
+ ++lp->stat.st_wcount_fill;
+ }
+ }
+ return (0);
+}
+
+/*
+ * __log_write --
+ * Write the log buffer to disk.
+ */
+static int
+__log_write(dblp, addr, len)
+ DB_LOG *dblp;
+ void *addr;
+ u_int32_t len;
+{
+ DB_ENV *dbenv;
+ LOG *lp;
+ size_t nw;
+ int ret;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * If we haven't opened the log file yet or the current one
+ * has changed, acquire a new log file.
+ */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) || dblp->lfname != lp->lsn.file)
+ if ((ret = __log_newfh(dblp)) != 0)
+ return (ret);
+
+ /*
+ * Seek to the offset in the file (someone may have written it
+ * since we last did).
+ *
+ * NOTE(review): nw (bytes actually written) is never compared
+ * against len -- presumably __os_write either completes the
+ * write or returns non-zero; confirm before relying on it.
+ */
+ if ((ret =
+ __os_seek(dbenv,
+ &dblp->lfh, 0, 0, lp->w_off, 0, DB_OS_SEEK_SET)) != 0 ||
+ (ret = __os_write(dbenv, &dblp->lfh, addr, len, &nw)) != 0)
+ return (ret);
+
+ /* Reset the buffer offset and update the seek offset. */
+ lp->w_off += len;
+
+ /* Update written statistics. */
+ if ((lp->stat.st_w_bytes += len) >= MEGABYTE) {
+ lp->stat.st_w_bytes -= MEGABYTE;
+ ++lp->stat.st_w_mbytes;
+ }
+ if ((lp->stat.st_wc_bytes += len) >= MEGABYTE) {
+ lp->stat.st_wc_bytes -= MEGABYTE;
+ ++lp->stat.st_wc_mbytes;
+ }
+ ++lp->stat.st_wcount;
+
+ return (0);
+}
+
+/*
+ * __log_file --
+ * Map a DB_LSN to a file name.
+ *
+ * PUBLIC: int __log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
+ */
+int
+__log_file(dbenv, lsn, namep, len)
+ DB_ENV *dbenv;
+ const DB_LSN *lsn;
+ char *namep;
+ size_t len;
+{
+ DB_LOG *dblp;
+ int ret;
+ char *name;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_file", DB_INIT_LOG);
+
+ /* Resolve the file number to a path under the region lock. */
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ ret = __log_name(dblp, lsn->file, &name, NULL, 0);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (ret != 0)
+ return (ret);
+
+ /* Check to make sure there's enough room and copy the name. */
+ if (len < strlen(name) + 1) {
+ *namep = '\0';
+ __db_err(dbenv, "DB_ENV->log_file: name buffer is too short");
+ return (EINVAL);
+ }
+ /* strcpy is safe here: length was verified just above. */
+ (void)strcpy(namep, name);
+ __os_free(dbenv, name);
+
+ return (0);
+}
+
+/*
+ * __log_newfh --
+ * Acquire a file handle for the current log file.
+ */
+static int
+__log_newfh(dblp)
+ DB_LOG *dblp;
+{
+ DB_ENV *dbenv;
+ LOG *lp;
+ int ret;
+ char *name;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /* Close any previous file descriptor. */
+ if (F_ISSET(&dblp->lfh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, &dblp->lfh);
+
+ /*
+ * Get the path of the new file and open it.
+ *
+ * Adding DB_OSO_LOG to the flags may add additional platform-specific
+ * optimizations. On WinNT, the logfile is preallocated, which may
+ * have a time penalty at startup, but have better overall throughput.
+ * We are not certain that this works reliably, so enable at your own
+ * risk.
+ *
+ * XXX:
+ * Initialize the log file size. This is a hack to push the log's
+ * maximum size down into the Windows __os_open routine, because it
+ * wants to pre-allocate it.
+ */
+ dblp->lfname = lp->lsn.file;
+ dblp->lfh.log_size = lp->log_size;
+ /*
+ * NOTE(review): on failure both the error message and the free
+ * below use 'name' -- this presumes __log_name/__db_appname set
+ * *namep before any failure can occur; confirm against their
+ * implementations.
+ */
+ if ((ret = __log_name(dblp, dblp->lfname,
+ &name, &dblp->lfh,
+ DB_OSO_CREATE |/* DB_OSO_LOG |*/ DB_OSO_SEQ |
+ (F_ISSET(dbenv, DB_ENV_DIRECT_LOG) ? DB_OSO_DIRECT : 0))) != 0)
+ __db_err(dbenv,
+ "DB_ENV->log_put: %s: %s", name, db_strerror(ret));
+
+ __os_free(dbenv, name);
+ return (ret);
+}
+
+/*
+ * __log_name --
+ * Return the log name for a particular file, and optionally open it.
+ *
+ * PUBLIC: int __log_name __P((DB_LOG *,
+ * PUBLIC: u_int32_t, char **, DB_FH *, u_int32_t));
+ */
+int
+__log_name(dblp, filenumber, namep, fhp, flags)
+ DB_LOG *dblp;
+ u_int32_t filenumber, flags;
+ char **namep;
+ DB_FH *fhp;
+{
+ DB_ENV *dbenv;
+ LOG *lp;
+ int ret;
+ char *oname;
+ char old[sizeof(LFPREFIX) + 5 + 20], new[sizeof(LFPREFIX) + 10 + 20];
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * Initialize so the error path below never frees an uninitialized
+ * pointer: the goto err is taken when allocating the old-style name
+ * fails, i.e. before oname has been set.
+ */
+ oname = NULL;
+
+ /*
+ * !!!
+ * The semantics of this routine are bizarre.
+ *
+ * The reason for all of this is that we need a place where we can
+ * intercept requests for log files, and, if appropriate, check for
+ * both the old-style and new-style log file names. The trick is
+ * that all callers of this routine that are opening the log file
+ * read-only want to use an old-style file name if they can't find
+ * a match using a new-style name. The only down-side is that some
+ * callers may check for the old-style when they really don't need
+ * to, but that shouldn't mess up anything, and we only check for
+ * the old-style name when we've already failed to find a new-style
+ * one.
+ *
+ * Create a new-style file name, and if we're not going to open the
+ * file, return regardless.
+ */
+ (void)snprintf(new, sizeof(new), LFNAME, filenumber);
+ if ((ret = __db_appname(dbenv,
+ DB_APP_LOG, new, 0, NULL, namep)) != 0 || fhp == NULL)
+ return (ret);
+
+ /* Open the new-style file -- if we succeed, we're done. */
+ if ((ret = __os_open(dbenv, *namep, flags, lp->persist.mode, fhp)) == 0)
+ return (0);
+
+ /*
+ * The open failed... if the DB_RDONLY flag isn't set, we're done,
+ * the caller isn't interested in old-style files.
+ */
+ if (!LF_ISSET(DB_OSO_RDONLY)) {
+ __db_err(dbenv,
+ "%s: log file open failed: %s", *namep, db_strerror(ret));
+ return (__db_panic(dbenv, ret));
+ }
+
+ /* Create an old-style file name. */
+ (void)snprintf(old, sizeof(old), LFNAME_V1, filenumber);
+ if ((ret = __db_appname(dbenv, DB_APP_LOG, old, 0, NULL, &oname)) != 0)
+ goto err;
+
+ /*
+ * Open the old-style file -- if we succeed, we're done. Free the
+ * space allocated for the new-style name and return the old-style
+ * name to the caller.
+ */
+ if ((ret = __os_open(dbenv,
+ oname, flags, lp->persist.mode, fhp)) == 0) {
+ __os_free(dbenv, *namep);
+ *namep = oname;
+ return (0);
+ }
+
+ /*
+ * Couldn't find either style of name -- return the new-style name
+ * for the caller's error message. If it's an old-style name that's
+ * actually missing we're going to confuse the user with the error
+ * message, but that implies that not only were we looking for an
+ * old-style name, but we expected it to exist and we weren't just
+ * looking for any log file. That's not a likely error.
+ */
+err: if (oname != NULL)
+ __os_free(dbenv, oname);
+ return (ret);
+}
+
+/*
+ * __log_rep_put --
+ * Short-circuit way for replication clients to put records into the
+ * log. Replication clients' logs need to be laid out exactly their masters'
+ * are, so we let replication take responsibility for when the log gets
+ * flushed, when log switches files, etc. This is just a thin PUBLIC wrapper
+ * for __log_putr with a slightly prettier interface.
+ *
+ * Note that the log region mutex should be held when this is called.
+ *
+ * PUBLIC: int __log_rep_put __P((DB_ENV *, DB_LSN *, const DBT *));
+ */
+int
+__log_rep_put(dbenv, lsnp, rec)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ const DBT *rec;
+{
+ DB_CIPHER *db_cipher;
+ DB_LOG *dblp;
+ HDR hdr;
+ DBT *dbt, t;
+ LOG *lp;
+ int need_free, ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ memset(&hdr, 0, sizeof(HDR));
+ /* Work on a shallow copy of the record so we never touch rec. */
+ t = *rec;
+ dbt = &t;
+ need_free = 0;
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ /* Encryption may pad the record; size the private copy for it. */
+ if (CRYPTO_ON(dbenv))
+ t.size += db_cipher->adj_size(rec->size);
+ if ((ret = __os_calloc(dbenv, 1, t.size, &t.data)) != 0)
+ goto err;
+ need_free = 1;
+ memcpy(t.data, rec->data, rec->size);
+
+ /* Encrypt in place (if configured), then checksum the final bytes. */
+ if ((ret = __log_encrypt_record(dbenv, dbt, &hdr, rec->size)) != 0)
+ goto err;
+ __db_chksum(t.data, t.size,
+ (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL, hdr.chksum);
+
+ /* Caller must supply exactly the next LSN in this client's log. */
+ DB_ASSERT(log_compare(lsnp, &lp->lsn) == 0);
+ ret = __log_putr(dblp, lsnp, dbt, lp->lsn.offset - lp->len, &hdr);
+err:
+ if (need_free)
+ __os_free(dbenv, t.data);
+ return (ret);
+}
+
+/*
+ * __log_encrypt_record --
+ * Encrypt a log record in place (when crypto is configured) and fill
+ * in the header's size fields accordingly. With crypto off, only the
+ * plaintext header size is recorded and the record is left untouched.
+ */
+static int
+__log_encrypt_record(dbenv, dbt, hdr, orig)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ HDR *hdr;
+ u_int32_t orig;
+{
+ DB_CIPHER *db_cipher;
+
+ /* No crypto: a plain header, nothing to transform. */
+ if (!CRYPTO_ON(dbenv)) {
+ hdr->size = HDR_NORMAL_SZ;
+ return (0);
+ }
+
+ /* Crypto header carries the IV and the pre-encryption length. */
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ hdr->size = HDR_CRYPTO_SZ;
+ hdr->orig_size = orig;
+ return (db_cipher->encrypt(dbenv,
+ db_cipher->data, hdr->iv, dbt->data, dbt->size));
+}
diff --git a/storage/bdb/mp/mp_alloc.c b/storage/bdb/mp/mp_alloc.c
new file mode 100644
index 00000000000..96dd612d7ba
--- /dev/null
+++ b/storage/bdb/mp/mp_alloc.c
@@ -0,0 +1,442 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_alloc.c,v 11.31 2002/08/14 17:21:37 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+/*
+ * HS --
+ * Pairs a hash bucket with a priority value; presumably scratch
+ * state for buffer-allocation scans (no use visible in this file
+ * chunk -- confirm against callers).
+ */
+typedef struct {
+ DB_MPOOL_HASH *bucket;
+ u_int32_t priority;
+} HS;
+
+static void __memp_bad_buffer __P((DB_MPOOL_HASH *));
+static void __memp_reset_lru __P((DB_ENV *, REGINFO *, MPOOL *));
+
+/*
+ * __memp_alloc --
+ * Allocate some space from a cache region.
+ *
+ * PUBLIC: int __memp_alloc __P((DB_MPOOL *,
+ * PUBLIC: REGINFO *, MPOOLFILE *, size_t, roff_t *, void *));
+ */
+int
+__memp_alloc(dbmp, memreg, mfp, len, offsetp, retp)
+ DB_MPOOL *dbmp;
+ REGINFO *memreg;
+ MPOOLFILE *mfp;
+ size_t len;
+ roff_t *offsetp;
+ void *retp;
+{
+ BH *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL_HASH *dbht, *hp, *hp_end, *hp_tmp;
+ DB_MUTEX *mutexp;
+ MPOOL *c_mp;
+ MPOOLFILE *bh_mfp;
+ size_t freed_space;
+ u_int32_t buckets, buffers, high_priority, max_na, priority;
+ int aggressive, ret;
+ void *p;
+
+ dbenv = dbmp->dbenv;
+ c_mp = memreg->primary;
+ dbht = R_ADDR(memreg, c_mp->htab);
+ hp_end = &dbht[c_mp->htab_buckets];
+
+ buckets = buffers = 0;
+ aggressive = 0;
+
+ c_mp->stat.st_alloc++;
+
+ /*
+ * Get aggressive if we've tried to flush the number of pages as are
+ * in the system without finding space.
+ */
+ max_na = 5 * c_mp->htab_buckets;
+
+ /*
+ * If we're allocating a buffer, and the one we're discarding is the
+ * same size, we don't want to waste the time to re-integrate it into
+ * the shared memory free list. If the DB_MPOOLFILE argument isn't
+ * NULL, we'll compare the underlying page sizes of the two buffers
+ * before free-ing and re-allocating buffers.
+ */
+ if (mfp != NULL)
+ len = (sizeof(BH) - sizeof(u_int8_t)) + mfp->stat.st_pagesize;
+
+ R_LOCK(dbenv, memreg);
+
+ /*
+ * On every buffer allocation we update the buffer generation number
+ * and check for wraparound.
+ */
+ if (++c_mp->lru_count == UINT32_T_MAX)
+ __memp_reset_lru(dbenv, memreg, c_mp);
+
+ /*
+ * Anything newer than 1/10th of the buffer pool is ignored during
+ * allocation (unless allocation starts failing).
+ */
+ DB_ASSERT(c_mp->lru_count > c_mp->stat.st_pages / 10);
+ high_priority = c_mp->lru_count - c_mp->stat.st_pages / 10;
+
+ /*
+ * First we try to allocate from free memory. If that fails, scan the
+ * buffer pool to find buffers with low priorities. We consider small
+ * sets of hash buckets each time to limit the amount of work needing
+ * to be done. This approximates LRU, but not very well. We either
+ * find a buffer of the same size to use, or we will free 3 times what
+ * we need in the hopes it will coalesce into a contiguous chunk of the
+ * right size. In the latter case we branch back here and try again.
+ */
+alloc: if ((ret = __db_shalloc(memreg->addr, len, MUTEX_ALIGN, &p)) == 0) {
+ if (mfp != NULL)
+ c_mp->stat.st_pages++;
+ R_UNLOCK(dbenv, memreg);
+
+found: if (offsetp != NULL)
+ *offsetp = R_OFFSET(memreg, p);
+ *(void **)retp = p;
+
+ /*
+ * Update the search statistics.
+ *
+ * We're not holding the region locked here, these statistics
+ * can't be trusted.
+ */
+ if (buckets != 0) {
+ if (buckets > c_mp->stat.st_alloc_max_buckets)
+ c_mp->stat.st_alloc_max_buckets = buckets;
+ c_mp->stat.st_alloc_buckets += buckets;
+ }
+ if (buffers != 0) {
+ if (buffers > c_mp->stat.st_alloc_max_pages)
+ c_mp->stat.st_alloc_max_pages = buffers;
+ c_mp->stat.st_alloc_pages += buffers;
+ }
+ return (0);
+ }
+
+ /*
+ * We re-attempt the allocation every time we've freed 3 times what
+ * we need. Reset our free-space counter.
+ */
+ freed_space = 0;
+
+ /*
+ * Walk the hash buckets and find the next two with potentially useful
+ * buffers. Free the buffer with the lowest priority from the buckets'
+ * chains.
+ */
+ for (hp_tmp = NULL;;) {
+ /* Check for wrap around. */
+ hp = &dbht[c_mp->last_checked++];
+ if (hp >= hp_end) {
+ c_mp->last_checked = 0;
+
+ /*
+ * If we've gone through all of the hash buckets, try
+ * an allocation. If the cache is small, the old page
+ * size is small, and the new page size is large, we
+ * might have freed enough memory (but not 3 times the
+ * memory).
+ */
+ goto alloc;
+ }
+
+ /*
+ * Skip empty buckets.
+ *
+ * We can check for empty buckets before locking as we
+ * only care if the pointer is zero or non-zero.
+ */
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ continue;
+
+ /*
+ * The failure mode is when there are too many buffers we can't
+ * write or there's not enough memory in the system. We don't
+ * have a metric for deciding if allocation has no possible way
+ * to succeed, so we don't ever fail, we assume memory will be
+ * available if we wait long enough.
+ *
+ * Get aggressive if we've tried to flush 5 times the number of
+ * hash buckets as are in the system -- it's possible we have
+ * been repeatedly trying to flush the same buffers, although
+ * it's unlikely. Aggressive means:
+ *
+ * a: set a flag to attempt to flush high priority buffers as
+ * well as other buffers.
+ * b: sync the mpool to force out queue extent pages. While we
+ * might not have enough space for what we want and flushing
+ * is expensive, why not?
+ * c: sleep for a second -- hopefully someone else will run and
+ * free up some memory. Try to allocate memory too, in case
+ * the other thread returns its memory to the region.
+ * d: look at a buffer in every hash bucket rather than choose
+ * the more preferable of two.
+ *
+ * !!!
+ * This test ignores pathological cases like no buffers in the
+ * system -- that shouldn't be possible.
+ */
+ if ((++buckets % max_na) == 0) {
+ aggressive = 1;
+
+ R_UNLOCK(dbenv, memreg);
+
+ (void)__memp_sync_int(
+ dbenv, NULL, 0, DB_SYNC_ALLOC, NULL);
+
+ (void)__os_sleep(dbenv, 1, 0);
+
+ R_LOCK(dbenv, memreg);
+ goto alloc;
+ }
+
+ if (!aggressive) {
+ /* Skip high priority buckets. */
+ if (hp->hash_priority > high_priority)
+ continue;
+
+ /*
+ * Find two buckets and select the one with the lowest
+ * priority. Performance testing shows that looking
+ * at two improves the LRUness and looking at more only
+ * does a little better.
+ */
+ if (hp_tmp == NULL) {
+ hp_tmp = hp;
+ continue;
+ }
+ if (hp->hash_priority > hp_tmp->hash_priority)
+ hp = hp_tmp;
+ hp_tmp = NULL;
+ }
+
+ /* Remember the priority of the buffer we're looking for. */
+ priority = hp->hash_priority;
+
+ /* Unlock the region and lock the hash bucket. */
+ R_UNLOCK(dbenv, memreg);
+ mutexp = &hp->hash_mutex;
+ MUTEX_LOCK(dbenv, mutexp);
+
+#ifdef DIAGNOSTIC
+ __memp_check_order(hp);
+#endif
+ /*
+ * The lowest priority page is first in the bucket, as they are
+ * maintained in sorted order.
+ *
+ * The buffer may have been freed or its priority changed while
+ * we switched from the region lock to the hash lock. If so,
+ * we have to restart. We will still take the first buffer on
+ * the bucket's list, though, if it has a low enough priority.
+ */
+ if ((bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) == NULL ||
+ bhp->ref != 0 || bhp->priority > priority)
+ goto next_hb;
+
+ buffers++;
+
+ /* Find the associated MPOOLFILE. */
+ bh_mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+
+ /* If the page is dirty, pin it and write it. */
+ ret = 0;
+ if (F_ISSET(bhp, BH_DIRTY)) {
+ ++bhp->ref;
+ ret = __memp_bhwrite(dbmp, hp, bh_mfp, bhp, 0);
+ --bhp->ref;
+ if (ret == 0)
+ ++c_mp->stat.st_rw_evict;
+ } else
+ ++c_mp->stat.st_ro_evict;
+
+ /*
+ * If a write fails for any reason, we can't proceed.
+ *
+ * We released the hash bucket lock while doing I/O, so another
+ * thread may have acquired this buffer and incremented the ref
+ * count after we wrote it, in which case we can't have it.
+ *
+ * If there's a write error, avoid selecting this buffer again
+ * by making it the bucket's least-desirable buffer.
+ */
+ if (ret != 0 || bhp->ref != 0) {
+ if (ret != 0 && aggressive)
+ __memp_bad_buffer(hp);
+ goto next_hb;
+ }
+
+ /*
+ * Check to see if the buffer is the size we're looking for.
+ * If so, we can simply reuse it. Else, free the buffer and
+ * its space and keep looking.
+ */
+ if (mfp != NULL &&
+ mfp->stat.st_pagesize == bh_mfp->stat.st_pagesize) {
+ __memp_bhfree(dbmp, hp, bhp, 0);
+
+ p = bhp;
+ goto found;
+ }
+
+ freed_space += __db_shsizeof(bhp);
+ __memp_bhfree(dbmp, hp, bhp, 1);
+
+ /*
+ * Unlock this hash bucket and re-acquire the region lock. If
+ * we're reaching here as a result of calling memp_bhfree, the
+ * hash bucket lock has already been discarded.
+ */
+ if (0) {
+next_hb: MUTEX_UNLOCK(dbenv, mutexp);
+ }
+ R_LOCK(dbenv, memreg);
+
+ /*
+ * Retry the allocation as soon as we've freed up sufficient
+ * space. We're likely to have to coalesce memory to
+ * satisfy the request, don't try until it's likely (possible?)
+ * we'll succeed.
+ */
+ if (freed_space >= 3 * len)
+ goto alloc;
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * __memp_bad_buffer --
+ * Make the first buffer in a hash bucket the least desirable buffer.
+ */
+static void
+__memp_bad_buffer(hp)
+ DB_MPOOL_HASH *hp;
+{
+ BH *bhp, *t_bhp;
+ u_int32_t priority;
+
+ /* Remove the first buffer from the bucket. */
+ bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+
+ /*
+ * Find the highest priority buffer in the bucket. Buffers are
+ * sorted by priority, so it's the last one in the bucket.
+ *
+ * XXX
+ * Should use SH_TAILQ_LAST, but I think that macro is broken.
+ */
+ priority = bhp->priority;
+ for (t_bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ t_bhp != NULL; t_bhp = SH_TAILQ_NEXT(t_bhp, hq, __bh))
+ priority = t_bhp->priority;
+
+ /*
+ * Set our buffer's priority to be just as bad, and append it to
+ * the bucket.
+ */
+ bhp->priority = priority;
+ SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+
+ /*
+ * Reset the hash bucket's priority. (The bucket cannot be empty
+ * here: the removed buffer was re-inserted at the tail above.)
+ */
+ hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+}
+
+/*
+ * __memp_reset_lru --
+ * Reset the cache LRU counter.
+ */
+static void
+__memp_reset_lru(dbenv, memreg, c_mp)
+ DB_ENV *dbenv;
+ REGINFO *memreg;
+ MPOOL *c_mp;
+{
+ BH *bhp;
+ DB_MPOOL_HASH *hp;
+ int bucket;
+
+ /*
+ * Update the counter so all future allocations will start at the
+ * bottom.
+ */
+ c_mp->lru_count -= MPOOL_BASE_DECREMENT;
+
+ /* Release the region lock. */
+ R_UNLOCK(dbenv, memreg);
+
+ /* Adjust the priority of every buffer in the system. */
+ for (hp = R_ADDR(memreg, c_mp->htab),
+ bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) {
+ /*
+ * Skip empty buckets.
+ *
+ * We can check for empty buckets before locking as we
+ * only care if the pointer is zero or non-zero.
+ */
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ continue;
+
+ /*
+ * Shift each buffer's priority down by the same amount the
+ * counter dropped, preserving relative ordering. Buffers
+ * pinned at UINT32_T_MAX or already below the decrement are
+ * left alone to avoid wrap-around.
+ */
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ if (bhp->priority != UINT32_T_MAX &&
+ bhp->priority > MPOOL_BASE_DECREMENT)
+ bhp->priority -= MPOOL_BASE_DECREMENT;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ }
+
+ /* Reacquire the region lock. */
+ R_LOCK(dbenv, memreg);
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * __memp_check_order --
+ * Verify the priority ordering of a hash bucket chain.
+ *
+ * PUBLIC: #ifdef DIAGNOSTIC
+ * PUBLIC: void __memp_check_order __P((DB_MPOOL_HASH *));
+ * PUBLIC: #endif
+ */
+void
+__memp_check_order(hp)
+ DB_MPOOL_HASH *hp;
+{
+ BH *bhp;
+ u_int32_t priority;
+
+ /*
+ * Assumes the hash bucket is locked.
+ */
+ if ((bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) == NULL)
+ return;
+
+ /* The bucket's cached priority must match its head buffer. */
+ DB_ASSERT(bhp->priority == hp->hash_priority);
+
+ /* Walk the chain asserting non-decreasing priorities. */
+ for (priority = bhp->priority;
+ (bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) != NULL;
+ priority = bhp->priority)
+ DB_ASSERT(priority <= bhp->priority);
+}
+#endif
diff --git a/storage/bdb/mp/mp_bh.c b/storage/bdb/mp/mp_bh.c
new file mode 100644
index 00000000000..85d15218abf
--- /dev/null
+++ b/storage/bdb/mp/mp_bh.c
@@ -0,0 +1,646 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_bh.c,v 11.71 2002/09/04 19:06:45 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+#include "dbinc/log.h"
+#include "dbinc/db_page.h"
+
+static int __memp_pgwrite
+ __P((DB_MPOOL *, DB_MPOOLFILE *, DB_MPOOL_HASH *, BH *));
+static int __memp_upgrade __P((DB_MPOOL *, DB_MPOOLFILE *, MPOOLFILE *));
+
+/*
+ * __memp_bhwrite --
+ * Write the page associated with a given buffer header.
+ *
+ * Finds (or opens) a writeable DB_MPOOLFILE handle for the buffer's
+ * underlying MPOOLFILE and passes the buffer to __memp_pgwrite.
+ * Returns 0 on success; EPERM when no writeable handle can be had
+ * (read-only handle that can't be upgraded, extent file when
+ * open_extents is 0, temporary file, or a file type with no pgin/pgout
+ * registered in this process); otherwise an error from the open or
+ * write path.
+ *
+ * PUBLIC: int __memp_bhwrite __P((DB_MPOOL *,
+ * PUBLIC: DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
+ */
+int
+__memp_bhwrite(dbmp, hp, mfp, bhp, open_extents)
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ MPOOLFILE *mfp;
+ BH *bhp;
+ int open_extents;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *dbmfp;
+ DB_MPREG *mpreg;
+ int local_open, incremented, ret;
+
+ dbenv = dbmp->dbenv;
+ /* incremented: we bumped dbmfp->ref; local_open: we opened dbmfp. */
+ local_open = incremented = 0;
+
+ /*
+ * If the file has been removed or is a closed temporary file, jump
+ * right ahead and pretend that we've found the file we want -- the
+ * page-write function knows how to handle the fact that we don't have
+ * (or need!) any real file descriptor information.
+ */
+ if (F_ISSET(mfp, MP_DEADFILE)) {
+ dbmfp = NULL;
+ goto found;
+ }
+
+ /*
+ * Walk the process' DB_MPOOLFILE list and find a file descriptor for
+ * the file. We also check that the descriptor is open for writing.
+ * If we find a descriptor on the file that's not open for writing, we
+ * try and upgrade it to make it writeable. If that fails, we're done.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
+ if (dbmfp->mfp == mfp) {
+ if (F_ISSET(dbmfp, MP_READONLY) &&
+ !F_ISSET(dbmfp, MP_UPGRADE) &&
+ (F_ISSET(dbmfp, MP_UPGRADE_FAIL) ||
+ __memp_upgrade(dbmp, dbmfp, mfp))) {
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ return (EPERM);
+ }
+
+ /*
+ * Increment the reference count -- see the comment in
+ * __memp_fclose_int().
+ */
+ ++dbmfp->ref;
+ incremented = 1;
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ if (dbmfp != NULL)
+ goto found;
+
+ /*
+ * !!!
+ * It's the caller's choice if we're going to open extent files.
+ */
+ if (!open_extents && F_ISSET(mfp, MP_EXTENT))
+ return (EPERM);
+
+ /*
+ * !!!
+ * Don't try to attach to temporary files. There are two problems in
+ * trying to do that. First, if we have different privileges than the
+ * process that "owns" the temporary file, we might create the backing
+ * disk file such that the owning process couldn't read/write its own
+ * buffers, e.g., memp_trickle running as root creating a file owned
+ * as root, mode 600. Second, if the temporary file has already been
+ * created, we don't have any way of finding out what its real name is,
+ * and, even if we did, it was already unlinked (so that it won't be
+ * left if the process dies horribly). This decision causes a problem,
+ * however: if the temporary file consumes the entire buffer cache,
+ * and the owner doesn't flush the buffers to disk, we could end up
+ * with resource starvation, and the memp_trickle thread couldn't do
+ * anything about it. That's a pretty unlikely scenario, though.
+ *
+ * Note we should never get here when the temporary file in question
+ * has already been closed in another process, in which case it should
+ * be marked MP_DEADFILE.
+ */
+ if (F_ISSET(mfp, MP_TEMP))
+ return (EPERM);
+
+ /*
+ * It's not a page from a file we've opened. If the file requires
+ * input/output processing, see if this process has ever registered
+ * information as to how to write this type of file. If not, there's
+ * nothing we can do.
+ */
+ if (mfp->ftype != 0) {
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (mpreg = LIST_FIRST(&dbmp->dbregq);
+ mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
+ if (mpreg->ftype == mfp->ftype)
+ break;
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (mpreg == NULL)
+ return (EPERM);
+ }
+
+ /*
+ * Try and open the file, attaching to the underlying shared area.
+ * Ignore any error, assume it's a permissions problem.
+ *
+ * XXX
+ * There's no negative cache, so we may repeatedly try and open files
+ * that we have previously tried (and failed) to open.
+ */
+ if ((ret = dbenv->memp_fcreate(dbenv, &dbmfp, 0)) != 0)
+ return (ret);
+ if ((ret = __memp_fopen_int(dbmfp, mfp,
+ R_ADDR(dbmp->reginfo, mfp->path_off),
+ 0, 0, mfp->stat.st_pagesize)) != 0) {
+ (void)dbmfp->close(dbmfp, 0);
+ return (ret);
+ }
+ local_open = 1;
+
+ /*
+ * NOTE(review): dbmfp is NULL on the MP_DEADFILE path;
+ * __memp_pgwrite copes with a NULL handle.
+ */
+found: ret = __memp_pgwrite(dbmp, dbmfp, hp, bhp);
+
+ /*
+ * Release our temporary reference or, if we opened the handle here,
+ * leave it open and flag it MP_FLUSH -- presumably so it can be
+ * found and reused for later flushes; confirm against mp_fopen.c.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ if (incremented)
+ --dbmfp->ref;
+ else if (local_open)
+ F_SET(dbmfp, MP_FLUSH);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __memp_pgread --
+ * Read a page from a file.
+ *
+ * mutexp is the hash bucket mutex, held by the caller; it is traded
+ * for the buffer's mutex around the I/O and reacquired before return.
+ * Returns 0 on success, DB_PAGE_NOTFOUND on a short read when
+ * can_create is 0 (the page may simply not exist yet), or an error
+ * from the underlying I/O or pgin function. On success BH_TRASH is
+ * cleared; BH_LOCKED is always cleared before returning.
+ *
+ * PUBLIC: int __memp_pgread __P((DB_MPOOLFILE *, DB_MUTEX *, BH *, int));
+ */
+int
+__memp_pgread(dbmfp, mutexp, bhp, can_create)
+ DB_MPOOLFILE *dbmfp;
+ DB_MUTEX *mutexp;
+ BH *bhp;
+ int can_create;
+{
+ DB_IO db_io;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+ size_t len, nr, pagesize;
+ int ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mfp = dbmfp->mfp;
+ pagesize = mfp->stat.st_pagesize;
+
+ /* We should never be called with a dirty or a locked buffer. */
+ DB_ASSERT(!F_ISSET(bhp, BH_DIRTY | BH_DIRTY_CREATE | BH_LOCKED));
+
+ /* Lock the buffer and swap the hash bucket lock for the buffer lock. */
+ F_SET(bhp, BH_LOCKED | BH_TRASH);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+ MUTEX_UNLOCK(dbenv, mutexp);
+
+ /*
+ * Temporary files may not yet have been created. We don't create
+ * them now, we create them when the pages have to be flushed.
+ */
+ nr = 0;
+ if (F_ISSET(dbmfp->fhp, DB_FH_VALID)) {
+ db_io.fhp = dbmfp->fhp;
+ db_io.mutexp = dbmfp->mutexp;
+ db_io.pagesize = db_io.bytes = pagesize;
+ db_io.pgno = bhp->pgno;
+ db_io.buf = bhp->buf;
+
+ /*
+ * The page may not exist; if it doesn't, nr may well be 0,
+ * but we expect the underlying OS calls not to return an
+ * error code in this case.
+ */
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_READ, &nr)) != 0)
+ goto err;
+ }
+
+ if (nr < pagesize) {
+ /*
+ * Don't output error messages for short reads. In particular,
+ * DB recovery processing may request pages never written to
+ * disk or for which only some part have been written to disk,
+ * in which case we won't find the page. The caller must know
+ * how to handle the error.
+ */
+ if (can_create == 0) {
+ ret = DB_PAGE_NOTFOUND;
+ goto err;
+ }
+
+ /* Clear any bytes that need to be cleared. */
+ len = mfp->clear_len == 0 ? pagesize : mfp->clear_len;
+ memset(bhp->buf, 0, len);
+
+#if defined(DIAGNOSTIC) || defined(UMRW)
+ /*
+ * If we're running in diagnostic mode, corrupt any bytes on
+ * the page that are unknown quantities for the caller.
+ */
+ if (len < pagesize)
+ memset(bhp->buf + len, CLEAR_BYTE, pagesize - len);
+#endif
+ ++mfp->stat.st_page_create;
+ } else
+ ++mfp->stat.st_page_in;
+
+ /* Call any pgin function. */
+ ret = mfp->ftype == 0 ? 0 : __memp_pg(dbmfp, bhp, 1);
+
+ /* Unlock the buffer and reacquire the hash bucket lock. */
+err: MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ MUTEX_LOCK(dbenv, mutexp);
+
+ /*
+ * If no errors occurred, the data is now valid, clear the BH_TRASH
+ * flag; regardless, clear the lock bit and let other threads proceed.
+ */
+ F_CLR(bhp, BH_LOCKED);
+ if (ret == 0)
+ F_CLR(bhp, BH_TRASH);
+
+ return (ret);
+}
+
+/*
+ * __memp_pgwrite --
+ * Write a page to a file.
+ *
+ * Called with the hash bucket lock held unless the buffer is already
+ * BH_LOCKED (the sync code locks buffers before calling us); either
+ * way the bucket lock is traded for the buffer lock around the I/O
+ * and reacquired before return. dbmfp may be NULL, or the MPOOLFILE
+ * may be marked MP_DEADFILE; in those cases the write is skipped and
+ * the buffer is simply marked clean (ret stays 0).
+ */
+static int
+__memp_pgwrite(dbmp, dbmfp, hp, bhp)
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ DB_MPOOL_HASH *hp;
+ BH *bhp;
+{
+ DB_ENV *dbenv;
+ DB_IO db_io;
+ DB_LSN lsn;
+ MPOOLFILE *mfp;
+ size_t nw;
+ int callpgin, ret;
+
+ dbenv = dbmp->dbenv;
+ mfp = dbmfp == NULL ? NULL : dbmfp->mfp;
+ callpgin = ret = 0;
+
+ /*
+ * We should never be called with a clean or trash buffer.
+ * The sync code does call us with already locked buffers.
+ */
+ DB_ASSERT(F_ISSET(bhp, BH_DIRTY));
+ DB_ASSERT(!F_ISSET(bhp, BH_TRASH));
+
+ /*
+ * If we have not already traded the hash bucket lock for the buffer
+ * lock, do so now.
+ */
+ if (!F_ISSET(bhp, BH_LOCKED)) {
+ F_SET(bhp, BH_LOCKED);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ }
+
+ /*
+ * It's possible that the underlying file doesn't exist, either
+ * because of an outright removal or because it was a temporary
+ * file that's been closed.
+ *
+ * !!!
+ * Once we pass this point, we know that dbmfp and mfp aren't NULL,
+ * and that we have a valid file reference.
+ */
+ if (mfp == NULL || F_ISSET(mfp, MP_DEADFILE))
+ goto file_dead;
+
+ /*
+ * If the page is in a file for which we have LSN information, we have
+ * to ensure the appropriate log records are on disk.
+ */
+ if (LOGGING_ON(dbenv) && mfp->lsn_off != -1) {
+ memcpy(&lsn, bhp->buf + mfp->lsn_off, sizeof(DB_LSN));
+ if ((ret = dbenv->log_flush(dbenv, &lsn)) != 0)
+ goto err;
+ }
+
+#ifdef DIAGNOSTIC
+ /*
+ * Verify write-ahead logging semantics.
+ *
+ * !!!
+ * One special case. There is a single field on the meta-data page,
+ * the last-page-number-in-the-file field, for which we do not log
+ * changes. If the page was originally created in a database that
+ * didn't have logging turned on, we can see a page marked dirty but
+ * for which no corresponding log record has been written. However,
+ * the only way that a page can be created for which there isn't a
+ * previous log record and valid LSN is when the page was created
+ * without logging turned on, and so we check for that special-case
+ * LSN value.
+ */
+ if (LOGGING_ON(dbenv) && !IS_NOT_LOGGED_LSN(LSN(bhp->buf))) {
+ /*
+ * There is a potential race here. If we are in the midst of
+ * switching log files, it's possible we could test against the
+ * old file and the new offset in the log region's LSN. If we
+ * fail the first test, acquire the log mutex and check again.
+ */
+ DB_LOG *dblp;
+ LOG *lp;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ if (!IS_NOT_LOGGED_LSN(LSN(bhp->buf)) &&
+ log_compare(&lp->s_lsn, &LSN(bhp->buf)) <= 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ DB_ASSERT(log_compare(&lp->s_lsn, &LSN(bhp->buf)) > 0);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+ }
+#endif
+
+ /*
+ * Call any pgout function. We set the callpgin flag so that we flag
+ * that the contents of the buffer will need to be passed through pgin
+ * before they are reused.
+ */
+ if (mfp->ftype != 0) {
+ callpgin = 1;
+ if ((ret = __memp_pg(dbmfp, bhp, 0)) != 0)
+ goto err;
+ }
+
+ /* Temporary files may not yet have been created. */
+ if (!F_ISSET(dbmfp->fhp, DB_FH_VALID)) {
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ /*
+ * Re-check under the mutex -- another thread may have
+ * created the backing file already.
+ */
+ ret = F_ISSET(dbmfp->fhp, DB_FH_VALID) ? 0 :
+ __db_appname(dbenv, DB_APP_TMP, NULL,
+ F_ISSET(dbenv, DB_ENV_DIRECT_DB) ? DB_OSO_DIRECT : 0,
+ dbmfp->fhp, NULL);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (ret != 0) {
+ __db_err(dbenv,
+ "unable to create temporary backing file");
+ goto err;
+ }
+ }
+
+ /* Write the page. */
+ db_io.fhp = dbmfp->fhp;
+ db_io.mutexp = dbmfp->mutexp;
+ db_io.pagesize = db_io.bytes = mfp->stat.st_pagesize;
+ db_io.pgno = bhp->pgno;
+ db_io.buf = bhp->buf;
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
+ __db_err(dbenv, "%s: write failed for page %lu",
+ __memp_fn(dbmfp), (u_long)bhp->pgno);
+ goto err;
+ }
+ ++mfp->stat.st_page_out;
+
+err:
+file_dead:
+ /*
+ * !!!
+ * Once we pass this point, dbmfp and mfp may be NULL, we may not have
+ * a valid file reference.
+ *
+ * Unlock the buffer and reacquire the hash lock.
+ */
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+
+ /*
+ * If we rewrote the page, it will need processing by the pgin
+ * routine before reuse.
+ */
+ if (callpgin)
+ F_SET(bhp, BH_CALLPGIN);
+
+ /*
+ * Update the hash bucket statistics, reset the flags.
+ * If we were successful, the page is no longer dirty.
+ */
+ if (ret == 0) {
+ DB_ASSERT(hp->hash_page_dirty != 0);
+ --hp->hash_page_dirty;
+
+ F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+ }
+
+ /* Regardless, clear any sync wait-for count and remove our lock. */
+ bhp->ref_sync = 0;
+ F_CLR(bhp, BH_LOCKED);
+
+ return (ret);
+}
+
+/*
+ * __memp_pg --
+ * Call the pgin/pgout routine.
+ *
+ * Walks the process-local list of registered page-conversion functions
+ * for the file's ftype and invokes pgin (is_pgin != 0) or pgout on the
+ * buffer. Returns 0 when no registration matches or the registration
+ * has no function for the requested direction; otherwise returns the
+ * conversion function's error, after reporting it.
+ *
+ * PUBLIC: int __memp_pg __P((DB_MPOOLFILE *, BH *, int));
+ */
+int
+__memp_pg(dbmfp, bhp, is_pgin)
+ DB_MPOOLFILE *dbmfp;
+ BH *bhp;
+ int is_pgin;
+{
+ DBT dbt, *dbtp;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPREG *mpreg;
+ MPOOLFILE *mfp;
+ int ftype, ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mfp = dbmfp->mfp;
+
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+
+ ftype = mfp->ftype;
+ for (mpreg = LIST_FIRST(&dbmp->dbregq);
+ mpreg != NULL; mpreg = LIST_NEXT(mpreg, q)) {
+ if (ftype != mpreg->ftype)
+ continue;
+ /* Build the page-cookie DBT, if the file has one. */
+ if (mfp->pgcookie_len == 0)
+ dbtp = NULL;
+ else {
+ dbt.size = mfp->pgcookie_len;
+ dbt.data = R_ADDR(dbmp->reginfo, mfp->pgcookie_off);
+ dbtp = &dbt;
+ }
+ /* Drop the mutex before calling into application code. */
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ if (is_pgin) {
+ if (mpreg->pgin != NULL &&
+ (ret = mpreg->pgin(dbenv,
+ bhp->pgno, bhp->buf, dbtp)) != 0)
+ goto err;
+ } else
+ if (mpreg->pgout != NULL &&
+ (ret = mpreg->pgout(dbenv,
+ bhp->pgno, bhp->buf, dbtp)) != 0)
+ goto err;
+ break;
+ }
+
+ /* No registration matched: the mutex is still held, release it. */
+ if (mpreg == NULL)
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ return (0);
+
+err: MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ __db_err(dbenv, "%s: %s failed for page %lu",
+ __memp_fn(dbmfp), is_pgin ? "pgin" : "pgout", (u_long)bhp->pgno);
+ return (ret);
+}
+
+/*
+ * __memp_bhfree --
+ * Free a bucket header and its referenced data.
+ *
+ * Called with the hash bucket locked and the MPOOL region unlocked.
+ * The bucket lock is released here and NOT reacquired. Removes the
+ * buffer from its bucket, drops the MPOOLFILE's block reference
+ * (discarding the MPOOLFILE if that was its last reference), clears
+ * the buffer's mutex and, when free_mem is set, returns the buffer's
+ * memory to the cache region.
+ *
+ * PUBLIC: void __memp_bhfree __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, int));
+ */
+void
+__memp_bhfree(dbmp, hp, bhp, free_mem)
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ BH *bhp;
+ int free_mem;
+{
+ DB_ENV *dbenv;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ u_int32_t n_cache;
+
+ /*
+ * Assumes the hash bucket is locked and the MPOOL is not.
+ */
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+ n_cache = NCACHE(mp, bhp->mf_offset, bhp->pgno);
+
+ /*
+ * Delete the buffer header from the hash bucket queue and reset
+ * the hash bucket's priority, if necessary.
+ */
+ SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+ if (bhp->priority == hp->hash_priority)
+ hp->hash_priority =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL ?
+ 0 : SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+
+ /*
+ * Discard the hash bucket's mutex, it's no longer needed, and
+ * we don't want to be holding it when acquiring other locks.
+ */
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+ /*
+ * Find the underlying MPOOLFILE and decrement its reference count.
+ * If this is its last reference, remove it.
+ */
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ if (--mfp->block_cnt == 0 && mfp->mpf_cnt == 0)
+ __memp_mf_discard(dbmp, mfp);
+ else
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+
+ R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+
+ /*
+ * Clear the mutex this buffer recorded; requires the region lock
+ * be held.
+ */
+ __db_shlocks_clear(&bhp->mutex, &dbmp->reginfo[n_cache],
+ (REGMAINT *)R_ADDR(&dbmp->reginfo[n_cache], mp->maint_off));
+
+ /*
+ * If we're not reusing the buffer immediately, free the buffer header
+ * and data for real.
+ */
+ if (free_mem) {
+ __db_shalloc_free(dbmp->reginfo[n_cache].addr, bhp);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ c_mp->stat.st_pages--;
+ }
+ R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
+}
+
+/*
+ * __memp_upgrade --
+ * Upgrade a file descriptor from read-only to read-write.
+ *
+ * Returns 0 on success and 1 on failure (NOTE(review): not an errno
+ * value -- the caller only tests for non-zero). If the open itself
+ * fails, MP_UPGRADE_FAIL is set on the handle so the upgrade is never
+ * retried; on success MP_UPGRADE is set.
+ */
+static int
+__memp_upgrade(dbmp, dbmfp, mfp)
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ MPOOLFILE *mfp;
+{
+ DB_ENV *dbenv;
+ DB_FH *fhp, *tfhp;
+ int ret;
+ char *rpath;
+
+ dbenv = dbmp->dbenv;
+ fhp = NULL;
+ rpath = NULL;
+
+ /*
+ * Calculate the real name for this file and try to open it read/write.
+ * We know we have a valid pathname for the file because it's the only
+ * way we could have gotten a file descriptor of any kind.
+ */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &fhp)) != 0)
+ goto err;
+
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) != 0)
+ goto err;
+
+ if (__os_open(dbenv, rpath,
+ F_ISSET(mfp, MP_DIRECT) ? DB_OSO_DIRECT : 0, 0, fhp) != 0) {
+ F_SET(dbmfp, MP_UPGRADE_FAIL);
+ goto err;
+ }
+
+ /*
+ * Swap the descriptors and set the upgrade flag.
+ *
+ * XXX
+ * There is a race here. If another process schedules a read using the
+ * existing file descriptor and is swapped out before making the system
+ * call, this code could theoretically close the file descriptor out
+ * from under it. While it's very unlikely, this code should still be
+ * rewritten.
+ */
+ tfhp = dbmfp->fhp;
+ dbmfp->fhp = fhp;
+ fhp = tfhp;
+
+ /* fhp now holds the old read-only handle; close and free it below. */
+ (void)__os_closehandle(dbenv, fhp);
+ F_SET(dbmfp, MP_UPGRADE);
+
+ ret = 0;
+ /* Success falls through; error gotos land in the dead "if (0)" arm. */
+ if (0) {
+err: ret = 1;
+ }
+ if (fhp != NULL)
+ __os_free(dbenv, fhp);
+ if (rpath != NULL)
+ __os_free(dbenv, rpath);
+
+ return (ret);
+}
diff --git a/storage/bdb/mp/mp_fget.c b/storage/bdb/mp/mp_fget.c
new file mode 100644
index 00000000000..be0785a2184
--- /dev/null
+++ b/storage/bdb/mp/mp_fget.c
@@ -0,0 +1,654 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_fget.c,v 11.68 2002/08/06 04:58:09 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+#ifdef HAVE_FILESYSTEM_NOTZERO
+static int __memp_fs_notzero
+ __P((DB_ENV *, DB_MPOOLFILE *, MPOOLFILE *, db_pgno_t *));
+#endif
+
+/*
+ * __memp_fget --
+ * Get a page from the file.
+ *
+ * PUBLIC: int __memp_fget
+ * PUBLIC: __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+ */
+int
+__memp_fget(dbmfp, pgnoaddr, flags, addrp)
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t *pgnoaddr;
+ u_int32_t flags;
+ void *addrp;
+{
+ enum { FIRST_FOUND, FIRST_MISS, SECOND_FOUND, SECOND_MISS } state;
+ BH *alloc_bhp, *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ roff_t mf_offset;
+ u_int32_t n_cache, st_hsearch;
+ int b_incr, extending, first, ret;
+
+ *(void **)addrp = NULL;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ mp = dbmp->reginfo[0].primary;
+ mfp = dbmfp->mfp;
+ mf_offset = R_OFFSET(dbmp->reginfo, mfp);
+ alloc_bhp = bhp = NULL;
+ hp = NULL;
+ b_incr = extending = ret = 0;
+
+ /*
+ * Validate arguments.
+ *
+ * !!!
+ * Don't test for DB_MPOOL_CREATE and DB_MPOOL_NEW flags for readonly
+ * files here, and create non-existent pages in readonly files if the
+ * flags are set, later. The reason is that the hash access method
+ * wants to get empty pages that don't really exist in readonly files.
+ * The only alternative is for hash to write the last "bucket" all the
+ * time, which we don't want to do because one of our big goals in life
+ * is to keep database files small. It's sleazy as hell, but we catch
+ * any attempt to actually write the file in memp_fput().
+ */
+#define OKFLAGS (DB_MPOOL_CREATE | DB_MPOOL_LAST | DB_MPOOL_NEW)
+ if (flags != 0) {
+ if ((ret = __db_fchk(dbenv, "memp_fget", flags, OKFLAGS)) != 0)
+ return (ret);
+
+ switch (flags) {
+ case DB_MPOOL_CREATE:
+ break;
+ case DB_MPOOL_LAST:
+ /* Get the last page number in the file. */
+ if (flags == DB_MPOOL_LAST) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ *pgnoaddr = mfp->last_pgno;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+ break;
+ case DB_MPOOL_NEW:
+ /*
+ * If always creating a page, skip the first search
+ * of the hash bucket.
+ */
+ if (flags == DB_MPOOL_NEW)
+ goto alloc;
+ break;
+ default:
+ return (__db_ferr(dbenv, "memp_fget", 1));
+ }
+ }
+
+ /*
+ * If mmap'ing the file and the page is not past the end of the file,
+ * just return a pointer.
+ *
+ * The page may be past the end of the file, so check the page number
+ * argument against the original length of the file. If we previously
+ * returned pages past the original end of the file, last_pgno will
+ * have been updated to match the "new" end of the file, and checking
+ * against it would return pointers past the end of the mmap'd region.
+ *
+ * If another process has opened the file for writing since we mmap'd
+ * it, we will start playing the game by their rules, i.e. everything
+ * goes through the cache. All pages previously returned will be safe,
+ * as long as the correct locking protocol was observed.
+ *
+ * We don't discard the map because we don't know when all of the
+ * pages will have been discarded from the process' address space.
+ * It would be possible to do so by reference counting the open
+ * pages from the mmap, but it's unclear to me that it's worth it.
+ */
+ if (dbmfp->addr != NULL &&
+ F_ISSET(mfp, MP_CAN_MMAP) && *pgnoaddr <= mfp->orig_last_pgno) {
+ *(void **)addrp =
+ R_ADDR(dbmfp, *pgnoaddr * mfp->stat.st_pagesize);
+ ++mfp->stat.st_map;
+ return (0);
+ }
+
+hb_search:
+ /*
+ * Determine the cache and hash bucket where this page lives and get
+ * local pointers to them. Reset on each pass through this code, the
+ * page number can change.
+ */
+ n_cache = NCACHE(mp, mf_offset, *pgnoaddr);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ hp = &hp[NBUCKET(c_mp, mf_offset, *pgnoaddr)];
+
+ /* Search the hash chain for the page. */
+retry: st_hsearch = 0;
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
+ ++st_hsearch;
+ if (bhp->pgno != *pgnoaddr || bhp->mf_offset != mf_offset)
+ continue;
+
+ /*
+ * Increment the reference count. We may discard the hash
+ * bucket lock as we evaluate and/or read the buffer, so we
+ * need to ensure it doesn't move and its contents remain
+ * unchanged.
+ */
+ if (bhp->ref == UINT16_T_MAX) {
+ __db_err(dbenv,
+ "%s: page %lu: reference count overflow",
+ __memp_fn(dbmfp), (u_long)bhp->pgno);
+ ret = EINVAL;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ goto err;
+ }
+ ++bhp->ref;
+ b_incr = 1;
+
+ /*
+ * BH_LOCKED --
+ * I/O is in progress or sync is waiting on the buffer to write
+ * it. Because we've incremented the buffer reference count,
+ * we know the buffer can't move. Unlock the bucket lock, wait
+ * for the buffer to become available, reacquire the bucket.
+ */
+ for (first = 1; F_ISSET(bhp, BH_LOCKED) &&
+ !F_ISSET(dbenv, DB_ENV_NOLOCKING); first = 0) {
+ /*
+ * If someone is trying to sync this buffer and the
+ * buffer is hot, they may never get in. Give up
+ * and try again.
+ */
+ if (!first && bhp->ref_sync != 0) {
+ --bhp->ref;
+ b_incr = 0;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ __os_yield(dbenv, 1);
+ goto retry;
+ }
+
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ /*
+ * Explicitly yield the processor if not the first pass
+ * through this loop -- if we don't, we might run to the
+ * end of our CPU quantum as we will simply be swapping
+ * between the two locks.
+ */
+ if (!first)
+ __os_yield(dbenv, 1);
+
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+ /* Wait for I/O to finish... */
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ }
+
+ ++mfp->stat.st_cache_hit;
+ break;
+ }
+
+ /*
+ * Update the hash bucket search statistics -- do now because our next
+ * search may be for a different bucket.
+ */
+ ++c_mp->stat.st_hash_searches;
+ if (st_hsearch > c_mp->stat.st_hash_longest)
+ c_mp->stat.st_hash_longest = st_hsearch;
+ c_mp->stat.st_hash_examined += st_hsearch;
+
+ /*
+ * There are 4 possible paths to this location:
+ *
+ * FIRST_MISS:
+ * Didn't find the page in the hash bucket on our first pass:
+ * bhp == NULL, alloc_bhp == NULL
+ *
+ * FIRST_FOUND:
+ * Found the page in the hash bucket on our first pass:
+ * bhp != NULL, alloc_bhp == NULL
+ *
+ * SECOND_FOUND:
+ * Didn't find the page in the hash bucket on the first pass,
+ * allocated space, and found the page in the hash bucket on
+ * our second pass:
+ * bhp != NULL, alloc_bhp != NULL
+ *
+ * SECOND_MISS:
+ * Didn't find the page in the hash bucket on the first pass,
+ * allocated space, and didn't find the page in the hash bucket
+ * on our second pass:
+ * bhp == NULL, alloc_bhp != NULL
+ */
+ state = bhp == NULL ?
+ (alloc_bhp == NULL ? FIRST_MISS : SECOND_MISS) :
+ (alloc_bhp == NULL ? FIRST_FOUND : SECOND_FOUND);
+ switch (state) {
+ case FIRST_FOUND:
+ /* We found the buffer in our first check -- we're done. */
+ break;
+ case FIRST_MISS:
+ /*
+ * We didn't find the buffer in our first check. Figure out
+ * if the page exists, and allocate structures so we can add
+ * the page to the buffer pool.
+ */
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+alloc: /*
+ * If DB_MPOOL_NEW is set, we have to allocate a page number.
+ * If neither DB_MPOOL_CREATE or DB_MPOOL_CREATE is set, then
+ * it's an error to try and get a page past the end of file.
+ */
+ COMPQUIET(n_cache, 0);
+
+ extending = ret = 0;
+ R_LOCK(dbenv, dbmp->reginfo);
+ switch (flags) {
+ case DB_MPOOL_NEW:
+ extending = 1;
+ *pgnoaddr = mfp->last_pgno + 1;
+ break;
+ case DB_MPOOL_CREATE:
+ extending = *pgnoaddr > mfp->last_pgno;
+ break;
+ default:
+ ret = *pgnoaddr > mfp->last_pgno ? DB_PAGE_NOTFOUND : 0;
+ break;
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ goto err;
+
+ /*
+ * !!!
+ * In the DB_MPOOL_NEW code path, mf_offset and n_cache have
+ * not yet been initialized.
+ */
+ mf_offset = R_OFFSET(dbmp->reginfo, mfp);
+ n_cache = NCACHE(mp, mf_offset, *pgnoaddr);
+
+ /* Allocate a new buffer header and data space. */
+ if ((ret = __memp_alloc(dbmp,
+ &dbmp->reginfo[n_cache], mfp, 0, NULL, &alloc_bhp)) != 0)
+ goto err;
+#ifdef DIAGNOSTIC
+ if ((db_alignp_t)alloc_bhp->buf & (sizeof(size_t) - 1)) {
+ __db_err(dbenv,
+ "Error: buffer data is NOT size_t aligned");
+ ret = EINVAL;
+ goto err;
+ }
+#endif
+ /*
+ * If we are extending the file, we'll need the region lock
+ * again.
+ */
+ if (extending)
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * DB_MPOOL_NEW does not guarantee you a page unreferenced by
+ * any other thread of control. (That guarantee is interesting
+ * for DB_MPOOL_NEW, unlike DB_MPOOL_CREATE, because the caller
+ * did not specify the page number, and so, may reasonably not
+ * have any way to lock the page outside of mpool.) Regardless,
+ * if we allocate the page, and some other thread of control
+ * requests the page by number, we will not detect that and the
+ * thread of control that allocated using DB_MPOOL_NEW may not
+ * have a chance to initialize the page. (Note: we *could*
+ * detect this case if we set a flag in the buffer header which
+ * guaranteed that no gets of the page would succeed until the
+ * reference count went to 0, that is, until the creating page
+ * put the page.) What we do guarantee is that if two threads
+ * of control are both doing DB_MPOOL_NEW calls, they won't
+ * collide, that is, they won't both get the same page.
+ *
+ * There's a possibility that another thread allocated the page
+ * we were planning to allocate while we were off doing buffer
+ * allocation. We can do that by making sure the page number
+ * we were going to use is still available. If it's not, then
+ * we check to see if the next available page number hashes to
+ * the same mpool region as the old one -- if it does, we can
+ * continue, otherwise, we have to start over.
+ */
+ if (flags == DB_MPOOL_NEW && *pgnoaddr != mfp->last_pgno + 1) {
+ *pgnoaddr = mfp->last_pgno + 1;
+ if (n_cache != NCACHE(mp, mf_offset, *pgnoaddr)) {
+ __db_shalloc_free(
+ dbmp->reginfo[n_cache].addr, alloc_bhp);
+ /*
+ * flags == DB_MPOOL_NEW, so extending is set
+ * and we're holding the region locked.
+ */
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ alloc_bhp = NULL;
+ goto alloc;
+ }
+ }
+
+ /*
+ * We released the region lock, so another thread might have
+ * extended the file. Update the last_pgno and initialize
+ * the file, as necessary, if we extended the file.
+ */
+ if (extending) {
+#ifdef HAVE_FILESYSTEM_NOTZERO
+ if (*pgnoaddr > mfp->last_pgno &&
+ __os_fs_notzero() &&
+ F_ISSET(dbmfp->fhp, DB_FH_VALID))
+ ret = __memp_fs_notzero(
+ dbenv, dbmfp, mfp, pgnoaddr);
+ else
+ ret = 0;
+#endif
+ if (ret == 0 && *pgnoaddr > mfp->last_pgno)
+ mfp->last_pgno = *pgnoaddr;
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ goto err;
+ }
+ goto hb_search;
+ case SECOND_FOUND:
+ /*
+ * We allocated buffer space for the requested page, but then
+ * found the page in the buffer cache on our second check.
+ * That's OK -- we can use the page we found in the pool,
+ * unless DB_MPOOL_NEW is set.
+ *
+ * Free the allocated memory, we no longer need it. Since we
+ * can't acquire the region lock while holding the hash bucket
+ * lock, we have to release the hash bucket and re-acquire it.
+ * That's OK, because we have the buffer pinned down.
+ */
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+ __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp);
+ alloc_bhp = NULL;
+ R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+
+ /*
+ * We can't use the page we found in the pool if DB_MPOOL_NEW
+ * was set. (For details, see the above comment beginning
+ * "DB_MPOOL_NEW does not guarantee you a page unreferenced by
+ * any other thread of control".) If DB_MPOOL_NEW is set, we
+ * release our pin on this particular buffer, and try to get
+ * another one.
+ */
+ if (flags == DB_MPOOL_NEW) {
+ --bhp->ref;
+ b_incr = 0;
+ goto alloc;
+ }
+ break;
+ case SECOND_MISS:
+ /*
+ * We allocated buffer space for the requested page, and found
+ * the page still missing on our second pass through the buffer
+ * cache. Instantiate the page.
+ */
+ bhp = alloc_bhp;
+ alloc_bhp = NULL;
+
+ /*
+ * Initialize all the BH and hash bucket fields so we can call
+ * __memp_bhfree if an error occurs.
+ *
+ * Append the buffer to the tail of the bucket list and update
+ * the hash bucket's priority.
+ */
+ b_incr = 1;
+
+ memset(bhp, 0, sizeof(BH));
+ bhp->ref = 1;
+ bhp->priority = UINT32_T_MAX;
+ bhp->pgno = *pgnoaddr;
+ bhp->mf_offset = mf_offset;
+ SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+ hp->hash_priority =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+
+ /* If we extended the file, make sure the page is never lost. */
+ if (extending) {
+ ++hp->hash_page_dirty;
+ F_SET(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+ }
+
+ /*
+ * If we created the page, zero it out. If we didn't create
+ * the page, read from the backing file.
+ *
+ * !!!
+ * DB_MPOOL_NEW doesn't call the pgin function.
+ *
+ * If DB_MPOOL_CREATE is used, then the application's pgin
+ * function has to be able to handle pages of 0's -- if it
+ * uses DB_MPOOL_NEW, it can detect all of its page creates,
+ * and not bother.
+ *
+ * If we're running in diagnostic mode, smash any bytes on the
+ * page that are unknown quantities for the caller.
+ *
+ * Otherwise, read the page into memory, optionally creating it
+ * if DB_MPOOL_CREATE is set.
+ */
+ if (extending) {
+ if (mfp->clear_len == 0)
+ memset(bhp->buf, 0, mfp->stat.st_pagesize);
+ else {
+ memset(bhp->buf, 0, mfp->clear_len);
+#if defined(DIAGNOSTIC) || defined(UMRW)
+ memset(bhp->buf + mfp->clear_len, CLEAR_BYTE,
+ mfp->stat.st_pagesize - mfp->clear_len);
+#endif
+ }
+
+ if (flags == DB_MPOOL_CREATE && mfp->ftype != 0)
+ F_SET(bhp, BH_CALLPGIN);
+
+ ++mfp->stat.st_page_create;
+ } else {
+ F_SET(bhp, BH_TRASH);
+ ++mfp->stat.st_cache_miss;
+ }
+
+ /* Increment buffer count referenced by MPOOLFILE. */
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ ++mfp->block_cnt;
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+
+ /*
+ * Initialize the mutex. This is the last initialization step,
+ * because it's the only one that can fail, and everything else
+ * must be set up or we can't jump to the err label because it
+ * will call __memp_bhfree.
+ */
+ if ((ret = __db_mutex_setup(dbenv,
+ &dbmp->reginfo[n_cache], &bhp->mutex, 0)) != 0)
+ goto err;
+ }
+
+ DB_ASSERT(bhp->ref != 0);
+
+ /*
+ * If we're the only reference, update buffer and bucket priorities.
+ * We may be about to release the hash bucket lock, and everything
+ * should be correct, first. (We've already done this if we created
+ * the buffer, so there is no need to do it again.)
+ */
+ if (state != SECOND_MISS && bhp->ref == 1) {
+ bhp->priority = UINT32_T_MAX;
+ SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+ SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+ hp->hash_priority =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+ }
+
+ /*
+ * BH_TRASH --
+ * The buffer we found may need to be filled from the disk.
+ *
+ * It's possible for the read function to fail, which means we fail as
+ * well. Note, the __memp_pgread() function discards and reacquires
+ * the hash lock, so the buffer must be pinned down so that it cannot
+ * move and its contents are unchanged. Discard the buffer on failure
+ * unless another thread is waiting on our I/O to complete. It's OK to
+ * leave the buffer around, as the waiting thread will see the BH_TRASH
+ * flag set, and will also attempt to discard it. If there's a waiter,
+ * we need to decrement our reference count.
+ */
+ if (F_ISSET(bhp, BH_TRASH) &&
+ (ret = __memp_pgread(dbmfp,
+ &hp->hash_mutex, bhp, LF_ISSET(DB_MPOOL_CREATE) ? 1 : 0)) != 0)
+ goto err;
+
+ /*
+ * BH_CALLPGIN --
+ * The buffer was processed for being written to disk, and now has
+ * to be re-converted for use.
+ */
+ if (F_ISSET(bhp, BH_CALLPGIN)) {
+ if ((ret = __memp_pg(dbmfp, bhp, 1)) != 0)
+ goto err;
+ F_CLR(bhp, BH_CALLPGIN);
+ }
+
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+#ifdef DIAGNOSTIC
+ /* Update the file's pinned reference count. */
+ R_LOCK(dbenv, dbmp->reginfo);
+ ++dbmfp->pinref;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * We want to switch threads as often as possible, and at awkward
+ * times. Yield every time we get a new page to ensure contention.
+ */
+ if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
+ __os_yield(dbenv, 1);
+#endif
+
+ *(void **)addrp = bhp->buf;
+ return (0);
+
+err: /*
+ * Discard our reference. If we're the only reference, discard the
+ * the buffer entirely. If we held a reference to a buffer, we are
+ * also still holding the hash bucket mutex.
+ */
+ if (b_incr) {
+ if (bhp->ref == 1)
+ (void)__memp_bhfree(dbmp, hp, bhp, 1);
+ else {
+ --bhp->ref;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ }
+ }
+
+ /* If alloc_bhp is set, free the memory. */
+ if (alloc_bhp != NULL)
+ __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp);
+
+ return (ret);
+}
+
+#ifdef HAVE_FILESYSTEM_NOTZERO
+/*
+ * __memp_fs_notzero --
+ *	Initialize the underlying allocated pages in the file.
+ *
+ * Called when the file is extended on a filesystem that does not
+ * guarantee zero-filled pages: writes zeroed pages for page numbers
+ * (mfp->last_pgno + 1) through *pgnoaddr and flushes them, so that
+ * recovery can never see stale garbage in the newly allocated range.
+ * Returns 0 on success, else the write/fsync system error.
+ */
+static int
+__memp_fs_notzero(dbenv, dbmfp, mfp, pgnoaddr)
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *dbmfp;
+ MPOOLFILE *mfp;
+ db_pgno_t *pgnoaddr;
+{
+ DB_IO db_io;
+ u_int32_t i, npages;
+ size_t nw;
+ int ret;
+ u_int8_t *page;
+ char *fail;
+
+ /*
+ * Pages allocated by writing pages past end-of-file are not zeroed,
+ * on some systems. Recovery could theoretically be fooled by a page
+ * showing up that contained garbage. In order to avoid this, we
+ * have to write the pages out to disk, and flush them. The reason
+ * for the flush is because if we don't sync, the allocation of another
+ * page subsequent to this one might reach the disk first, and if we
+ * crashed at the right moment, leave us with this page as the one
+ * allocated by writing a page past it in the file.
+ *
+ * Hash is the only access method that allocates groups of pages. We
+ * know that it will use the existence of the last page in a group to
+ * signify that the entire group is OK; so, write all the pages but
+ * the last one in the group, flush them to disk, and then write the
+ * last one to disk and flush it.
+ */
+ /* One zeroed scratch page is reused for every write below. */
+ if ((ret = __os_calloc(dbenv, 1, mfp->stat.st_pagesize, &page)) != 0)
+ return (ret);
+
+ db_io.fhp = dbmfp->fhp;
+ db_io.mutexp = dbmfp->mutexp;
+ db_io.pagesize = db_io.bytes = mfp->stat.st_pagesize;
+ db_io.buf = page;
+
+ /* Number of pages being allocated past the old last page. */
+ npages = *pgnoaddr - mfp->last_pgno;
+ for (i = 1; i < npages; ++i) {
+ db_io.pgno = mfp->last_pgno + i;
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
+ fail = "write";
+ goto err;
+ }
+ }
+ /* Sync only if intermediate pages were actually written above. */
+ if (i != 1 && (ret = __os_fsync(dbenv, dbmfp->fhp)) != 0) {
+ fail = "sync";
+ goto err;
+ }
+
+ /* Write and flush the group's last page separately (see above). */
+ db_io.pgno = mfp->last_pgno + npages;
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
+ fail = "write";
+ goto err;
+ }
+ if ((ret = __os_fsync(dbenv, dbmfp->fhp)) != 0) {
+ fail = "sync";
+ /* Shared error label: also reached by falling into the if. */
+err: __db_err(dbenv, "%s: %s failed for page %lu",
+ __memp_fn(dbmfp), fail, (u_long)db_io.pgno);
+ }
+
+ __os_free(dbenv, page);
+ return (ret);
+}
+#endif
diff --git a/storage/bdb/mp/mp_fopen.c b/storage/bdb/mp/mp_fopen.c
new file mode 100644
index 00000000000..8fdefb0f5e9
--- /dev/null
+++ b/storage/bdb/mp/mp_fopen.c
@@ -0,0 +1,1018 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_fopen.c,v 11.90 2002/08/26 15:22:01 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+static int __memp_fclose __P((DB_MPOOLFILE *, u_int32_t));
+static int __memp_fopen __P((DB_MPOOLFILE *,
+ const char *, u_int32_t, int, size_t));
+static void __memp_get_fileid __P((DB_MPOOLFILE *, u_int8_t *));
+static void __memp_last_pgno __P((DB_MPOOLFILE *, db_pgno_t *));
+static void __memp_refcnt __P((DB_MPOOLFILE *, db_pgno_t *));
+static int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t));
+static int __memp_set_fileid __P((DB_MPOOLFILE *, u_int8_t *));
+static int __memp_set_ftype __P((DB_MPOOLFILE *, int));
+static int __memp_set_lsn_offset __P((DB_MPOOLFILE *, int32_t));
+static int __memp_set_pgcookie __P((DB_MPOOLFILE *, DBT *));
+static int __memp_set_priority __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
+static void __memp_set_unlink __P((DB_MPOOLFILE *, int));
+
+/* Initialization methods cannot be called after open is called. */
+/* Expands to an early return of __db_mi_open() when MP_OPEN_CALLED is set. */
+#define MPF_ILLEGAL_AFTER_OPEN(dbmfp, name) \
+ if (F_ISSET(dbmfp, MP_OPEN_CALLED)) \
+ return (__db_mi_open((dbmfp)->dbmp->dbenv, name, 1));
+
+/*
+ * __memp_fcreate --
+ *	Create a DB_MPOOLFILE handle.
+ *
+ * Allocates and zeroes the per-process DB_MPOOLFILE and its file
+ * handle, optionally a thread mutex, wires up the method table, and
+ * returns the handle through retp.  No flags are currently defined.
+ *
+ * PUBLIC: int __memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ */
+int
+__memp_fcreate(dbenv, retp, flags)
+ DB_ENV *dbenv;
+ DB_MPOOLFILE **retp;
+ u_int32_t flags;
+{
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_fcreate", DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "memp_fcreate", flags, 0)) != 0)
+ return (ret);
+
+ /* Allocate and initialize the per-process structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_MPOOLFILE), &dbmfp)) != 0)
+ return (ret);
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &dbmfp->fhp)) != 0)
+ goto err;
+
+ /* Allocate and initialize a mutex if necessary. */
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbmfp->mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+
+ /* The shared MPOOLFILE is only attached later, by open. */
+ dbmfp->ref = 1;
+ dbmfp->lsn_offset = -1;
+ dbmfp->dbmp = dbmp;
+ dbmfp->mfp = INVALID_ROFF;
+
+ /* Wire up the handle's method table. */
+ dbmfp->close = __memp_fclose;
+ dbmfp->get = __memp_fget;
+ dbmfp->get_fileid = __memp_get_fileid;
+ dbmfp->last_pgno = __memp_last_pgno;
+ dbmfp->open = __memp_fopen;
+ dbmfp->put = __memp_fput;
+ dbmfp->refcnt = __memp_refcnt;
+ dbmfp->set = __memp_fset;
+ dbmfp->set_clear_len = __memp_set_clear_len;
+ dbmfp->set_fileid = __memp_set_fileid;
+ dbmfp->set_ftype = __memp_set_ftype;
+ dbmfp->set_lsn_offset = __memp_set_lsn_offset;
+ dbmfp->set_pgcookie = __memp_set_pgcookie;
+ dbmfp->set_priority = __memp_set_priority;
+ dbmfp->set_unlink = __memp_set_unlink;
+ dbmfp->sync = __memp_fsync;
+
+ *retp = dbmfp;
+ return (0);
+
+err: if (dbmfp != NULL) {
+ if (dbmfp->fhp != NULL)
+ (void)__os_free(dbenv, dbmfp->fhp);
+ (void)__os_free(dbenv, dbmfp);
+ }
+ return (ret);
+}
+
+/*
+ * __memp_set_clear_len --
+ *	Set the clear length.
+ *
+ * Only the first clear_len bytes of a created page are zeroed (see the
+ * page-create path in __memp_fget); open validates clear_len <= pagesize.
+ * Must be called before DB_MPOOLFILE->open.
+ */
+static int
+__memp_set_clear_len(dbmfp, clear_len)
+ DB_MPOOLFILE *dbmfp;
+ u_int32_t clear_len;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_clear_len");
+
+ dbmfp->clear_len = clear_len;
+ return (0);
+}
+
<br>+/*
+ * __memp_set_fileid --
+ *	Set the file ID.
+ *
+ * The caller's buffer must remain valid at least until open, which
+ * copies DB_FILE_ID_LEN bytes of it into shared memory.  Must be
+ * called before DB_MPOOLFILE->open.
+ */
+static int
+__memp_set_fileid(dbmfp, fileid)
+ DB_MPOOLFILE *dbmfp;
+ u_int8_t *fileid;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_fileid");
+
+ /*
+ * XXX
+ * This is dangerous -- we're saving the caller's pointer instead
+ * of allocating memory and copying the contents.
+ */
+ dbmfp->fileid = fileid;
+ return (0);
+}
+
+/*
+ * __memp_set_ftype --
+ *	Set the file type (as registered).
+ *
+ * A non-zero ftype selects the registered pgin/pgout functions for
+ * this file.  Must be called before DB_MPOOLFILE->open.
+ */
+static int
+__memp_set_ftype(dbmfp, ftype)
+ DB_MPOOLFILE *dbmfp;
+ int ftype;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_ftype");
+
+ dbmfp->ftype = ftype;
+ return (0);
+}
+
+/*
+ * __memp_set_lsn_offset --
+ *	Set the page's LSN offset.
+ *
+ * Byte offset of the LSN on each page (-1, set at create time, means
+ * no LSN).  Must be called before DB_MPOOLFILE->open; open rejects a
+ * value that disagrees with an already-open copy of the same file.
+ */
+static int
+__memp_set_lsn_offset(dbmfp, lsn_offset)
+ DB_MPOOLFILE *dbmfp;
+ int32_t lsn_offset;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_lsn_offset");
+
+ dbmfp->lsn_offset = lsn_offset;
+ return (0);
+}
+
+/*
+ * __memp_set_pgcookie --
+ *	Set the pgin/pgout cookie.
+ *
+ * Saves the caller's DBT pointer; open copies the cookie's contents
+ * into shared memory, so the DBT must stay valid until then.  Must be
+ * called before DB_MPOOLFILE->open.
+ */
+static int
+__memp_set_pgcookie(dbmfp, pgcookie)
+ DB_MPOOLFILE *dbmfp;
+ DBT *pgcookie;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_pgcookie");
+
+ dbmfp->pgcookie = pgcookie;
+ return (0);
+}
+
+/*
+ * __memp_set_priority --
+ *	Set the cache priority for pages from this file.
+ *
+ * Maps the public DB_CACHE_PRIORITY enum onto the internal MPOOL_PRI_*
+ * values stored in the shared MPOOLFILE; returns EINVAL on an unknown
+ * value.
+ *
+ * NOTE(review): unlike the set_* methods above there is no
+ * MPF_ILLEGAL_AFTER_OPEN guard, yet dbmfp->mfp is only attached by
+ * open (it is INVALID_ROFF after create) -- calling this before open
+ * dereferences an invalid pointer.  Confirm callers only use it on
+ * open handles.
+ */
+static int
+__memp_set_priority(dbmfp, priority)
+ DB_MPOOLFILE *dbmfp;
+ DB_CACHE_PRIORITY priority;
+{
+ switch (priority) {
+ case DB_PRIORITY_VERY_LOW:
+ dbmfp->mfp->priority = MPOOL_PRI_VERY_LOW;
+ break;
+ case DB_PRIORITY_LOW:
+ dbmfp->mfp->priority = MPOOL_PRI_LOW;
+ break;
+ case DB_PRIORITY_DEFAULT:
+ dbmfp->mfp->priority = MPOOL_PRI_DEFAULT;
+ break;
+ case DB_PRIORITY_HIGH:
+ dbmfp->mfp->priority = MPOOL_PRI_HIGH;
+ break;
+ case DB_PRIORITY_VERY_HIGH:
+ dbmfp->mfp->priority = MPOOL_PRI_VERY_HIGH;
+ break;
+ default:
+ __db_err(dbmfp->dbmp->dbenv,
+ "Unknown priority value: %d", priority);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * __memp_fopen --
+ *	Open a backing file for the memory pool.
+ *
+ * Public method: validates flags, page size, clear length and the
+ * read-only/temporary combination, then hands off to the internal
+ * __memp_fopen_int with no pre-identified MPOOLFILE.
+ */
+static int
+__memp_fopen(dbmfp, path, flags, mode, pagesize)
+ DB_MPOOLFILE *dbmfp;
+ const char *path;
+ u_int32_t flags;
+ int mode;
+ size_t pagesize;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ int ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "memp_fopen", flags,
+ DB_CREATE | DB_DIRECT | DB_EXTENT |
+ DB_NOMMAP | DB_ODDFILESIZE | DB_RDONLY | DB_TRUNCATE)) != 0)
+ return (ret);
+
+ /*
+ * Require a non-zero, power-of-two pagesize, at least as large as
+ * the clear length.
+ */
+ if (pagesize == 0 || !POWER_OF_TWO(pagesize)) {
+ __db_err(dbenv,
+ "memp_fopen: page sizes must be a power-of-2");
+ return (EINVAL);
+ }
+ if (dbmfp->clear_len > pagesize) {
+ __db_err(dbenv,
+ "memp_fopen: clear length larger than page size");
+ return (EINVAL);
+ }
+
+ /* Read-only checks, and local flag. */
+ if (LF_ISSET(DB_RDONLY) && path == NULL) {
+ __db_err(dbenv,
+ "memp_fopen: temporary files can't be readonly");
+ return (EINVAL);
+ }
+
+ return (__memp_fopen_int(dbmfp, NULL, path, flags, mode, pagesize));
+}
+
+/*
+ * __memp_fopen_int --
+ *	Open a backing file for the memory pool; internal version.
+ *
+ * If mfp is non-NULL, the caller has already identified the shared
+ * MPOOLFILE to join and we only take a reference on it.  Otherwise
+ * the shared MPOOLFILE list is searched by fileID; when no match is
+ * found (or the file is temporary), a new MPOOLFILE is allocated and
+ * initialized in the region.  On success the handle is linked onto
+ * the process' DB_MPOOLFILE list and, when possible, the file is
+ * memory-mapped instead of buffered.
+ *
+ * PUBLIC: int __memp_fopen_int __P((DB_MPOOLFILE *,
+ * PUBLIC: MPOOLFILE *, const char *, u_int32_t, int, size_t));
+ */
+int
+__memp_fopen_int(dbmfp, mfp, path, flags, mode, pagesize)
+ DB_MPOOLFILE *dbmfp;
+ MPOOLFILE *mfp;
+ const char *path;
+ u_int32_t flags;
+ int mode;
+ size_t pagesize;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
+ db_pgno_t last_pgno;
+ size_t maxmap;
+ u_int32_t mbytes, bytes, oflags;
+ int mfp_alloc, ret;
+ u_int8_t idbuf[DB_FILE_ID_LEN];
+ char *rpath;
+ void *p;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+ mfp_alloc = ret = 0;
+ rpath = NULL;
+
+ /*
+ * Set the page size so os_open can decide whether to turn buffering
+ * off if the DB_DIRECT_DB flag is set.
+ */
+ dbmfp->fhp->pagesize = (u_int32_t)pagesize;
+
+ /*
+ * If it's a temporary file, delay the open until we actually need
+ * to write the file, and we know we can't join any existing files.
+ */
+ if (path == NULL)
+ goto alloc;
+
+ /*
+ * Get the real name for this file and open it. If it's a Queue extent
+ * file, it may not exist, and that's OK.
+ */
+ oflags = 0;
+ if (LF_ISSET(DB_CREATE))
+ oflags |= DB_OSO_CREATE;
+ if (LF_ISSET(DB_DIRECT))
+ oflags |= DB_OSO_DIRECT;
+ if (LF_ISSET(DB_RDONLY)) {
+ F_SET(dbmfp, MP_READONLY);
+ oflags |= DB_OSO_RDONLY;
+ }
+ if ((ret =
+ __db_appname(dbenv, DB_APP_DATA, path, 0, NULL, &rpath)) != 0)
+ goto err;
+ if ((ret = __os_open(dbenv, rpath, oflags, mode, dbmfp->fhp)) != 0) {
+ if (!LF_ISSET(DB_EXTENT))
+ __db_err(dbenv, "%s: %s", rpath, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * Figure out the file's size.
+ *
+ * !!!
+ * We can't use off_t's here, or in any code in the mainline library
+ * for that matter. (We have to use them in the os stubs, of course,
+ * as there are system calls that take them as arguments.) The reason
+ * is some customers build in environments where an off_t is 32-bits,
+ * but still run where offsets are 64-bits, and they pay us a lot of
+ * money.
+ */
+ if ((ret = __os_ioinfo(
+ dbenv, rpath, dbmfp->fhp, &mbytes, &bytes, NULL)) != 0) {
+ __db_err(dbenv, "%s: %s", rpath, db_strerror(ret));
+ goto err;
+ }
+
+ /*
+ * Get the file id if we weren't given one. Generated file id's
+ * don't use timestamps, otherwise there'd be no chance of any
+ * other process joining the party.
+ */
+ if (dbmfp->fileid == NULL) {
+ if ((ret = __os_fileid(dbenv, rpath, 0, idbuf)) != 0)
+ goto err;
+ dbmfp->fileid = idbuf;
+ }
+
+ /*
+ * If our caller knows what mfp we're using, increment the ref count,
+ * no need to search.
+ *
+ * We don't need to acquire a lock other than the mfp itself, because
+ * we know there's another reference and it's not going away.
+ */
+ if (mfp != NULL) {
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ ++mfp->mpf_cnt;
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ goto check_map;
+ }
+
+ /*
+ * If not creating a temporary file, walk the list of MPOOLFILE's,
+ * looking for a matching file. Files backed by temporary files
+ * or previously removed files can't match.
+ *
+ * DB_TRUNCATE support.
+ *
+ * The fileID is a filesystem unique number (e.g., a UNIX dev/inode
+ * pair) plus a timestamp. If files are removed and created in less
+ * than a second, the fileID can be repeated. The problem with
+ * repetition happens when the file that previously had the fileID
+ * value still has pages in the pool, since we don't want to use them
+ * to satisfy requests for the new file.
+ *
+ * Because the DB_TRUNCATE flag reuses the dev/inode pair, repeated
+ * opens with that flag set guarantees matching fileIDs when the
+ * machine can open a file and then re-open with truncate within a
+ * second. For this reason, we pass that flag down, and, if we find
+ * a matching entry, we ensure that it's never found again, and we
+ * create a new entry for the current request.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ /* Skip dead files and temporary files. */
+ if (F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
+ continue;
+
+ /* Skip non-matching files. */
+ if (memcmp(dbmfp->fileid, R_ADDR(dbmp->reginfo,
+ mfp->fileid_off), DB_FILE_ID_LEN) != 0)
+ continue;
+
+ /*
+ * If the file is being truncated, remove it from the system
+ * and create a new entry.
+ *
+ * !!!
+ * We should be able to set mfp to NULL and break out of the
+ * loop, but I like the idea of checking all the entries.
+ */
+ if (LF_ISSET(DB_TRUNCATE)) {
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ MPOOLFILE_IGNORE(mfp);
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ continue;
+ }
+
+ /*
+ * Some things about a file cannot be changed: the clear length,
+ * page size, or LSN location.
+ *
+ * The file type can change if the application's pre- and post-
+ * processing needs change. For example, an application that
+ * created a hash subdatabase in a database that was previously
+ * all btree.
+ *
+ * XXX
+ * We do not check to see if the pgcookie information changed,
+ * or update it if it is, this might be a bug.
+ */
+ if (dbmfp->clear_len != mfp->clear_len ||
+ pagesize != mfp->stat.st_pagesize ||
+ dbmfp->lsn_offset != mfp->lsn_off) {
+ __db_err(dbenv,
+ "%s: clear length, page size or LSN location changed",
+ path);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (dbmfp->ftype != 0)
+ mfp->ftype = dbmfp->ftype;
+
+ /* Found a live match: join it. */
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ ++mfp->mpf_cnt;
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ break;
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ if (mfp != NULL)
+ goto check_map;
+
+alloc: /* Allocate and initialize a new MPOOLFILE. */
+ if ((ret = __memp_alloc(
+ dbmp, dbmp->reginfo, NULL, sizeof(MPOOLFILE), NULL, &mfp)) != 0)
+ goto err;
+ mfp_alloc = 1;
+ memset(mfp, 0, sizeof(MPOOLFILE));
+ mfp->mpf_cnt = 1;
+ mfp->ftype = dbmfp->ftype;
+ mfp->stat.st_pagesize = pagesize;
+ mfp->lsn_off = dbmfp->lsn_offset;
+ mfp->clear_len = dbmfp->clear_len;
+
+ if (LF_ISSET(DB_DIRECT))
+ F_SET(mfp, MP_DIRECT);
+ if (LF_ISSET(DB_EXTENT))
+ F_SET(mfp, MP_EXTENT);
+ F_SET(mfp, MP_CAN_MMAP);
+
+ if (path == NULL)
+ F_SET(mfp, MP_TEMP);
+ else {
+ /*
+ * Don't permit files that aren't a multiple of the pagesize,
+ * and find the number of the last page in the file, all the
+ * time being careful not to overflow 32 bits.
+ *
+ * During verify or recovery, we might have to cope with a
+ * truncated file; if the file size is not a multiple of the
+ * page size, round down to a page, we'll take care of the
+ * partial page outside the mpool system.
+ */
+ if (bytes % pagesize != 0) {
+ if (LF_ISSET(DB_ODDFILESIZE))
+ bytes -= (u_int32_t)(bytes % pagesize);
+ else {
+ __db_err(dbenv,
+ "%s: file size not a multiple of the pagesize", rpath);
+ ret = EINVAL;
+ goto err;
+ }
+ }
+
+ /*
+ * If the user specifies DB_MPOOL_LAST or DB_MPOOL_NEW on a
+ * page get, we have to increment the last page in the file.
+ * Figure it out and save it away.
+ *
+ * Note correction: page numbers are zero-based, not 1-based.
+ */
+ last_pgno = (db_pgno_t)(mbytes * (MEGABYTE / pagesize));
+ last_pgno += (db_pgno_t)(bytes / pagesize);
+ if (last_pgno != 0)
+ --last_pgno;
+ mfp->orig_last_pgno = mfp->last_pgno = last_pgno;
+
+ /* Copy the file path into shared memory. */
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, strlen(path) + 1, &mfp->path_off, &p)) != 0)
+ goto err;
+ memcpy(p, path, strlen(path) + 1);
+
+ /* Copy the file identification string into shared memory. */
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, DB_FILE_ID_LEN, &mfp->fileid_off, &p)) != 0)
+ goto err;
+ memcpy(p, dbmfp->fileid, DB_FILE_ID_LEN);
+ }
+
+ /* Copy the page cookie into shared memory. */
+ if (dbmfp->pgcookie == NULL || dbmfp->pgcookie->size == 0) {
+ mfp->pgcookie_len = 0;
+ mfp->pgcookie_off = 0;
+ } else {
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, dbmfp->pgcookie->size, &mfp->pgcookie_off, &p)) != 0)
+ goto err;
+ memcpy(p, dbmfp->pgcookie->data, dbmfp->pgcookie->size);
+ mfp->pgcookie_len = dbmfp->pgcookie->size;
+ }
+
+ /*
+ * Prepend the MPOOLFILE to the list of MPOOLFILE's.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ ret = __db_mutex_setup(dbenv, dbmp->reginfo, &mfp->mutex,
+ MUTEX_NO_RLOCK);
+ if (ret == 0)
+ SH_TAILQ_INSERT_HEAD(&mp->mpfq, mfp, q, __mpoolfile);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ goto err;
+
+check_map:
+ /*
+ * If a file:
+ * + isn't temporary
+ * + is read-only
+ * + doesn't require any pgin/pgout support
+ * + the DB_NOMMAP flag wasn't set (in either the file open or
+ * the environment in which it was opened)
+ * + and is less than mp_mmapsize bytes in size
+ *
+ * we can mmap it instead of reading/writing buffers. Don't do error
+ * checking based on the mmap call failure. We want to do normal I/O
+ * on the file if the reason we failed was because the file was on an
+ * NFS mounted partition, and we can fail in buffer I/O just as easily
+ * as here.
+ *
+ * We'd like to test to see if the file is too big to mmap. Since we
+ * don't know what size or type off_t's or size_t's are, or the largest
+ * unsigned integral type is, or what random insanity the local C
+ * compiler will perpetrate, doing the comparison in a portable way is
+ * flatly impossible. Hope that mmap fails if the file is too large.
+ */
+#define DB_MAXMMAPSIZE (10 * 1024 * 1024) /* 10 MB. */
+ if (F_ISSET(mfp, MP_CAN_MMAP)) {
+ if (path == NULL)
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (!F_ISSET(dbmfp, MP_READONLY))
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (dbmfp->ftype != 0)
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (LF_ISSET(DB_NOMMAP) || F_ISSET(dbenv, DB_ENV_NOMMAP))
+ F_CLR(mfp, MP_CAN_MMAP);
+ maxmap = dbenv->mp_mmapsize == 0 ?
+ DB_MAXMMAPSIZE : dbenv->mp_mmapsize;
+ if (mbytes > maxmap / MEGABYTE ||
+ (mbytes == maxmap / MEGABYTE && bytes >= maxmap % MEGABYTE))
+ F_CLR(mfp, MP_CAN_MMAP);
+
+ dbmfp->addr = NULL;
+ if (F_ISSET(mfp, MP_CAN_MMAP)) {
+ dbmfp->len = (size_t)mbytes * MEGABYTE + bytes;
+ if (__os_mapfile(dbenv, rpath,
+ dbmfp->fhp, dbmfp->len, 1, &dbmfp->addr) != 0) {
+ dbmfp->addr = NULL;
+ F_CLR(mfp, MP_CAN_MMAP);
+ }
+ }
+ }
+
+ dbmfp->mfp = mfp;
+
+ F_SET(dbmfp, MP_OPEN_CALLED);
+
+ /* Add the file to the process' list of DB_MPOOLFILEs. */
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ TAILQ_INSERT_TAIL(&dbmp->dbmfq, dbmfp, q);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ if (0) {
+err: if (F_ISSET(dbmfp->fhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, dbmfp->fhp);
+
+ if (mfp_alloc) {
+ /*
+ * NOTE(review): pgcookie_off is not freed here, so
+ * the page cookie's region space leaks if the mutex
+ * setup after it fails -- compare __memp_mf_discard,
+ * which frees all three offsets.  Confirm upstream.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (mfp->path_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->path_off));
+ if (mfp->fileid_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->fileid_off));
+ __db_shalloc_free(dbmp->reginfo[0].addr, mfp);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+
+ }
+ if (rpath != NULL)
+ __os_free(dbenv, rpath);
+ return (ret);
+}
+
+/*
+ * __memp_get_fileid --
+ *	Return the file ID.
+ *
+ * Copies DB_FILE_ID_LEN bytes into the caller's fidp buffer.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_get_fileid(dbmfp, fidp)
+ DB_MPOOLFILE *dbmfp;
+ u_int8_t *fidp;
+{
+ /*
+ * No lock needed -- we're using the handle, it had better not
+ * be going away.
+ *
+ * !!!
+ * Get the fileID out of the region, not out of the DB_MPOOLFILE
+ * structure because the DB_MPOOLFILE reference is possibly short
+ * lived, and isn't to be trusted.
+ */
+ memcpy(fidp, R_ADDR(
+ dbmfp->dbmp->reginfo, dbmfp->mfp->fileid_off), DB_FILE_ID_LEN);
+}
+
+/*
+ * __memp_last_pgno --
+ *	Return the page number of the last page in the file.
+ *
+ * Reads the shared MPOOLFILE's last_pgno under the region lock.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_last_pgno(dbmfp, pgnoaddr)
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t *pgnoaddr;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+ *pgnoaddr = dbmfp->mfp->last_pgno;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+}
+
+/*
+ * __memp_refcnt --
+ *	Return the current reference count.
+ *
+ * Reads the shared mpf_cnt under the MPOOLFILE mutex.  Note the count
+ * is returned through a db_pgno_t pointer (the declared interface),
+ * not an integer type.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_refcnt(dbmfp, cntp)
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t *cntp;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbmfp->dbmp->dbenv;
+
+ MUTEX_LOCK(dbenv, &dbmfp->mfp->mutex);
+ *cntp = dbmfp->mfp->mpf_cnt;
+ MUTEX_UNLOCK(dbenv, &dbmfp->mfp->mutex);
+}
+
+/*
+ * __memp_set_unlink --
+ *	Set unlink on last close flag.
+ *
+ * Toggles MP_UNLINK on the shared MPOOLFILE under its mutex; when set,
+ * __memp_fclose_int removes the backing file on last close.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_set_unlink(dbmpf, set)
+ DB_MPOOLFILE *dbmpf;
+ int set;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbmpf->dbmp->dbenv;
+
+ MUTEX_LOCK(dbenv, &dbmpf->mfp->mutex);
+ if (set)
+ F_SET(dbmpf->mfp, MP_UNLINK);
+ else
+ F_CLR(dbmpf->mfp, MP_UNLINK);
+ MUTEX_UNLOCK(dbenv, &dbmpf->mfp->mutex);
+}
+
+/*
+ * __memp_fclose --
+ *	Close a backing file for the memory pool.
+ *
+ * Note a flag-validation error does not abort the close: the handle is
+ * always passed to __memp_fclose_int and discarded, and the first
+ * error encountered is returned.
+ */
+static int
+__memp_fclose(dbmfp, flags)
+ DB_MPOOLFILE *dbmfp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbmfp->dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * XXX
+ * DB_MPOOL_DISCARD: Undocumented flag: DB private.
+ */
+ ret = __db_fchk(dbenv, "DB_MPOOLFILE->close", flags, DB_MPOOL_DISCARD);
+
+ if ((t_ret = __memp_fclose_int(dbmfp, flags)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __memp_fclose_int --
+ *	Internal version of __memp_fclose.
+ *
+ * Waits until this handle is the last process-local reference, unlinks
+ * it from the DB_MPOOL list, releases per-process resources, then
+ * drops its reference on the shared MPOOLFILE, discarding that too when
+ * no references or buffers remain.  Always frees the DB_MPOOLFILE.
+ *
+ * PUBLIC: int __memp_fclose_int __P((DB_MPOOLFILE *, u_int32_t));
+ */
+int
+__memp_fclose_int(dbmfp, flags)
+ DB_MPOOLFILE *dbmfp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+ char *rpath;
+ int deleted, ret, t_ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+ ret = 0;
+
+ /*
+ * We have to reference count DB_MPOOLFILE structures as other threads
+ * in the process may be using them. Here's the problem:
+ *
+ * Thread A opens a database.
+ * Thread B uses thread A's DB_MPOOLFILE to write a buffer
+ * in order to free up memory in the mpool cache.
+ * Thread A closes the database while thread B is using the
+ * DB_MPOOLFILE structure.
+ *
+ * By opening all databases before creating any threads, and closing
+ * the databases after all the threads have exited, applications get
+ * better performance and avoid the problem path entirely.
+ *
+ * Regardless, holding the DB_MPOOLFILE to flush a dirty buffer is a
+ * short-term lock, even in worst case, since we better be the only
+ * thread of control using the DB_MPOOLFILE structure to read pages
+ * *into* the cache. Wait until we're the only reference holder and
+ * remove the DB_MPOOLFILE structure from the list, so nobody else can
+ * find it. We do this, rather than have the last reference holder
+ * (whoever that might be) discard the DB_MPOOLFILE structure, because
+ * we'd rather write error messages to the application in the close
+ * routine, not in the checkpoint/sync routine.
+ *
+ * !!!
+ * It's possible the DB_MPOOLFILE was never added to the DB_MPOOLFILE
+ * file list, check the MP_OPEN_CALLED flag to be sure.
+ */
+ for (deleted = 0;;) {
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ if (dbmfp->ref == 1) {
+ if (F_ISSET(dbmfp, MP_OPEN_CALLED))
+ TAILQ_REMOVE(&dbmp->dbmfq, dbmfp, q);
+ deleted = 1;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ if (deleted)
+ break;
+ __os_sleep(dbenv, 1, 0);
+ }
+
+ /* Complain if pinned blocks never returned. */
+ if (dbmfp->pinref != 0) {
+ __db_err(dbenv, "%s: close: %lu blocks left pinned",
+ __memp_fn(dbmfp), (u_long)dbmfp->pinref);
+ ret = __db_panic(dbenv, DB_RUNRECOVERY);
+ }
+
+ /* Discard any mmap information. */
+ if (dbmfp->addr != NULL &&
+ (ret = __os_unmapfile(dbenv, dbmfp->addr, dbmfp->len)) != 0)
+ __db_err(dbenv, "%s: %s", __memp_fn(dbmfp), db_strerror(ret));
+
+ /* Close the file; temporary files may not yet have been created. */
+ if (F_ISSET(dbmfp->fhp, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(dbenv, dbmfp->fhp)) != 0) {
+ __db_err(dbenv, "%s: %s", __memp_fn(dbmfp), db_strerror(t_ret));
+ if (ret == 0)
+ ret = t_ret;
+ }
+
+ /* Discard the thread mutex. */
+ if (dbmfp->mutexp != NULL)
+ __db_mutex_free(dbenv, dbmp->reginfo, dbmfp->mutexp);
+
+ /*
+ * Discard our reference on the underlying MPOOLFILE, and close
+ * it if it's no longer useful to anyone. It's possible the open of
+ * the file never happened or wasn't successful, in which case, mfp
+ * will be NULL.
+ */
+ if ((mfp = dbmfp->mfp) == NULL)
+ goto done;
+
+ /*
+ * If it's a temp file, all outstanding references belong to unflushed
+ * buffers. (A temp file can only be referenced by one DB_MPOOLFILE).
+ * We don't care about preserving any of those buffers, so mark the
+ * MPOOLFILE as dead so that even the dirty ones just get discarded
+ * when we try to flush them.
+ */
+ deleted = 0;
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ if (--mfp->mpf_cnt == 0 || LF_ISSET(DB_MPOOL_DISCARD)) {
+ if (LF_ISSET(DB_MPOOL_DISCARD) ||
+ F_ISSET(mfp, MP_TEMP | MP_UNLINK))
+ MPOOLFILE_IGNORE(mfp);
+ if (F_ISSET(mfp, MP_UNLINK)) {
+ if ((t_ret = __db_appname(dbmp->dbenv,
+ DB_APP_DATA, R_ADDR(dbmp->reginfo,
+ mfp->path_off), 0, NULL, &rpath)) != 0 && ret == 0)
+ ret = t_ret;
+ if (t_ret == 0) {
+ /*
+ * NOTE(review): precedence bug -- != binds
+ * tighter than =, so t_ret receives the
+ * boolean (__os_unlink(...) != 0), not the
+ * error code. Likely intended as
+ * ((t_ret = __os_unlink(...)) != 0).
+ */
+ if ((t_ret = __os_unlink(
+ dbmp->dbenv, rpath) != 0) && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, rpath);
+ }
+ }
+ if (mfp->block_cnt == 0) {
+ if ((t_ret =
+ __memp_mf_discard(dbmp, mfp)) != 0 && ret == 0)
+ ret = t_ret;
+ deleted = 1;
+ }
+ }
+ /* __memp_mf_discard released mfp->mutex; only unlock if kept. */
+ if (deleted == 0)
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+
+ /* Discard the DB_MPOOLFILE structure. */
+done: __os_free(dbenv, dbmfp->fhp);
+ __os_free(dbenv, dbmfp);
+
+ return (ret);
+}
+
+/*
+ * __memp_mf_discard --
+ *	Discard an MPOOLFILE.
+ *
+ * Caller holds mfp->mutex (see below); this routine releases it.  The
+ * file is flushed, marked dead, removed from the shared list, its
+ * per-file statistics folded into the region-wide totals, and all of
+ * its region allocations freed.  An open/fsync failure is reported in
+ * the return value even though the discard itself still proceeds.
+ *
+ * PUBLIC: int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *));
+ */
+int
+__memp_mf_discard(dbmp, mfp)
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+{
+ DB_ENV *dbenv;
+ DB_FH fh;
+ DB_MPOOL_STAT *sp;
+ MPOOL *mp;
+ char *rpath;
+ int ret;
+
+ dbenv = dbmp->dbenv;
+ mp = dbmp->reginfo[0].primary;
+ ret = 0;
+
+ /*
+ * Expects caller to be holding the MPOOLFILE mutex.
+ *
+ * When discarding a file, we have to flush writes from it to disk.
+ * The scenario is that dirty buffers from this file need to be
+ * flushed to satisfy a future checkpoint, but when the checkpoint
+ * calls mpool sync, the sync code won't know anything about them.
+ */
+ if (!F_ISSET(mfp, MP_DEADFILE) &&
+ (ret = __db_appname(dbenv, DB_APP_DATA,
+ R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) == 0) {
+ if ((ret = __os_open(dbenv, rpath, 0, 0, &fh)) == 0) {
+ ret = __os_fsync(dbenv, &fh);
+ (void)__os_closehandle(dbenv, &fh);
+ }
+ __os_free(dbenv, rpath);
+ }
+
+ /*
+ * We have to release the MPOOLFILE lock before acquiring the region
+ * lock so that we don't deadlock. Make sure nobody ever looks at
+ * this structure again.
+ */
+ MPOOLFILE_IGNORE(mfp);
+
+ /* Discard the mutex we're holding. */
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+
+ /* Delete from the list of MPOOLFILEs. */
+ R_LOCK(dbenv, dbmp->reginfo);
+ SH_TAILQ_REMOVE(&mp->mpfq, mfp, q, __mpoolfile);
+
+ /* Copy the statistics into the region. */
+ sp = &mp->stat;
+ sp->st_cache_hit += mfp->stat.st_cache_hit;
+ sp->st_cache_miss += mfp->stat.st_cache_miss;
+ sp->st_map += mfp->stat.st_map;
+ sp->st_page_create += mfp->stat.st_page_create;
+ sp->st_page_in += mfp->stat.st_page_in;
+ sp->st_page_out += mfp->stat.st_page_out;
+
+ /* Clear the mutex this MPOOLFILE recorded. */
+ __db_shlocks_clear(&mfp->mutex, dbmp->reginfo,
+ (REGMAINT *)R_ADDR(dbmp->reginfo, mp->maint_off));
+
+ /* Free the space. */
+ if (mfp->path_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->path_off));
+ if (mfp->fileid_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->fileid_off));
+ if (mfp->pgcookie_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->pgcookie_off));
+ __db_shalloc_free(dbmp->reginfo[0].addr, mfp);
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ return (ret);
+}
+
+/*
+ * __memp_fn --
+ *	On errors we print whatever is available as the file name.
+ *
+ * Convenience wrapper for __memp_fns using the handle's MPOOLFILE.
+ *
+ * PUBLIC: char * __memp_fn __P((DB_MPOOLFILE *));
+ */
+char *
+__memp_fn(dbmfp)
+ DB_MPOOLFILE *dbmfp;
+{
+ return (__memp_fns(dbmfp->dbmp, dbmfp->mfp));
+}
+
+/*
+ * __memp_fns --
+ *	On errors we print whatever is available as the file name.
+ *
+ * Returns the shared-memory path, or the literal "temporary" for
+ * path-less (temp) files.  The returned pointer references shared
+ * region memory and must not be freed.
+ *
+ * PUBLIC: char * __memp_fns __P((DB_MPOOL *, MPOOLFILE *));
+ *
+ */
+char *
+__memp_fns(dbmp, mfp)
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+{
+ if (mfp->path_off == 0)
+ return ((char *)"temporary");
+
+ return ((char *)R_ADDR(dbmp->reginfo, mfp->path_off));
+}
diff --git a/storage/bdb/mp/mp_fput.c b/storage/bdb/mp/mp_fput.c
new file mode 100644
index 00000000000..271e44a4ef8
--- /dev/null
+++ b/storage/bdb/mp/mp_fput.c
@@ -0,0 +1,202 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_fput.c,v 11.36 2002/08/09 19:04:11 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+/*
+ * __memp_fput --
+ * Mpool file put function.
+ *
+ * PUBLIC: int __memp_fput __P((DB_MPOOLFILE *, void *, u_int32_t));
+ */
+int
+__memp_fput(dbmfp, pgaddr, flags)
+ DB_MPOOLFILE *dbmfp;
+ void *pgaddr;
+ u_int32_t flags;
+{
+ BH *argbhp, *bhp, *prev;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp;
+ u_int32_t n_cache;
+ int adjust, ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if (flags) {
+ if ((ret = __db_fchk(dbenv, "memp_fput", flags,
+ DB_MPOOL_CLEAN | DB_MPOOL_DIRTY | DB_MPOOL_DISCARD)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "memp_fput",
+ flags, DB_MPOOL_CLEAN, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ if (LF_ISSET(DB_MPOOL_DIRTY) && F_ISSET(dbmfp, MP_READONLY)) {
+ __db_err(dbenv,
+ "%s: dirty flag set for readonly file page",
+ __memp_fn(dbmfp));
+ return (EACCES);
+ }
+ }
+
+ /*
+ * If we're mapping the file, there's nothing to do. Because we can
+ * stop mapping the file at any time, we have to check on each buffer
+ * to see if the address we gave the application was part of the map
+ * region.
+ */
+ if (dbmfp->addr != NULL && pgaddr >= dbmfp->addr &&
+ (u_int8_t *)pgaddr <= (u_int8_t *)dbmfp->addr + dbmfp->len)
+ return (0);
+
+#ifdef DIAGNOSTIC
+ /*
+ * Decrement the per-file pinned buffer count (mapped pages aren't
+ * counted).
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (dbmfp->pinref == 0) {
+ ret = EINVAL;
+ __db_err(dbenv,
+ "%s: more pages returned than retrieved", __memp_fn(dbmfp));
+ } else {
+ ret = 0;
+ --dbmfp->pinref;
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ return (ret);
+#endif
+
+ /* Convert a page address to a buffer header and hash bucket. */
+ bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf));
+ n_cache = NCACHE(dbmp->reginfo[0].primary, bhp->mf_offset, bhp->pgno);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)];
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+
+ /* Set/clear the page bits. */
+ if (LF_ISSET(DB_MPOOL_CLEAN) &&
+ F_ISSET(bhp, BH_DIRTY) && !F_ISSET(bhp, BH_DIRTY_CREATE)) {
+ DB_ASSERT(hp->hash_page_dirty != 0);
+ --hp->hash_page_dirty;
+ F_CLR(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DIRTY) && !F_ISSET(bhp, BH_DIRTY)) {
+ ++hp->hash_page_dirty;
+ F_SET(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DISCARD))
+ F_SET(bhp, BH_DISCARD);
+
+ /*
+ * Check for a reference count going to zero. This can happen if the
+ * application returns a page twice.
+ */
+ if (bhp->ref == 0) {
+ __db_err(dbenv, "%s: page %lu: unpinned page returned",
+ __memp_fn(dbmfp), (u_long)bhp->pgno);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ return (EINVAL);
+ }
+
+ /*
+ * If more than one reference to the page or a reference other than a
+ * thread waiting to flush the buffer to disk, we're done. Ignore the
+ * discard flags (for now) and leave the buffer's priority alone.
+ */
+ if (--bhp->ref > 1 || (bhp->ref == 1 && !F_ISSET(bhp, BH_LOCKED))) {
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ return (0);
+ }
+
+ /* Update priority values. */
+ if (F_ISSET(bhp, BH_DISCARD) ||
+ dbmfp->mfp->priority == MPOOL_PRI_VERY_LOW)
+ bhp->priority = 0;
+ else {
+ /*
+ * We don't lock the LRU counter or the stat.st_pages field, if
+ * we get garbage (which won't happen on a 32-bit machine), it
+ * only means a buffer has the wrong priority.
+ */
+ bhp->priority = c_mp->lru_count;
+
+ adjust = 0;
+ if (dbmfp->mfp->priority != 0)
+ adjust =
+ (int)c_mp->stat.st_pages / dbmfp->mfp->priority;
+ if (F_ISSET(bhp, BH_DIRTY))
+ adjust += c_mp->stat.st_pages / MPOOL_PRI_DIRTY;
+
+ if (adjust > 0) {
+ if (UINT32_T_MAX - bhp->priority <= (u_int32_t)adjust)
+ bhp->priority += adjust;
+ } else if (adjust < 0)
+ if (bhp->priority > (u_int32_t)-adjust)
+ bhp->priority += adjust;
+ }
+
+ /*
+ * Buffers on hash buckets are sorted by priority -- move the buffer
+ * to the correct position in the list.
+ */
+ argbhp = bhp;
+ SH_TAILQ_REMOVE(&hp->hash_bucket, argbhp, hq, __bh);
+
+ prev = NULL;
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; prev = bhp, bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ if (bhp->priority > argbhp->priority)
+ break;
+ if (prev == NULL)
+ SH_TAILQ_INSERT_HEAD(&hp->hash_bucket, argbhp, hq, __bh);
+ else
+ SH_TAILQ_INSERT_AFTER(&hp->hash_bucket, prev, argbhp, hq, __bh);
+
+ /* Reset the hash bucket's priority. */
+ hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+
+#ifdef DIAGNOSTIC
+ __memp_check_order(hp);
+#endif
+
+ /*
+ * The sync code has a separate counter for buffers on which it waits.
+ * It reads that value without holding a lock so we update it as the
+ * last thing we do. Once that value goes to 0, we won't see another
+ * reference to that buffer being returned to the cache until the sync
+ * code has finished, so we're safe as long as we don't let the value
+ * go to 0 before we finish with the buffer.
+ */
+ if (F_ISSET(argbhp, BH_LOCKED) && argbhp->ref_sync != 0)
+ --argbhp->ref_sync;
+
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+ return (0);
+}
diff --git a/storage/bdb/mp/mp_fset.c b/storage/bdb/mp/mp_fset.c
new file mode 100644
index 00000000000..65cd6286ac9
--- /dev/null
+++ b/storage/bdb/mp/mp_fset.c
@@ -0,0 +1,89 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_fset.c,v 11.25 2002/05/03 15:21:17 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+/*
+ * __memp_fset --
+ * Mpool page set-flag routine.
+ *
+ * PUBLIC: int __memp_fset __P((DB_MPOOLFILE *, void *, u_int32_t));
+ */
+int
+__memp_fset(dbmfp, pgaddr, flags)
+ DB_MPOOLFILE *dbmfp;
+ void *pgaddr;
+ u_int32_t flags;
+{
+ BH *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp;
+ u_int32_t n_cache;
+ int ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if (flags == 0)
+ return (__db_ferr(dbenv, "memp_fset", 1));
+
+ if ((ret = __db_fchk(dbenv, "memp_fset", flags,
+ DB_MPOOL_CLEAN | DB_MPOOL_DIRTY | DB_MPOOL_DISCARD)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "memp_fset",
+ flags, DB_MPOOL_CLEAN, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+
+ if (LF_ISSET(DB_MPOOL_DIRTY) && F_ISSET(dbmfp, MP_READONLY)) {
+ __db_err(dbenv, "%s: dirty flag set for readonly file page",
+ __memp_fn(dbmfp));
+ return (EACCES);
+ }
+
+ /* Convert the page address to a buffer header and hash bucket. */
+ bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf));
+ n_cache = NCACHE(dbmp->reginfo[0].primary, bhp->mf_offset, bhp->pgno);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)];
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+
+ /* Set/clear the page bits. */
+ if (LF_ISSET(DB_MPOOL_CLEAN) &&
+ F_ISSET(bhp, BH_DIRTY) && !F_ISSET(bhp, BH_DIRTY_CREATE)) {
+ DB_ASSERT(hp->hash_page_dirty != 0);
+ --hp->hash_page_dirty;
+ F_CLR(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DIRTY) && !F_ISSET(bhp, BH_DIRTY)) {
+ ++hp->hash_page_dirty;
+ F_SET(bhp, BH_DIRTY);
+ }
+ if (LF_ISSET(DB_MPOOL_DISCARD))
+ F_SET(bhp, BH_DISCARD);
+
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ return (0);
+}
diff --git a/storage/bdb/mp/mp_method.c b/storage/bdb/mp/mp_method.c
new file mode 100644
index 00000000000..38f0a645f16
--- /dev/null
+++ b/storage/bdb/mp/mp_method.c
@@ -0,0 +1,156 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_method.c,v 11.29 2002/03/27 04:32:27 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __memp_set_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int));
+static int __memp_set_mp_mmapsize __P((DB_ENV *, size_t));
+
/*
 * __memp_dbenv_create --
 *	Mpool specific creation of the DB_ENV structure.
 *
 * Fills in the mpool defaults (cache size, cache count) and wires up the
 * mpool method table on the DB_ENV handle.
 *
 * PUBLIC: void __memp_dbenv_create __P((DB_ENV *));
 */
void
__memp_dbenv_create(dbenv)
	DB_ENV *dbenv;
{
	/*
	 * !!!
	 * Our caller has not yet had the opportunity to reset the panic
	 * state or turn off mutex locking, and so we can neither check
	 * the panic state or acquire a mutex in the DB_ENV create path.
	 *
	 * We default to 32 8K pages.  We don't default to a flat 256K, because
	 * some systems require significantly more memory to hold 32 pages than
	 * others.  For example, HP-UX with POSIX pthreads needs 88 bytes for
	 * a POSIX pthread mutex and almost 200 bytes per buffer header, while
	 * Solaris needs 24 and 52 bytes for the same structures.  The minimum
	 * number of hash buckets is 37.  These contain a mutex also.
	 */
	dbenv->mp_bytes =
	    32 * ((8 * 1024) + sizeof(BH)) + 37 * sizeof(DB_MPOOL_HASH);
	dbenv->mp_ncache = 1;

	/*
	 * Method-table dispatch: RPC-client environments route the mpool
	 * calls to the client stubs; everything else gets the local
	 * implementations.
	 */
#ifdef HAVE_RPC
	if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
		dbenv->set_cachesize = __dbcl_env_cachesize;
		dbenv->set_mp_mmapsize = __dbcl_set_mp_mmapsize;
		/*
		 * dump_region and nameop are left NULL for RPC clients --
		 * presumably these operations are not supported remotely;
		 * confirm against the RPC server code.
		 */
		dbenv->memp_dump_region = NULL;
		dbenv->memp_fcreate = __dbcl_memp_fcreate;
		dbenv->memp_nameop = NULL;
		dbenv->memp_register = __dbcl_memp_register;
		dbenv->memp_stat = __dbcl_memp_stat;
		dbenv->memp_sync = __dbcl_memp_sync;
		dbenv->memp_trickle = __dbcl_memp_trickle;
	} else
#endif
	{
		dbenv->set_cachesize = __memp_set_cachesize;
		dbenv->set_mp_mmapsize = __memp_set_mp_mmapsize;
		dbenv->memp_dump_region = __memp_dump_region;
		dbenv->memp_fcreate = __memp_fcreate;
		dbenv->memp_nameop = __memp_nameop;
		dbenv->memp_register = __memp_register;
		dbenv->memp_stat = __memp_stat;
		dbenv->memp_sync = __memp_sync;
		dbenv->memp_trickle = __memp_trickle;
	}
}
+
/*
 * __memp_set_cachesize --
 *	Initialize the cache size.
 *
 * Normalizes (gbytes, bytes, ncache), applies the documented 25% overhead
 * bump for small caches, and records the result on the DB_ENV.  Returns 0
 * on success or EINVAL for an over-large per-cache size.  Illegal after
 * the environment has been opened.
 */
static int
__memp_set_cachesize(dbenv, gbytes, bytes, ncache)
	DB_ENV *dbenv;
	u_int32_t gbytes, bytes;
	int ncache;
{
	ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_cachesize");

	/* Normalize the values. */
	if (ncache == 0)
		ncache = 1;

	/*
	 * NOTE(review): ncache is a signed int mixed into unsigned division
	 * below (gbytes is u_int32_t); a negative ncache is not rejected
	 * here and would produce a huge unsigned divisor -- presumably
	 * callers pass ncache >= 0, confirm at the API boundary.
	 */

	/*
	 * You can only store 4GB-1 in an unsigned 32-bit value, so correct for
	 * applications that specify 4GB cache sizes -- we know what they meant.
	 */
	if (gbytes / ncache == 4 && bytes == 0) {
		--gbytes;
		bytes = GIGABYTE - 1;
	} else {
		/* Fold any whole gigabytes out of the byte count. */
		gbytes += bytes / GIGABYTE;
		bytes %= GIGABYTE;
	}

	/* Avoid too-large cache sizes, they result in a region size of zero. */
	if (gbytes / ncache > 4 || (gbytes / ncache == 4 && bytes != 0)) {
		__db_err(dbenv, "individual cache size too large");
		return (EINVAL);
	}

	/*
	 * If the application requested less than 500Mb, increase the cachesize
	 * by 25% and factor in the size of the hash buckets to account for our
	 * overhead.  (I'm guessing caches over 500Mb are specifically sized,
	 * that is, it's a large server and the application actually knows how
	 * much memory is available.  We only document the 25% overhead number,
	 * not the hash buckets, but I don't see a reason to confuse the issue,
	 * it shouldn't matter to an application.)
	 *
	 * There is a minimum cache size, regardless.
	 */
	if (gbytes == 0) {
		if (bytes < 500 * MEGABYTE)
			bytes += (bytes / 4) + 37 * sizeof(DB_MPOOL_HASH);
		if (bytes / ncache < DB_CACHESIZE_MIN)
			bytes = ncache * DB_CACHESIZE_MIN;
	}

	/* Record the normalized values on the environment handle. */
	dbenv->mp_gbytes = gbytes;
	dbenv->mp_bytes = bytes;
	dbenv->mp_ncache = ncache;

	return (0);
}
+
+/*
+ * __memp_set_mp_mmapsize --
+ * Set the maximum mapped file size.
+ */
+static int
+__memp_set_mp_mmapsize(dbenv, mp_mmapsize )
+ DB_ENV *dbenv;
+ size_t mp_mmapsize;
+{
+ dbenv->mp_mmapsize = mp_mmapsize;
+ return (0);
+}
diff --git a/storage/bdb/mp/mp_region.c b/storage/bdb/mp/mp_region.c
new file mode 100644
index 00000000000..06eca2f8646
--- /dev/null
+++ b/storage/bdb/mp/mp_region.c
@@ -0,0 +1,466 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_region.c,v 11.49 2002/05/07 18:42:20 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+static int __mpool_init __P((DB_ENV *, DB_MPOOL *, int, int));
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+static size_t __mpool_region_maint __P((REGINFO *));
+#endif
+
/*
 * __memp_open --
 *	Internal version of memp_open: only called from DB_ENV->open.
 *
 * Creates or joins the mpool shared regions, allocates and fills in the
 * per-process DB_MPOOL handle, and stores it in dbenv->mp_handle.
 * Returns 0 on success or a system/DB error code.
 *
 * PUBLIC: int __memp_open __P((DB_ENV *));
 */
int
__memp_open(dbenv)
	DB_ENV *dbenv;
{
	DB_MPOOL *dbmp;
	MPOOL *mp;
	REGINFO reginfo;
	roff_t reg_size, *regids;
	u_int32_t i;
	int htab_buckets, ret;

	/*
	 * Figure out how big each cache region is: the configured total
	 * (gbytes + bytes) divided evenly across the ncache regions.
	 */
	reg_size = (dbenv->mp_gbytes / dbenv->mp_ncache) * GIGABYTE;
	reg_size += ((dbenv->mp_gbytes %
	    dbenv->mp_ncache) * GIGABYTE) / dbenv->mp_ncache;
	reg_size += dbenv->mp_bytes / dbenv->mp_ncache;

	/*
	 * Figure out how many hash buckets each region will have.  Assume we
	 * want to keep the hash chains with under 10 pages on each chain.  We
	 * don't know the pagesize in advance, and it may differ for different
	 * files.  Use a pagesize of 1K for the calculation -- we walk these
	 * chains a lot, they must be kept short.
	 */
	htab_buckets = __db_tablesize((reg_size / (1 * 1024)) / 10);

	/* Create and initialize the DB_MPOOL structure. */
	if ((ret = __os_calloc(dbenv, 1, sizeof(*dbmp), &dbmp)) != 0)
		return (ret);
	LIST_INIT(&dbmp->dbregq);
	TAILQ_INIT(&dbmp->dbmfq);
	dbmp->dbenv = dbenv;

	/* Join/create the first mpool region. */
	memset(&reginfo, 0, sizeof(REGINFO));
	reginfo.type = REGION_TYPE_MPOOL;
	reginfo.id = INVALID_REGION_ID;
	reginfo.mode = dbenv->db_mode;
	reginfo.flags = REGION_JOIN_OK;
	if (F_ISSET(dbenv, DB_ENV_CREATE))
		F_SET(&reginfo, REGION_CREATE_OK);
	if ((ret = __db_r_attach(dbenv, &reginfo, reg_size)) != 0)
		goto err;

	/*
	 * If we created the region, initialize it.  Create or join any
	 * additional regions.
	 */
	if (F_ISSET(&reginfo, REGION_CREATE)) {
		/*
		 * We define how many regions there are going to be, allocate
		 * the REGINFO structures and create them.  Make sure we don't
		 * clear the wrong entries on error.
		 */
		dbmp->nreg = dbenv->mp_ncache;
		if ((ret = __os_calloc(dbenv,
		    dbmp->nreg, sizeof(REGINFO), &dbmp->reginfo)) != 0)
			goto err;
		/* Make sure we don't clear the wrong entries on error. */
		for (i = 0; i < dbmp->nreg; ++i)
			dbmp->reginfo[i].id = INVALID_REGION_ID;
		dbmp->reginfo[0] = reginfo;

		/* Initialize the first region. */
		if ((ret = __mpool_init(dbenv, dbmp, 0, htab_buckets)) != 0)
			goto err;

		/*
		 * Create/initialize remaining regions and copy their IDs into
		 * the first region.
		 */
		mp = R_ADDR(dbmp->reginfo, dbmp->reginfo[0].rp->primary);
		regids = R_ADDR(dbmp->reginfo, mp->regids);
		for (i = 1; i < dbmp->nreg; ++i) {
			dbmp->reginfo[i].type = REGION_TYPE_MPOOL;
			dbmp->reginfo[i].id = INVALID_REGION_ID;
			dbmp->reginfo[i].mode = dbenv->db_mode;
			dbmp->reginfo[i].flags = REGION_CREATE_OK;
			if ((ret = __db_r_attach(
			    dbenv, &dbmp->reginfo[i], reg_size)) != 0)
				goto err;
			if ((ret =
			    __mpool_init(dbenv, dbmp, i, htab_buckets)) != 0)
				goto err;
			R_UNLOCK(dbenv, &dbmp->reginfo[i]);

			regids[i] = dbmp->reginfo[i].id;
		}

		/* Creation complete: release the primary region lock. */
		R_UNLOCK(dbenv, dbmp->reginfo);
	} else {
		/*
		 * Determine how many regions there are going to be, allocate
		 * the REGINFO structures and fill in local copies of that
		 * information.
		 */
		mp = R_ADDR(&reginfo, reginfo.rp->primary);
		dbmp->nreg = mp->nreg;
		if ((ret = __os_calloc(dbenv,
		    dbmp->nreg, sizeof(REGINFO), &dbmp->reginfo)) != 0)
			goto err;
		/* Make sure we don't clear the wrong entries on error. */
		for (i = 0; i < dbmp->nreg; ++i)
			dbmp->reginfo[i].id = INVALID_REGION_ID;
		dbmp->reginfo[0] = reginfo;

		/*
		 * We have to unlock the primary mpool region before we attempt
		 * to join the additional mpool regions.  If we don't, we can
		 * deadlock.  The scenario is that we hold the primary mpool
		 * region lock.  We then try to attach to an additional mpool
		 * region, which requires the acquisition/release of the main
		 * region lock (to search the list of regions).  If another
		 * thread of control already holds the main region lock and is
		 * waiting on our primary mpool region lock, we'll deadlock.
		 * See [#4696] for more information.
		 */
		R_UNLOCK(dbenv, dbmp->reginfo);

		/* Join remaining regions. */
		regids = R_ADDR(dbmp->reginfo, mp->regids);
		for (i = 1; i < dbmp->nreg; ++i) {
			dbmp->reginfo[i].type = REGION_TYPE_MPOOL;
			dbmp->reginfo[i].id = regids[i];
			dbmp->reginfo[i].mode = 0;
			dbmp->reginfo[i].flags = REGION_JOIN_OK;
			if ((ret = __db_r_attach(
			    dbenv, &dbmp->reginfo[i], 0)) != 0)
				goto err;
			R_UNLOCK(dbenv, &dbmp->reginfo[i]);
		}
	}

	/* Set the local addresses for the regions. */
	for (i = 0; i < dbmp->nreg; ++i)
		dbmp->reginfo[i].primary =
		    R_ADDR(&dbmp->reginfo[i], dbmp->reginfo[i].rp->primary);

	/* If the region is threaded, allocate a mutex to lock the handles. */
	if (F_ISSET(dbenv, DB_ENV_THREAD) &&
	    (ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbmp->mutexp,
	    MUTEX_ALLOC | MUTEX_THREAD)) != 0)
		goto err;

	dbenv->mp_handle = dbmp;
	return (0);

	/*
	 * Error cleanup: if we created (rather than joined) the region,
	 * a partially-initialized region is unrecoverable and the
	 * environment is panicked; otherwise detach from whatever regions
	 * were attached and free the local handle.
	 */
err:	if (dbmp->reginfo != NULL && dbmp->reginfo[0].addr != NULL) {
		if (F_ISSET(dbmp->reginfo, REGION_CREATE))
			ret = __db_panic(dbenv, ret);

		R_UNLOCK(dbenv, dbmp->reginfo);

		for (i = 0; i < dbmp->nreg; ++i)
			if (dbmp->reginfo[i].id != INVALID_REGION_ID)
				(void)__db_r_detach(
				    dbenv, &dbmp->reginfo[i], 0);
		__os_free(dbenv, dbmp->reginfo);
	}
	if (dbmp->mutexp != NULL)
		__db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp);
	__os_free(dbenv, dbmp);
	return (ret);
}
+
/*
 * __mpool_init --
 *	Initialize a MPOOL structure in shared memory.
 *
 * Allocates and zeroes the region's primary MPOOL, the optional mutex
 * maintenance area, the cross-region id table (first region only), and
 * the hash-bucket array with one mutex per bucket.  Returns 0 or an
 * allocation/mutex-setup error.
 */
static int
__mpool_init(dbenv, dbmp, reginfo_off, htab_buckets)
	DB_ENV *dbenv;
	DB_MPOOL *dbmp;
	int reginfo_off, htab_buckets;
{
	DB_MPOOL_HASH *htab;
	MPOOL *mp;
	REGINFO *reginfo;
#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
	size_t maint_size;
#endif
	int i, ret;
	void *p;

	mp = NULL;

	/* Allocate and zero the region's primary MPOOL structure. */
	reginfo = &dbmp->reginfo[reginfo_off];
	if ((ret = __db_shalloc(reginfo->addr,
	    sizeof(MPOOL), MUTEX_ALIGN, &reginfo->primary)) != 0)
		goto mem_err;
	reginfo->rp->primary = R_OFFSET(reginfo, reginfo->primary);
	mp = reginfo->primary;
	memset(mp, 0, sizeof(*mp));

#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
	maint_size = __mpool_region_maint(reginfo);
	/* Allocate room for the maintenance info and initialize it. */
	if ((ret = __db_shalloc(reginfo->addr,
	    sizeof(REGMAINT) + maint_size, 0, &p)) != 0)
		goto mem_err;
	__db_maintinit(reginfo, p, maint_size);
	mp->maint_off = R_OFFSET(reginfo, p);
#endif

	/*
	 * Only the first region carries the MPOOLFILE list, the LSN and
	 * the table of region ids.
	 */
	if (reginfo_off == 0) {
		SH_TAILQ_INIT(&mp->mpfq);

		ZERO_LSN(mp->lsn);

		mp->nreg = dbmp->nreg;
		/*
		 * NOTE(review): the id table is allocated as nreg entries of
		 * sizeof(int) but is addressed as roff_t * in __memp_open --
		 * safe only where sizeof(int) == sizeof(roff_t); confirm.
		 */
		if ((ret = __db_shalloc(dbmp->reginfo[0].addr,
		    dbmp->nreg * sizeof(int), 0, &p)) != 0)
			goto mem_err;
		mp->regids = R_OFFSET(dbmp->reginfo, p);
	}

	/* Allocate hash table space and initialize it. */
	if ((ret = __db_shalloc(reginfo->addr,
	    htab_buckets * sizeof(DB_MPOOL_HASH), 0, &htab)) != 0)
		goto mem_err;
	mp->htab = R_OFFSET(reginfo, htab);
	for (i = 0; i < htab_buckets; i++) {
		/*
		 * NOTE(review): a mutex-setup failure here returns without
		 * releasing the earlier shared allocations -- presumably the
		 * caller panics/discards the whole region on error; confirm.
		 */
		if ((ret = __db_mutex_setup(dbenv,
		    reginfo, &htab[i].hash_mutex,
		    MUTEX_NO_RLOCK)) != 0)
			return (ret);
		SH_TAILQ_INIT(&htab[i].hash_bucket);
		htab[i].hash_page_dirty = htab[i].hash_priority = 0;
	}
	mp->htab_buckets = mp->stat.st_hash_buckets = htab_buckets;

	/*
	 * Only the environment creator knows the total cache size, fill in
	 * those statistics now.
	 */
	mp->stat.st_gbytes = dbenv->mp_gbytes;
	mp->stat.st_bytes = dbenv->mp_bytes;
	return (0);

mem_err:__db_err(dbenv, "Unable to allocate memory for mpool region");
	return (ret);
}
+
+/*
+ * __memp_dbenv_refresh --
+ * Clean up after the mpool system on a close or failed open.
+ *
+ * PUBLIC: int __memp_dbenv_refresh __P((DB_ENV *));
+ */
+int
+__memp_dbenv_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ DB_MPREG *mpreg;
+ u_int32_t i;
+ int ret, t_ret;
+
+ ret = 0;
+ dbmp = dbenv->mp_handle;
+
+ /* Discard DB_MPREGs. */
+ while ((mpreg = LIST_FIRST(&dbmp->dbregq)) != NULL) {
+ LIST_REMOVE(mpreg, q);
+ __os_free(dbenv, mpreg);
+ }
+
+ /* Discard DB_MPOOLFILEs. */
+ while ((dbmfp = TAILQ_FIRST(&dbmp->dbmfq)) != NULL)
+ if ((t_ret = __memp_fclose_int(dbmfp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the thread mutex. */
+ if (dbmp->mutexp != NULL)
+ __db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp);
+
+ /* Detach from the region(s). */
+ for (i = 0; i < dbmp->nreg; ++i)
+ if ((t_ret = __db_r_detach(
+ dbenv, &dbmp->reginfo[i], 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ __os_free(dbenv, dbmp->reginfo);
+ __os_free(dbenv, dbmp);
+
+ dbenv->mp_handle = NULL;
+ return (ret);
+}
+
#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
/*
 * __mpool_region_maint --
 *	Return the amount of space needed for region maintenance info.
 *
 */
static size_t
__mpool_region_maint(infop)
	REGINFO *infop;
{
	int nlocks;

	/*
	 * For mutex maintenance we need one mutex per possible page.
	 * Compute the maximum number of pages this cache can have.
	 * Also add in an mpool mutex and mutexes for all dbenv and db
	 * handles.
	 */
	nlocks = (infop->rp->size / DB_MIN_PGSIZE) + 1;
	nlocks += DB_MAX_HANDLES;

	return (sizeof(roff_t) * nlocks);
}
#endif
+
/*
 * __mpool_region_destroy
 *	Destroy any region maintenance info.
 *
 * Tears down the shared-memory locks recorded in the region's REGMAINT
 * area (located via the MPOOL's maint_off).
 *
 * PUBLIC: void __mpool_region_destroy __P((DB_ENV *, REGINFO *));
 */
void
__mpool_region_destroy(dbenv, infop)
	DB_ENV *dbenv;
	REGINFO *infop;
{
	__db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
	    ((MPOOL *)R_ADDR(infop, infop->rp->primary))->maint_off));

	/*
	 * COMPQUIET suppresses unused-parameter warnings; dbenv is never
	 * used here, and infop's COMPQUIET follows its real use above.
	 */
	COMPQUIET(dbenv, NULL);
	COMPQUIET(infop, NULL);
}
+
/*
 * __memp_nameop
 *	Remove or rename a file in the pool.
 *
 * If newname is NULL the file named fullold is removed (and any matching
 * MPOOLFILE is marked dead); otherwise the MPOOLFILE's stored path is
 * swapped for newname and fullold is renamed to fullnew on disk.  The
 * filesystem operation's result is deliberately ignored.
 *
 * PUBLIC: int __memp_nameop __P((DB_ENV *,
 * PUBLIC:     u_int8_t *, const char *, const char *, const char *));
 *
 * XXX
 * Undocumented interface: DB private.
 */
int
__memp_nameop(dbenv, fileid, newname, fullold, fullnew)
	DB_ENV *dbenv;
	u_int8_t *fileid;
	const char *newname, *fullold, *fullnew;
{
	DB_MPOOL *dbmp;
	MPOOL *mp;
	MPOOLFILE *mfp;
	roff_t newname_off;
	int locked, ret;
	void *p;

	locked = 0;
	dbmp = NULL;

	/* Without an mpool there is nothing to update; go straight to fs. */
	if (!MPOOL_ON(dbenv))
		goto fsop;

	dbmp = dbenv->mp_handle;
	mp = dbmp->reginfo[0].primary;

	/*
	 * Remove or rename a file that the mpool might know about.  We assume
	 * that the fop layer has the file locked for exclusive access, so we
	 * don't worry about locking except for the mpool mutexes.  Checkpoint
	 * can happen at any time, independent of file locking, so we have to
	 * do the actual unlink or rename system call to avoid any race.
	 *
	 * If this is a rename, allocate first, because we can't recursively
	 * grab the region lock.
	 */
	if (newname == NULL)
		p = NULL;
	else {
		if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
		    NULL, strlen(newname) + 1, &newname_off, &p)) != 0)
			return (ret);
		memcpy(p, newname, strlen(newname) + 1);
	}

	/*
	 * NOTE(review): locked is set before R_LOCK is issued; R_LOCK has no
	 * failure return, so the flag cannot be left stale -- confirm that
	 * holds for all R_LOCK implementations.
	 */
	locked = 1;
	R_LOCK(dbenv, dbmp->reginfo);

	/*
	 * Find the file -- if mpool doesn't know about this file, that's not
	 * an error-- we may not have it open.
	 */
	for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
	    mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
		/* Ignore non-active files. */
		if (F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
			continue;

		/* Ignore non-matching files. */
		if (memcmp(fileid, R_ADDR(
		    dbmp->reginfo, mfp->fileid_off), DB_FILE_ID_LEN) != 0)
			continue;

		/* If newname is NULL, we're removing the file. */
		if (newname == NULL) {
			MUTEX_LOCK(dbenv, &mfp->mutex);
			MPOOLFILE_IGNORE(mfp);
			MUTEX_UNLOCK(dbenv, &mfp->mutex);
		} else {
			/*
			 * Else, it's a rename.  We've allocated memory
			 * for the new name.  Swap it with the old one.
			 */
			p = R_ADDR(dbmp->reginfo, mfp->path_off);
			mfp->path_off = newname_off;
		}
		break;
	}

	/*
	 * Delete the memory we no longer need: either the displaced old
	 * path (rename hit) or the unused new-name allocation (rename
	 * miss).  For a remove, p is still NULL.
	 */
	if (p != NULL)
		__db_shalloc_free(dbmp->reginfo[0].addr, p);

	/* Do the system call while still protected from checkpoint races. */
fsop:	if (newname == NULL)
		(void)__os_unlink(dbenv, fullold);
	else
		(void)__os_rename(dbenv, fullold, fullnew, 1);

	if (locked)
		R_UNLOCK(dbenv, dbmp->reginfo);

	return (0);
}
diff --git a/storage/bdb/mp/mp_register.c b/storage/bdb/mp/mp_register.c
new file mode 100644
index 00000000000..46eefad986f
--- /dev/null
+++ b/storage/bdb/mp/mp_register.c
@@ -0,0 +1,76 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_register.c,v 11.21 2002/03/27 04:32:27 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+/*
+ * memp_register --
+ * Register a file type's pgin, pgout routines.
+ *
+ * PUBLIC: int __memp_register __P((DB_ENV *, int,
+ * PUBLIC: int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
+ * PUBLIC: int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+ */
+int
+__memp_register(dbenv, ftype, pgin, pgout)
+ DB_ENV *dbenv;
+ int ftype;
+ int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+{
+ DB_MPOOL *dbmp;
+ DB_MPREG *mpreg;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "DB_ENV->memp_register", DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+
+ /*
+ * Chances are good that the item has already been registered, as the
+ * DB access methods are the folks that call this routine. If already
+ * registered, just update the entry, although it's probably unchanged.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (mpreg = LIST_FIRST(&dbmp->dbregq);
+ mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
+ if (mpreg->ftype == ftype) {
+ mpreg->pgin = pgin;
+ mpreg->pgout = pgout;
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (mpreg != NULL)
+ return (0);
+
+ /* New entry. */
+ if ((ret = __os_malloc(dbenv, sizeof(DB_MPREG), &mpreg)) != 0)
+ return (ret);
+
+ mpreg->ftype = ftype;
+ mpreg->pgin = pgin;
+ mpreg->pgout = pgout;
+
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ LIST_INSERT_HEAD(&dbmp->dbregq, mpreg, q);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ return (0);
+}
diff --git a/storage/bdb/mp/mp_stat.c b/storage/bdb/mp/mp_stat.c
new file mode 100644
index 00000000000..12e72b91d70
--- /dev/null
+++ b/storage/bdb/mp/mp_stat.c
@@ -0,0 +1,491 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_stat.c,v 11.51 2002/08/06 06:13:47 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/mp.h"
+
+static void __memp_dumpcache __P((DB_ENV *,
+ DB_MPOOL *, REGINFO *, size_t *, FILE *, u_int32_t));
+static void __memp_pbh __P((DB_MPOOL *, BH *, size_t *, FILE *));
+static void __memp_stat_wait __P((REGINFO *, MPOOL *, DB_MPOOL_STAT *, int));
+
+/*
+ * __memp_stat --
+ * Display MPOOL statistics.
+ *
+ * PUBLIC: int __memp_stat
+ * PUBLIC: __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+ */
+int
+__memp_stat(dbenv, gspp, fspp, flags)
+ DB_ENV *dbenv;
+ DB_MPOOL_STAT **gspp;
+ DB_MPOOL_FSTAT ***fspp;
+ u_int32_t flags;
+{
+ DB_MPOOL *dbmp;
+ DB_MPOOL_FSTAT **tfsp, *tstruct;
+ DB_MPOOL_STAT *sp;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ size_t len, nlen, pagesize;
+ u_int32_t pages, i;
+ int ret;
+ char *name, *tname;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_stat", DB_INIT_MPOOL);
+
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->memp_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ /* Global statistics. */
+ if (gspp != NULL) {
+ *gspp = NULL;
+
+ if ((ret = __os_umalloc(dbenv, sizeof(**gspp), gspp)) != 0)
+ return (ret);
+ memset(*gspp, 0, sizeof(**gspp));
+ sp = *gspp;
+
+ /*
+ * Initialization and information that is not maintained on
+ * a per-cache basis.
+ */
+ c_mp = dbmp->reginfo[0].primary;
+ sp->st_gbytes = c_mp->stat.st_gbytes;
+ sp->st_bytes = c_mp->stat.st_bytes;
+ sp->st_ncache = dbmp->nreg;
+ sp->st_regsize = dbmp->reginfo[0].rp->size;
+
+ /* Walk the cache list and accumulate the global information. */
+ for (i = 0; i < mp->nreg; ++i) {
+ c_mp = dbmp->reginfo[i].primary;
+
+ sp->st_map += c_mp->stat.st_map;
+ sp->st_cache_hit += c_mp->stat.st_cache_hit;
+ sp->st_cache_miss += c_mp->stat.st_cache_miss;
+ sp->st_page_create += c_mp->stat.st_page_create;
+ sp->st_page_in += c_mp->stat.st_page_in;
+ sp->st_page_out += c_mp->stat.st_page_out;
+ sp->st_ro_evict += c_mp->stat.st_ro_evict;
+ sp->st_rw_evict += c_mp->stat.st_rw_evict;
+ sp->st_page_trickle += c_mp->stat.st_page_trickle;
+ sp->st_pages += c_mp->stat.st_pages;
+ /*
+ * st_page_dirty calculated by __memp_stat_hash
+ * st_page_clean calculated here
+ */
+ __memp_stat_hash(
+ &dbmp->reginfo[i], c_mp, &sp->st_page_dirty);
+ sp->st_page_clean = sp->st_pages - sp->st_page_dirty;
+ sp->st_hash_buckets += c_mp->stat.st_hash_buckets;
+ sp->st_hash_searches += c_mp->stat.st_hash_searches;
+ sp->st_hash_longest += c_mp->stat.st_hash_longest;
+ sp->st_hash_examined += c_mp->stat.st_hash_examined;
+ /*
+ * st_hash_nowait calculated by __memp_stat_wait
+ * st_hash_wait
+ */
+ __memp_stat_wait(&dbmp->reginfo[i], c_mp, sp, flags);
+ sp->st_region_nowait +=
+ dbmp->reginfo[i].rp->mutex.mutex_set_nowait;
+ sp->st_region_wait +=
+ dbmp->reginfo[i].rp->mutex.mutex_set_wait;
+ sp->st_alloc += c_mp->stat.st_alloc;
+ sp->st_alloc_buckets += c_mp->stat.st_alloc_buckets;
+ if (sp->st_alloc_max_buckets <
+ c_mp->stat.st_alloc_max_buckets)
+ sp->st_alloc_max_buckets =
+ c_mp->stat.st_alloc_max_buckets;
+ sp->st_alloc_pages += c_mp->stat.st_alloc_pages;
+ if (sp->st_alloc_max_pages <
+ c_mp->stat.st_alloc_max_pages)
+ sp->st_alloc_max_pages =
+ c_mp->stat.st_alloc_max_pages;
+
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ dbmp->reginfo[i].rp->mutex.mutex_set_wait = 0;
+ dbmp->reginfo[i].rp->mutex.mutex_set_nowait = 0;
+ pages = c_mp->stat.st_pages;
+ memset(&c_mp->stat, 0, sizeof(c_mp->stat));
+ c_mp->stat.st_hash_buckets = c_mp->htab_buckets;
+ c_mp->stat.st_pages = pages;
+ }
+ }
+
+ /*
+ * We have duplicate statistics fields in per-file structures
+ * and the cache. The counters are only incremented in the
+ * per-file structures, except if a file is flushed from the
+ * mpool, at which time we copy its information into the cache
+ * statistics. We added the cache information above, now we
+ * add the per-file information.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ sp->st_map += mfp->stat.st_map;
+ sp->st_cache_hit += mfp->stat.st_cache_hit;
+ sp->st_cache_miss += mfp->stat.st_cache_miss;
+ sp->st_page_create += mfp->stat.st_page_create;
+ sp->st_page_in += mfp->stat.st_page_in;
+ sp->st_page_out += mfp->stat.st_page_out;
+ if (fspp == NULL && LF_ISSET(DB_STAT_CLEAR)) {
+ pagesize = mfp->stat.st_pagesize;
+ memset(&mfp->stat, 0, sizeof(mfp->stat));
+ mfp->stat.st_pagesize = pagesize;
+ }
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+
+ /* Per-file statistics. */
+ if (fspp != NULL) {
+ *fspp = NULL;
+
+ /* Count the MPOOLFILE structures. */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (i = 0, len = 0,
+ mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL;
+ ++i, mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
+ len += sizeof(DB_MPOOL_FSTAT *) +
+ sizeof(DB_MPOOL_FSTAT) +
+ strlen(__memp_fns(dbmp, mfp)) + 1;
+ len += sizeof(DB_MPOOL_FSTAT *); /* Trailing NULL */
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ if (i == 0)
+ return (0);
+
+ /* Allocate space */
+ if ((ret = __os_umalloc(dbenv, len, fspp)) != 0)
+ return (ret);
+
+ /*
+ * Build each individual entry. We assume that an array of
+		 * pointers is aligned correctly to be followed by an array
+ * of structures, which should be safe (in this particular
+ * case, the first element of the structure is a pointer, so
+ * we're doubly safe). The array is followed by space for
+ * the text file names.
+ *
+ * Add 1 to i because we need to skip over the NULL.
+ */
+ tfsp = *fspp;
+ tstruct = (DB_MPOOL_FSTAT *)(tfsp + i + 1);
+ tname = (char *)(tstruct + i);
+
+ /*
+ * Files may have been opened since we counted, don't walk
+ * off the end of the allocated space.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL && i-- > 0;
+ ++tfsp, ++tstruct, tname += nlen,
+ mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ name = __memp_fns(dbmp, mfp);
+ nlen = strlen(name) + 1;
+ *tfsp = tstruct;
+ *tstruct = mfp->stat;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ pagesize = mfp->stat.st_pagesize;
+ memset(&mfp->stat, 0, sizeof(mfp->stat));
+ mfp->stat.st_pagesize = pagesize;
+ }
+ tstruct->file_name = tname;
+ memcpy(tname, name, nlen);
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ *tfsp = NULL;
+ }
+ return (0);
+}
+
+#define FMAP_ENTRIES 200 /* Files we map. */
+
+#define MPOOL_DUMP_HASH 0x01 /* Debug hash chains. */
+#define MPOOL_DUMP_MEM 0x04 /* Debug region memory. */
+#define MPOOL_DUMP_ALL 0x07 /* Debug all. */
+
+/*
+ * __memp_dump_region --
+ * Display MPOOL structures.
+ *
+ * PUBLIC: int __memp_dump_region __P((DB_ENV *, char *, FILE *));
+ */
+int
+__memp_dump_region(dbenv, area, fp)
+ DB_ENV *dbenv;
+ char *area;
+ FILE *fp;
+{
+ static const FN fn[] = {
+ { MP_CAN_MMAP, "mmapped" },
+ { MP_DEADFILE, "dead" },
+ { MP_DIRECT, "no buffer" },
+ { MP_EXTENT, "extent" },
+ { MP_TEMP, "temporary" },
+ { MP_UNLINK, "unlink" },
+ { 0, NULL }
+ };
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ size_t fmap[FMAP_ENTRIES + 1];
+ u_int32_t i, flags;
+ int cnt;
+ u_int8_t *p;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_dump_region", DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+
+ /* Make it easy to call from the debugger. */
+ if (fp == NULL)
+ fp = stderr;
+
+ for (flags = 0; *area != '\0'; ++area)
+ switch (*area) {
+ case 'A':
+ LF_SET(MPOOL_DUMP_ALL);
+ break;
+ case 'h':
+ LF_SET(MPOOL_DUMP_HASH);
+ break;
+ case 'm':
+ LF_SET(MPOOL_DUMP_MEM);
+ break;
+ }
+
+ mp = dbmp->reginfo[0].primary;
+
+ /* Display MPOOL structures. */
+ (void)fprintf(fp, "%s\nPool (region addr 0x%lx)\n",
+ DB_LINE, P_TO_ULONG(dbmp->reginfo[0].addr));
+
+ /* Display the MPOOLFILE structures. */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (cnt = 0, mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile), ++cnt) {
+ (void)fprintf(fp, "File #%d: %s: pagesize %lu\n", cnt + 1,
+ __memp_fns(dbmp, mfp), (u_long)mfp->stat.st_pagesize);
+ (void)fprintf(fp, "\t type %ld; ref %lu; blocks %lu; last %lu;",
+ (long)mfp->ftype, (u_long)mfp->mpf_cnt,
+ (u_long)mfp->block_cnt, (u_long)mfp->last_pgno);
+ __db_prflags(mfp->flags, fn, fp);
+
+ (void)fprintf(fp, "\n\t UID: ");
+ p = R_ADDR(dbmp->reginfo, mfp->fileid_off);
+ for (i = 0; i < DB_FILE_ID_LEN; ++i, ++p) {
+ (void)fprintf(fp, "%x", (u_int)*p);
+ if (i < DB_FILE_ID_LEN - 1)
+ (void)fprintf(fp, " ");
+ }
+ (void)fprintf(fp, "\n");
+ if (cnt < FMAP_ENTRIES)
+ fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp);
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q), ++cnt) {
+ (void)fprintf(fp, "File #%d: %s: per-process, %s\n",
+ cnt + 1, __memp_fn(dbmfp),
+ F_ISSET(dbmfp, MP_READONLY) ? "readonly" : "read/write");
+ if (cnt < FMAP_ENTRIES)
+ fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (cnt < FMAP_ENTRIES)
+ fmap[cnt] = INVALID_ROFF;
+ else
+ fmap[FMAP_ENTRIES] = INVALID_ROFF;
+
+ /* Dump the memory pools. */
+ for (i = 0; i < mp->nreg; ++i) {
+ (void)fprintf(fp, "%s\nCache #%d:\n", DB_LINE, i + 1);
+ __memp_dumpcache(
+ dbenv, dbmp, &dbmp->reginfo[i], fmap, fp, flags);
+ }
+
+ /* Flush in case we're debugging. */
+ (void)fflush(fp);
+
+ return (0);
+}
+
+/*
+ * __memp_dumpcache --
+ * Display statistics for a cache.
+ */
+static void
+__memp_dumpcache(dbenv, dbmp, reginfo, fmap, fp, flags)
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ REGINFO *reginfo;
+ size_t *fmap;
+ FILE *fp;
+ u_int32_t flags;
+{
+ BH *bhp;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp;
+ int bucket;
+
+ c_mp = reginfo->primary;
+
+ /* Display the hash table list of BH's. */
+ if (LF_ISSET(MPOOL_DUMP_HASH)) {
+ (void)fprintf(fp,
+ "%s\nBH hash table (%lu hash slots)\nbucket (priority):\n",
+ DB_LINE, (u_long)c_mp->htab_buckets);
+ (void)fprintf(fp,
+ "\tpageno, file, ref, address [LSN] priority\n");
+
+ for (hp = R_ADDR(reginfo, c_mp->htab),
+ bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) {
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ if ((bhp =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) != NULL)
+ (void)fprintf(fp, "%lu (%u):\n",
+ (u_long)bucket, hp->hash_priority);
+ for (; bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ __memp_pbh(dbmp, bhp, fmap, fp);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ }
+ }
+
+ /* Dump the memory pool. */
+ if (LF_ISSET(MPOOL_DUMP_MEM))
+ __db_shalloc_dump(reginfo->addr, fp);
+}
+
+/*
+ * __memp_pbh --
+ * Display a BH structure.
+ */
+static void
+__memp_pbh(dbmp, bhp, fmap, fp)
+ DB_MPOOL *dbmp;
+ BH *bhp;
+ size_t *fmap;
+ FILE *fp;
+{
+ static const FN fn[] = {
+ { BH_CALLPGIN, "callpgin" },
+ { BH_DIRTY, "dirty" },
+ { BH_DIRTY_CREATE, "created" },
+ { BH_DISCARD, "discard" },
+ { BH_LOCKED, "locked" },
+ { BH_TRASH, "trash" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; i < FMAP_ENTRIES; ++i)
+ if (fmap[i] == INVALID_ROFF || fmap[i] == bhp->mf_offset)
+ break;
+
+ if (fmap[i] == INVALID_ROFF)
+ (void)fprintf(fp, "\t%5lu, %lu, %2lu, %8lu [%lu,%lu] %lu",
+ (u_long)bhp->pgno, (u_long)bhp->mf_offset,
+ (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp),
+ (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset,
+ (u_long)bhp->priority);
+ else
+ (void)fprintf(fp, "\t%5lu, #%d, %2lu, %8lu [%lu,%lu] %lu",
+ (u_long)bhp->pgno, i + 1,
+ (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp),
+ (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset,
+ (u_long)bhp->priority);
+
+ __db_prflags(bhp->flags, fn, fp);
+
+ (void)fprintf(fp, "\n");
+}
+
+/*
+ * __memp_stat_hash --
+ * Total hash bucket stats (other than mutex wait) into the region.
+ *
+ * PUBLIC: void __memp_stat_hash __P((REGINFO *, MPOOL *, u_int32_t *));
+ */
+void
+__memp_stat_hash(reginfo, mp, dirtyp)
+ REGINFO *reginfo;
+ MPOOL *mp;
+ u_int32_t *dirtyp;
+{
+ DB_MPOOL_HASH *hp;
+ u_int32_t dirty;
+ int i;
+
+ hp = R_ADDR(reginfo, mp->htab);
+ for (i = 0, dirty = 0; i < mp->htab_buckets; i++, hp++)
+ dirty += hp->hash_page_dirty;
+ *dirtyp = dirty;
+}
+
+/*
+ * __memp_stat_wait --
+ * Total hash bucket wait stats into the region.
+ */
+static void
+__memp_stat_wait(reginfo, mp, mstat, flags)
+ REGINFO *reginfo;
+ MPOOL *mp;
+ DB_MPOOL_STAT *mstat;
+ int flags;
+{
+ DB_MPOOL_HASH *hp;
+ DB_MUTEX *mutexp;
+ int i;
+
+ mstat->st_hash_max_wait = 0;
+ hp = R_ADDR(reginfo, mp->htab);
+ for (i = 0; i < mp->htab_buckets; i++, hp++) {
+ mutexp = &hp->hash_mutex;
+ mstat->st_hash_nowait += mutexp->mutex_set_nowait;
+ mstat->st_hash_wait += mutexp->mutex_set_wait;
+ if (mutexp->mutex_set_wait > mstat->st_hash_max_wait)
+ mstat->st_hash_max_wait = mutexp->mutex_set_wait;
+
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ mutexp->mutex_set_wait = 0;
+ mutexp->mutex_set_nowait = 0;
+ }
+ }
+}
diff --git a/storage/bdb/mp/mp_sync.c b/storage/bdb/mp/mp_sync.c
new file mode 100644
index 00000000000..03b42208b39
--- /dev/null
+++ b/storage/bdb/mp/mp_sync.c
@@ -0,0 +1,627 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_sync.c,v 11.64 2002/08/25 16:00:27 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+typedef struct {
+ DB_MPOOL_HASH *track_hp; /* Hash bucket. */
+
+ roff_t track_off; /* Page file offset. */
+ db_pgno_t track_pgno; /* Page number. */
+} BH_TRACK;
+
+static int __bhcmp __P((const void *, const void *));
+static int __memp_close_flush_files __P((DB_ENV *, DB_MPOOL *));
+static int __memp_sync_files __P((DB_ENV *, DB_MPOOL *));
+
+/*
+ * __memp_sync --
+ * Mpool sync function.
+ *
+ * PUBLIC: int __memp_sync __P((DB_ENV *, DB_LSN *));
+ */
+int
+__memp_sync(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_sync", DB_INIT_MPOOL);
+
+ /*
+ * If no LSN is provided, flush the entire cache (reasonable usage
+ * even if there's no log subsystem configured).
+ */
+ if (lsnp != NULL)
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "memp_sync", DB_INIT_LOG);
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ /* If we've flushed to the requested LSN, return that information. */
+ if (lsnp != NULL) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (log_compare(lsnp, &mp->lsn) <= 0) {
+ *lsnp = mp->lsn;
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (0);
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+
+ if ((ret = __memp_sync_int(dbenv, NULL, 0, DB_SYNC_CACHE, NULL)) != 0)
+ return (ret);
+
+ if (lsnp != NULL) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (log_compare(lsnp, &mp->lsn) > 0)
+ mp->lsn = *lsnp;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+
+ return (0);
+}
+
+/*
+ * __memp_fsync --
+ * Mpool file sync function.
+ *
+ * PUBLIC: int __memp_fsync __P((DB_MPOOLFILE *));
+ */
+int
+__memp_fsync(dbmfp)
+ DB_MPOOLFILE *dbmfp;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * If this handle doesn't have a file descriptor that's open for
+ * writing, or if the file is a temporary, there's no reason to
+ * proceed further.
+ */
+ if (F_ISSET(dbmfp, MP_READONLY))
+ return (0);
+
+ if (F_ISSET(dbmfp->mfp, MP_TEMP))
+ return (0);
+
+ return (__memp_sync_int(dbenv, dbmfp, 0, DB_SYNC_FILE, NULL));
+}
+
+/*
+ * __mp_xxx_fh --
+ * Return a file descriptor for DB 1.85 compatibility locking.
+ *
+ * PUBLIC: int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
+ */
+int
+__mp_xxx_fh(dbmfp, fhp)
+ DB_MPOOLFILE *dbmfp;
+ DB_FH **fhp;
+{
+ DB_ENV *dbenv;
+ /*
+ * This is a truly spectacular layering violation, intended ONLY to
+ * support compatibility for the DB 1.85 DB->fd call.
+ *
+ * Sync the database file to disk, creating the file as necessary.
+ *
+ * We skip the MP_READONLY and MP_TEMP tests done by memp_fsync(3).
+ * The MP_READONLY test isn't interesting because we will either
+ * already have a file descriptor (we opened the database file for
+ * reading) or we aren't readonly (we created the database which
+ * requires write privileges). The MP_TEMP test isn't interesting
+ * because we want to write to the backing file regardless so that
+ * we get a file descriptor to return.
+ */
+ *fhp = dbmfp->fhp;
+ if (F_ISSET(dbmfp->fhp, DB_FH_VALID))
+ return (0);
+ dbenv = dbmfp->dbmp->dbenv;
+
+ return (__memp_sync_int(dbenv, dbmfp, 0, DB_SYNC_FILE, NULL));
+}
+
+/*
+ * __memp_sync_int --
+ * Mpool sync internal function.
+ *
+ * PUBLIC: int __memp_sync_int
+ * PUBLIC: __P((DB_ENV *, DB_MPOOLFILE *, int, db_sync_op, int *));
+ */
+int
+__memp_sync_int(dbenv, dbmfp, ar_max, op, wrotep)
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *dbmfp;
+ int ar_max, *wrotep;
+ db_sync_op op;
+{
+ BH *bhp;
+ BH_TRACK *bharray;
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ DB_MUTEX *mutexp;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ u_int32_t n_cache;
+ int ar_cnt, hb_lock, i, pass, remaining, ret, t_ret, wait_cnt, wrote;
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+ pass = wrote = 0;
+
+ /*
+	 * If the caller does not specify how many pages, assume one
+ * per bucket.
+ */
+ if (ar_max == 0)
+ ar_max = mp->nreg * mp->htab_buckets;
+
+ if ((ret =
+ __os_malloc(dbenv, ar_max * sizeof(BH_TRACK), &bharray)) != 0)
+ return (ret);
+
+ /*
+ * Walk each cache's list of buffers and mark all dirty buffers to be
+ * written and all pinned buffers to be potentially written, depending
+ * on our flags.
+ */
+ for (ar_cnt = 0, n_cache = 0; n_cache < mp->nreg; ++n_cache) {
+ c_mp = dbmp->reginfo[n_cache].primary;
+
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ for (i = 0; i < c_mp->htab_buckets; i++, hp++) {
+ /*
+ * We can check for empty buckets before locking as we
+ * only care if the pointer is zero or non-zero. We
+ * can ignore empty buckets because we only need write
+ * buffers that were dirty before we started.
+ */
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ continue;
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
+ /* Always ignore unreferenced, clean pages. */
+ if (bhp->ref == 0 && !F_ISSET(bhp, BH_DIRTY))
+ continue;
+
+ /*
+ * Checkpoints have to wait on all pinned pages,
+ * as pages may be marked dirty when returned to
+ * the cache.
+ *
+ * File syncs only wait on pages both pinned and
+ * dirty. (We don't care if pages are marked
+ * dirty when returned to the cache, that means
+ * there's another writing thread and flushing
+ * the cache for this handle is meaningless.)
+ */
+ if (op == DB_SYNC_FILE &&
+ !F_ISSET(bhp, BH_DIRTY))
+ continue;
+
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+
+ /*
+ * Ignore temporary files -- this means you
+ * can't even flush temporary files by handle.
+ * (Checkpoint doesn't require temporary files
+				 * be flushed and the underlying buffer
+				 * write routine may not be able to write it
+ * anyway.)
+ */
+ if (F_ISSET(mfp, MP_TEMP))
+ continue;
+
+ /*
+ * If we're flushing a specific file, see if
+ * this page is from that file.
+ */
+ if (dbmfp != NULL && mfp != dbmfp->mfp)
+ continue;
+
+ /*
+ * Ignore files that aren't involved in DB's
+ * transactional operations during checkpoints.
+ */
+ if (dbmfp == NULL && mfp->lsn_off == -1)
+ continue;
+
+ /* Track the buffer, we want it. */
+ bharray[ar_cnt].track_hp = hp;
+ bharray[ar_cnt].track_pgno = bhp->pgno;
+ bharray[ar_cnt].track_off = bhp->mf_offset;
+ ar_cnt++;
+
+ if (ar_cnt >= ar_max) {
+ if ((ret = __os_realloc(dbenv,
+ (ar_max * 2) * sizeof(BH_TRACK),
+ &bharray)) != 0)
+ break;
+ ar_max *= 2;
+ }
+ }
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+ if (ret != 0)
+ goto err;
+ }
+ }
+
+	/* If there are no buffers to write, we're done. */
+ if (ar_cnt == 0)
+ goto done;
+
+ /*
+ * Write the buffers in file/page order, trying to reduce seeks by the
+ * filesystem and, when pages are smaller than filesystem block sizes,
+ * reduce the actual number of writes.
+ */
+ if (ar_cnt > 1)
+ qsort(bharray, ar_cnt, sizeof(BH_TRACK), __bhcmp);
+
+ /*
+ * If we're trickling buffers, only write enough to reach the correct
+ * percentage for this region. We may not write enough if the dirty
+ * buffers have an unbalanced distribution among the regions, but that
+ * seems unlikely.
+ */
+ if (op == DB_SYNC_TRICKLE && ar_cnt > ar_max / (int)mp->nreg)
+ ar_cnt = ar_max / (int)mp->nreg;
+
+ /*
+ * Flush the log. We have to ensure the log records reflecting the
+ * changes on the database pages we're writing have already made it
+ * to disk. We still have to check the log each time we write a page
+ * (because pages we are about to write may be modified after we have
+ * flushed the log), but in general this will at least avoid any I/O
+ * on the log's part.
+ */
+ if (LOGGING_ON(dbenv) && (ret = dbenv->log_flush(dbenv, NULL)) != 0)
+ goto err;
+
+ /*
+ * Walk the array, writing buffers. When we write a buffer, we NULL
+ * out its hash bucket pointer so we don't process a slot more than
+ * once.
+ */
+ for (remaining = ar_cnt, i = pass = 0; remaining > 0; ++i) {
+ if (i >= ar_cnt) {
+ i = 0;
+ ++pass;
+ __os_sleep(dbenv, 1, 0);
+ }
+ if ((hp = bharray[i].track_hp) == NULL)
+ continue;
+
+ /* Lock the hash bucket and find the buffer. */
+ mutexp = &hp->hash_mutex;
+ MUTEX_LOCK(dbenv, mutexp);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ if (bhp->pgno == bharray[i].track_pgno &&
+ bhp->mf_offset == bharray[i].track_off)
+ break;
+
+ /*
+ * If we can't find the buffer we're done, somebody else had
+ * to have written it.
+ *
+ * If the buffer isn't pinned or dirty, we're done, there's
+ * no work needed.
+ */
+ if (bhp == NULL || (bhp->ref == 0 && !F_ISSET(bhp, BH_DIRTY))) {
+ MUTEX_UNLOCK(dbenv, mutexp);
+ --remaining;
+ bharray[i].track_hp = NULL;
+ continue;
+ }
+
+ /*
+ * If the buffer is locked by another thread, ignore it, we'll
+ * come back to it.
+ *
+ * If the buffer is pinned and it's only the first or second
+ * time we have looked at it, ignore it, we'll come back to
+ * it.
+ *
+ * In either case, skip the buffer if we're not required to
+ * write it.
+ */
+ if (F_ISSET(bhp, BH_LOCKED) || (bhp->ref != 0 && pass < 2)) {
+ MUTEX_UNLOCK(dbenv, mutexp);
+ if (op != DB_SYNC_CACHE && op != DB_SYNC_FILE) {
+ --remaining;
+ bharray[i].track_hp = NULL;
+ }
+ continue;
+ }
+
+ /*
+ * The buffer is either pinned or dirty.
+ *
+ * Set the sync wait-for count, used to count down outstanding
+ * references to this buffer as they are returned to the cache.
+ */
+ bhp->ref_sync = bhp->ref;
+
+ /* Pin the buffer into memory and lock it. */
+ ++bhp->ref;
+ F_SET(bhp, BH_LOCKED);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+
+ /*
+ * Unlock the hash bucket and wait for the wait-for count to
+ * go to 0. No new thread can acquire the buffer because we
+ * have it locked.
+ *
+ * If a thread attempts to re-pin a page, the wait-for count
+ * will never go to 0 (the thread spins on our buffer lock,
+ * while we spin on the thread's ref count). Give up if we
+ * don't get the buffer in 3 seconds, we can try again later.
+ *
+ * If, when the wait-for count goes to 0, the buffer is found
+ * to be dirty, write it.
+ */
+ MUTEX_UNLOCK(dbenv, mutexp);
+ for (wait_cnt = 1;
+ bhp->ref_sync != 0 && wait_cnt < 4; ++wait_cnt)
+ __os_sleep(dbenv, 1, 0);
+ MUTEX_LOCK(dbenv, mutexp);
+ hb_lock = 1;
+
+ /*
+ * If the ref_sync count has gone to 0, we're going to be done
+ * with this buffer no matter what happens.
+ */
+ if (bhp->ref_sync == 0) {
+ --remaining;
+ bharray[i].track_hp = NULL;
+ }
+
+ /*
+ * If the ref_sync count has gone to 0 and the buffer is still
+ * dirty, we write it. We only try to write the buffer once.
+ * Any process checkpointing or trickle-flushing the pool
+ * must be able to write any underlying file -- if the write
+ * fails, error out. It would be very strange if file sync
+ * failed to write, but we don't care if it happens.
+ */
+ if (bhp->ref_sync == 0 && F_ISSET(bhp, BH_DIRTY)) {
+ hb_lock = 0;
+ MUTEX_UNLOCK(dbenv, mutexp);
+
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ if ((ret = __memp_bhwrite(dbmp, hp, mfp, bhp, 1)) == 0)
+ ++wrote;
+ else if (op == DB_SYNC_CACHE || op == DB_SYNC_TRICKLE)
+ __db_err(dbenv, "%s: unable to flush page: %lu",
+ __memp_fns(dbmp, mfp), (u_long)bhp->pgno);
+ else
+ ret = 0;
+ }
+
+ /*
+ * If ref_sync count never went to 0, the buffer was written
+ * by another thread, or the write failed, we still have the
+ * buffer locked.
+ *
+ * We may or may not currently hold the hash bucket mutex. If
+ * the __memp_bhwrite -> __memp_pgwrite call was successful,
+ * then __memp_pgwrite will have swapped the buffer lock for
+ * the hash lock. All other call paths will leave us without
+ * the hash bucket lock.
+ *
+ * The order of mutexes above was to acquire the buffer lock
+ * while holding the hash bucket lock. Don't deadlock here,
+ * release the buffer lock and then acquire the hash bucket
+ * lock.
+ */
+ if (F_ISSET(bhp, BH_LOCKED)) {
+ F_CLR(bhp, BH_LOCKED);
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
+
+ if (!hb_lock)
+ MUTEX_LOCK(dbenv, mutexp);
+ }
+
+ /*
+ * Reset the ref_sync count regardless of our success, we're
+ * done with this buffer for now.
+ */
+ bhp->ref_sync = 0;
+
+ /* Discard our reference and unlock the bucket. */
+ --bhp->ref;
+ MUTEX_UNLOCK(dbenv, mutexp);
+
+ if (ret != 0)
+ break;
+ }
+
+done: /* If we've opened files to flush pages, close them. */
+ if ((t_ret = __memp_close_flush_files(dbenv, dbmp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * If doing a checkpoint or flushing a file for the application, we
+ * have to force the pages to disk. We don't do this as we go along
+ * because we want to give the OS as much time as possible to lazily
+ * flush, and because we have to flush files that might not even have
+ * had dirty buffers in the cache, so we have to walk the files list.
+ */
+ if (ret == 0 && (op == DB_SYNC_CACHE || op == DB_SYNC_FILE)) {
+ if (dbmfp == NULL)
+ ret = __memp_sync_files(dbenv, dbmp);
+ else
+ ret = __os_fsync(dbenv, dbmfp->fhp);
+ }
+
+err: __os_free(dbenv, bharray);
+ if (wrotep != NULL)
+ *wrotep = wrote;
+
+ return (ret);
+}
+
+/*
+ * __memp_sync_files --
+ * Sync all the files in the environment, open or not.
+ */
+static
+int __memp_sync_files(dbenv, dbmp)
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+{
+ DB_MPOOLFILE *dbmfp;
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ int ret, t_ret;
+
+ ret = 0;
+ mp = dbmp->reginfo[0].primary;
+
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ if (mfp->stat.st_page_out == 0 ||
+ F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
+ continue;
+
+ /* Look for an already open handle. */
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
+ if (dbmfp->mfp == mfp) {
+ ret = __os_fsync(dbenv, dbmfp->fhp);
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (ret != 0)
+ goto err;
+
+ /* If we don't find one, open one. */
+ if (dbmfp == NULL) {
+ if ((ret = dbenv->memp_fcreate(dbenv, &dbmfp, 0)) != 0)
+ goto err;
+ ret = __memp_fopen_int(
+ dbmfp, mfp, R_ADDR(dbmp->reginfo, mfp->path_off),
+ 0, 0, mfp->stat.st_pagesize);
+ if (ret == 0)
+ ret = __os_fsync(dbenv, dbmfp->fhp);
+ if ((t_ret =
+ __memp_fclose_int(dbmfp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ }
+ }
+
+ if (0) {
+err: __db_err(dbenv, "%s: cannot sync: %s",
+ R_ADDR(dbmp->reginfo, mfp->path_off), db_strerror(ret));
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ return (ret);
+}
+
+/*
+ * __memp_close_flush_files --
+ * Close files opened only to flush buffers.
+ */
+static int
+__memp_close_flush_files(dbenv, dbmp)
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+{
+ DB_MPOOLFILE *dbmfp;
+ int ret;
+
+ /*
+ * The routine exists because we must close files opened by sync to
+ * flush buffers. There are two cases: first, extent files have to
+ * be closed so they may be removed when empty. Second, regular
+ * files have to be closed so we don't run out of descriptors (for
+	 * example, an application partitioning its data into databases
+ * based on timestamps, so there's a continually increasing set of
+ * files).
+ *
+ * We mark files opened in the __memp_bhwrite() function with the
+ * MP_FLUSH flag. Here we walk through our file descriptor list,
+ * and, if a file was opened by __memp_bhwrite(), we close it.
+ */
+retry: MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
+ if (F_ISSET(dbmfp, MP_FLUSH)) {
+ F_CLR(dbmfp, MP_FLUSH);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if ((ret = __memp_fclose_int(dbmfp, 0)) != 0)
+ return (ret);
+ goto retry;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
+ return (0);
+}
+
+static int
+__bhcmp(p1, p2)
+ const void *p1, *p2;
+{
+ BH_TRACK *bhp1, *bhp2;
+
+ bhp1 = (BH_TRACK *)p1;
+ bhp2 = (BH_TRACK *)p2;
+
+ /* Sort by file (shared memory pool offset). */
+ if (bhp1->track_off < bhp2->track_off)
+ return (-1);
+ if (bhp1->track_off > bhp2->track_off)
+ return (1);
+
+ /*
+ * !!!
+ * Defend against badly written quicksort code calling the comparison
+ * function with two identical pointers (e.g., WATCOM C++ (Power++)).
+ */
+ if (bhp1->track_pgno < bhp2->track_pgno)
+ return (-1);
+ if (bhp1->track_pgno > bhp2->track_pgno)
+ return (1);
+ return (0);
+}
diff --git a/storage/bdb/mp/mp_trickle.c b/storage/bdb/mp/mp_trickle.c
new file mode 100644
index 00000000000..71077ab60cc
--- /dev/null
+++ b/storage/bdb/mp/mp_trickle.c
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mp_trickle.c,v 11.24 2002/08/06 06:13:53 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+/*
+ * __memp_trickle --
+ * Keep a specified percentage of the buffers clean.
+ *
+ * PUBLIC: int __memp_trickle __P((DB_ENV *, int, int *));
+ */
+int
+__memp_trickle(dbenv, pct, nwrotep)
+ DB_ENV *dbenv;
+ int pct, *nwrotep;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *c_mp, *mp;
+ u_int32_t clean, dirty, i, total, dtmp;
+ int ret, wrote;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_trickle", DB_INIT_MPOOL);
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ if (nwrotep != NULL)
+ *nwrotep = 0;
+
+ if (pct < 1 || pct > 100)
+ return (EINVAL);
+
+ /*
+ * If there are sufficient clean buffers, no buffers or no dirty
+ * buffers, we're done.
+ *
+ * XXX
+ * Using hash_page_dirty is our only choice at the moment, but it's not
+ * as correct as we might like in the presence of pools having more
+ * than one page size, as a free 512B buffer isn't the same as a free
+ * 8KB buffer.
+ *
+ * Loop through the caches counting total/dirty buffers.
+ */
+ for (ret = 0, i = dirty = total = 0; i < mp->nreg; ++i) {
+ c_mp = dbmp->reginfo[i].primary;
+ total += c_mp->stat.st_pages;
+ __memp_stat_hash(&dbmp->reginfo[i], c_mp, &dtmp);
+ dirty += dtmp;
+ }
+
+ clean = total - dirty;
+ if (clean == total || (clean * 100) / total >= (u_long)pct)
+ return (0);
+
+ if (nwrotep == NULL)
+ nwrotep = &wrote;
+ ret = __memp_sync_int(dbenv, NULL,
+ ((total * pct) / 100) - clean, DB_SYNC_TRICKLE, nwrotep);
+
+ mp->stat.st_page_trickle += *nwrotep;
+
+ return (ret);
+}
diff --git a/storage/bdb/mutex/README b/storage/bdb/mutex/README
new file mode 100644
index 00000000000..323c34f1e74
--- /dev/null
+++ b/storage/bdb/mutex/README
@@ -0,0 +1,108 @@
+# $Id: README,v 11.2 1999/11/21 18:12:48 bostic Exp $
+
+Note: this only applies to locking using test-and-set and fcntl calls,
+pthreads were added after this was written.
+
+Resource locking routines: lock based on a db_mutex_t. All this gunk
+(including trying to make assembly code portable), is necessary because
+System V semaphores require system calls for uncontested locks and we
+don't want to make two system calls per resource lock.
+
+First, this is how it works. The db_mutex_t structure contains a resource
+test-and-set lock (tsl), a file offset, a pid for debugging and statistics
+information.
+
+If HAVE_MUTEX_THREADS is defined (i.e. we know how to do test-and-sets
+for this compiler/architecture combination), we try and lock the resource
+tsl __os_spin() times. If we can't acquire the lock that way, we use a
+system call to sleep for 1ms, 2ms, 4ms, etc. (The time is bounded at 1
+second, just in case.) Using the timer backoff means that there are two
+assumptions: that locks are held for brief periods (never over system
+calls or I/O) and that locks are not hotly contested.
+
+If HAVE_MUTEX_THREADS is not defined, i.e. we can't do test-and-sets, we
+use a file descriptor to do byte locking on a file at a specified offset.
+In this case, ALL of the locking is done in the kernel. Because file
+descriptors are allocated per process, we have to provide the file
+descriptor as part of the lock call. We still have to do timer backoff
+because we need to be able to block ourselves, i.e. the lock manager
+causes processes to wait by having the process acquire a mutex and then
+attempting to re-acquire the mutex. There's no way to use kernel locking
+to block yourself, i.e. if you hold a lock and attempt to re-acquire it,
+the attempt will succeed.
+
+Next, let's talk about why it doesn't work the way a reasonable person
+would think it should work.
+
+Ideally, we'd have the ability to try to lock the resource tsl, and if
+that fails, increment a counter of waiting processes, then block in the
+kernel until the tsl is released. The process holding the resource tsl
+would see the wait counter when it went to release the resource tsl, and
+would wake any waiting processes up after releasing the lock. This would
+actually require both another tsl (call it the mutex tsl) and
+synchronization between the call that blocks in the kernel and the actual
+resource tsl. The mutex tsl would be used to protect accesses to the
+db_mutex_t itself. Locking the mutex tsl would be done by a busy loop,
+which is safe because processes would never block holding that tsl (all
+they would do is try to obtain the resource tsl and set/check the wait
+count). The problem in this model is that the blocking call into the
+kernel requires a blocking semaphore, i.e. one whose normal state is
+locked.
+
+The only portable forms of locking under UNIX are fcntl(2) on a file
+descriptor/offset, and System V semaphores. Neither of these locking
+methods are sufficient to solve the problem.
+
+The problem with fcntl locking is that only the process that obtained the
+lock can release it. Remember, we want the normal state of the kernel
+semaphore to be locked. So, if the creator of the db_mutex_t were to
+initialize the lock to "locked", then a second process locks the resource
+tsl, and then a third process needs to block, waiting for the resource
+tsl, when the second process wants to wake up the third process, it can't
+because it's not the holder of the lock! For the second process to be
+the holder of the lock, we would have to make a system call per
+uncontested lock, which is what we were trying to get away from in the
+first place.
+
+There are some hybrid schemes, such as signaling the holder of the lock,
+or using a different blocking offset depending on which process is
+holding the lock, but it gets complicated fairly quickly. I'm open to
+suggestions, but I'm not holding my breath.
+
+Regardless, we use this form of locking when HAVE_SPINLOCKS is not
+defined, (i.e. we're locking in the kernel) because it doesn't have the
+limitations found in System V semaphores, and because the normal state of
+the kernel object in that case is unlocked, so the process releasing the
+lock is also the holder of the lock.
+
+The System V semaphore design has a number of other limitations that make
+it inappropriate for this task. Namely:
+
+First, the semaphore key name space is separate from the file system name
+space (although there exist methods for using file names to create
+semaphore keys). If we use a well-known key, there's no reason to believe
+that any particular key will not already be in use, either by another
+instance of the DB application or some other application, in which case
+the DB application will fail. If we create a key, then we have to use a
+file system name to rendezvous and pass around the key.
+
+Second, System V semaphores traditionally have compile-time, system-wide
+limits on the number of semaphore keys that you can have. Typically, that
+number is far too low for any practical purpose. Since the semaphores
+permit more than a single slot per semaphore key, we could try and get
+around that limit by using multiple slots, but that means that the file
+that we're using for rendezvous is going to have to contain slot
+information as well as semaphore key information, and we're going to be
+reading/writing it on every db_mutex_t init or destroy operation. Anyhow,
+similar compile-time, system-wide limits on the numbers of slots per
+semaphore key kick in, and you're right back where you started.
+
+My fantasy is that once POSIX.1 standard mutexes are in widespread use,
+we can switch to them. My guess is that it won't happen, because the
+POSIX semaphores are only required to work for threads within a process,
+and not independent processes.
+
+Note: there are races in the statistics code, but since it's just that,
+I didn't bother fixing them. (The fix requires a mutex tsl, so, when/if
+this code is fixed to do rational locking (see above), then change the
+statistics update code to acquire/release the mutex tsl.)
diff --git a/storage/bdb/mutex/mut_fcntl.c b/storage/bdb/mutex/mut_fcntl.c
new file mode 100644
index 00000000000..2fdf9eff7ef
--- /dev/null
+++ b/storage/bdb/mutex/mut_fcntl.c
@@ -0,0 +1,184 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mut_fcntl.c,v 11.21 2002/05/31 19:37:45 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_fcntl_mutex_init --
+ * Initialize a DB mutex structure.
+ *
+ * PUBLIC: int __db_fcntl_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ */
+int
+__db_fcntl_mutex_init(dbenv, mutexp, offset)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t offset;
+{
+ u_int32_t save;
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flags is in the mutex
+ * mutex allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
+ memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
+
+ /*
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application is private, we don't need any locks.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+
+ mutexp->off = offset;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ mutexp->reg_off = INVALID_ROFF;
+#endif
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (0);
+}
+
+/*
+ * __db_fcntl_mutex_lock
+ * Lock on a mutex, blocking if necessary.
+ *
+ * PUBLIC: int __db_fcntl_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_fcntl_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ struct flock k_lock;
+ int locked, ms, waited;
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+ /* Initialize the lock. */
+ k_lock.l_whence = SEEK_SET;
+ k_lock.l_start = mutexp->off;
+ k_lock.l_len = 1;
+
+ for (locked = waited = 0;;) {
+ /*
+ * Wait for the lock to become available; wait 1ms initially,
+ * up to 1 second.
+ */
+ for (ms = 1; mutexp->pid != 0;) {
+ waited = 1;
+ __os_yield(NULL, ms * USEC_PER_MS);
+ if ((ms <<= 1) > MS_PER_SEC)
+ ms = MS_PER_SEC;
+ }
+
+ /* Acquire an exclusive kernel lock. */
+ k_lock.l_type = F_WRLCK;
+ if (fcntl(dbenv->lockfhp->fd, F_SETLKW, &k_lock))
+ return (__os_get_errno());
+
+ /* If the resource is still available, it's ours. */
+ if (mutexp->pid == 0) {
+ locked = 1;
+ __os_id(&mutexp->pid);
+ }
+
+ /* Release the kernel lock. */
+ k_lock.l_type = F_UNLCK;
+ if (fcntl(dbenv->lockfhp->fd, F_SETLK, &k_lock))
+ return (__os_get_errno());
+
+ /*
+ * If we got the resource lock we're done.
+ *
+ * !!!
+ * We can't check to see if the lock is ours, because we may
+ * be trying to block ourselves in the lock manager, and so
+ * the holder of the lock that's preventing us from getting
+ * the lock may be us! (Seriously.)
+ */
+ if (locked)
+ break;
+ }
+
+ if (waited)
+ ++mutexp->mutex_set_wait;
+ else
+ ++mutexp->mutex_set_nowait;
+ return (0);
+}
+
+/*
+ * __db_fcntl_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_fcntl_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_fcntl_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
+#ifdef DIAGNOSTIC
+#define MSG "mutex_unlock: ERROR: released lock that was unlocked\n"
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+ if (mutexp->pid == 0)
+ write(STDERR_FILENO, MSG, sizeof(MSG) - 1);
+#endif
+
+ /*
+ * Release the resource. We don't have to acquire any locks because
+ * processes trying to acquire the lock are checking for a pid set to
+ * 0/non-0, not to any specific value.
+ */
+ mutexp->pid = 0;
+
+ return (0);
+}
+
+/*
+ * __db_fcntl_mutex_destroy --
+ * Destroy a DB_MUTEX.
+ *
+ * PUBLIC: int __db_fcntl_mutex_destroy __P((DB_MUTEX *));
+ */
+int
+__db_fcntl_mutex_destroy(mutexp)
+ DB_MUTEX *mutexp;
+{
+ COMPQUIET(mutexp, NULL);
+
+ return (0);
+}
diff --git a/storage/bdb/mutex/mut_pthread.c b/storage/bdb/mutex/mut_pthread.c
new file mode 100644
index 00000000000..4a55ce0ca03
--- /dev/null
+++ b/storage/bdb/mutex/mut_pthread.c
@@ -0,0 +1,361 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mut_pthread.c,v 11.53 2002/08/13 19:56:47 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef DIAGNOSTIC
+#undef MSG1
+#define MSG1 "mutex_lock: ERROR: lock currently in use: pid: %lu.\n"
+#undef MSG2
+#define MSG2 "mutex_unlock: ERROR: lock already unlocked\n"
+#ifndef STDERR_FILENO
+#define STDERR_FILENO 2
+#endif
+#endif
+
+#ifdef HAVE_MUTEX_SOLARIS_LWP
+#define pthread_cond_signal _lwp_cond_signal
+#define pthread_cond_wait _lwp_cond_wait
+#define pthread_mutex_lock _lwp_mutex_lock
+#define pthread_mutex_trylock _lwp_mutex_trylock
+#define pthread_mutex_unlock _lwp_mutex_unlock
+/*
+ * _lwp_self returns the LWP process ID which isn't a unique per-thread
+ * identifier. Use pthread_self instead, it appears to work even if we
+ * are not a pthreads application.
+ */
+#define pthread_mutex_destroy(x) 0
+#endif
+#ifdef HAVE_MUTEX_UI_THREADS
+#define pthread_cond_signal cond_signal
+#define pthread_cond_wait cond_wait
+#define pthread_mutex_lock mutex_lock
+#define pthread_mutex_trylock mutex_trylock
+#define pthread_mutex_unlock mutex_unlock
+#define pthread_self thr_self
+#define pthread_mutex_destroy mutex_destroy
+#endif
+
+#define PTHREAD_UNLOCK_ATTEMPTS 5
+
+/*
+ * __db_pthread_mutex_init --
+ * Initialize a DB_MUTEX.
+ *
+ * PUBLIC: int __db_pthread_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ */
+int
+__db_pthread_mutex_init(dbenv, mutexp, flags)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t flags;
+{
+ u_int32_t save;
+ int ret;
+
+ ret = 0;
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flags is in the mutex
+ * mutex allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
+ memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
+
+ /*
+ * If this is a thread lock or the process has told us that there are
+ * no other processes in the environment, use thread-only locks, they
+ * are faster in some cases.
+ *
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application isn't threaded, there aren't any threads to block.
+ */
+ if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+ }
+
+#ifdef HAVE_MUTEX_PTHREADS
+ {
+ pthread_condattr_t condattr, *condattrp = NULL;
+ pthread_mutexattr_t mutexattr, *mutexattrp = NULL;
+
+ if (!LF_ISSET(MUTEX_THREAD)) {
+ ret = pthread_mutexattr_init(&mutexattr);
+#ifndef HAVE_MUTEX_THREAD_ONLY
+ if (ret == 0)
+ ret = pthread_mutexattr_setpshared(
+ &mutexattr, PTHREAD_PROCESS_SHARED);
+#endif
+ mutexattrp = &mutexattr;
+ }
+
+ if (ret == 0)
+ ret = pthread_mutex_init(&mutexp->mutex, mutexattrp);
+ if (mutexattrp != NULL)
+ pthread_mutexattr_destroy(mutexattrp);
+ if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) {
+ if (!LF_ISSET(MUTEX_THREAD)) {
+ ret = pthread_condattr_init(&condattr);
+#ifndef HAVE_MUTEX_THREAD_ONLY
+ if (ret == 0) {
+ condattrp = &condattr;
+ ret = pthread_condattr_setpshared(
+ &condattr, PTHREAD_PROCESS_SHARED);
+ }
+#endif
+ }
+
+ if (ret == 0)
+ ret = pthread_cond_init(&mutexp->cond, condattrp);
+
+ F_SET(mutexp, MUTEX_SELF_BLOCK);
+ if (condattrp != NULL)
+ (void)pthread_condattr_destroy(condattrp);
+ }
+
+ }
+#endif
+#ifdef HAVE_MUTEX_SOLARIS_LWP
+ /*
+ * XXX
+ * Gcc complains about missing braces in the static initializations of
+ * lwp_cond_t and lwp_mutex_t structures because the structures contain
+ * sub-structures/unions and the Solaris include file that defines the
+ * initialization values doesn't have surrounding braces. There's not
+ * much we can do.
+ */
+ if (LF_ISSET(MUTEX_THREAD)) {
+ static lwp_mutex_t mi = DEFAULTMUTEX;
+
+ mutexp->mutex = mi;
+ } else {
+ static lwp_mutex_t mi = SHAREDMUTEX;
+
+ mutexp->mutex = mi;
+ }
+ if (LF_ISSET(MUTEX_SELF_BLOCK)) {
+ if (LF_ISSET(MUTEX_THREAD)) {
+ static lwp_cond_t ci = DEFAULTCV;
+
+ mutexp->cond = ci;
+ } else {
+ static lwp_cond_t ci = SHAREDCV;
+
+ mutexp->cond = ci;
+ }
+ F_SET(mutexp, MUTEX_SELF_BLOCK);
+ }
+#endif
+#ifdef HAVE_MUTEX_UI_THREADS
+ {
+ int type;
+
+ type = LF_ISSET(MUTEX_THREAD) ? USYNC_THREAD : USYNC_PROCESS;
+
+ ret = mutex_init(&mutexp->mutex, type, NULL);
+ if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) {
+ ret = cond_init(&mutexp->cond, type, NULL);
+
+ F_SET(mutexp, MUTEX_SELF_BLOCK);
+ }}
+#endif
+
+ mutexp->spins = __os_spin(dbenv);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ mutexp->reg_off = INVALID_ROFF;
+#endif
+ if (ret == 0)
+ F_SET(mutexp, MUTEX_INITED);
+ else
+ __db_err(dbenv,
+ "unable to initialize mutex: %s", strerror(ret));
+
+ return (ret);
+}
+
+/*
+ * __db_pthread_mutex_lock
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_pthread_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_pthread_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ u_int32_t nspins;
+ int i, ret, waited;
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ /* Attempt to acquire the resource for N spins. */
+ for (nspins = mutexp->spins; nspins > 0; --nspins)
+ if (pthread_mutex_trylock(&mutexp->mutex) == 0)
+ break;
+
+ if (nspins == 0 && (ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
+ goto err;
+
+ if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
+ for (waited = 0; mutexp->locked != 0; waited = 1) {
+ ret = pthread_cond_wait(&mutexp->cond, &mutexp->mutex);
+ /*
+ * !!!
+ * Solaris bug workaround:
+ * pthread_cond_wait() sometimes returns ETIME -- out
+ * of sheer paranoia, check both ETIME and ETIMEDOUT.
+ * We believe this happens when the application uses
+ * SIGALRM for some purpose, e.g., the C library sleep
+ * call, and Solaris delivers the signal to the wrong
+ * LWP.
+ */
+ if (ret != 0 && ret != EINTR &&
+#ifdef ETIME
+ ret != ETIME &&
+#endif
+ ret != ETIMEDOUT) {
+ (void)pthread_mutex_unlock(&mutexp->mutex);
+ return (ret);
+ }
+ }
+
+ if (waited)
+ ++mutexp->mutex_set_wait;
+ else
+ ++mutexp->mutex_set_nowait;
+
+#ifdef DIAGNOSTIC
+ mutexp->locked = (u_int32_t)pthread_self();
+#else
+ mutexp->locked = 1;
+#endif
+ /*
+ * According to HP-UX engineers contacted by Netscape,
+ * pthread_mutex_unlock() will occasionally return EFAULT
+ * for no good reason on mutexes in shared memory regions,
+ * and the correct caller behavior is to try again. Do
+ * so, up to PTHREAD_UNLOCK_ATTEMPTS consecutive times.
+ * Note that we don't bother to restrict this to HP-UX;
+ * it should be harmless elsewhere. [#2471]
+ */
+ i = PTHREAD_UNLOCK_ATTEMPTS;
+ do {
+ ret = pthread_mutex_unlock(&mutexp->mutex);
+ } while (ret == EFAULT && --i > 0);
+ if (ret != 0)
+ goto err;
+ } else {
+ if (nspins == mutexp->spins)
+ ++mutexp->mutex_set_nowait;
+ else if (nspins > 0) {
+ ++mutexp->mutex_set_spin;
+ mutexp->mutex_set_spins += mutexp->spins - nspins;
+ } else
+ ++mutexp->mutex_set_wait;
+#ifdef DIAGNOSTIC
+ if (mutexp->locked) {
+ char msgbuf[128];
+ (void)snprintf(msgbuf,
+ sizeof(msgbuf), MSG1, (u_long)mutexp->locked);
+ (void)write(STDERR_FILENO, msgbuf, strlen(msgbuf));
+ }
+ mutexp->locked = (u_int32_t)pthread_self();
+#else
+ mutexp->locked = 1;
+#endif
+ }
+ return (0);
+
+err: __db_err(dbenv, "unable to lock mutex: %s", strerror(ret));
+ return (ret);
+}
+
+/*
+ * __db_pthread_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_pthread_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_pthread_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ int i, ret;
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+#ifdef DIAGNOSTIC
+ if (!mutexp->locked)
+ (void)write(STDERR_FILENO, MSG2, sizeof(MSG2) - 1);
+#endif
+
+ if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
+ if ((ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
+ goto err;
+
+ mutexp->locked = 0;
+
+ if ((ret = pthread_cond_signal(&mutexp->cond)) != 0)
+ return (ret);
+
+ } else
+ mutexp->locked = 0;
+
+ /* See comment above; workaround for [#2471]. */
+ i = PTHREAD_UNLOCK_ATTEMPTS;
+ do {
+ ret = pthread_mutex_unlock(&mutexp->mutex);
+ } while (ret == EFAULT && --i > 0);
+ return (ret);
+
+err: __db_err(dbenv, "unable to unlock mutex: %s", strerror(ret));
+ return (ret);
+}
+
+/*
+ * __db_pthread_mutex_destroy --
+ * Destroy a DB_MUTEX.
+ *
+ * PUBLIC: int __db_pthread_mutex_destroy __P((DB_MUTEX *));
+ */
+int
+__db_pthread_mutex_destroy(mutexp)
+ DB_MUTEX *mutexp;
+{
+ int ret;
+
+ if (F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ if ((ret = pthread_mutex_destroy(&mutexp->mutex)) != 0)
+ __db_err(NULL, "unable to destroy mutex: %s", strerror(ret));
+ return (ret);
+}
diff --git a/storage/bdb/mutex/mut_tas.c b/storage/bdb/mutex/mut_tas.c
new file mode 100644
index 00000000000..c24e09473ca
--- /dev/null
+++ b/storage/bdb/mutex/mut_tas.c
@@ -0,0 +1,199 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mut_tas.c,v 11.32 2002/05/07 18:42:21 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+/*
+ * This is where we load in the actual test-and-set mutex code.
+ */
+#define LOAD_ACTUAL_MUTEX_CODE
+#include "db_int.h"
+
+/*
+ * __db_tas_mutex_init --
+ * Initialize a DB_MUTEX.
+ *
+ * PUBLIC: int __db_tas_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ */
+int
+__db_tas_mutex_init(dbenv, mutexp, flags)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t flags;
+{
+ u_int32_t save;
+
+ /* Check alignment. */
+ DB_ASSERT(((db_alignp_t)mutexp & (MUTEX_ALIGN - 1)) == 0);
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flags is in the mutex
+ * mutex allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
+ memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
+
+ /*
+ * If this is a thread lock or the process has told us that there are
+ * no other processes in the environment, use thread-only locks, they
+ * are faster in some cases.
+ *
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application isn't threaded, there aren't any threads to block.
+ */
+ if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+ }
+
+ /* Initialize the lock. */
+ if (MUTEX_INIT(&mutexp->tas))
+ return (__os_get_errno());
+
+ mutexp->spins = __os_spin(dbenv);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ mutexp->reg_off = INVALID_ROFF;
+#endif
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (0);
+}
+
+/*
+ * __db_tas_mutex_lock
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_tas_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_tas_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ u_long ms;
+ int nspins;
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ ms = 1;
+
+loop: /* Attempt to acquire the resource for N spins. */
+ for (nspins = mutexp->spins; nspins > 0; --nspins) {
+#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
+relock:
+#endif
+ if (!MUTEX_SET(&mutexp->tas))
+ continue;
+#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
+ /*
+ * HP semaphores are unlocked automatically when a holding
+ * process exits. If the mutex appears to be locked
+ * (mutexp->locked != 0) but we got here, assume this has
+ * happened. Stick our own pid into mutexp->locked and
+ * lock again. (The default state of the mutexes used to
+ * block in __lock_get_internal is locked, so exiting with
+ * a locked mutex is reasonable behavior for a process that
+ * happened to initialize or use one of them.)
+ */
+ if (mutexp->locked != 0) {
+ __os_id(&mutexp->locked);
+ goto relock;
+ }
+ /*
+ * If we make it here, locked == 0, the diagnostic won't fire,
+ * and we were really unlocked by someone calling the
+ * DB mutex unlock function.
+ */
+#endif
+#ifdef DIAGNOSTIC
+ if (mutexp->locked != 0)
+ __db_err(dbenv,
+ "__db_tas_mutex_lock: ERROR: lock currently in use: ID: %lu",
+ (u_long)mutexp->locked);
+#endif
+#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
+ __os_id(&mutexp->locked);
+#endif
+ if (ms == 1)
+ ++mutexp->mutex_set_nowait;
+ else
+ ++mutexp->mutex_set_wait;
+ return (0);
+ }
+
+ /* Yield the processor; wait 1ms initially, up to 1 second. */
+ __os_yield(NULL, ms * USEC_PER_MS);
+ if ((ms <<= 1) > MS_PER_SEC)
+ ms = MS_PER_SEC;
+
+ goto loop;
+}
+
+/*
+ * __db_tas_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_tas_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_tas_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+#ifdef DIAGNOSTIC
+ if (!mutexp->locked)
+ __db_err(dbenv,
+ "__db_tas_mutex_unlock: ERROR: lock already unlocked");
+#endif
+#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
+ mutexp->locked = 0;
+#endif
+
+ MUTEX_UNSET(&mutexp->tas);
+
+ return (0);
+}
+
+/*
+ * __db_tas_mutex_destroy --
+ * Destroy a DB_MUTEX.
+ *
+ * PUBLIC: int __db_tas_mutex_destroy __P((DB_MUTEX *));
+ */
+int
+__db_tas_mutex_destroy(mutexp)
+ DB_MUTEX *mutexp;
+{
+ if (F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ MUTEX_DESTROY(&mutexp->tas);
+
+ return (0);
+}
diff --git a/storage/bdb/mutex/mut_win32.c b/storage/bdb/mutex/mut_win32.c
new file mode 100644
index 00000000000..49eb20a6ecf
--- /dev/null
+++ b/storage/bdb/mutex/mut_win32.c
@@ -0,0 +1,257 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mut_win32.c,v 1.8 2002/09/10 02:37:25 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+/*
+ * This is where we load in the actual test-and-set mutex code.
+ */
+#define LOAD_ACTUAL_MUTEX_CODE
+#include "db_int.h"
+
+/* We don't want to run this code even in "ordinary" diagnostic mode. */
+#undef MUTEX_DIAG
+
+#define GET_HANDLE(mutexp, event) do { \
+ char idbuf[13]; \
+ \
+ if (F_ISSET(mutexp, MUTEX_THREAD)) { \
+ event = mutexp->event; \
+ return (0); \
+ } \
+ \
+ snprintf(idbuf, sizeof idbuf, "db.m%08x", mutexp->id); \
+ event = CreateEvent(NULL, FALSE, FALSE, idbuf); \
+ if (event == NULL) \
+ return (__os_win32_errno()); \
+} while (0)
+
+#define RELEASE_HANDLE(mutexp, event) \
+ if (!F_ISSET(mutexp, MUTEX_THREAD) && event != NULL) { \
+ CloseHandle(event); \
+ event = NULL; \
+ }
+
+/*
+ * __db_win32_mutex_init --
+ * Initialize a DB_MUTEX.
+ *
+ * PUBLIC: int __db_win32_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ */
+int
+__db_win32_mutex_init(dbenv, mutexp, flags)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t flags;
+{
+ u_int32_t save;
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flags is in the mutex
+ * mutex allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
+ memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
+
+ /*
+ * If this is a thread lock or the process has told us that there are
+ * no other processes in the environment, use thread-only locks, they
+ * are faster in some cases.
+ *
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application isn't threaded, there aren't any threads to block.
+ */
+ if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+ F_SET(mutexp, MUTEX_THREAD);
+ mutexp->event = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (mutexp->event == NULL)
+ return (__os_win32_errno());
+ } else
+ mutexp->id = ((getpid() & 0xffff) << 16) ^ P_TO_UINT32(mutexp);
+
+ mutexp->spins = __os_spin(dbenv);
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (0);
+}
+
+/*
+ * __db_win32_mutex_lock
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_win32_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_win32_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ HANDLE event;
+ int ret, ms, nspins;
+#ifdef MUTEX_DIAG
+ LARGE_INTEGER now;
+#endif
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ event = NULL;
+ ms = 50;
+ ret = 0;
+
+loop: /* Attempt to acquire the resource for N spins. */
+ for (nspins = mutexp->spins; nspins > 0; --nspins) {
+ if (!MUTEX_SET(&mutexp->tas))
+ continue;
+
+#ifdef DIAGNOSTIC
+ if (mutexp->locked)
+ __db_err(dbenv,
+ "__db_win32_mutex_lock: mutex double-locked!");
+
+ __os_id(&mutexp->locked);
+#endif
+
+ if (event == NULL)
+ ++mutexp->mutex_set_nowait;
+ else {
+ ++mutexp->mutex_set_wait;
+ RELEASE_HANDLE(mutexp, event);
+ InterlockedDecrement(&mutexp->nwaiters);
+#ifdef MUTEX_DIAG
+ if (ret != WAIT_OBJECT_0) {
+ QueryPerformanceCounter(&now);
+ printf("[%I64d]: Lost signal on mutex %p, "
+ "id %d, ms %d\n",
+ now.QuadPart, mutexp, mutexp->id, ms);
+ }
+#endif
+ }
+
+ return (0);
+ }
+
+ /*
+ * Yield the processor; wait 50 ms initially, up to 1 second. This
+ * loop is needed to work around a race where the signal from the
+ * unlocking thread gets lost. We start at 50 ms because it's unlikely
+ * to happen often and we want to avoid wasting CPU.
+ */
+ if (event == NULL) {
+#ifdef MUTEX_DIAG
+ QueryPerformanceCounter(&now);
+ printf("[%I64d]: Waiting on mutex %p, id %d\n",
+ now.QuadPart, mutexp, mutexp->id);
+#endif
+ InterlockedIncrement(&mutexp->nwaiters);
+ GET_HANDLE(mutexp, event);
+ }
+ if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED)
+ return (__os_win32_errno());
+ if ((ms <<= 1) > MS_PER_SEC)
+ ms = MS_PER_SEC;
+
+ goto loop;
+}
+
+/*
+ * __db_win32_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_win32_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_win32_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ int ret;
+ HANDLE event;
+#ifdef MUTEX_DIAG
+ LARGE_INTEGER now;
+#endif
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+#ifdef DIAGNOSTIC
+ if (!mutexp->tas || !mutexp->locked)
+ __db_err(dbenv,
+ "__db_win32_mutex_unlock: ERROR: lock already unlocked");
+
+ mutexp->locked = 0;
+#endif
+ MUTEX_UNSET(&mutexp->tas);
+
+ ret = 0;
+
+ if (mutexp->nwaiters > 0) {
+ GET_HANDLE(mutexp, event);
+
+#ifdef MUTEX_DIAG
+ QueryPerformanceCounter(&now);
+ printf("[%I64d]: Signalling mutex %p, id %d\n",
+ now.QuadPart, mutexp, mutexp->id);
+#endif
+ if (!PulseEvent(event))
+ ret = __os_win32_errno();
+
+ RELEASE_HANDLE(mutexp, event);
+ }
+
+#ifdef DIAGNOSTIC
+ if (ret != 0)
+ __db_err(dbenv,
+ "__db_win32_mutex_unlock: ERROR: unlock failed");
+#endif
+
+ return (ret);
+}
+
+/*
+ * __db_win32_mutex_destroy --
+ * Destroy a DB_MUTEX.
+ *
+ * PUBLIC: int __db_win32_mutex_destroy __P((DB_MUTEX *));
+ */
+int
+__db_win32_mutex_destroy(mutexp)
+ DB_MUTEX *mutexp;
+{
+ int ret;
+
+ if (F_ISSET(mutexp, MUTEX_IGNORE) || !F_ISSET(mutexp, MUTEX_THREAD))
+ return (0);
+
+ ret = 0;
+ if (mutexp->event != NULL) {
+ if (!CloseHandle(mutexp->event))
+ ret = __os_win32_errno();
+ mutexp->event = NULL;
+ }
+
+ return (ret);
+}
diff --git a/storage/bdb/mutex/mutex.c b/storage/bdb/mutex/mutex.c
new file mode 100644
index 00000000000..5418764a889
--- /dev/null
+++ b/storage/bdb/mutex/mutex.c
@@ -0,0 +1,395 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mutex.c,v 11.37 2002/05/31 19:37:46 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/txn.h"
+#endif
+
+static int __db_mutex_alloc_int __P((DB_ENV *, REGINFO *, DB_MUTEX **));
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+static REGMAINT * __db_mutex_maint __P((DB_ENV *, REGINFO *));
+#endif
+
+/*
+ * __db_mutex_setup --
+ * External interface to allocate, and/or initialize, record
+ * mutexes.
+ *
+ * PUBLIC: int __db_mutex_setup __P((DB_ENV *, REGINFO *, void *, u_int32_t));
+ */
+int
+__db_mutex_setup(dbenv, infop, ptr, flags)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ void *ptr;
+ u_int32_t flags;
+{
+ DB_MUTEX *mutex;
+ REGMAINT *maint;
+ u_int32_t iflags, offset;
+ int ret;
+
+ ret = 0;
+ /*
+ * If they indicated the region is not locked, then lock it.
+ * This is only needed when we have unusual mutex resources.
+ * (I.e. MUTEX_NO_MALLOC_LOCKS or HAVE_MUTEX_SYSTEM_RESOURCES)
+ */
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ if (!LF_ISSET(MUTEX_NO_RLOCK))
+ R_LOCK(dbenv, infop);
+#endif
+ /*
+ * Allocate the mutex if they asked us to.
+ */
+ mutex = NULL;
+ if (LF_ISSET(MUTEX_ALLOC)) {
+ if ((ret = __db_mutex_alloc_int(dbenv, infop, ptr)) != 0)
+ goto err;
+ mutex = *(DB_MUTEX **)ptr;
+ } else
+ mutex = (DB_MUTEX *)ptr;
+
+ /*
+ * Set up to initialize the mutex.
+ */
+ iflags = LF_ISSET(MUTEX_THREAD | MUTEX_SELF_BLOCK);
+ switch (infop->type) {
+ case REGION_TYPE_LOCK:
+ offset = P_TO_UINT32(mutex) + DB_FCNTL_OFF_LOCK;
+ break;
+ case REGION_TYPE_MPOOL:
+ offset = P_TO_UINT32(mutex) + DB_FCNTL_OFF_MPOOL;
+ break;
+ default:
+ offset = P_TO_UINT32(mutex) + DB_FCNTL_OFF_GEN;
+ break;
+ }
+ maint = NULL;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ if (!LF_ISSET(MUTEX_NO_RECORD))
+ maint = (REGMAINT *)__db_mutex_maint(dbenv, infop);
+#endif
+
+ ret = __db_mutex_init(dbenv, mutex, offset, iflags, infop, maint);
+err:
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ if (!LF_ISSET(MUTEX_NO_RLOCK))
+ R_UNLOCK(dbenv, infop);
+#endif
+ /*
+ * If we allocated the mutex but had an error on init'ing,
+ * then we must free it before returning.
+ * !!!
+ * Free must be done after releasing region lock.
+ */
+ if (ret != 0 && LF_ISSET(MUTEX_ALLOC) && mutex != NULL) {
+ __db_mutex_free(dbenv, infop, mutex);
+ *(DB_MUTEX **)ptr = NULL;
+ }
+ return (ret);
+}
+
+/*
+ * __db_mutex_alloc_int --
+ * Allocate and initialize a mutex.
+ */
+static int
+__db_mutex_alloc_int(dbenv, infop, storep)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ DB_MUTEX **storep;
+{
+ int ret;
+
+ /*
+ * If the architecture supports mutexes in heap memory, use heap memory.
+ * If it doesn't, we have to allocate space in a region. If allocation
+ * in the region fails, fallback to allocating from the mpool region,
+ * because it's big, it almost always exists and if it's entirely dirty,
+ * we can free buffers until memory is available.
+ */
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ ret = __db_shalloc(infop->addr, sizeof(DB_MUTEX), MUTEX_ALIGN, storep);
+
+ if (ret == ENOMEM && MPOOL_ON(dbenv)) {
+ DB_MPOOL *dbmp;
+
+ dbmp = dbenv->mp_handle;
+ if ((ret = __memp_alloc(dbmp,
+ dbmp->reginfo, NULL, sizeof(DB_MUTEX), NULL, storep)) == 0)
+ (*storep)->flags = MUTEX_MPOOL;
+ } else
+ (*storep)->flags = 0;
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+ ret = __os_calloc(dbenv, 1, sizeof(DB_MUTEX), storep);
+#endif
+ if (ret != 0)
+ __db_err(dbenv, "Unable to allocate memory for mutex");
+ return (ret);
+}
+
+/*
+ * __db_mutex_free --
+ * Free a mutex.
+ *
+ * PUBLIC: void __db_mutex_free __P((DB_ENV *, REGINFO *, DB_MUTEX *));
+ */
+void
+__db_mutex_free(dbenv, infop, mutexp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ DB_MUTEX *mutexp;
+{
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ R_LOCK(dbenv, infop);
+#if defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ if (F_ISSET(mutexp, MUTEX_INITED))
+ __db_shlocks_clear(mutexp, infop, NULL);
+#endif
+ if (F_ISSET(mutexp, MUTEX_MPOOL)) {
+ DB_MPOOL *dbmp;
+
+ dbmp = dbenv->mp_handle;
+ R_LOCK(dbenv, dbmp->reginfo);
+ __db_shalloc_free(dbmp->reginfo[0].addr, mutexp);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ } else
+ __db_shalloc_free(infop->addr, mutexp);
+ R_UNLOCK(dbenv, infop);
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+ __os_free(dbenv, mutexp);
+#endif
+}
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+/*
+ * __db_shreg_locks_record --
+ * Record an entry in the shared locks area.
+ * Region lock must be held in caller.
+ */
+static int
+__db_shreg_locks_record(dbenv, mutexp, infop, rp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ u_int i;
+
+ if (!F_ISSET(mutexp, MUTEX_INITED))
+ return (0);
+ DB_ASSERT(mutexp->reg_off == INVALID_ROFF);
+ rp->stat.st_records++;
+ i = (roff_t *)R_ADDR(infop, rp->regmutex_hint) - &rp->regmutexes[0];
+ if (rp->regmutexes[i] != INVALID_ROFF) {
+ /*
+ * Our hint failed, search for an open slot.
+ */
+ rp->stat.st_hint_miss++;
+ for (i = 0; i < rp->reglocks; i++)
+ if (rp->regmutexes[i] == INVALID_ROFF)
+ break;
+ if (i == rp->reglocks) {
+ rp->stat.st_max_locks++;
+ __db_err(dbenv,
+ "Region mutexes: Exceeded maximum lock slots %lu",
+ (u_long)rp->reglocks);
+ return (ENOMEM);
+ }
+ } else
+ rp->stat.st_hint_hit++;
+ /*
+ * When we get here, i is an empty slot. Record this
+ * mutex, set hint to point to the next slot and we are done.
+ */
+ rp->regmutexes[i] = R_OFFSET(infop, mutexp);
+ mutexp->reg_off = R_OFFSET(infop, &rp->regmutexes[i]);
+ rp->regmutex_hint = (i < rp->reglocks - 1) ?
+ R_OFFSET(infop, &rp->regmutexes[i+1]) :
+ R_OFFSET(infop, &rp->regmutexes[0]);
+ return (0);
+}
+
+/*
+ * __db_shreg_locks_clear --
+ * Erase an entry in the shared locks area.
+ *
+ * PUBLIC: void __db_shreg_locks_clear __P((DB_MUTEX *, REGINFO *, REGMAINT *));
+ */
+void
+__db_shreg_locks_clear(mutexp, infop, rp)
+ DB_MUTEX *mutexp;
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ /*
+ * !!!
+ * Assumes the caller's region lock is held.
+ */
+ if (!F_ISSET(mutexp, MUTEX_INITED))
+ return;
+ /*
+ * This function is generally only called on a forcible remove of an
+ * environment. We recorded our index in the mutex, find and clear it.
+ */
+ DB_ASSERT(mutexp->reg_off != INVALID_ROFF);
+ DB_ASSERT(*(roff_t *)R_ADDR(infop, mutexp->reg_off) == \
+ R_OFFSET(infop, mutexp));
+ *(roff_t *)R_ADDR(infop, mutexp->reg_off) = 0;
+ if (rp != NULL) {
+ rp->regmutex_hint = mutexp->reg_off;
+ rp->stat.st_clears++;
+ }
+ mutexp->reg_off = INVALID_ROFF;
+ __db_mutex_destroy(mutexp);
+}
+
+/*
+ * __db_shreg_locks_destroy --
+ * Destroy all mutexes in a region's range.
+ *
+ * PUBLIC: void __db_shreg_locks_destroy __P((REGINFO *, REGMAINT *));
+ */
+void
+__db_shreg_locks_destroy(infop, rp)
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ u_int32_t i;
+
+ /*
+ * Go through the list of all mutexes and destroy them.
+ */
+ for (i = 0; i < rp->reglocks; i++)
+ if (rp->regmutexes[i] != 0) {
+ rp->stat.st_destroys++;
+ __db_mutex_destroy((DB_MUTEX *)R_ADDR(infop,
+ rp->regmutexes[i]));
+ }
+}
+
+/*
+ * __db_shreg_mutex_init --
+ * Initialize a shared memory mutex.
+ *
+ * PUBLIC: int __db_shreg_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t,
+ * PUBLIC: u_int32_t, REGINFO *, REGMAINT *));
+ */
+int
+__db_shreg_mutex_init(dbenv, mutexp, offset, flags, infop, rp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t offset;
+ u_int32_t flags;
+ REGINFO *infop;
+ REGMAINT *rp;
+{
+ int ret;
+
+ if ((ret = __db_mutex_init_int(dbenv, mutexp, offset, flags)) != 0)
+ return (ret);
+ /*
+ * Some mutexes cannot be recorded, but we want one interface.
+ * So, if we have no REGMAINT, then just return.
+ */
+ if (rp == NULL)
+ return (ret);
+ /*
+ * !!!
+ * Since __db_mutex_init_int is a macro, we may not be
+ * using the 'offset' as it is only used for one type
+ * of mutex. We COMPQUIET it here, after the call above.
+ */
+ COMPQUIET(offset, 0);
+ ret = __db_shreg_locks_record(dbenv, mutexp, infop, rp);
+
+ /*
+ * If we couldn't record it and we are returning an error,
+ * we need to destroy the mutex we just created.
+ */
+ if (ret)
+ __db_mutex_destroy(mutexp);
+
+ return (ret);
+}
+
+/*
+ * __db_shreg_maintinit --
+ * Initialize a region's maintenance information.
+ *
+ * PUBLIC: void __db_shreg_maintinit __P((REGINFO *, void *addr, size_t));
+ */
+void
+__db_shreg_maintinit(infop, addr, size)
+ REGINFO *infop;
+ void *addr;
+ size_t size;
+{
+ REGMAINT *rp;
+ u_int32_t i;
+
+ rp = (REGMAINT *)addr;
+ memset(addr, 0, sizeof(REGMAINT));
+ rp->reglocks = size / sizeof(roff_t);
+ rp->regmutex_hint = R_OFFSET(infop, &rp->regmutexes[0]);
+ for (i = 0; i < rp->reglocks; i++)
+ rp->regmutexes[i] = INVALID_ROFF;
+}
+
+static REGMAINT *
+__db_mutex_maint(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ roff_t moff;
+
+ switch (infop->type) {
+ case REGION_TYPE_LOCK:
+ moff = ((DB_LOCKREGION *)R_ADDR(infop,
+ infop->rp->primary))->maint_off;
+ break;
+ case REGION_TYPE_LOG:
+ moff = ((LOG *)R_ADDR(infop, infop->rp->primary))->maint_off;
+ break;
+ case REGION_TYPE_MPOOL:
+ moff = ((MPOOL *)R_ADDR(infop, infop->rp->primary))->maint_off;
+ break;
+ case REGION_TYPE_TXN:
+ moff = ((DB_TXNREGION *)R_ADDR(infop,
+ infop->rp->primary))->maint_off;
+ break;
+ default:
+ __db_err(dbenv,
+ "Attempting to record mutex in a region not set up to do so");
+ return (NULL);
+ }
+ return ((REGMAINT *)R_ADDR(infop, moff));
+}
+#endif /* HAVE_MUTEX_SYSTEM_RESOURCES */
diff --git a/storage/bdb/mutex/tm.c b/storage/bdb/mutex/tm.c
new file mode 100644
index 00000000000..4af1b1907a8
--- /dev/null
+++ b/storage/bdb/mutex/tm.c
@@ -0,0 +1,627 @@
+/*
+ * Standalone mutex tester for Berkeley DB mutexes.
+ */
+#include "db_config.h"
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+#include <pthread.h>
+#endif
+
+#include "db_int.h"
+
+void exec_proc();
+void tm_file_init();
+void map_file();
+void run_proc();
+void *run_thread();
+void *run_thread_wake();
+void tm_mutex_destroy();
+void tm_mutex_init();
+void tm_mutex_stats();
+void unmap_file();
+
+#define MUTEX_WAKEME 0x80 /* Wake-me flag. */
+
+DB_ENV dbenv; /* Fake out DB. */
+size_t len; /* Backing file size. */
+int align; /* Mutex alignment in file. */
+int quit; /* End-of-test flag. */
+char *file = "mutex.file"; /* Backing file. */
+
+int maxlocks = 20; /* -l: Backing locks. */
+int nlocks = 10000; /* -n: Locks per processes. */
+int nprocs = 20; /* -p: Processes. */
+int child; /* -s: Slave. */
+int nthreads = 1; /* -t: Threads. */
+int verbose; /* -v: Verbosity. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern int optind;
+ extern char *optarg;
+ pid_t pid;
+ int ch, eval, i, status;
+ char *tmpath;
+
+ tmpath = argv[0];
+ while ((ch = getopt(argc, argv, "l:n:p:st:v")) != EOF)
+ switch(ch) {
+ case 'l':
+ maxlocks = atoi(optarg);
+ break;
+ case 'n':
+ nlocks = atoi(optarg);
+ break;
+ case 'p':
+ nprocs = atoi(optarg);
+ break;
+ case 's':
+ child = 1;
+ break;
+ case 't':
+ nthreads = atoi(optarg);
+#if !defined(HAVE_MUTEX_PTHREADS) && !defined(BUILD_PTHREADS_ANYWAY)
+ if (nthreads != 1) {
+ (void)fprintf(stderr,
+ "tm: pthreads not available or not compiled for this platform.\n");
+ return (EXIT_FAILURE);
+ }
+#endif
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ (void)fprintf(stderr,
+ "usage: tm [-v] [-l maxlocks] [-n locks] [-p procs] [-t threads]\n");
+ return (EXIT_FAILURE);
+ }
+ argc -= optind;
+ argv += optind;
+
+ /*
+ * The file layout:
+ * DB_MUTEX[1] per-thread mutex array lock
+ * DB_MUTEX[nthreads] per-thread mutex array
+ * DB_MUTEX[maxlocks] per-lock mutex array
+ * u_long[maxlocks][2] per-lock ID array
+ */
+ align = ALIGN(sizeof(DB_MUTEX) * 2, MUTEX_ALIGN);
+ len =
+ align * (1 + nthreads + maxlocks) + sizeof(u_long) * maxlocks * 2;
+ printf(
+ "mutex alignment %d, structure alignment %d, backing file %lu bytes\n",
+ MUTEX_ALIGN, align, (u_long)len);
+
+ if (child) {
+ run_proc();
+ return (EXIT_SUCCESS);
+ }
+
+ tm_file_init();
+ tm_mutex_init();
+
+ printf(
+ "%d proc, %d threads/proc, %d lock requests from %d locks:\n",
+ nprocs, nthreads, nlocks, maxlocks);
+ for (i = 0; i < nprocs; ++i)
+ switch (fork()) {
+ case -1:
+ perror("fork");
+ return (EXIT_FAILURE);
+ case 0:
+ exec_proc(tmpath);
+ break;
+ default:
+ break;
+ }
+
+ eval = EXIT_SUCCESS;
+ while ((pid = wait(&status)) != (pid_t)-1) {
+ fprintf(stderr,
+ "%lu: exited %d\n", (u_long)pid, WEXITSTATUS(status));
+ if (WEXITSTATUS(status) != 0)
+ eval = EXIT_FAILURE;
+ }
+
+ tm_mutex_stats();
+ tm_mutex_destroy();
+
+ printf("tm: exit status: %s\n",
+ eval == EXIT_SUCCESS ? "success" : "failed!");
+ return (eval);
+}
+
+void
+exec_proc(tmpath)
+ char *tmpath;
+{
+ char *argv[10], **ap, b_l[10], b_n[10], b_t[10];
+
+ ap = &argv[0];
+ *ap++ = "tm";
+ sprintf(b_l, "-l%d", maxlocks);
+ *ap++ = b_l;
+ sprintf(b_n, "-n%d", nlocks);
+ *ap++ = b_n;
+ *ap++ = "-s";
+ sprintf(b_t, "-t%d", nthreads);
+ *ap++ = b_t;
+ if (verbose)
+ *ap++ = "-v";
+
+ *ap = NULL;
+ execvp(tmpath, argv);
+
+ fprintf(stderr, "%s: %s\n", tmpath, strerror(errno));
+ exit(EXIT_FAILURE);
+}
+
+void
+run_proc()
+{
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ pthread_t *kidsp, wakep;
+ int i, status;
+ void *retp;
+#endif
+ __os_sleep(&dbenv, 3, 0); /* Let everyone catch up. */
+
+ srand((u_int)time(NULL) / getpid()); /* Initialize random numbers. */
+
+ if (nthreads == 1) /* Simple case. */
+ exit((int)run_thread((void *)0));
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ /*
+ * Spawn off threads. We have nthreads all locking and going to
+ * sleep, and one other thread cycling through and waking them up.
+ */
+ if ((kidsp =
+ (pthread_t *)calloc(sizeof(pthread_t), nthreads)) == NULL) {
+ fprintf(stderr, "tm: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ for (i = 0; i < nthreads; i++)
+ if ((errno = pthread_create(
+ &kidsp[i], NULL, run_thread, (void *)i)) != 0) {
+ fprintf(stderr, "tm: failed spawning thread %d: %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if ((errno = pthread_create(
+ &wakep, NULL, run_thread_wake, (void *)0)) != 0) {
+ fprintf(stderr, "tm: failed spawning wakeup thread: %s\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ /* Wait for the threads to exit. */
+ status = 0;
+ for (i = 0; i < nthreads; i++) {
+ pthread_join(kidsp[i], &retp);
+ if (retp != NULL) {
+ fprintf(stderr,
+ "tm: thread %d exited with error\n", i);
+ status = EXIT_FAILURE;
+ }
+ }
+ free(kidsp);
+
+ /* Signal wakeup thread to stop. */
+ quit = 1;
+ pthread_join(wakep, &retp);
+ if (retp != NULL) {
+ fprintf(stderr, "tm: wakeup thread exited with error\n");
+ status = EXIT_FAILURE;
+ }
+
+ exit(status);
+#endif
+}
+
+void *
+run_thread(arg)
+ void *arg;
+{
+ DB_MUTEX *gm_addr, *lm_addr, *tm_addr, *mp;
+ u_long gid1, gid2, *id_addr;
+ int fd, i, lock, id, nl, remap;
+
+ /* Set local and global per-thread ID. */
+ id = (int)arg;
+ gid1 = (u_long)getpid();
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ gid2 = (u_long)pthread_self();
+#else
+ gid2 = 0;
+#endif
+ printf("\tPID: %lu; TID: %lx; ID: %d\n", gid1, gid2, id);
+
+ nl = nlocks;
+ for (gm_addr = NULL, remap = 0;;) {
+ /* Map in the file as necessary. */
+ if (gm_addr == NULL) {
+ map_file(&gm_addr, &tm_addr, &lm_addr, &id_addr, &fd);
+ remap = (rand() % 100) + 35;
+ }
+
+ /* Select and acquire a data lock. */
+ lock = rand() % maxlocks;
+ mp = (DB_MUTEX *)((u_int8_t *)lm_addr + lock * align);
+ if (verbose)
+ printf("%lu/%lx: %03d\n", gid1, gid2, lock);
+
+ if (__db_mutex_lock(&dbenv, mp)) {
+ fprintf(stderr,
+ "%lu/%lx: never got lock\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ if (id_addr[lock * 2] != 0) {
+ fprintf(stderr,
+ "RACE! (%lu/%lx granted lock %d held by %lu/%lx)\n",
+ gid1, gid2,
+ lock, id_addr[lock * 2], id_addr[lock * 2 + 1]);
+ return ((void *)EXIT_FAILURE);
+ }
+ id_addr[lock * 2] = gid1;
+ id_addr[lock * 2 + 1] = gid2;
+
+ /*
+ * Pretend to do some work, periodically checking to see if
+ * we still hold the mutex.
+ */
+ for (i = 0; i < 3; ++i) {
+ __os_sleep(&dbenv, 0, rand() % 3);
+ if (id_addr[lock * 2] != gid1 ||
+ id_addr[lock * 2 + 1] != gid2) {
+ fprintf(stderr,
+ "RACE! (%lu/%lx stole lock %d from %lu/%lx)\n",
+ id_addr[lock * 2],
+ id_addr[lock * 2 + 1], lock, gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ }
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ /*
+ * Test self-blocking and unlocking by other threads/processes:
+ *
+ * acquire the global lock
+ * set our wakeup flag
+ * release the global lock
+ * acquire our per-thread lock
+ *
+ * The wakeup thread will wake us up.
+ */
+ if (__db_mutex_lock(&dbenv, gm_addr)) {
+ fprintf(stderr, "%lu/%lx: global lock\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ mp = (DB_MUTEX *)((u_int8_t *)tm_addr + id * align);
+ F_SET(mp, MUTEX_WAKEME);
+ if (__db_mutex_unlock(&dbenv, gm_addr)) {
+ fprintf(stderr,
+ "%lu/%lx: per-thread wakeup failed\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ if (__db_mutex_lock(&dbenv, mp)) {
+ fprintf(stderr,
+ "%lu/%lx: per-thread lock\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ /* Time passes... */
+ if (F_ISSET(mp, MUTEX_WAKEME)) {
+ fprintf(stderr, "%lu/%lx: %03d wakeup flag still set\n",
+ gid1, gid2, id);
+ return ((void *)EXIT_FAILURE);
+ }
+#endif
+
+ /* Release the data lock. */
+ id_addr[lock * 2] = id_addr[lock * 2 + 1] = 0;
+ mp = (DB_MUTEX *)((u_int8_t *)lm_addr + lock * align);
+ if (__db_mutex_unlock(&dbenv, mp)) {
+ fprintf(stderr, "%lu/%lx: wakeup failed\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+
+ if (--nl % 100 == 0)
+ fprintf(stderr, "%lu/%lx: %d\n", gid1, gid2, nl);
+
+ if (nl == 0 || --remap == 0) {
+ unmap_file((void *)gm_addr, fd);
+ gm_addr = NULL;
+
+ if (nl == 0)
+ break;
+
+ __os_sleep(&dbenv, rand() % 3, 0);
+ }
+ }
+
+ return (NULL);
+}
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+/*
+ * run_thread_wake --
+ * Thread to wake up other threads that are sleeping.
+ */
+void *
+run_thread_wake(arg)
+ void *arg;
+{
+ DB_MUTEX *gm_addr, *tm_addr, *mp;
+ int fd, id;
+
+ arg = NULL;
+ map_file(&gm_addr, &tm_addr, NULL, NULL, &fd);
+
+ /* Loop, waking up sleepers and periodically sleeping ourselves. */
+ while (!quit) {
+ id = 0;
+
+ /* Acquire the global lock. */
+retry: if (__db_mutex_lock(&dbenv, gm_addr)) {
+ fprintf(stderr, "wt: global lock failed\n");
+ return ((void *)EXIT_FAILURE);
+ }
+
+next: mp = (DB_MUTEX *)((u_int8_t *)tm_addr + id * align);
+ if (F_ISSET(mp, MUTEX_WAKEME)) {
+ F_CLR(mp, MUTEX_WAKEME);
+ if (__db_mutex_unlock(&dbenv, mp)) {
+ fprintf(stderr, "wt: wakeup failed\n");
+ return ((void *)EXIT_FAILURE);
+ }
+ }
+
+ if (++id < nthreads && id % 3 != 0)
+ goto next;
+
+ if (__db_mutex_unlock(&dbenv, gm_addr)) {
+ fprintf(stderr, "wt: global unlock failed\n");
+ return ((void *)EXIT_FAILURE);
+ }
+
+ __os_sleep(&dbenv, 0, 500);
+
+ if (id < nthreads)
+ goto retry;
+ }
+ return (NULL);
+}
+#endif
+
+/*
+ * tm_file_init --
+ * Initialize the backing file.
+ */
+void
+tm_file_init()
+{
+ int fd;
+
+
+ /* Initialize the backing file. */
+ printf("Create the backing file...\n");
+#ifdef HAVE_QNX
+ (void)shm_unlink(file);
+ if ((fd = shm_open(file, O_CREAT | O_RDWR | O_TRUNC,
+#else
+ (void)remove(file);
+ if ((fd = open(file, O_CREAT | O_RDWR | O_TRUNC,
+#endif
+
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) == -1) {
+ (void)fprintf(stderr, "%s: open: %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ if (lseek(fd, (off_t)len, SEEK_SET) != len || write(fd, &fd, 1) != 1) {
+ (void)fprintf(stderr,
+ "%s: seek/write: %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ (void)close(fd);
+}
+
+/*
+ * tm_mutex_init --
+ * Initialize the mutexes.
+ */
+void
+tm_mutex_init()
+{
+ DB_MUTEX *gm_addr, *lm_addr, *mp, *tm_addr;
+ int fd, i;
+
+ map_file(&gm_addr, &tm_addr, &lm_addr, NULL, &fd);
+
+ printf("Initialize the global mutex...\n");
+ if (__db_mutex_init_int(&dbenv, gm_addr, 0, 0)) {
+ fprintf(stderr,
+ "__db_mutex_init (global): %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Initialize the per-thread mutexes...\n");
+ for (i = 1, mp = tm_addr;
+ i <= nthreads; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align)) {
+ if (__db_mutex_init_int(&dbenv, mp, 0, MUTEX_SELF_BLOCK)) {
+ fprintf(stderr, "__db_mutex_init (per-thread %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ if (__db_mutex_lock(&dbenv, mp)) {
+ fprintf(stderr,
+ "__db_mutex_init (per-thread %d) lock: %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ printf("Initialize the per-lock mutexes...\n");
+ for (i = 1, mp = lm_addr;
+ i <= maxlocks; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align))
+ if (__db_mutex_init_int(&dbenv, mp, 0, 0)) {
+ fprintf(stderr, "__db_mutex_init (per-lock: %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ unmap_file((void *)gm_addr, fd);
+}
+
+/*
+ * tm_mutex_destroy --
+ * Destroy the mutexes.
+ */
+void
+tm_mutex_destroy()
+{
+ DB_MUTEX *gm_addr, *lm_addr, *mp, *tm_addr;
+ int fd, i;
+
+ map_file(&gm_addr, &tm_addr, &lm_addr, NULL, &fd);
+
+ printf("Destroy the global mutex...\n");
+ if (__db_mutex_destroy(gm_addr)) {
+ fprintf(stderr,
+ "__db_mutex_destroy (global): %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Destroy the per-thread mutexes...\n");
+ for (i = 1, mp = tm_addr;
+ i <= nthreads; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align)) {
+ if (__db_mutex_destroy(mp)) {
+ fprintf(stderr,
+ "__db_mutex_destroy (per-thread %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ printf("Destroy the per-lock mutexes...\n");
+ for (i = 1, mp = lm_addr;
+ i <= maxlocks; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align))
+ if (__db_mutex_destroy(mp)) {
+ fprintf(stderr,
+ "__db_mutex_destroy (per-lock: %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ unmap_file((void *)gm_addr, fd);
+#ifdef HAVE_QNX
+ (void)shm_unlink(file);
+#endif
+}
+
+/*
+ * tm_mutex_stats --
+ * Display mutex statistics.
+ */
+void
+tm_mutex_stats()
+{
+ DB_MUTEX *gm_addr, *lm_addr, *mp;
+ int fd, i;
+
+ map_file(&gm_addr, NULL, &lm_addr, NULL, &fd);
+
+ printf("Per-lock mutex statistics...\n");
+ for (i = 1, mp = lm_addr;
+ i <= maxlocks; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align))
+ printf("mutex %2d: wait: %lu; no wait %lu\n", i,
+ (u_long)mp->mutex_set_wait, (u_long)mp->mutex_set_nowait);
+
+ unmap_file((void *)gm_addr, fd);
+}
+
+/*
+ * map_file --
+ * Map in the backing file.
+ */
+void
+map_file(gm_addrp, tm_addrp, lm_addrp, id_addrp, fdp)
+ DB_MUTEX **gm_addrp, **tm_addrp, **lm_addrp;
+ u_long **id_addrp;
+ int *fdp;
+{
+ void *maddr;
+ int fd;
+
+#ifndef MAP_FAILED
+#define MAP_FAILED (void *)-1
+#endif
+#ifndef MAP_FILE
+#define MAP_FILE 0
+#endif
+#ifdef HAVE_QNX
+ if ((fd = shm_open(file, O_RDWR, 0)) == -1) {
+#else
+ if ((fd = open(file, O_RDWR, 0)) == -1) {
+#endif
+ fprintf(stderr, "%s: open %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ maddr = mmap(NULL, len,
+ PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, (off_t)0);
+ if (maddr == MAP_FAILED) {
+ fprintf(stderr, "%s: mmap: %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if (gm_addrp != NULL)
+ *gm_addrp = (DB_MUTEX *)maddr;
+ maddr = (u_int8_t *)maddr + align;
+ if (tm_addrp != NULL)
+ *tm_addrp = (DB_MUTEX *)maddr;
+ maddr = (u_int8_t *)maddr + align * nthreads;
+ if (lm_addrp != NULL)
+ *lm_addrp = (DB_MUTEX *)maddr;
+ maddr = (u_int8_t *)maddr + align * maxlocks;
+ if (id_addrp != NULL)
+ *id_addrp = (u_long *)maddr;
+ if (fdp != NULL)
+ *fdp = fd;
+}
+
+/*
+ * unmap_file --
+ * Discard backing file map.
+ */
+void
+unmap_file(maddr, fd)
+ void *maddr;
+ int fd;
+{
+ if (munmap(maddr, len) != 0) {
+ fprintf(stderr, "munmap: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ if (close(fd) != 0) {
+ fprintf(stderr, "close: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+}
diff --git a/storage/bdb/mutex/uts4_cc.s b/storage/bdb/mutex/uts4_cc.s
new file mode 100644
index 00000000000..9ebc45aad54
--- /dev/null
+++ b/storage/bdb/mutex/uts4_cc.s
@@ -0,0 +1,27 @@
+ / See the file LICENSE for redistribution information.
+ /
+ / Copyright (c) 1997-2002
+ / Sleepycat Software. All rights reserved.
+ /
+ / $Id: uts4_cc.s,v 11.2 2002/04/25 13:42:14 bostic Exp $
+ /
+ / int uts_lock ( int *p, int i );
+ / Update the lock word pointed to by p with the
+ / value i, using compare-and-swap.
+ / Returns 0 if update was successful.
+ / Returns 1 if update failed.
+ /
+ entry uts_lock
+ uts_lock:
+ using .,r15
+ st r2,8(sp) / Save R2
+ l r2,64+0(sp) / R2 -> word to update
+ slr r0, r0 / R0 = current lock value must be 0
+ l r1,64+4(sp) / R1 = new lock value
+ cs r0,r1,0(r2) / Try the update ...
+ be x / ... Success. Return 0
+ la r0,1 / ... Failure. Return 1
+ x: /
+ l r2,8(sp) / Restore R2
+ b 2(,r14) / Return to caller
+ drop r15
diff --git a/storage/bdb/os/os_abs.c b/storage/bdb/os/os_abs.c
new file mode 100644
index 00000000000..cd7d0a5d2be
--- /dev/null
+++ b/storage/bdb/os/os_abs.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_abs.c,v 11.5 2002/01/11 15:52:58 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ *
+ * PUBLIC: int __os_abspath __P((const char *));
+ */
+int
+__os_abspath(path)
+ const char *path;
+{
+ return (path[0] == '/');
+}
diff --git a/storage/bdb/os/os_alloc.c b/storage/bdb/os/os_alloc.c
new file mode 100644
index 00000000000..5b38cc7d6f1
--- /dev/null
+++ b/storage/bdb/os/os_alloc.c
@@ -0,0 +1,458 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_alloc.c,v 11.32 2002/08/06 04:57:07 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef DIAGNOSTIC
+static void __os_guard __P((DB_ENV *));
+
+union __db_alloc {
+ size_t size;
+ double align;
+};
+#endif
+
+/*
+ * !!!
+ * Correct for systems that return NULL when you allocate 0 bytes of memory.
+ * There are several places in DB where we allocate the number of bytes held
+ * by the key/data item, and it can be 0. Correct here so that malloc never
+ * returns a NULL for that reason (which behavior is permitted by ANSI). We
+ * could make these calls macros on non-Alpha architectures (that's where we
+ * saw the problem), but it's probably not worth the autoconf complexity.
+ *
+ * !!!
+ * Correct for systems that don't set errno when malloc and friends fail.
+ *
+ * !!!
+ * There is no circumstance in which we can call __os_umalloc, __os_urealloc
+ * or __os_ufree without an environment handle, as we need one to determine
+ * whether or not to use an application-specified malloc function. If we
+ * don't have an environment handle, we should be calling __os_XXX instead.
+ * Make DIAGNOSTIC blow up if we get this wrong.
+ *
+ * Out of memory.
+ * We wish to hold the whole sky,
+ * But we never will.
+ */
+
+/*
+ * __os_umalloc --
+ * A malloc(3) function that will use, in order of preference,
+ * the allocation function specified to the DB handle, the DB_ENV
+ * handle, or __os_malloc.
+ *
+ * PUBLIC: int __os_umalloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_umalloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+
+ /* Require an environment handle. */
+ DB_ASSERT(dbenv != NULL);
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+ if (dbenv == NULL || dbenv->db_malloc == NULL) {
+ if (DB_GLOBAL(j_malloc) != NULL)
+ *(void **)storep = DB_GLOBAL(j_malloc)(size);
+ else
+ *(void **)storep = malloc(size);
+ if (*(void **)storep == NULL) {
+ /*
+ * Correct error return, see __os_malloc.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "malloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+ return (0);
+ }
+
+ if ((*(void **)storep = dbenv->db_malloc(size)) == NULL) {
+ __db_err(dbenv, "User-specified malloc function returned NULL");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+/*
+ * __os_urealloc --
+ * realloc(3) counterpart to __os_umalloc.
+ *
+ * PUBLIC: int __os_urealloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_urealloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+ void *ptr;
+
+ ptr = *(void **)storep;
+
+ /* Require an environment handle. */
+ DB_ASSERT(dbenv != NULL);
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+ if (dbenv == NULL || dbenv->db_realloc == NULL) {
+ if (ptr == NULL)
+ return (__os_umalloc(dbenv, size, storep));
+
+ if (DB_GLOBAL(j_realloc) != NULL)
+ *(void **)storep = DB_GLOBAL(j_realloc)(ptr, size);
+ else
+ *(void **)storep = realloc(ptr, size);
+ if (*(void **)storep == NULL) {
+ /*
+ * Correct errno, see __os_realloc.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "realloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+ return (0);
+ }
+
+ if ((*(void **)storep = dbenv->db_realloc(ptr, size)) == NULL) {
+ __db_err(dbenv,
+ "User-specified realloc function returned NULL");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+/*
+ * __os_ufree --
+ * free(3) counterpart to __os_umalloc.
+ *
+ * PUBLIC: int __os_ufree __P((DB_ENV *, void *));
+ */
+int
+__os_ufree(dbenv, ptr)
+ DB_ENV *dbenv;
+ void *ptr;
+{
+ /* Require an environment handle. */
+ DB_ASSERT(dbenv != NULL);
+
+ if (dbenv != NULL && dbenv->db_free != NULL)
+ dbenv->db_free(ptr);
+ else if (DB_GLOBAL(j_free) != NULL)
+ DB_GLOBAL(j_free)(ptr);
+ else
+ free(ptr);
+
+ return (0);
+}
+
+/*
+ * __os_strdup --
+ * The strdup(3) function for DB.
+ *
+ * PUBLIC: int __os_strdup __P((DB_ENV *, const char *, void *));
+ */
+int
+__os_strdup(dbenv, str, storep)
+ DB_ENV *dbenv;
+ const char *str;
+ void *storep;
+{
+ size_t size;
+ int ret;
+ void *p;
+
+ *(void **)storep = NULL;
+
+ size = strlen(str) + 1;
+ if ((ret = __os_malloc(dbenv, size, &p)) != 0)
+ return (ret);
+
+ memcpy(p, str, size);
+
+ *(void **)storep = p;
+ return (0);
+}
+
+/*
+ * __os_calloc --
+ * The calloc(3) function for DB.
+ *
+ * PUBLIC: int __os_calloc __P((DB_ENV *, size_t, size_t, void *));
+ */
+int
+__os_calloc(dbenv, num, size, storep)
+ DB_ENV *dbenv;
+ size_t num, size;
+ void *storep;
+{
+ void *p;
+ int ret;
+
+ size *= num;
+ if ((ret = __os_malloc(dbenv, size, &p)) != 0)
+ return (ret);
+
+ memset(p, 0, size);
+
+ *(void **)storep = p;
+ return (0);
+}
+
+/*
+ * __os_malloc --
+ * The malloc(3) function for DB.
+ *
+ * PUBLIC: int __os_malloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_malloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+ void *p;
+
+ /* Make sure the caller's slot is NULL on any failure return. */
+ *(void **)storep = NULL;
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+#ifdef DIAGNOSTIC
+ /* Add room for size and a guard byte. */
+ size += sizeof(union __db_alloc) + 1;
+#endif
+
+ /* Prefer the application-wide malloc override when one is set. */
+ if (DB_GLOBAL(j_malloc) != NULL)
+ p = DB_GLOBAL(j_malloc)(size);
+ else
+ p = malloc(size);
+ if (p == NULL) {
+ /*
+ * Some C libraries don't correctly set errno when malloc(3)
+ * fails. We'd like to 0 out errno before calling malloc,
+ * but it turns out that setting errno is quite expensive on
+ * Windows/NT in an MT environment.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "malloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+
+#ifdef DIAGNOSTIC
+ /*
+ * Guard bytes: if #DIAGNOSTIC is defined, we allocate an additional
+ * byte after the memory and set it to a special value that we check
+ * for when the memory is free'd.
+ */
+ ((u_int8_t *)p)[size - 1] = CLEAR_BYTE;
+
+ /* Record the size in a header and hide it from the caller. */
+ ((union __db_alloc *)p)->size = size;
+ p = &((union __db_alloc *)p)[1];
+#endif
+ *(void **)storep = p;
+
+ return (0);
+}
+
+/*
+ * __os_realloc --
+ * The realloc(3) function for DB.
+ *
+ * PUBLIC: int __os_realloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_realloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+ void *p, *ptr;
+
+ /* storep holds the current pointer on entry, the new one on exit. */
+ ptr = *(void **)storep;
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+ /* If we haven't yet allocated anything yet, simply call malloc. */
+ if (ptr == NULL)
+ return (__os_malloc(dbenv, size, storep));
+
+#ifdef DIAGNOSTIC
+ /* Add room for size and a guard byte. */
+ size += sizeof(union __db_alloc) + 1;
+
+ /* Back up to the real beginning */
+ ptr = &((union __db_alloc *)ptr)[-1];
+#endif
+
+ /*
+ * Don't overwrite the original pointer, there are places in DB we
+ * try to continue after realloc fails.
+ */
+ if (DB_GLOBAL(j_realloc) != NULL)
+ p = DB_GLOBAL(j_realloc)(ptr, size);
+ else
+ p = realloc(ptr, size);
+ if (p == NULL) {
+ /*
+ * Some C libraries don't correctly set errno when malloc(3)
+ * fails. We'd like to 0 out errno before calling malloc,
+ * but it turns out that setting errno is quite expensive on
+ * Windows/NT in an MT environment.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "realloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+#ifdef DIAGNOSTIC
+ ((u_int8_t *)p)[size - 1] = CLEAR_BYTE; /* Initialize guard byte. */
+
+ /* Re-write the size header and hide it from the caller again. */
+ ((union __db_alloc *)p)->size = size;
+ p = &((union __db_alloc *)p)[1];
+#endif
+
+ *(void **)storep = p;
+
+ return (0);
+}
+
+/*
+ * __os_free --
+ * The free(3) function for DB.
+ *
+ * PUBLIC: void __os_free __P((DB_ENV *, void *));
+ */
+void
+__os_free(dbenv, ptr)
+ DB_ENV *dbenv;
+ void *ptr;
+{
+#ifdef DIAGNOSTIC
+ int size;
+ /*
+ * Check that the guard byte (one past the end of the memory) is
+ * still CLEAR_BYTE.
+ */
+ if (ptr == NULL)
+ return;
+
+ /* Step back over the size header written by __os_malloc. */
+ ptr = &((union __db_alloc *)ptr)[-1];
+ size = ((union __db_alloc *)ptr)->size;
+ if (((u_int8_t *)ptr)[size - 1] != CLEAR_BYTE)
+ __os_guard(dbenv);
+
+ /* Clear memory. */
+ if (size != 0)
+ memset(ptr, CLEAR_BYTE, size);
+#endif
+ /* dbenv is only referenced in DIAGNOSTIC builds. */
+ COMPQUIET(dbenv, NULL);
+
+ /* Route through the application-specified free, if any. */
+ if (DB_GLOBAL(j_free) != NULL)
+ DB_GLOBAL(j_free)(ptr);
+ else
+ free(ptr);
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * __os_guard --
+ * Complain and abort. Called by __os_free when the guard byte
+ * written by __os_malloc has been overwritten, i.e., a buffer overrun.
+ */
+static void
+__os_guard(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "Guard byte incorrect during free");
+ abort();
+ /* NOTREACHED */
+}
+#endif
+
+/*
+ * __ua_memcpy --
+ * Copy memory to memory without relying on any kind of alignment.
+ *
+ * There are places in DB where we have unaligned data, for example
+ * structures stored in log records as DBTs. Compilers are allowed to
+ * assume structure alignment, so code that casts such a buffer to a
+ * structure pointer and copies a field may be compiled into aligned
+ * access instructions, and records in the log don't have any
+ * particular alignment. Casting the memcpy arguments to (u_int8_t *)
+ * appears to work most of the time, but nothing in ANSI C requires
+ * that it work. Funneling all such copies through this routine keeps
+ * the portable workaround in a single place.
+ *
+ * PUBLIC: void *__ua_memcpy __P((void *, const void *, size_t));
+ */
+void *
+__ua_memcpy(dst, src, len)
+ void *dst;
+ const void *src;
+ size_t len;
+{
+ void *p;
+
+ p = memcpy(dst, src, len);
+ return (p);
+}
diff --git a/storage/bdb/os/os_clock.c b/storage/bdb/os/os_clock.c
new file mode 100644
index 00000000000..8da02cf6f9c
--- /dev/null
+++ b/storage/bdb/os/os_clock.c
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_clock.c,v 1.9 2002/03/29 20:46:44 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif /* HAVE_SYS_TIME_H */
+#endif /* TIME_WITH SYS_TIME */
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_clock --
+ * Return the current time-of-day clock in seconds and microseconds.
+ * Either output pointer may be NULL if the caller doesn't need it.
+ *
+ * PUBLIC: int __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_clock(dbenv, secsp, usecsp)
+ DB_ENV *dbenv;
+ u_int32_t *secsp, *usecsp; /* Seconds and microseconds. */
+{
+ /* First choice: gettimeofday(2), microsecond resolution. */
+#if defined(HAVE_GETTIMEOFDAY)
+ struct timeval tp;
+ int ret;
+
+ /* Retry if the call is interrupted by a signal. */
+retry: if (gettimeofday(&tp, NULL) != 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "gettimeofday: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = tp.tv_sec;
+ if (usecsp != NULL)
+ *usecsp = tp.tv_usec;
+#endif
+ /* Second choice: clock_gettime(2), nanoseconds scaled down. */
+#if !defined(HAVE_GETTIMEOFDAY) && defined(HAVE_CLOCK_GETTIME)
+ struct timespec tp;
+ int ret;
+
+retry: if (clock_gettime(CLOCK_REALTIME, &tp) != 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "clock_gettime: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = tp.tv_sec;
+ if (usecsp != NULL)
+ *usecsp = tp.tv_nsec / 1000;
+#endif
+ /* Last resort: time(3), which only has one-second resolution. */
+#if !defined(HAVE_GETTIMEOFDAY) && !defined(HAVE_CLOCK_GETTIME)
+ time_t now;
+ int ret;
+
+ if (time(&now) == (time_t)-1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "time: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = now;
+ if (usecsp != NULL)
+ *usecsp = 0;
+#endif
+ return (0);
+}
diff --git a/storage/bdb/os/os_config.c b/storage/bdb/os/os_config.c
new file mode 100644
index 00000000000..b64952a8302
--- /dev/null
+++ b/storage/bdb/os/os_config.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_config.c,v 11.13 2002/01/31 19:54:12 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ * This is the generic POSIX version; platforms whose filesystems
+ * expose uninitialized blocks override it elsewhere.
+ *
+ * PUBLIC: int __os_fs_notzero __P((void));
+ */
+int
+__os_fs_notzero()
+{
+ /* Most filesystems zero out implicitly created pages. */
+ return (0);
+}
diff --git a/storage/bdb/os/os_dir.c b/storage/bdb/os/os_dir.c
new file mode 100644
index 00000000000..3f59a23d963
--- /dev/null
+++ b/storage/bdb/os/os_dir.c
@@ -0,0 +1,108 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_dir.c,v 11.14 2002/07/12 18:56:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if HAVE_DIRENT_H
+# include <dirent.h>
+# define NAMLEN(dirent) strlen((dirent)->d_name)
+#else
+# define dirent direct
+# define NAMLEN(dirent) (dirent)->d_namlen
+# if HAVE_SYS_NDIR_H
+# include <sys/ndir.h>
+# endif
+# if HAVE_SYS_DIR_H
+# include <sys/dir.h>
+# endif
+# if HAVE_NDIR_H
+# include <ndir.h>
+# endif
+#endif
+
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_dirlist --
+ * Return a list of the files in a directory. On success the caller
+ * owns the returned array and must release it with __os_dirfree.
+ * The list contains whatever readdir(3) returns, so entries such as
+ * "." and ".." are included when the platform reports them.
+ *
+ * PUBLIC: int __os_dirlist __P((DB_ENV *, const char *, char ***, int *));
+ */
+int
+__os_dirlist(dbenv, dir, namesp, cntp)
+ DB_ENV *dbenv;
+ const char *dir;
+ char ***namesp;
+ int *cntp;
+{
+ struct dirent *dp;
+ DIR *dirp;
+ int arraysz, cnt, ret;
+ char **names;
+
+ /* If the application specified an interface, use it. */
+ if (DB_GLOBAL(j_dirlist) != NULL)
+ return (DB_GLOBAL(j_dirlist)(dir, namesp, cntp));
+
+#ifdef HAVE_VXWORKS
+ if ((dirp = opendir((char *)dir)) == NULL)
+#else
+ if ((dirp = opendir(dir)) == NULL)
+#endif
+ return (__os_get_errno());
+ names = NULL;
+ /* Grow the name array in chunks of 100 entries as we read. */
+ for (arraysz = cnt = 0; (dp = readdir(dirp)) != NULL; ++cnt) {
+ if (cnt >= arraysz) {
+ arraysz += 100;
+ if ((ret = __os_realloc(dbenv,
+ arraysz * sizeof(names[0]), &names)) != 0)
+ goto nomem;
+ }
+ if ((ret = __os_strdup(dbenv, dp->d_name, &names[cnt])) != 0)
+ goto nomem;
+ }
+ (void)closedir(dirp);
+
+ *namesp = names;
+ *cntp = cnt;
+ return (0);
+
+ /* Unwind: cnt counts only the entries successfully duplicated. */
+nomem: if (names != NULL)
+ __os_dirfree(dbenv, names, cnt);
+ if (dirp != NULL)
+ (void)closedir(dirp);
+ return (ret);
+}
+
+/*
+ * __os_dirfree --
+ * Free a list of file names returned by __os_dirlist.
+ *
+ * PUBLIC: void __os_dirfree __P((DB_ENV *, char **, int));
+ */
+void
+__os_dirfree(dbenv, names, cnt)
+ DB_ENV *dbenv;
+ char **names;
+ int cnt;
+{
+ int i;
+
+ /* If the application replaced the interface, hand the list back. */
+ if (DB_GLOBAL(j_dirfree) != NULL) {
+ DB_GLOBAL(j_dirfree)(names, cnt);
+ return;
+ }
+
+ /* Release each name, then the array itself. */
+ for (i = 0; i < cnt; ++i)
+ __os_free(dbenv, names[i]);
+ __os_free(dbenv, names);
+}
diff --git a/storage/bdb/os/os_errno.c b/storage/bdb/os/os_errno.c
new file mode 100644
index 00000000000..4b40f88d177
--- /dev/null
+++ b/storage/bdb/os/os_errno.c
@@ -0,0 +1,64 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_errno.c,v 11.8 2002/01/11 15:52:59 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_get_errno_ret_zero --
+ * Return the value of errno, even if it's zero. Use this variant
+ * when the caller must distinguish "no error recorded" from a real
+ * error number; __os_get_errno maps zero to EAGAIN instead.
+ *
+ * PUBLIC: int __os_get_errno_ret_zero __P((void));
+ */
+int
+__os_get_errno_ret_zero()
+{
+ /* This routine must be able to return the same value repeatedly. */
+ return (errno);
+}
+
+/*
+ * __os_get_errno --
+ * Return the value of errno, or EAGAIN if errno is zero. Only call
+ * this after a failure has been detected; see the comment below.
+ *
+ * PUBLIC: int __os_get_errno __P((void));
+ */
+int
+__os_get_errno()
+{
+ /*
+ * This routine must be able to return the same value repeatedly.
+ *
+ * We've seen cases where system calls failed but errno was never set.
+ * This version of __os_get_errno() sets errno to EAGAIN if it's not
+ * already set, to work around that problem. For obvious reasons, we
+ * can only call this function if we know an error has occurred, that
+ * is, we can't test errno for a non-zero value after this call.
+ */
+ if (errno == 0)
+ __os_set_errno(EAGAIN);
+
+ return (errno);
+}
+
+/*
+ * __os_set_errno --
+ * Set the value of errno. Exists so platform-specific versions can
+ * substitute their own error cell behind the same interface.
+ *
+ * PUBLIC: void __os_set_errno __P((int));
+ */
+void
+__os_set_errno(evalue)
+ int evalue;
+{
+ errno = evalue;
+}
diff --git a/storage/bdb/os/os_fid.c b/storage/bdb/os/os_fid.c
new file mode 100644
index 00000000000..125e6f0712c
--- /dev/null
+++ b/storage/bdb/os/os_fid.c
@@ -0,0 +1,148 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_fid.c,v 11.14 2002/08/26 14:37:38 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#define SERIAL_INIT 0
+/* Process-local serial number folded into unique file IDs; see below. */
+static u_int32_t fid_serial = SERIAL_INIT;
+
+/*
+ * __os_fileid --
+ * Return a unique identifier for a file. The structure
+ * of a fileid is: ino(4) dev(4) time(4) pid(4) extra(4).
+ * For real files, which have a backing inode and device, the first
+ * 16 bytes are filled in and the extra bytes are left 0. For
+ * temporary files, the inode and device fields are left blank and
+ * the extra four bytes are filled in with a random value.
+ *
+ * PUBLIC: int __os_fileid __P((DB_ENV *, const char *, int, u_int8_t *));
+ */
+int
+__os_fileid(dbenv, fname, unique_okay, fidp)
+ DB_ENV *dbenv;
+ const char *fname;
+ int unique_okay;
+ u_int8_t *fidp;
+{
+ struct stat sb;
+ size_t i;
+ int ret;
+ u_int32_t tmp;
+ u_int8_t *p;
+
+ /* Clear the buffer. */
+ memset(fidp, 0, DB_FILE_ID_LEN);
+
+ /* On POSIX/UNIX, use a dev/inode pair. */
+retry:
+#ifdef HAVE_VXWORKS
+ if (stat((char *)fname, &sb) != 0) {
+#else
+ if (stat(fname, &sb) != 0) {
+#endif
+ /* Retry stat(2) if interrupted by a signal. */
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "%s: %s", fname, strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Initialize/increment the serial number we use to help avoid
+ * fileid collisions. Note that we don't bother with locking;
+ * it's unpleasant to do from down in here, and if we race on
+ * this no real harm will be done, since the finished fileid
+ * has so many other components.
+ *
+ * We increment by 100000 on each call as a simple way of
+ * randomizing; simply incrementing seems potentially less useful
+ * if pids are also simply incremented, since this is process-local
+ * and we may be one of a set of processes starting up. 100000
+ * pushes us out of pid space on most platforms, and has few
+ * interesting properties in base 2.
+ */
+ if (fid_serial == SERIAL_INIT)
+ __os_id(&fid_serial);
+ else
+ fid_serial += 100000;
+
+ /*
+ * !!!
+ * Nothing is ever big enough -- on Sparc V9, st_ino, st_dev and the
+ * time_t types are all 8 bytes. As DB_FILE_ID_LEN is only 20 bytes,
+ * we convert to a (potentially) smaller fixed-size type and use it.
+ *
+ * We don't worry about byte sexing or the actual variable sizes.
+ *
+ * When this routine is called from the DB access methods, it's only
+ * called once -- whatever ID is generated when a database is created
+ * is stored in the database file's metadata, and that is what is
+ * saved in the mpool region's information to uniquely identify the
+ * file.
+ *
+ * When called from the mpool layer this routine will be called each
+ * time a new thread of control wants to share the file, which makes
+ * things tougher. As far as byte sexing goes, since the mpool region
+ * lives on a single host, there's no issue of that -- the entire
+ * region is byte sex dependent. As far as variable sizes go, we make
+ * the simplifying assumption that 32-bit and 64-bit processes will
+ * get the same 32-bit values if we truncate any returned 64-bit value
+ * to a 32-bit value. When we're called from the mpool layer, though,
+ * we need to be careful not to include anything that isn't
+ * reproducible for a given file, such as the timestamp or serial
+ * number.
+ */
+ tmp = (u_int32_t)sb.st_ino;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ tmp = (u_int32_t)sb.st_dev;
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ if (unique_okay) {
+ /*
+ * We want the number of seconds, not the high-order 0 bits,
+ * so convert the returned time_t to a (potentially) smaller
+ * fixed-size type.
+ */
+ tmp = (u_int32_t)time(NULL);
+ for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+
+ for (p = (u_int8_t *)&fid_serial, i = sizeof(u_int32_t);
+ i > 0; --i)
+ *fidp++ = *p++;
+ }
+
+ return (0);
+}
diff --git a/storage/bdb/os/os_fsync.c b/storage/bdb/os/os_fsync.c
new file mode 100644
index 00000000000..46ab4885a16
--- /dev/null
+++ b/storage/bdb/os/os_fsync.c
@@ -0,0 +1,89 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_fsync.c,v 11.14 2002/07/12 18:56:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h> /* XXX: Required by __hp3000s900 */
+#include <unistd.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef HAVE_VXWORKS
+#include "ioLib.h"
+
+/*
+ * Map fsync to the VxWorks ioctl-based flush. Note: no trailing
+ * semicolon in the macro body -- fsync() is used inside expressions
+ * (e.g. the ternary in __os_fsync), and an embedded ';' would
+ * terminate the enclosing statement early.
+ */
+#define fsync(fd) __vx_fsync(fd)
+
+/*
+ * __vx_fsync --
+ * fsync(2) replacement for VxWorks, built on the FIOSYNC ioctl.
+ */
+int
+__vx_fsync(fd)
+ int fd;
+{
+ int ret;
+
+ /*
+ * The results of ioctl are driver dependent. Some will return the
+ * number of bytes sync'ed. Only if it returns 'ERROR' should we
+ * flag it.
+ */
+ if ((ret = ioctl(fd, FIOSYNC, 0)) != ERROR)
+ return (0);
+ return (ret);
+}
+#endif
+
+#ifdef __hp3000s900
+/*
+ * Map fsync to the MPE FCONTROL-based flush. Note: no trailing
+ * semicolon in the macro body -- fsync() is used inside expressions,
+ * and an embedded ';' would terminate the enclosing statement early.
+ */
+#define fsync(fd) __mpe_fsync(fd)
+
+/*
+ * __mpe_fsync --
+ * fsync(2) replacement for MPE/iX (HP 3000).
+ */
+int
+__mpe_fsync(fd)
+ int fd;
+{
+ extern FCONTROL(short, short, void *);
+
+ FCONTROL(_MPE_FILENO(fd), 2, NULL); /* Flush the buffers */
+ FCONTROL(_MPE_FILENO(fd), 6, NULL); /* Write the EOF */
+ return (0);
+}
+#endif
+
+/*
+ * __os_fsync --
+ * Flush a file descriptor. Returns 0 on success or an error number;
+ * EINTR is retried internally.
+ *
+ * PUBLIC: int __os_fsync __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_fsync(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ int ret;
+
+ /*
+ * Do nothing if the file descriptor has been marked as not requiring
+ * any sync to disk.
+ */
+ if (F_ISSET(fhp, DB_FH_NOSYNC))
+ return (0);
+
+ /* Prefer the application's fsync replacement; retry on EINTR. */
+ do {
+ ret = DB_GLOBAL(j_fsync) != NULL ?
+ DB_GLOBAL(j_fsync)(fhp->fd) : fsync(fhp->fd);
+ } while (ret != 0 && (ret = __os_get_errno()) == EINTR);
+
+ if (ret != 0)
+ __db_err(dbenv, "fsync %s", strerror(ret));
+ return (ret);
+}
diff --git a/storage/bdb/os/os_handle.c b/storage/bdb/os/os_handle.c
new file mode 100644
index 00000000000..5f617085e5d
--- /dev/null
+++ b/storage/bdb/os/os_handle.c
@@ -0,0 +1,185 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_handle.c,v 11.28 2002/07/12 18:56:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_openhandle --
+ * Open a file, using POSIX 1003.1 open flags. Fills in *fhp and
+ * marks it DB_FH_VALID on success; returns 0 or an error number.
+ *
+ * PUBLIC: int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+ */
+int
+__os_openhandle(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ int flags, mode;
+ DB_FH *fhp;
+{
+ int ret, nrepeat;
+#ifdef HAVE_VXWORKS
+ int newflags;
+#endif
+
+ memset(fhp, 0, sizeof(*fhp));
+
+ /* If the application specified an interface, use it. */
+ if (DB_GLOBAL(j_open) != NULL) {
+ if ((fhp->fd = DB_GLOBAL(j_open)(name, flags, mode)) == -1)
+ return (__os_get_errno());
+ F_SET(fhp, DB_FH_VALID);
+ return (0);
+ }
+
+ /* Retry transient open failures up to three times; see below. */
+ for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
+ ret = 0;
+#ifdef HAVE_VXWORKS
+ /*
+ * VxWorks does not support O_CREAT on open, you have to use
+ * creat() instead. (It does not support O_EXCL or O_TRUNC
+ * either, even though they are defined "for future support".)
+ * We really want the POSIX behavior that if O_CREAT is set,
+ * we open if it exists, or create it if it doesn't exist.
+ * If O_CREAT is specified, single thread and try to open the
+ * file. If successful, and O_EXCL return EEXIST. If
+ * unsuccessful call creat and then end single threading.
+ */
+ if (LF_ISSET(O_CREAT)) {
+ DB_BEGIN_SINGLE_THREAD;
+ newflags = flags & ~(O_CREAT | O_EXCL);
+ if ((fhp->fd =
+ open(name, newflags, mode)) != -1) {
+ if (LF_ISSET(O_EXCL)) {
+ /*
+ * If we get here, we want O_EXCL
+ * create, and it exists. Close and
+ * return EEXISTS.
+ */
+ (void)close(fhp->fd);
+ DB_END_SINGLE_THREAD;
+ return (EEXIST);
+ }
+ /*
+ * XXX
+ * Assume any error means non-existence.
+ * Unfortunately return values (even for
+ * non-existence) are driver specific so
+ * there is no single error we can use to
+ * verify we truly got the equivalent of
+ * ENOENT.
+ */
+ } else
+ fhp->fd = creat(name, newflags);
+ DB_END_SINGLE_THREAD;
+ } else
+
+ /* FALLTHROUGH */
+#endif
+#ifdef __VMS
+ /*
+ * !!!
+ * Open with full sharing on VMS.
+ *
+ * We use these flags because they are the ones set by the VMS
+ * CRTL mmap() call when it opens a file, and we have to be
+ * able to open files that mmap() has previously opened, e.g.,
+ * when we're joining already existing DB regions.
+ */
+ fhp->fd = open(name, flags, mode, "shr=get,put,upd,del,upi");
+#else
+ fhp->fd = open(name, flags, mode);
+#endif
+
+ if (fhp->fd == -1) {
+ /*
+ * If it's a "temporary" error, we retry up to 3 times,
+ * waiting up to 12 seconds. While it's not a problem
+ * if we can't open a database, an inability to open a
+ * log file is cause for serious dismay.
+ */
+ ret = __os_get_errno();
+ if (ret == ENFILE || ret == EMFILE || ret == ENOSPC) {
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ continue;
+ }
+
+ /*
+ * If it was an EINTR it's reasonable to retry
+ * immediately, and arbitrarily often.
+ */
+ if (ret == EINTR) {
+ --nrepeat;
+ continue;
+ }
+ } else {
+#if defined(HAVE_FCNTL_F_SETFD)
+ /* Deny file descriptor access to any child process. */
+ if (fcntl(fhp->fd, F_SETFD, FD_CLOEXEC) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "fcntl(F_SETFD): %s",
+ strerror(ret));
+ (void)__os_closehandle(dbenv, fhp);
+ } else
+#endif
+ F_SET(fhp, DB_FH_VALID);
+ }
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * __os_closehandle --
+ * Close a file. Also unlinks the backing file if the handle was
+ * flagged DB_FH_UNLINK. Returns 0 or an error number.
+ *
+ * PUBLIC: int __os_closehandle __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_closehandle(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ int ret;
+
+ /* Don't close file descriptors that were never opened. */
+ DB_ASSERT(F_ISSET(fhp, DB_FH_VALID) && fhp->fd != -1);
+
+ /* Prefer the application's close replacement; retry on EINTR. */
+ do {
+ ret = DB_GLOBAL(j_close) != NULL ?
+ DB_GLOBAL(j_close)(fhp->fd) : close(fhp->fd);
+ } while (ret != 0 && (ret = __os_get_errno()) == EINTR);
+
+ /* Unlink the file if we haven't already done so. */
+ if (F_ISSET(fhp, DB_FH_UNLINK)) {
+ (void)__os_unlink(dbenv, fhp->name);
+ (void)__os_free(dbenv, fhp->name);
+ }
+
+ /*
+ * Smash the POSIX file descriptor -- it's never tested, but we want
+ * to catch any mistakes.
+ */
+ fhp->fd = -1;
+ F_CLR(fhp, DB_FH_VALID);
+
+ return (ret);
+}
diff --git a/storage/bdb/os/os_id.c b/storage/bdb/os/os_id.c
new file mode 100644
index 00000000000..c242bb12e23
--- /dev/null
+++ b/storage/bdb/os/os_id.c
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_id.c,v 1.2 2002/01/11 15:52:59 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_id --
+ * Return a 32-bit value identifying the current thread of control.
+ *
+ * PUBLIC: void __os_id __P((u_int32_t *));
+ */
+void
+__os_id(idp)
+ u_int32_t *idp;
+{
+ /*
+ * By default, use the process ID.
+ *
+ * getpid() returns a pid_t which we convert to a u_int32_t. I have
+ * not yet seen a system where a pid_t has 64-bits, but I'm sure they
+ * exist. Since we're returning only the bottom 32-bits, you cannot
+ * use the return of __os_id to reference a process (for example, you
+ * cannot send a signal to the value returned by __os_id). To send a
+ * signal to the current process, use raise(3) instead.
+ */
+#ifdef HAVE_VXWORKS
+ /* VxWorks has tasks rather than processes. */
+ *idp = taskIdSelf();
+#else
+ *idp = getpid();
+#endif
+}
diff --git a/storage/bdb/os/os_map.c b/storage/bdb/os/os_map.c
new file mode 100644
index 00000000000..6d385b6a84d
--- /dev/null
+++ b/storage/bdb/os/os_map.c
@@ -0,0 +1,443 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_map.c,v 11.44 2002/07/12 18:56:51 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#ifdef HAVE_MMAP
+#include <sys/mman.h>
+#endif
+
+#ifdef HAVE_SHMGET
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef HAVE_MMAP
+static int __os_map __P((DB_ENV *, char *, DB_FH *, size_t, int, int, void **));
+#endif
+#ifndef HAVE_SHMGET
+static int __db_nosystemmem __P((DB_ENV *));
+#endif
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region, via shmget(2) for system
+ * memory or via a mapped file otherwise. On success infop->addr
+ * points at the attached region.
+ *
+ * PUBLIC: int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+ */
+int
+__os_r_sysattach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM)) {
+ /*
+ * If the region is in system memory on UNIX, we use shmget(2).
+ *
+ * !!!
+ * There exist spinlocks that don't work in shmget memory, e.g.,
+ * the HP/UX msemaphore interface. If we don't have locks that
+ * will work in shmget memory, we better be private and not be
+ * threaded. If we reach this point, we know we're public, so
+ * it's an error.
+ */
+#if defined(MUTEX_NO_SHMGET_LOCKS)
+ __db_err(dbenv,
+ "architecture does not support locks inside system shared memory");
+ return (EINVAL);
+#endif
+#if defined(HAVE_SHMGET)
+ {
+ key_t segid;
+ int id, ret;
+
+ /*
+ * We could potentially create based on REGION_CREATE_OK, but
+ * that's dangerous -- we might get crammed in sideways if
+ * some of the expected regions exist but others do not. Also,
+ * if the requested size differs from an existing region's
+ * actual size, then all sorts of nasty things can happen.
+ * Basing create solely on REGION_CREATE is much safer -- a
+ * recovery will get us straightened out.
+ */
+ if (F_ISSET(infop, REGION_CREATE)) {
+ /*
+ * The application must give us a base System V IPC key
+ * value. Adjust that value based on the region's ID,
+ * and correct so the user's original value appears in
+ * the ipcs output.
+ */
+ if (dbenv->shm_key == INVALID_REGION_SEGID) {
+ __db_err(dbenv,
+ "no base system shared memory ID specified");
+ return (EINVAL);
+ }
+ segid = (key_t)(dbenv->shm_key + (infop->id - 1));
+
+ /*
+ * If map to an existing region, assume the application
+ * crashed and we're restarting. Delete the old region
+ * and re-try. If that fails, return an error, the
+ * application will have to select a different segment
+ * ID or clean up some other way.
+ */
+ if ((id = shmget(segid, 0, 0)) != -1) {
+ (void)shmctl(id, IPC_RMID, NULL);
+ if ((id = shmget(segid, 0, 0)) != -1) {
+ __db_err(dbenv,
+ "shmget: key: %ld: shared system memory region already exists",
+ (long)segid);
+ return (EAGAIN);
+ }
+ }
+ if ((id =
+ shmget(segid, rp->size, IPC_CREAT | 0600)) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv,
+ "shmget: key: %ld: unable to create shared system memory region: %s",
+ (long)segid, strerror(ret));
+ return (ret);
+ }
+ rp->segid = id;
+ } else
+ id = rp->segid;
+
+ /* Attach; shmat returns (void *)-1 on failure. */
+ if ((infop->addr = shmat(id, NULL, 0)) == (void *)-1) {
+ infop->addr = NULL;
+ ret = __os_get_errno();
+ __db_err(dbenv,
+ "shmat: id %d: unable to attach to shared system memory region: %s",
+ id, strerror(ret));
+ return (ret);
+ }
+
+ return (0);
+ }
+#else
+ return (__db_nosystemmem(dbenv));
+#endif
+ }
+
+#ifdef HAVE_MMAP
+ {
+ DB_FH fh;
+ int ret;
+
+ /*
+ * Try to open/create the shared region file. We DO NOT need to ensure
+ * that multiple threads/processes attempting to simultaneously create
+ * the region are properly ordered, our caller has already taken care
+ * of that.
+ */
+ if ((ret = __os_open(dbenv, infop->name,
+ DB_OSO_REGION | DB_OSO_DIRECT |
+ (F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE : 0),
+ infop->mode, &fh)) != 0)
+ __db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
+
+ /*
+ * If we created the file, grow it to its full size before mapping
+ * it in. We really want to avoid touching the buffer cache after
+ * mmap(2) is called, doing anything else confuses the hell out of
+ * systems without merged VM/buffer cache systems, or, more to the
+ * point, *badly* merged VM/buffer cache systems.
+ */
+ if (ret == 0 && F_ISSET(infop, REGION_CREATE))
+ ret = __db_fileinit(dbenv,
+ &fh, rp->size, F_ISSET(dbenv, DB_ENV_REGION_INIT) ? 1 : 0);
+
+ /* Map the file in. */
+ if (ret == 0)
+ ret = __os_map(dbenv,
+ infop->name, &fh, rp->size, 1, 0, &infop->addr);
+
+ if (F_ISSET(&fh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, &fh);
+
+ return (ret);
+ }
+#else
+ COMPQUIET(infop, NULL);
+ COMPQUIET(rp, NULL);
+ __db_err(dbenv,
+ "architecture lacks mmap(2), shared environments not possible");
+ return (__db_eopnotsup(dbenv));
+#endif
+}
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared memory region, optionally destroying its
+ * backing segment or file. Returns 0 or an error number.
+ *
+ * PUBLIC: int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ REGION *rp;
+
+ rp = infop->rp;
+
+ if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM)) {
+#ifdef HAVE_SHMGET
+ int ret, segid;
+
+ /*
+ * We may be about to remove the memory referenced by rp,
+ * save the segment ID, and (optionally) wipe the original.
+ */
+ segid = rp->segid;
+ if (destroy)
+ rp->segid = INVALID_REGION_SEGID;
+
+ if (shmdt(infop->addr) != 0) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "shmdt: %s", strerror(ret));
+ return (ret);
+ }
+
+ /* EINVAL from shmctl means the segment is already gone. */
+ if (destroy && shmctl(segid, IPC_RMID,
+ NULL) != 0 && (ret = __os_get_errno()) != EINVAL) {
+ /* Cast segid to long to match the %ld conversion. */
+ __db_err(dbenv,
+ "shmctl: id %ld: unable to delete system shared memory region: %s",
+ (long)segid, strerror(ret));
+ return (ret);
+ }
+
+ return (0);
+#else
+ return (__db_nosystemmem(dbenv));
+#endif
+ }
+
+#ifdef HAVE_MMAP
+#ifdef HAVE_MUNLOCK
+ if (F_ISSET(dbenv, DB_ENV_LOCKDOWN))
+ (void)munlock(infop->addr, rp->size);
+#endif
+ if (munmap(infop->addr, rp->size) != 0) {
+ int ret;
+
+ ret = __os_get_errno();
+ __db_err(dbenv, "munmap: %s", strerror(ret));
+ return (ret);
+ }
+
+ /* Remove the backing file if we're destroying the region. */
+ if (destroy && __os_region_unlink(dbenv, infop->name) != 0)
+ return (__os_get_errno());
+
+ return (0);
+#else
+ COMPQUIET(destroy, 0);
+ return (EINVAL);
+#endif
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file. Thin wrapper over __os_map; returns
+ * EINVAL on platforms without mmap (or on QNX, which is excluded).
+ *
+ * PUBLIC: int __os_mapfile __P((DB_ENV *,
+ * PUBLIC: char *, DB_FH *, size_t, int, void **));
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addrp;
+{
+#if defined(HAVE_MMAP) && !defined(HAVE_QNX)
+ /* is_region is 0: this is a database file, not an environment. */
+ return (__os_map(dbenv, path, fhp, len, 0, is_rdonly, addrp));
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(path, NULL);
+ COMPQUIET(fhp, NULL);
+ COMPQUIET(is_rdonly, 0);
+ COMPQUIET(len, 0);
+ COMPQUIET(addrp, NULL);
+ return (EINVAL);
+#endif
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file. Returns 0 or an error number.
+ *
+ * PUBLIC: int __os_unmapfile __P((DB_ENV *, void *, size_t));
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_unmap) != NULL)
+ return (DB_GLOBAL(j_unmap)(addr, len));
+
+#ifdef HAVE_MMAP
+#ifdef HAVE_MUNLOCK
+ /* Undo any mlock done at map time; retry on EINTR. */
+ if (F_ISSET(dbenv, DB_ENV_LOCKDOWN))
+ while (munlock(addr, len) != 0 && __os_get_errno() == EINTR)
+ ;
+#else
+ COMPQUIET(dbenv, NULL);
+#endif
+ {
+ int ret;
+
+ while ((ret = munmap(addr, len)) != 0 &&
+ __os_get_errno() == EINTR)
+ ;
+ return (ret ? __os_get_errno() : 0);
+ }
+#else
+ COMPQUIET(dbenv, NULL);
+
+ return (EINVAL);
+#endif
+}
+
+#ifdef HAVE_MMAP
+/*
+ * __os_map --
+ * Call the mmap(2) function. Maps the file underlying fhp for len
+ * bytes; on success stores the mapped address through addrp and
+ * returns 0, otherwise returns an error number.
+ */
+static int
+__os_map(dbenv, path, fhp, len, is_region, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_region, is_rdonly;
+ size_t len;
+ void **addrp;
+{
+ void *p;
+ int flags, prot, ret;
+
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_map) != NULL)
+ return (DB_GLOBAL(j_map)
+ (path, len, is_region, is_rdonly, addrp));
+
+ /*
+ * If it's read-only, it's private, and if it's not, it's shared.
+ * Don't bother with an additional parameter.
+ */
+ flags = is_rdonly ? MAP_PRIVATE : MAP_SHARED;
+
+#ifdef MAP_FILE
+ /*
+ * Historically, MAP_FILE was required for mapping regular files,
+ * even though it was the default. Some systems have it, some
+ * don't, some that have it set it to 0.
+ */
+ flags |= MAP_FILE;
+#endif
+
+ /*
+ * I know of no systems that implement the flag to tell the system
+ * that the region contains semaphores, but it's not an unreasonable
+ * thing to do, and has been part of the design since forever. I
+ * don't think anyone will object, but don't set it for read-only
+ * files, it doesn't make sense.
+ */
+#ifdef MAP_HASSEMAPHORE
+ if (is_region && !is_rdonly)
+ flags |= MAP_HASSEMAPHORE;
+#else
+ COMPQUIET(is_region, 0);
+#endif
+
+ prot = PROT_READ | (is_rdonly ? 0 : PROT_WRITE);
+
+ /*
+ * XXX
+ * Work around a bug in the VMS V7.1 mmap() implementation. To map
+ * a file into memory on VMS it needs to be opened in a certain way,
+ * originally. To get the file opened in that certain way, the VMS
+ * mmap() closes the file and re-opens it. When it does this, it
+ * doesn't flush any caches out to disk before closing. The problem
+ * this causes us is that when the memory cache doesn't get written
+ * out, the file isn't big enough to match the memory chunk and the
+ * mmap() call fails. This call to fsync() fixes the problem. DEC
+ * thinks this isn't a bug because of language in XPG5 discussing user
+ * responsibility for on-disk and in-memory synchronization.
+ *
+ * Note that __os_fsync returns 0 or an error number, never -1, so
+ * test for any non-zero return and propagate it directly.
+ */
+#ifdef VMS
+ if ((ret = __os_fsync(dbenv, fhp)) != 0)
+ return (ret);
+#endif
+
+ /* MAP_FAILED was not defined in early mmap implementations. */
+#ifndef MAP_FAILED
+#define MAP_FAILED -1
+#endif
+ if ((p = mmap(NULL,
+ len, prot, flags, fhp->fd, (off_t)0)) == (void *)MAP_FAILED) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "mmap: %s", strerror(ret));
+ return (ret);
+ }
+
+#ifdef HAVE_MLOCK
+ /*
+ * If it's a region, we want to make sure that the memory isn't paged.
+ * For example, Solaris will page large mpools because it thinks that
+ * I/O buffer memory is more important than we are. The mlock system
+ * call may or may not succeed (mlock is restricted to the super-user
+ * on some systems). Currently, the only other use of mmap in DB is
+ * to map read-only databases -- we don't want them paged, either, so
+ * the call isn't conditional.
+ */
+ if (F_ISSET(dbenv, DB_ENV_LOCKDOWN) && mlock(p, len) != 0) {
+ ret = __os_get_errno();
+ (void)munmap(p, len);
+ __db_err(dbenv, "mlock: %s", strerror(ret));
+ return (ret);
+ }
+#else
+ COMPQUIET(dbenv, NULL);
+#endif
+
+ *addrp = p;
+ return (0);
+}
+#endif
+
+#ifndef HAVE_SHMGET
+/*
+ * __db_nosystemmem --
+ * No system memory environments error message. Shared by the
+ * attach/detach paths when shmget(2) is unavailable.
+ */
+static int
+__db_nosystemmem(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv,
+ "architecture doesn't support environments in system memory");
+ return (__db_eopnotsup(dbenv));
+}
+#endif
diff --git a/storage/bdb/os/os_method.c b/storage/bdb/os/os_method.c
new file mode 100644
index 00000000000..04367654efa
--- /dev/null
+++ b/storage/bdb/os/os_method.c
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_method.c,v 11.15 2002/07/12 18:56:51 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * EXTERN: int db_env_set_func_close __P((int (*)(int)));
+ */
+int
+db_env_set_func_close(func_close)
+ int (*func_close) __P((int));
+{
+ DB_GLOBAL(j_close) = func_close;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_dirfree __P((void (*)(char **, int)));
+ */
+int
+db_env_set_func_dirfree(func_dirfree)
+ void (*func_dirfree) __P((char **, int));
+{
+ DB_GLOBAL(j_dirfree) = func_dirfree;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_dirlist
+ * EXTERN: __P((int (*)(const char *, char ***, int *)));
+ */
+int
+db_env_set_func_dirlist(func_dirlist)
+ int (*func_dirlist) __P((const char *, char ***, int *));
+{
+ DB_GLOBAL(j_dirlist) = func_dirlist;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_exists __P((int (*)(const char *, int *)));
+ */
+int
+db_env_set_func_exists(func_exists)
+ int (*func_exists) __P((const char *, int *));
+{
+ DB_GLOBAL(j_exists) = func_exists;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_free __P((void (*)(void *)));
+ */
+int
+db_env_set_func_free(func_free)
+ void (*func_free) __P((void *));
+{
+ DB_GLOBAL(j_free) = func_free;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_fsync __P((int (*)(int)));
+ */
+int
+db_env_set_func_fsync(func_fsync)
+ int (*func_fsync) __P((int));
+{
+ DB_GLOBAL(j_fsync) = func_fsync;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_ioinfo __P((int (*)(const char *,
+ * EXTERN: int, u_int32_t *, u_int32_t *, u_int32_t *)));
+ */
+int
+db_env_set_func_ioinfo(func_ioinfo)
+ int (*func_ioinfo)
+ __P((const char *, int, u_int32_t *, u_int32_t *, u_int32_t *));
+{
+ DB_GLOBAL(j_ioinfo) = func_ioinfo;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_malloc __P((void *(*)(size_t)));
+ */
+int
+db_env_set_func_malloc(func_malloc)
+ void *(*func_malloc) __P((size_t));
+{
+ DB_GLOBAL(j_malloc) = func_malloc;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_map
+ * EXTERN: __P((int (*)(char *, size_t, int, int, void **)));
+ */
+int
+db_env_set_func_map(func_map)
+ int (*func_map) __P((char *, size_t, int, int, void **));
+{
+ DB_GLOBAL(j_map) = func_map;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_open __P((int (*)(const char *, int, ...)));
+ */
+int
+db_env_set_func_open(func_open)
+ int (*func_open) __P((const char *, int, ...));
+{
+ DB_GLOBAL(j_open) = func_open;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
+ */
+int
+db_env_set_func_read(func_read)
+ ssize_t (*func_read) __P((int, void *, size_t));
+{
+ DB_GLOBAL(j_read) = func_read;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
+ */
+int
+db_env_set_func_realloc(func_realloc)
+ void *(*func_realloc) __P((void *, size_t));
+{
+ DB_GLOBAL(j_realloc) = func_realloc;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_rename
+ * EXTERN: __P((int (*)(const char *, const char *)));
+ */
+int
+db_env_set_func_rename(func_rename)
+ int (*func_rename) __P((const char *, const char *));
+{
+ DB_GLOBAL(j_rename) = func_rename;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_seek
+ * EXTERN: __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int)));
+ */
+int
+db_env_set_func_seek(func_seek)
+ int (*func_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int));
+{
+ DB_GLOBAL(j_seek) = func_seek;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
+ */
+int
+db_env_set_func_sleep(func_sleep)
+ int (*func_sleep) __P((u_long, u_long));
+{
+ DB_GLOBAL(j_sleep) = func_sleep;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_unlink __P((int (*)(const char *)));
+ */
+int
+db_env_set_func_unlink(func_unlink)
+ int (*func_unlink) __P((const char *));
+{
+ DB_GLOBAL(j_unlink) = func_unlink;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_unmap __P((int (*)(void *, size_t)));
+ */
+int
+db_env_set_func_unmap(func_unmap)
+ int (*func_unmap) __P((void *, size_t));
+{
+ DB_GLOBAL(j_unmap) = func_unmap;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_write
+ * EXTERN: __P((ssize_t (*)(int, const void *, size_t)));
+ */
+int
+db_env_set_func_write(func_write)
+ ssize_t (*func_write) __P((int, const void *, size_t));
+{
+ DB_GLOBAL(j_write) = func_write;
+ return (0);
+}
+
+/*
+ * EXTERN: int db_env_set_func_yield __P((int (*)(void)));
+ */
+int
+db_env_set_func_yield(func_yield)
+ int (*func_yield) __P((void));
+{
+ DB_GLOBAL(j_yield) = func_yield;
+ return (0);
+}
diff --git a/storage/bdb/os/os_oflags.c b/storage/bdb/os/os_oflags.c
new file mode 100644
index 00000000000..f75178de75e
--- /dev/null
+++ b/storage/bdb/os/os_oflags.c
@@ -0,0 +1,118 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_oflags.c,v 11.9 2002/01/11 15:53:00 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <fcntl.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_oflags --
+ * Convert open(2) flags to DB flags.
+ *
+ * PUBLIC: u_int32_t __db_oflags __P((int));
+ */
+u_int32_t
+__db_oflags(oflags)
+ int oflags;
+{
+ u_int32_t dbflags;
+
+ dbflags = 0;
+
+ /* Creation and truncation map one-to-one onto DB flags. */
+ if (oflags & O_CREAT)
+ dbflags |= DB_CREATE;
+
+ if (oflags & O_TRUNC)
+ dbflags |= DB_TRUNCATE;
+
+ /*
+  * !!!
+  * Convert POSIX 1003.1 open(2) mode flags to DB flags. This isn't
+  * an exact science as few POSIX implementations have a flag value
+  * for O_RDONLY, it's simply the lack of a write flag.
+  */
+#ifndef O_ACCMODE
+#define O_ACCMODE (O_RDONLY | O_RDWR | O_WRONLY)
+#endif
+ switch (oflags & O_ACCMODE) {
+ case O_RDWR:
+ case O_WRONLY:
+ break;
+ default:
+ /* No write access requested, so the handle is read-only. */
+ dbflags |= DB_RDONLY;
+ break;
+ }
+ return (dbflags);
+}
+
+/*
+ * __db_omode --
+ * Convert a permission string to the correct open(2) flags.
+ *
+ * PUBLIC: int __db_omode __P((const char *));
+ */
+int
+__db_omode(perm)
+ const char *perm;
+{
+ int mode;
+
+#ifdef DB_WIN32
+#ifndef S_IRUSR
+#define S_IRUSR S_IREAD /* R for owner */
+#endif
+#ifndef S_IWUSR
+#define S_IWUSR S_IWRITE /* W for owner */
+#endif
+#ifndef S_IRGRP
+#define S_IRGRP 0 /* R for group */
+#endif
+#ifndef S_IWGRP
+#define S_IWGRP 0 /* W for group */
+#endif
+#ifndef S_IROTH
+#define S_IROTH 0 /* R for other */
+#endif
+#ifndef S_IWOTH
+#define S_IWOTH 0 /* W for other */
+#endif
+#else
+#ifndef S_IRUSR
+#define S_IRUSR 0000400 /* R for owner */
+#define S_IWUSR 0000200 /* W for owner */
+#define S_IRGRP 0000040 /* R for group */
+#define S_IWGRP 0000020 /* W for group */
+#define S_IROTH 0000004 /* R for other */
+#define S_IWOTH 0000002 /* W for other */
+#endif
+#endif /* DB_WIN32 */
+ /*
+  * Map a 6-character "rwrwrw"-style string (owner/group/other pairs,
+  * any non-'r'/'w' character disables the bit) to open(2) mode bits.
+  *
+  * NOTE(review): perm[0]..perm[5] are read unconditionally -- this
+  * assumes the caller always passes at least six characters; confirm
+  * against callers before passing shorter strings.
+  */
+ mode = 0;
+ if (perm[0] == 'r')
+ mode |= S_IRUSR;
+ if (perm[1] == 'w')
+ mode |= S_IWUSR;
+ if (perm[2] == 'r')
+ mode |= S_IRGRP;
+ if (perm[3] == 'w')
+ mode |= S_IWGRP;
+ if (perm[4] == 'r')
+ mode |= S_IROTH;
+ if (perm[5] == 'w')
+ mode |= S_IWOTH;
+ return (mode);
+}
diff --git a/storage/bdb/os/os_open.c b/storage/bdb/os/os_open.c
new file mode 100644
index 00000000000..0a4dbadc6e8
--- /dev/null
+++ b/storage/bdb/os/os_open.c
@@ -0,0 +1,257 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_open.c,v 11.37 2002/06/21 20:35:16 sandstro Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef HAVE_QNX
+static int __os_region_open __P((DB_ENV *, const char *, int, int, DB_FH *));
+#endif
+
+/*
+ * __os_open --
+ * Open a file.
+ *
+ * PUBLIC: int __os_open __P((DB_ENV *, const char *, u_int32_t, int, DB_FH *));
+ */
+int
+__os_open(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ DB_FH *fhp;
+{
+ int oflags, ret;
+
+ /* Translate DB_OSO_* flags into open(2) oflags, then open. */
+ oflags = 0;
+
+#ifdef DIAGNOSTIC
+#define OKFLAGS \
+ (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_LOG | \
+ DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | DB_OSO_TEMP | \
+ DB_OSO_TRUNC)
+ if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0)
+ return (ret);
+#endif
+
+#if defined(O_BINARY)
+ /*
+  * If there's a binary-mode open flag, set it, we never want any
+  * kind of translation. Some systems do translations by default,
+  * e.g., with Cygwin, the default mode for an open() is set by the
+  * mode of the mount that underlies the file.
+  */
+ oflags |= O_BINARY;
+#endif
+
+ /*
+  * DB requires the POSIX 1003.1 semantic that two files opened at the
+  * same time with DB_OSO_CREATE/O_CREAT and DB_OSO_EXCL/O_EXCL flags
+  * set return an EEXIST failure in at least one.
+  */
+ if (LF_ISSET(DB_OSO_CREATE))
+ oflags |= O_CREAT;
+
+ if (LF_ISSET(DB_OSO_EXCL))
+ oflags |= O_EXCL;
+
+#if defined(O_DSYNC) && defined(XXX_NEVER_SET)
+ /*
+  * !!!
+  * We should get better performance if we push the log files to disk
+  * immediately instead of waiting for the sync. However, Solaris
+  * (and likely any other system based on the 4BSD filesystem releases),
+  * doesn't implement O_DSYNC correctly, only flushing data blocks and
+  * not inode or indirect blocks.
+  */
+ if (LF_ISSET(DB_OSO_LOG))
+ oflags |= O_DSYNC;
+#endif
+
+ /* Default to read-write access unless read-only was requested. */
+ if (LF_ISSET(DB_OSO_RDONLY))
+ oflags |= O_RDONLY;
+ else
+ oflags |= O_RDWR;
+
+ if (LF_ISSET(DB_OSO_TRUNC))
+ oflags |= O_TRUNC;
+
+#ifdef HAVE_O_DIRECT
+ if (LF_ISSET(DB_OSO_DIRECT))
+ oflags |= O_DIRECT;
+#endif
+
+#ifdef HAVE_QNX
+ /* QNX shared regions must go through shm_open, not open(2). */
+ if (LF_ISSET(DB_OSO_REGION))
+ return (__os_region_open(dbenv, name, oflags, mode, fhp));
+#endif
+ /* Open the file. */
+ if ((ret = __os_openhandle(dbenv, name, oflags, mode, fhp)) != 0)
+ return (ret);
+
+#ifdef HAVE_DIRECTIO
+ if (LF_ISSET(DB_OSO_DIRECT))
+ (void)directio(fhp->fd, DIRECTIO_ON);
+#endif
+
+ /*
+  * Delete any temporary file.
+  *
+  * !!!
+  * There's a race here, where we've created a file and we crash before
+  * we can unlink it. Temporary files aren't common in DB, regardless,
+  * it's not a security problem because the file is empty. There's no
+  * reasonable way to avoid the race (playing signal games isn't worth
+  * the portability nightmare), so we just live with it.
+  */
+ if (LF_ISSET(DB_OSO_TEMP)) {
+#if defined(HAVE_UNLINK_WITH_OPEN_FAILURE) || defined(CONFIG_TEST)
+ /* Remember the name so the handle can unlink it at close. */
+ if ((ret = __os_strdup(dbenv, name, &fhp->name)) != 0) {
+ (void)__os_closehandle(dbenv, fhp);
+ (void)__os_unlink(dbenv, name);
+ return (ret);
+ }
+ F_SET(fhp, DB_FH_UNLINK);
+#else
+ (void)__os_unlink(dbenv, name);
+#endif
+ }
+
+ return (0);
+}
+
+#ifdef HAVE_QNX
+/*
+ * __os_region_open --
+ * Open a shared memory region file using POSIX shm_open.
+ */
+static int
+__os_region_open(dbenv, name, oflags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ int oflags;
+ int mode;
+ DB_FH *fhp;
+{
+ int ret;
+ char *newname;
+
+ /*
+  * __os_shmname sets newname to NULL before doing anything else, so
+  * the err: cleanup below is safe even on this failure path.
+  */
+ if ((ret = __os_shmname(dbenv, name, &newname)) != 0)
+ goto err;
+ memset(fhp, 0, sizeof(*fhp));
+ fhp->fd = shm_open(newname, oflags, mode);
+ if (fhp->fd == -1)
+ ret = __os_get_errno();
+ else {
+#ifdef HAVE_FCNTL_F_SETFD
+ /* Deny file descriptor access to any child process. */
+ if (fcntl(fhp->fd, F_SETFD, 1) == -1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "fcntl(F_SETFD): %s", strerror(ret));
+ __os_closehandle(dbenv, fhp);
+ } else
+#endif
+ F_SET(fhp, DB_FH_VALID);
+ }
+ /*
+  * Once we have created the object, we don't need the name
+  * anymore. Other callers of this will convert themselves.
+  */
+err:
+ if (newname != NULL)
+ __os_free(dbenv, newname);
+ return (ret);
+}
+
+/*
+ * __os_shmname --
+ * Translate a pathname into a shm_open memory object name.
+ *
+ * PUBLIC: #ifdef HAVE_QNX
+ * PUBLIC: int __os_shmname __P((DB_ENV *, const char *, char **));
+ * PUBLIC: #endif
+ */
+int
+__os_shmname(dbenv, name, newnamep)
+ DB_ENV *dbenv;
+ const char *name;
+ char **newnamep;
+{
+ int ret;
+ size_t size;
+ char *p, *q, *tmpname;
+
+ *newnamep = NULL;
+
+ /*
+  * POSIX states that the name for a shared memory object
+  * may begin with a slash '/' and support for subsequent
+  * slashes is implementation-dependent. The one implementation
+  * we know of right now, QNX, forbids subsequent slashes.
+  * We don't want to be parsing pathnames for '.' and '..' in
+  * the middle. In order to allow easy conversion, just take
+  * the last component as the shared memory name. This limits
+  * the namespace a bit, but makes our job a lot easier.
+  *
+  * We should not be modifying user memory, so we use our own.
+  * Caller is responsible for freeing the memory we give them.
+  */
+ if ((ret = __os_strdup(dbenv, name, &tmpname)) != 0)
+ return (ret);
+ /*
+  * Skip over filename component.
+  * We set that separator to '\0' so that we can do another
+  * __db_rpath. However, we immediately set it then to ':'
+  * so that we end up with the trailing directory:filename.
+  * We require a home directory component. Return an error
+  * if there isn't one.
+  */
+ p = __db_rpath(tmpname);
+ if (p == NULL) {
+ /* Fix: don't leak our working copy on the error return. */
+ __os_free(dbenv, tmpname);
+ return (EINVAL);
+ }
+ if (p != tmpname) {
+ *p = '\0';
+ q = p;
+ p = __db_rpath(tmpname);
+ *q = ':';
+ }
+ if (p != NULL) {
+ /*
+  * If we have a path component, copy and return it.
+  */
+ ret = __os_strdup(dbenv, p, newnamep);
+ __os_free(dbenv, tmpname);
+ return (ret);
+ }
+
+ /*
+  * We were given just a directory name with no path components.
+  * Add a leading slash, and copy the remainder.
+  */
+ size = strlen(tmpname) + 2;
+ if ((ret = __os_malloc(dbenv, size, &p)) != 0) {
+ /* Fix: don't leak the working copy on allocation failure. */
+ __os_free(dbenv, tmpname);
+ return (ret);
+ }
+ p[0] = '/';
+ memcpy(&p[1], tmpname, size-1);
+ __os_free(dbenv, tmpname);
+ *newnamep = p;
+ return (0);
+}
+#endif
diff --git a/storage/bdb/os/os_region.c b/storage/bdb/os/os_region.c
new file mode 100644
index 00000000000..6529f708b2c
--- /dev/null
+++ b/storage/bdb/os/os_region.c
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_region.c,v 11.15 2002/07/12 18:56:51 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_r_attach --
+ * Attach to a shared memory region.
+ *
+ * PUBLIC: int __os_r_attach __P((DB_ENV *, REGINFO *, REGION *));
+ */
+int
+__os_r_attach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ int ret;
+ /* Round off the requested size for the underlying VM. */
+ OS_VMROUNDOFF(rp->size);
+
+#ifdef DB_REGIONSIZE_MAX
+ /* Some architectures have hard limits on the maximum region size. */
+ if (rp->size > DB_REGIONSIZE_MAX) {
+ __db_err(dbenv, "region size %lu is too large; maximum is %lu",
+ (u_long)rp->size, (u_long)DB_REGIONSIZE_MAX);
+ return (EINVAL);
+ }
+#endif
+
+ /*
+  * If a region is private, malloc the memory.
+  *
+  * !!!
+  * If this fails because the region is too large to malloc, mmap(2)
+  * using the MAP_ANON or MAP_ANONYMOUS flags would be an alternative.
+  * I don't know of any architectures (yet!) where malloc is a problem.
+  */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+#if defined(MUTEX_NO_MALLOC_LOCKS)
+ /*
+  * !!!
+  * There exist spinlocks that don't work in malloc memory, e.g.,
+  * the HP/UX msemaphore interface. If we don't have locks that
+  * will work in malloc memory, we better not be private or not
+  * be threaded.
+  */
+ if (F_ISSET(dbenv, DB_ENV_THREAD)) {
+ __db_err(dbenv, "%s",
+ "architecture does not support locks inside process-local (malloc) memory");
+ __db_err(dbenv, "%s",
+ "application may not specify both DB_PRIVATE and DB_THREAD");
+ return (EINVAL);
+ }
+#endif
+ if ((ret =
+ __os_malloc(dbenv, rp->size, &infop->addr)) != 0)
+ return (ret);
+#if defined(UMRW) && !defined(DIAGNOSTIC)
+ /*
+  * NOTE(review): CLEAR_BYTE fill is presumably for
+  * uninitialized-memory-read detection tools -- confirm.
+  */
+ memset(infop->addr, CLEAR_BYTE, rp->size);
+#endif
+ return (0);
+ }
+
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_map) != NULL)
+ return (DB_GLOBAL(j_map)(infop->name,
+ rp->size, 1, 0, &infop->addr));
+
+ return (__os_r_sysattach(dbenv, infop, rp));
+}
+
+/*
+ * __os_r_detach --
+ * Detach from a shared memory region.
+ *
+ * PUBLIC: int __os_r_detach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_detach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ REGION *rp;
+
+ rp = infop->rp;
+
+ /* If a region is private, free the memory. */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ __os_free(dbenv, infop->addr);
+ return (0);
+ }
+
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_unmap) != NULL)
+ return (DB_GLOBAL(j_unmap)(infop->addr, rp->size));
+
+ /*
+  * Note: the destroy flag is only honored by the system detach
+  * path; the private and user-supplied paths above ignore it.
+  */
+ return (__os_r_sysdetach(dbenv, infop, destroy));
+}
diff --git a/storage/bdb/os/os_rename.c b/storage/bdb/os/os_rename.c
new file mode 100644
index 00000000000..2569a9c3186
--- /dev/null
+++ b/storage/bdb/os/os_rename.c
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rename.c,v 11.12 2002/07/12 18:56:52 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_rename --
+ * Rename a file. If flags is non-zero, then errors are OK and we
+ * should not output an error message.
+ *
+ * PUBLIC: int __os_rename __P((DB_ENV *,
+ * PUBLIC: const char *, const char *, u_int32_t));
+ */
+int
+__os_rename(dbenv, old, new, flags)
+ DB_ENV *dbenv;
+ const char *old, *new;
+ u_int32_t flags;
+{
+ int ret;
+
+ /*
+  * Retry on EINTR; an application-supplied rename function
+  * (j_rename) replaces rename(2) when registered.
+  */
+ do {
+ ret = DB_GLOBAL(j_rename) != NULL ?
+ DB_GLOBAL(j_rename)(old, new) : rename(old, new);
+ } while (ret != 0 && (ret = __os_get_errno()) == EINTR);
+
+ /* Non-zero flags suppresses the error message, per the contract. */
+ if (ret != 0 && flags == 0)
+ __db_err(dbenv, "rename %s %s: %s", old, new, strerror(ret));
+ return (ret);
+}
diff --git a/storage/bdb/os/os_root.c b/storage/bdb/os/os_root.c
new file mode 100644
index 00000000000..cd5bfc352e9
--- /dev/null
+++ b/storage/bdb/os/os_root.c
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_root.c,v 11.6 2002/01/11 15:53:01 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_isroot --
+ * Return if user has special permissions.
+ *
+ * PUBLIC: int __os_isroot __P((void));
+ */
+int
+__os_isroot()
+{
+#ifdef HAVE_GETUID
+ return (getuid() == 0);
+#else
+ /* Without getuid(), we can't tell; assume non-root. */
+ return (0);
+#endif
+}
diff --git a/storage/bdb/os/os_rpath.c b/storage/bdb/os/os_rpath.c
new file mode 100644
index 00000000000..b9ccba01bd5
--- /dev/null
+++ b/storage/bdb/os/os_rpath.c
@@ -0,0 +1,69 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rpath.c,v 11.7 2002/01/11 15:53:01 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#ifdef HAVE_VXWORKS
+#include "iosLib.h"
+#endif
+
+/*
+ * __db_rpath --
+ * Return the last path separator in the path or NULL if none found.
+ *
+ * PUBLIC: char *__db_rpath __P((const char *));
+ */
+char *
+__db_rpath(path)
+ const char *path;
+{
+ const char *s, *last;
+#ifdef HAVE_VXWORKS
+ DEV_HDR *dummy;
+ char *ptail;
+
+ /*
+  * VxWorks devices can be rooted at any name. We want to
+  * skip over the device name and not take into account any
+  * PATH_SEPARATOR characters that might be in that name.
+  *
+  * XXX [#2393]
+  * VxWorks supports having a filename directly follow a device
+  * name with no separator. I.e. to access a file 'xxx' in
+  * the top level directory of a device mounted at "mydrive"
+  * you could say "mydrivexxx" or "mydrive/xxx" or "mydrive\xxx".
+  * We do not support the first usage here.
+  * XXX
+  */
+ if ((dummy = iosDevFind((char *)path, &ptail)) == NULL)
+ s = path;
+ else
+ s = ptail;
+#else
+ s = path;
+#endif
+
+ /*
+  * If PATH_SEPARATOR holds more than one character (e.g., both
+  * slash styles on Windows), match any of them; otherwise a
+  * plain character compare avoids the strchr call per byte.
+  */
+ last = NULL;
+ if (PATH_SEPARATOR[1] != '\0') {
+ for (; s[0] != '\0'; ++s)
+ if (strchr(PATH_SEPARATOR, s[0]) != NULL)
+ last = s;
+ } else
+ for (; s[0] != '\0'; ++s)
+ if (s[0] == PATH_SEPARATOR[0])
+ last = s;
+ return ((char *)last);
+}
diff --git a/storage/bdb/os/os_rw.c b/storage/bdb/os/os_rw.c
new file mode 100644
index 00000000000..9a79342c7b8
--- /dev/null
+++ b/storage/bdb/os/os_rw.c
@@ -0,0 +1,148 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rw.c,v 11.24 2002/07/12 18:56:52 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_io --
+ * Do an I/O.
+ *
+ * PUBLIC: int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+ */
+int
+__os_io(dbenv, db_iop, op, niop)
+ DB_ENV *dbenv;
+ DB_IO *db_iop;
+ int op;
+ size_t *niop;
+{
+ int ret;
+
+#if defined(HAVE_PREAD) && defined(HAVE_PWRITE)
+ /* Fast path: atomic positioned I/O, no mutex or seek needed. */
+ switch (op) {
+ case DB_IO_READ:
+ if (DB_GLOBAL(j_read) != NULL)
+ goto slow;
+ *niop = pread(db_iop->fhp->fd, db_iop->buf,
+     db_iop->bytes, (off_t)db_iop->pgno * db_iop->pagesize);
+ break;
+ case DB_IO_WRITE:
+ if (DB_GLOBAL(j_write) != NULL)
+ goto slow;
+ *niop = pwrite(db_iop->fhp->fd, db_iop->buf,
+     db_iop->bytes, (off_t)db_iop->pgno * db_iop->pagesize);
+ break;
+ default:
+ /*
+  * Fix: without this case an unknown op fell through to
+  * test *niop uninitialized; take the locked path instead.
+  */
+ goto slow;
+ }
+ if (*niop == (size_t)db_iop->bytes)
+ return (0);
+slow:
+#endif
+ /* Slow path: serialize seek + read/write under the handle mutex. */
+ MUTEX_THREAD_LOCK(dbenv, db_iop->mutexp);
+
+ if ((ret = __os_seek(dbenv, db_iop->fhp,
+     db_iop->pagesize, db_iop->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+ switch (op) {
+ case DB_IO_READ:
+ ret = __os_read(dbenv,
+     db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ case DB_IO_WRITE:
+ ret = __os_write(dbenv,
+     db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ default:
+ /* Fix: report unknown operations instead of returning 0. */
+ ret = EINVAL;
+ break;
+ }
+
+err: MUTEX_THREAD_UNLOCK(dbenv, db_iop->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __os_read --
+ * Read from a file handle.
+ *
+ * PUBLIC: int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_read(dbenv, fhp, addr, len, nrp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nrp;
+{
+ size_t offset;
+ ssize_t nr;
+ int ret;
+ u_int8_t *taddr;
+
+ /*
+  * Accumulate short reads and restart after EINTR; an optional
+  * application-supplied read function (j_read) replaces read(2).
+  * EOF (a zero-length read) ends the loop; *nrp reports the actual
+  * number of bytes read, which may be less than len.
+  */
+ for (taddr = addr,
+     offset = 0; offset < len; taddr += nr, offset += nr) {
+retry: if ((nr = DB_GLOBAL(j_read) != NULL ?
+     DB_GLOBAL(j_read)(fhp->fd, taddr, len - offset) :
+     read(fhp->fd, taddr, len - offset)) < 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ /*
+  * Fix: the address must not be passed for %x (an int
+  * conversion, undefined with a 64-bit pointer) -- cast
+  * to u_long and print with %lx. Also parenthesize
+  * (len - offset) before the cast so the remaining byte
+  * count is what is reported.
+  */
+ __db_err(dbenv, "read: 0x%lx, %lu: %s", (u_long)taddr,
+     (u_long)(len - offset), strerror(ret));
+ return (ret);
+ }
+ if (nr == 0)
+ break;
+ }
+ *nrp = taddr - (u_int8_t *)addr;
+ return (0);
+}
+
+/*
+ * __os_write --
+ * Write to a file handle.
+ *
+ * PUBLIC: int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_write(dbenv, fhp, addr, len, nwp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nwp;
+{
+ size_t offset;
+ ssize_t nw;
+ int ret;
+ u_int8_t *taddr;
+
+ /*
+  * Write the buffer in full, continuing after short writes and
+  * restarting after EINTR; an optional application-supplied write
+  * function (j_write) replaces write(2) when registered.
+  */
+ for (taddr = addr,
+     offset = 0; offset < len; taddr += nw, offset += nw) {
+retry: if ((nw = DB_GLOBAL(j_write) != NULL ?
+     DB_GLOBAL(j_write)(fhp->fd, taddr, len - offset) :
+     write(fhp->fd, taddr, len - offset)) < 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ /*
+  * Fix: the address must not be passed for %x (an int
+  * conversion, undefined with a 64-bit pointer) -- cast
+  * to u_long and print with %lx. Also parenthesize
+  * (len - offset) before the cast so the remaining byte
+  * count is what is reported.
+  */
+ __db_err(dbenv, "write: 0x%lx, %lu: %s", (u_long)taddr,
+     (u_long)(len - offset), strerror(ret));
+ return (ret);
+ }
+ }
+ /* The loop only exits once all len bytes have been written. */
+ *nwp = len;
+ return (0);
+}
diff --git a/storage/bdb/os/os_seek.c b/storage/bdb/os/os_seek.c
new file mode 100644
index 00000000000..5b2aa45d5dd
--- /dev/null
+++ b/storage/bdb/os/os_seek.c
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_seek.c,v 11.18 2002/07/12 18:56:52 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_seek --
+ * Seek to a page/byte offset in the file.
+ *
+ * PUBLIC: int __os_seek __P((DB_ENV *,
+ * PUBLIC: DB_FH *, size_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK));
+ */
+int
+__os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t pgsize;
+ db_pgno_t pageno;
+ u_int32_t relative;
+ int isrewind;
+ DB_OS_SEEK db_whence;
+{
+ off_t offset;
+ int ret, whence;
+
+ /* Translate DB's whence values to the lseek(2) constants. */
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case DB_OS_SEEK_END:
+ whence = SEEK_END;
+ break;
+ case DB_OS_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ if (DB_GLOBAL(j_seek) != NULL)
+ ret = DB_GLOBAL(j_seek)(fhp->fd,
+ pgsize, pageno, relative, isrewind, whence);
+ else {
+ /* Offset is page-number * page-size plus a byte adjustment. */
+ offset = (off_t)pgsize * pageno + relative;
+ if (isrewind)
+ offset = -offset;
+ do {
+ ret = lseek(fhp->fd, offset, whence) == -1 ?
+ __os_get_errno() : 0;
+ } while (ret == EINTR);
+ }
+
+ if (ret != 0)
+ __db_err(dbenv, "seek: %lu %d %d: %s",
+ (u_long)pgsize * pageno + relative,
+ isrewind, db_whence, strerror(ret));
+
+ return (ret);
+}
diff --git a/storage/bdb/os/os_sleep.c b/storage/bdb/os/os_sleep.c
new file mode 100644
index 00000000000..42d496dbae7
--- /dev/null
+++ b/storage/bdb/os/os_sleep.c
@@ -0,0 +1,80 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_sleep.c,v 11.15 2002/07/12 18:56:52 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+
+#ifdef HAVE_VXWORKS
+#include <sys/times.h>
+#include <time.h>
+#include <selectLib.h>
+#else
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif /* HAVE_SYS_TIME_H */
+#endif /* TIME_WITH SYS_TIME */
+#endif /* HAVE_VXWORKS */
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_sleep --
+ * Yield the processor for a period of time.
+ *
+ * PUBLIC: int __os_sleep __P((DB_ENV *, u_long, u_long));
+ */
+int
+__os_sleep(dbenv, secs, usecs)
+ DB_ENV *dbenv;
+ u_long secs, usecs; /* Seconds and microseconds. */
+{
+ struct timeval t;
+ int ret;
+
+ /* Don't require that the values be normalized. */
+ for (; usecs >= 1000000; usecs -= 1000000)
+ ++secs;
+
+ if (DB_GLOBAL(j_sleep) != NULL)
+ return (DB_GLOBAL(j_sleep)(secs, usecs));
+
+ /*
+  * It's important that we yield the processor here so that other
+  * processes or threads are permitted to run.
+  */
+ t.tv_sec = secs;
+ t.tv_usec = usecs;
+ do {
+ /*
+  * NOTE(review): some systems modify the timeval on return
+  * from select(2), so an EINTR retry may sleep less than the
+  * full interval -- confirm this is acceptable to callers.
+  */
+ ret = select(0, NULL, NULL, NULL, &t) == -1 ?
+ __os_get_errno() : 0;
+ } while (ret == EINTR);
+
+ if (ret != 0)
+ __db_err(dbenv, "select: %s", strerror(ret));
+
+ return (ret);
+}
diff --git a/storage/bdb/os/os_spin.c b/storage/bdb/os/os_spin.c
new file mode 100644
index 00000000000..fb36977cb44
--- /dev/null
+++ b/storage/bdb/os/os_spin.c
@@ -0,0 +1,113 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_spin.c,v 11.13 2002/08/07 02:02:07 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#if defined(HAVE_PSTAT_GETDYNAMIC)
+#include <sys/pstat.h>
+#endif
+
+#include <limits.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+#if defined(HAVE_PSTAT_GETDYNAMIC)
+static int __os_pstat_getdynamic __P((void));
+
+/*
+ * __os_pstat_getdynamic --
+ * HP/UX.
+ */
+static int
+__os_pstat_getdynamic()
+{
+ struct pst_dynamic psd;
+
+ return (pstat_getdynamic(&psd,
+ sizeof(psd), (size_t)1, 0) == -1 ? 1 : psd.psd_proc_cnt);
+}
+#endif
+
+#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
+static int __os_sysconf __P((void));
+
+/*
+ * __os_sysconf --
+ * Solaris, Linux.
+ */
+static int
+__os_sysconf()
+{
+ long nproc;
+
+ return ((nproc = sysconf(_SC_NPROCESSORS_ONLN)) > 1 ? (int)nproc : 1);
+}
+#endif
+
+/*
+ * __os_spin --
+ * Return the number of default spins before blocking.
+ *
+ * PUBLIC: int __os_spin __P((DB_ENV *));
+ */
+int
+__os_spin(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+  * If the application specified a value or we've already figured it
+  * out, return it.
+  *
+  * XXX
+  * We don't want to repeatedly call the underlying function because
+  * it can be expensive (e.g., requiring multiple filesystem accesses
+  * under Debian Linux).
+  */
+ if (dbenv->tas_spins != 0)
+ return (dbenv->tas_spins);
+
+ /* Probe the processor count; sysconf overrides pstat if both exist. */
+ dbenv->tas_spins = 1;
+#if defined(HAVE_PSTAT_GETDYNAMIC)
+ dbenv->tas_spins = __os_pstat_getdynamic();
+#endif
+#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
+ dbenv->tas_spins = __os_sysconf();
+#endif
+
+ /*
+  * Spin 50 times per processor, we have anecdotal evidence that this
+  * is a reasonable value.
+  */
+ if (dbenv->tas_spins != 1)
+ dbenv->tas_spins *= 50;
+
+ return (dbenv->tas_spins);
+}
+
+/*
+ * __os_yield --
+ * Yield the processor.
+ *
+ * PUBLIC: void __os_yield __P((DB_ENV*, u_long));
+ */
+void
+__os_yield(dbenv, usecs)
+ DB_ENV *dbenv;
+ u_long usecs;
+{
+ /* Prefer an application-supplied yield; on success we're done. */
+ if (DB_GLOBAL(j_yield) != NULL && DB_GLOBAL(j_yield)() == 0)
+ return;
+ /* Otherwise fall back to a short sleep to give up the processor. */
+ (void)__os_sleep(dbenv, 0, usecs);
+}
diff --git a/storage/bdb/os/os_stat.c b/storage/bdb/os/os_stat.c
new file mode 100644
index 00000000000..c3510e36f5d
--- /dev/null
+++ b/storage/bdb/os/os_stat.c
@@ -0,0 +1,119 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_stat.c,v 11.20 2002/07/12 18:56:53 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_exists --
+ * Return if the file exists.
+ *
+ * PUBLIC: int __os_exists __P((const char *, int *));
+ */
+int
+__os_exists(path, isdirp)
+ const char *path;
+ int *isdirp;
+{
+ int ret;
+ struct stat sb;
+
+ /* An application-supplied exists function takes precedence. */
+ if (DB_GLOBAL(j_exists) != NULL)
+ return (DB_GLOBAL(j_exists)(path, isdirp));
+
+ /* Retry the stat(2) call on EINTR. */
+ do {
+ ret =
+#ifdef HAVE_VXWORKS
+ stat((char *)path, &sb);
+#else
+ stat(path, &sb);
+#endif
+ if (ret != 0)
+ ret = __os_get_errno();
+ } while (ret == EINTR);
+
+ if (ret != 0)
+ return (ret);
+
+#if !defined(S_ISDIR) || defined(STAT_MACROS_BROKEN)
+#undef S_ISDIR
+#ifdef _S_IFDIR
+#define S_ISDIR(m) (_S_IFDIR & (m))
+#else
+#define S_ISDIR(m) (((m) & 0170000) == 0040000)
+#endif
+#endif
+ /* Optionally report whether the path names a directory. */
+ if (isdirp != NULL)
+ *isdirp = S_ISDIR(sb.st_mode);
+
+ return (0);
+}
+
+/*
+ * __os_ioinfo --
+ * Return file size and I/O size; abstracted to make it easier
+ * to replace.
+ *
+ * PUBLIC: int __os_ioinfo __P((DB_ENV *, const char *,
+ * PUBLIC: DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep)
+ DB_ENV *dbenv;
+ const char *path;
+ DB_FH *fhp;
+ u_int32_t *mbytesp, *bytesp, *iosizep;
+{
+ int ret;
+ struct stat sb;
+
+ /*
+  * An application-supplied ioinfo function takes precedence; note
+  * path is only used on this replacement path -- the default path
+  * below works from the already-open descriptor.
+  */
+ if (DB_GLOBAL(j_ioinfo) != NULL)
+ return (DB_GLOBAL(j_ioinfo)(path,
+ fhp->fd, mbytesp, bytesp, iosizep));
+
+retry:
+ if (fstat(fhp->fd, &sb) == -1) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "fstat: %s", strerror(ret));
+ return (ret);
+ }
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = (u_int32_t)(sb.st_size / MEGABYTE);
+ if (bytesp != NULL)
+ *bytesp = (u_int32_t)(sb.st_size % MEGABYTE);
+
+ /*
+  * Return the underlying filesystem blocksize, if available.
+  *
+  * XXX
+  * Check for a 0 size -- the HP MPE/iX architecture has st_blksize,
+  * but it's always 0.
+  */
+#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
+ if (iosizep != NULL && (*iosizep = sb.st_blksize) == 0)
+ *iosizep = DB_DEF_IOSIZE;
+#else
+ if (iosizep != NULL)
+ *iosizep = DB_DEF_IOSIZE;
+#endif
+ return (0);
+}
diff --git a/storage/bdb/os/os_tmpdir.c b/storage/bdb/os/os_tmpdir.c
new file mode 100644
index 00000000000..94645af5e71
--- /dev/null
+++ b/storage/bdb/os/os_tmpdir.c
@@ -0,0 +1,121 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_tmpdir.c,v 11.19 2002/01/11 15:53:02 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+
+#ifdef macintosh
+#include <TFileSpec.h>
+#endif
+
+/*
+ * __os_tmpdir --
+ * Set the temporary directory path.
+ *
+ * The order of items in the list structure and the order of checks in
+ * the environment are documented.
+ *
+ * PUBLIC: int __os_tmpdir __P((DB_ENV *, u_int32_t));
+ */
int
__os_tmpdir(dbenv, flags)
	DB_ENV *dbenv;
	u_int32_t flags;
{
	int isdir;

	/*
	 * !!!
	 * Don't change this to:
	 *
	 *	static const char * const list[]
	 *
	 * because it creates a text relocation in position independent code.
	 */
	static const char * list[] = {
		"/var/tmp",
		"/usr/tmp",
		"/temp",		/* Windows. */
		"/tmp",
		"C:/temp",		/* Windows. */
		"C:/tmp",		/* Windows. */
		NULL
	};
	const char * const *lp, *p;

	/*
	 * Use the environment if it's permitted and initialized.  The
	 * documented checking order is TMPDIR, then the Windows TEMP and
	 * TMP names, then the Macintosh TempFolder name.  A variable that
	 * is set but empty is rejected with EINVAL.
	 */
	if (LF_ISSET(DB_USE_ENVIRON) ||
	    (LF_ISSET(DB_USE_ENVIRON_ROOT) && __os_isroot())) {
		if ((p = getenv("TMPDIR")) != NULL && p[0] == '\0') {
			__db_err(dbenv, "illegal TMPDIR environment variable");
			return (EINVAL);
		}
		/* Windows */
		if (p == NULL && (p = getenv("TEMP")) != NULL && p[0] == '\0') {
			__db_err(dbenv, "illegal TEMP environment variable");
			return (EINVAL);
		}
		/* Windows */
		if (p == NULL && (p = getenv("TMP")) != NULL && p[0] == '\0') {
			__db_err(dbenv, "illegal TMP environment variable");
			return (EINVAL);
		}
		/* Macintosh */
		if (p == NULL &&
		    (p = getenv("TempFolder")) != NULL && p[0] == '\0') {
			__db_err(dbenv,
			    "illegal TempFolder environment variable");
			return (EINVAL);
		}
		if (p != NULL)
			return (__os_strdup(dbenv, p, &dbenv->db_tmp_dir));
	}

#ifdef macintosh
	/* Get the path to the temporary folder from the OS. */
	{FSSpec spec;

		if (!Special2FSSpec(kTemporaryFolderType,
		    kOnSystemDisk, 0, &spec))
			return (__os_strdup(dbenv,
			    FSp2FullPath(&spec), &dbenv->db_tmp_dir));
	}
#endif
#ifdef DB_WIN32
	/* Get the path to the temporary directory from the OS. */
	{int len;
		char *eos, temp[MAXPATHLEN + 1];

		/* Strip a trailing slash, then verify it's a directory. */
		if ((len = GetTempPath(sizeof(temp) - 1, temp)) > 2) {
			eos = &temp[len];
			*eos-- = '\0';
			if (*eos == '\\' || *eos == '/')
				*eos = '\0';
			if (__os_exists(temp, &isdir) == 0 && isdir != 0)
				return (__os_strdup(dbenv,
				    temp, &dbenv->db_tmp_dir));
		}
	}
#endif

	/* Step through the static list looking for a possibility. */
	for (lp = list; *lp != NULL; ++lp)
		if (__os_exists(*lp, &isdir) == 0 && isdir != 0)
			return (__os_strdup(dbenv, *lp, &dbenv->db_tmp_dir));

	/* Nothing matched: leave db_tmp_dir unset and report success. */
	return (0);
}
diff --git a/storage/bdb/os/os_unlink.c b/storage/bdb/os/os_unlink.c
new file mode 100644
index 00000000000..28b03afd1aa
--- /dev/null
+++ b/storage/bdb/os/os_unlink.c
@@ -0,0 +1,109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_unlink.c,v 11.24 2002/07/12 18:56:53 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_region_unlink --
+ * Remove a shared memory object file.
+ *
+ * PUBLIC: int __os_region_unlink __P((DB_ENV *, const char *));
+ */
+int
+__os_region_unlink(dbenv, path)
+ DB_ENV *dbenv;
+ const char *path;
+{
+#ifdef HAVE_QNX
+ int ret;
+ char *newname;
+
+ if ((ret = __os_shmname(dbenv, path, &newname)) != 0)
+ goto err;
+
+ if ((ret = shm_unlink(newname)) != 0) {
+ ret = __os_get_errno();
+ if (ret != ENOENT)
+ __db_err(dbenv, "shm_unlink: %s: %s",
+ newname, strerror(ret));
+ }
+err:
+ if (newname != NULL)
+ __os_free(dbenv, newname);
+ return (ret);
+#else
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, path);
+
+ return (__os_unlink(dbenv, path));
+#endif
+}
+
+/*
+ * __os_unlink --
+ * Remove a file.
+ *
+ * PUBLIC: int __os_unlink __P((DB_ENV *, const char *));
+ */
int
__os_unlink(dbenv, path)
	DB_ENV *dbenv;
	const char *path;
{
	int ret;

	/* Use the application's unlink hook if set; retry on EINTR. */
retry:	ret = DB_GLOBAL(j_unlink) != NULL ?
	    DB_GLOBAL(j_unlink)(path) :
#ifdef HAVE_VXWORKS
	    unlink((char *)path);
#else
	    unlink(path);
#endif
	if (ret == -1) {
		if ((ret = __os_get_errno()) == EINTR)
			goto retry;
		/*
		 * XXX
		 * We really shouldn't be looking at this value ourselves,
		 * but ENOENT usually signals that a file is missing, and
		 * we attempt to unlink things (such as v. 2.x environment
		 * regions, in DB_ENV->remove) that we're expecting not to
		 * be there.  Reporting errors in these cases is annoying.
		 */
#ifdef HAVE_VXWORKS
		/*
		 * XXX
		 * The results of unlink are file system driver specific
		 * on VxWorks.  In the case of removing a file that did
		 * not exist, some, at least, return an error, but with
		 * an errno of 0, not ENOENT.
		 *
		 * Code below falls through to the original if-statement
		 * only if we didn't get a "successful" error.
		 */
		if (ret != 0)
			/* FALLTHROUGH */
#endif
		if (ret != ENOENT)
			__db_err(dbenv, "unlink: %s: %s", path, strerror(ret));
	}

	return (ret);
}
diff --git a/storage/bdb/os_vxworks/os_vx_abs.c b/storage/bdb/os_vxworks/os_vx_abs.c
new file mode 100644
index 00000000000..93e9be7269b
--- /dev/null
+++ b/storage/bdb/os_vxworks/os_vx_abs.c
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_vx_abs.c,v 1.7 2002/01/11 15:53:02 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "iosLib.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ */
+int
+__os_abspath(path)
+ const char *path;
+{
+ DEV_HDR *dummy;
+ char *ptail;
+
+ /*
+ * VxWorks devices can be rooted at any name at all.
+ * Use iosDevFind() to see if name matches any of our devices.
+ */
+ if ((dummy = iosDevFind((char *)path, &ptail)) == NULL)
+ return (0);
+ /*
+ * If the routine used a device, then ptail points to the
+ * rest and we are an abs path.
+ */
+ if (ptail != path)
+ return (1);
+ /*
+ * If the path starts with a '/', then we are an absolute path,
+ * using the host machine, otherwise we are not.
+ */
+ return (path[0] == '/');
+}
diff --git a/storage/bdb/os_vxworks/os_vx_config.c b/storage/bdb/os_vxworks/os_vx_config.c
new file mode 100644
index 00000000000..810983b38ff
--- /dev/null
+++ b/storage/bdb/os_vxworks/os_vx_config.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_vx_config.c,v 1.4 2002/01/11 15:53:03 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ *
+ * PUBLIC: int __os_fs_notzero __P((void));
+ */
/*
 * __os_fs_notzero --
 *	Return 1 if allocated filesystem blocks are not zeroed.
 */
int
__os_fs_notzero()
{
	/*
	 * Several VxWorks filesystem drivers hand back random garbage,
	 * not zeroes, for pages that were never explicitly written, and
	 * that breaks Berkeley DB -- so always report "not zeroed".
	 */
	return (1);
}
diff --git a/storage/bdb/os_vxworks/os_vx_map.c b/storage/bdb/os_vxworks/os_vx_map.c
new file mode 100644
index 00000000000..8ad4f0765ce
--- /dev/null
+++ b/storage/bdb/os_vxworks/os_vx_map.c
@@ -0,0 +1,441 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * This code is derived from software contributed to Sleepycat Software by
+ * Frederick G.M. Roeber of Netscape Communications Corp.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_vx_map.c,v 1.21 2002/03/06 19:36:58 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * DB uses memory-mapped files for two things:
+ * faster access of read-only databases, and
+ * shared memory for process synchronization and locking.
+ * The code carefully does not mix the two uses. The first-case uses are
+ * actually written such that memory-mapping isn't really required -- it's
+ * merely a convenience -- so we don't have to worry much about it. In the
+ * second case, it's solely used as a shared memory mechanism, so that's
+ * all we have to replace.
+ *
+ * All memory in VxWorks is shared, and a task can allocate memory and keep
+ * notes. So I merely have to allocate memory, remember the "filename" for
+ * that memory, and issue small-integer segment IDs which index the list of
+ * these shared-memory segments. Subsequent opens are checked against the
+ * list of already open segments.
+ */
+typedef struct {
+ void *segment; /* Segment address. */
+ u_int32_t size; /* Segment size. */
+ char *name; /* Segment name. */
+ long segid; /* Segment ID. */
+} os_segdata_t;
+
+static os_segdata_t *__os_segdata; /* Segment table. */
+static int __os_segdata_size; /* Segment table size. */
+
+#define OS_SEGDATA_STARTING_SIZE 16
+#define OS_SEGDATA_INCREMENT 16
+
+static int __os_segdata_allocate
+ __P((DB_ENV *, const char *, REGINFO *, REGION *));
+static int __os_segdata_find_byname
+ __P((DB_ENV *, const char *, REGINFO *, REGION *));
+static int __os_segdata_init __P((DB_ENV *));
+static int __os_segdata_new __P((DB_ENV *, int *));
+static int __os_segdata_release __P((DB_ENV *, REGION *, int));
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region.
+ *
+ * PUBLIC: int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+ */
int
__os_r_sysattach(dbenv, infop, rp)
	DB_ENV *dbenv;
	REGINFO *infop;
	REGION *rp;
{
	int ret;

	/* Lazily create the process-wide segment table on first use. */
	if (__os_segdata == NULL)
		__os_segdata_init(dbenv);

	DB_BEGIN_SINGLE_THREAD;

	/* Try to find an already existing segment. */
	ret = __os_segdata_find_byname(dbenv, infop->name, infop, rp);

	/*
	 * If we are trying to join a region, it is easy, either we
	 * found it and we return, or we didn't find it and we return
	 * an error that it doesn't exist.
	 */
	if (!F_ISSET(infop, REGION_CREATE)) {
		if (ret != 0) {
			__db_err(dbenv, "segment %s does not exist",
			    infop->name);
			ret = EAGAIN;
		}
		goto out;
	}

	/*
	 * If we get here, we are trying to create the region.
	 * There are several things to consider:
	 * - if we have an error (not a found or not-found value), return.
	 * - they better have shm_key set.
	 * - if the region is already there (ret == 0 from above),
	 *   assume the application crashed and we're restarting.
	 *   Delete the old region.
	 * - try to create the region.
	 */
	if (ret != 0 && ret != ENOENT)
		goto out;

	if (dbenv->shm_key == INVALID_REGION_SEGID) {
		__db_err(dbenv, "no base shared memory ID specified");
		ret = EAGAIN;
		goto out;
	}
	/* Stale region from a crashed application: remove it first. */
	if (ret == 0 && __os_segdata_release(dbenv, rp, 1) != 0) {
		__db_err(dbenv,
		    "key: %ld: shared memory region already exists",
		    dbenv->shm_key + (infop->id - 1));
		ret = EAGAIN;
		goto out;
	}

	ret = __os_segdata_allocate(dbenv, infop->name, infop, rp);
out:
	DB_END_SINGLE_THREAD;
	return (ret);
}
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared region.
+ *
+ * PUBLIC: int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ /*
+ * If just detaching, there is no mapping to discard.
+ * If destroying, remove the region.
+ */
+ if (destroy)
+ return (__os_segdata_release(dbenv, infop->rp, 0));
+ return (0);
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file.
+ *
+ * PUBLIC: int __os_mapfile __P((DB_ENV *,
+ * PUBLIC: char *, DB_FH *, size_t, int, void **));
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addrp;
+{
+ /* We cannot map in regular files in VxWorks. */
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(path, NULL);
+ COMPQUIET(fhp, NULL);
+ COMPQUIET(is_rdonly, 0);
+ COMPQUIET(len, 0);
+ COMPQUIET(addrp, NULL);
+ return (EINVAL);
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file.
+ *
+ * PUBLIC: int __os_unmapfile __P((DB_ENV *, void *, size_t));
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* We cannot map in regular files in VxWorks. */
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(addr, NULL);
+ COMPQUIET(len, 0);
+ return (EINVAL);
+}
+
+/*
+ * __os_segdata_init --
+ * Initializes the library's table of shared memory segments.
+ * Called once on the first time through __os_segdata_new().
+ */
static int
__os_segdata_init(dbenv)
	DB_ENV *dbenv;
{
	int ret;

	/* It is an error to initialize the table twice. */
	if (__os_segdata != NULL) {
		__db_err(dbenv, "shared memory segment already exists");
		return (EEXIST);
	}

	/*
	 * The lock init call returns a locked lock.
	 */
	DB_BEGIN_SINGLE_THREAD;
	/* Allocate the initial, zeroed table of segment descriptors. */
	__os_segdata_size = OS_SEGDATA_STARTING_SIZE;
	ret = __os_calloc(dbenv,
	    __os_segdata_size, sizeof(os_segdata_t), &__os_segdata);
	DB_END_SINGLE_THREAD;
	return (ret);
}
+
+/*
+ * __os_segdata_destroy --
+ * Destroys the library's table of shared memory segments. It also
+ * frees all linked data: the segments themselves, and their names.
+ * Currently not called. This function should be called if the
+ * user creates a function to unload or shutdown.
+ *
+ * PUBLIC: int __os_segdata_destroy __P((DB_ENV *));
+ */
int
__os_segdata_destroy(dbenv)
	DB_ENV *dbenv;
{
	os_segdata_t *p;
	int i;

	/* Nothing to do if the table was never created. */
	if (__os_segdata == NULL)
		return (0);

	DB_BEGIN_SINGLE_THREAD;
	/* Free every segment's name and memory, clearing each entry. */
	for (i = 0; i < __os_segdata_size; i++) {
		p = &__os_segdata[i];
		if (p->name != NULL) {
			__os_free(dbenv, p->name);
			p->name = NULL;
		}
		if (p->segment != NULL) {
			__os_free(dbenv, p->segment);
			p->segment = NULL;
		}
		p->size = 0;
	}

	/* Release the table itself and reset the globals. */
	__os_free(dbenv, __os_segdata);
	__os_segdata = NULL;
	__os_segdata_size = 0;
	DB_END_SINGLE_THREAD;

	return (0);
}
+
+/*
+ * __os_segdata_allocate --
+ * Creates a new segment of the specified size, optionally with the
+ * specified name.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ */
static int
__os_segdata_allocate(dbenv, name, infop, rp)
	DB_ENV *dbenv;
	const char *name;
	REGINFO *infop;
	REGION *rp;
{
	os_segdata_t *p;
	int id, ret;

	/* Grab a free slot in the segment table. */
	if ((ret = __os_segdata_new(dbenv, &id)) != 0)
		return (ret);

	p = &__os_segdata[id];
	/* Allocate the zero-filled segment memory itself. */
	if ((ret = __os_calloc(dbenv, 1, rp->size, &p->segment)) != 0)
		return (ret);
	/* Record the name; undo the segment allocation on failure. */
	if ((ret = __os_strdup(dbenv, name, &p->name)) != 0) {
		__os_free(dbenv, p->segment);
		p->segment = NULL;
		return (ret);
	}
	p->size = rp->size;
	p->segid = dbenv->shm_key + infop->id - 1;

	/* Hand the caller the segment address and the table index. */
	infop->addr = p->segment;
	rp->segid = id;

	return (0);
}
+
+/*
+ * __os_segdata_new --
+ *	Finds a new segdata slot.  Does not initialise it, so the slot id
+ *	returned is only valid until you call this again.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ */
static int
__os_segdata_new(dbenv, segidp)
	DB_ENV *dbenv;
	int *segidp;
{
	os_segdata_t *p;
	int i, newsize, ret;

	if (__os_segdata == NULL) {
		__db_err(dbenv, "shared memory segment not initialized");
		return (EAGAIN);
	}

	/* Reuse the first free (segment == NULL) slot, if any. */
	for (i = 0; i < __os_segdata_size; i++) {
		p = &__os_segdata[i];
		if (p->segment == NULL) {
			*segidp = i;
			return (0);
		}
	}

	/*
	 * No more free slots, expand.
	 */
	newsize = __os_segdata_size + OS_SEGDATA_INCREMENT;
	if ((ret = __os_realloc(dbenv, newsize * sizeof(os_segdata_t),
	    &__os_segdata)) != 0)
		return (ret);
	/* Zero only the newly-added entries; the old ones are live. */
	memset(&__os_segdata[__os_segdata_size],
	    0, OS_SEGDATA_INCREMENT * sizeof(os_segdata_t));

	*segidp = __os_segdata_size;
	__os_segdata_size = newsize;

	return (0);
}
+
+/*
+ * __os_segdata_find_byname --
+ * Finds a segment by its name and shm_key.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ *
+ * Static helper; deliberately carries no PUBLIC: lines, since those
+ * generate extern prototypes and this function is file-local.
+ */
static int
__os_segdata_find_byname(dbenv, name, infop, rp)
	DB_ENV *dbenv;
	const char *name;
	REGINFO *infop;
	REGION *rp;
{
	os_segdata_t *p;
	long segid;
	int i;

	if (__os_segdata == NULL) {
		__db_err(dbenv, "shared memory segment not initialized");
		return (EAGAIN);
	}

	if (name == NULL) {
		__db_err(dbenv, "no segment name given");
		return (EAGAIN);
	}

	/*
	 * If we are creating the region, compute the segid.
	 * If we are joining the region, we use the segid in the
	 * index we are given.
	 */
	if (F_ISSET(infop, REGION_CREATE))
		segid = dbenv->shm_key + (infop->id - 1);
	else {
		if (rp->segid >= __os_segdata_size ||
		    rp->segid == INVALID_REGION_SEGID) {
			__db_err(dbenv, "Invalid segment id given");
			return (EAGAIN);
		}
		segid = __os_segdata[rp->segid].segid;
	}
	/* A match requires both the name and the segment ID to agree. */
	for (i = 0; i < __os_segdata_size; i++) {
		p = &__os_segdata[i];
		if (p->name != NULL && strcmp(name, p->name) == 0 &&
		    p->segid == segid) {
			infop->addr = p->segment;
			rp->segid = i;
			return (0);
		}
	}
	return (ENOENT);
}
+
+/*
+ * __os_segdata_release --
+ * Free a segdata entry.
+ */
static int
__os_segdata_release(dbenv, rp, is_locked)
	DB_ENV *dbenv;
	REGION *rp;
	int is_locked;
{
	os_segdata_t *p;

	if (__os_segdata == NULL) {
		__db_err(dbenv, "shared memory segment not initialized");
		return (EAGAIN);
	}

	if (rp->segid < 0 || rp->segid >= __os_segdata_size) {
		__db_err(dbenv, "segment id %ld out of range", rp->segid);
		return (EINVAL);
	}

	/* Take the table lock only if the caller doesn't already hold it. */
	if (is_locked == 0)
		DB_BEGIN_SINGLE_THREAD;
	/* Free the entry's name and memory; a NULL segment marks it free. */
	p = &__os_segdata[rp->segid];
	if (p->name != NULL) {
		__os_free(dbenv, p->name);
		p->name = NULL;
	}
	if (p->segment != NULL) {
		__os_free(dbenv, p->segment);
		p->segment = NULL;
	}
	p->size = 0;
	if (is_locked == 0)
		DB_END_SINGLE_THREAD;

	/* Any shrink-table logic could go here */

	return (0);
}
diff --git a/storage/bdb/os_win32/os_abs.c b/storage/bdb/os_win32/os_abs.c
new file mode 100644
index 00000000000..c8bead83ec3
--- /dev/null
+++ b/storage/bdb/os_win32/os_abs.c
@@ -0,0 +1,33 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_abs.c,v 11.5 2002/01/11 15:53:05 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ */
/*
 * __os_abspath --
 *	Return if a path is an absolute path (Win32 flavor).
 */
int
__os_abspath(path)
	const char *path;
{
	/*
	 * !!!
	 * Check for drive specifications, e.g., "C:".  In addition, the path
	 * separator used by the win32 DB (PATH_SEPARATOR) is \; look for both
	 * / and \ since these are user-input paths.
	 *
	 * Bug fix: cast to unsigned char before calling isalpha() -- passing
	 * a plain (possibly negative) char is undefined behavior per the C
	 * standard (CERT STR37-C).
	 */
	if (isalpha((unsigned char)path[0]) && path[1] == ':')
		path += 2;
	return (path[0] == '/' || path[0] == '\\');
}
diff --git a/storage/bdb/os_win32/os_clock.c b/storage/bdb/os_win32/os_clock.c
new file mode 100644
index 00000000000..1bf154f9da9
--- /dev/null
+++ b/storage/bdb/os_win32/os_clock.c
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_clock.c,v 1.7 2002/07/12 18:56:53 bostic Exp $";
+#endif /* not lint */
+
+#include <sys/types.h>
+#include <sys/timeb.h>
+#include <string.h>
+
+#include "db_int.h"
+
+/*
+ * __os_clock --
+ * Return the current time-of-day clock in seconds and microseconds.
+ */
int
__os_clock(dbenv, secsp, usecsp)
	DB_ENV *dbenv;
	u_int32_t *secsp, *usecsp;	/* Seconds and microseconds. */
{
	struct _timeb now;

	/*
	 * _ftime() has millisecond resolution; scale milliseconds up to
	 * microseconds for the caller.  Either output pointer may be NULL
	 * if the caller doesn't want that component.
	 */
	_ftime(&now);
	if (secsp != NULL)
		*secsp = (u_int32_t)now.time;
	if (usecsp != NULL)
		*usecsp = now.millitm * 1000;
	return (0);
}
diff --git a/storage/bdb/os_win32/os_config.c b/storage/bdb/os_win32/os_config.c
new file mode 100644
index 00000000000..a2c220daf1a
--- /dev/null
+++ b/storage/bdb/os_win32/os_config.c
@@ -0,0 +1,29 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_config.c,v 11.13 2002/01/11 15:53:06 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ */
/*
 * __os_fs_notzero --
 *	Return 1 if allocated filesystem blocks are not zeroed.
 */
int
__os_fs_notzero()
{
	/*
	 * NT-based Windows zero-fills pages that were never explicitly
	 * written to the file; Windows 95/98 hands back random garbage,
	 * which breaks Berkeley DB.  Report "not zeroed" only on 9x.
	 */
	if (__os_is_winnt())
		return (0);
	return (1);
}
diff --git a/storage/bdb/os_win32/os_dir.c b/storage/bdb/os_win32/os_dir.c
new file mode 100644
index 00000000000..3f47c4960b0
--- /dev/null
+++ b/storage/bdb/os_win32/os_dir.c
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_dir.c,v 11.12 2002/07/12 18:56:54 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_dirlist --
+ * Return a list of the files in a directory.
+ */
+int
+__os_dirlist(dbenv, dir, namesp, cntp)
+ DB_ENV *dbenv;
+ const char *dir;
+ char ***namesp;
+ int *cntp;
+{
+ struct _finddata_t fdata;
+#ifdef _WIN64
+ intptr_t dirhandle;
+#else
+ long dirhandle;
+#endif
+ int arraysz, cnt, finished, ret;
+ char **names, filespec[MAXPATHLEN];
+
+ if (DB_GLOBAL(j_dirlist) != NULL)
+ return (DB_GLOBAL(j_dirlist)(dir, namesp, cntp));
+
+ (void)snprintf(filespec, sizeof(filespec), "%s/*", dir);
+ if ((dirhandle = _findfirst(filespec, &fdata)) == -1)
+ return (__os_get_errno());
+
+ names = NULL;
+ finished = 0;
+ for (arraysz = cnt = 0; finished != 1; ++cnt) {
+ if (cnt >= arraysz) {
+ arraysz += 100;
+ if ((ret = __os_realloc(dbenv,
+ arraysz * sizeof(names[0]), &names)) != 0)
+ goto nomem;
+ }
+ if ((ret = __os_strdup(dbenv, fdata.name, &names[cnt])) != 0)
+ goto nomem;
+ if (_findnext(dirhandle, &fdata) != 0)
+ finished = 1;
+ }
+ _findclose(dirhandle);
+
+ *namesp = names;
+ *cntp = cnt;
+ return (0);
+
+nomem: if (names != NULL)
+ __os_dirfree(dbenv, names, cnt);
+ return (ret);
+}
+
+/*
+ * __os_dirfree --
+ * Free the list of files.
+ */
+void
+__os_dirfree(dbenv, names, cnt)
+ DB_ENV *dbenv;
+ char **names;
+ int cnt;
+{
+ if (DB_GLOBAL(j_dirfree) != NULL) {
+ DB_GLOBAL(j_dirfree)(names, cnt);
+ return;
+ }
+
+ while (cnt > 0)
+ __os_free(dbenv, names[--cnt]);
+ __os_free(dbenv, names);
+}
diff --git a/storage/bdb/os_win32/os_errno.c b/storage/bdb/os_win32/os_errno.c
new file mode 100644
index 00000000000..d6fac82e6f3
--- /dev/null
+++ b/storage/bdb/os_win32/os_errno.c
@@ -0,0 +1,145 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_errno.c,v 11.10 2002/07/12 04:05:00 mjc Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_get_errno --
+ * Return the value of errno.
+ */
/*
 * __os_get_errno --
 *	Return the value of errno.
 */
int
__os_get_errno()
{
	int evalue;

	/*
	 * Callers rely on getting the same value back on repeated calls,
	 * which reading errno directly satisfies.
	 */
	evalue = errno;
	return (evalue);
}
+
+/*
+ * __os_set_errno --
+ * Set the value of errno.
+ */
/*
 * __os_set_errno --
 *	Set the value of errno.
 */
void
__os_set_errno(evalue)
	int evalue;
{
	/* Store the caller's value into the global errno. */
	errno = evalue;
}
+
+/*
+ * __os_win32_errno --
+ * Return the last Windows error as an errno.
+ * We give generic error returns:
+ *
+ * EFAULT means Win* call failed,
+ * and GetLastError provided no extra info.
+ *
+ * EIO means error on Win* call.
+ * and we were unable to provide a meaningful errno for this Windows
+ * error. More information is only available by setting a breakpoint
+ * here.
+ *
+ * PUBLIC: #if defined(DB_WIN32)
+ * PUBLIC: int __os_win32_errno __P((void));
+ * PUBLIC: #endif
+ */
+int
+__os_win32_errno(void)
+{
+ DWORD last_error;
+ int ret;
+
+ /* Ignore errno - we used to check it here. */
+
+ last_error = GetLastError();
+
+ /*
+ * Take our best guess at translating some of the Windows error
+ * codes. We really care about only a few of these.
+ */
+ switch (last_error) {
+ case ERROR_FILE_NOT_FOUND:
+ case ERROR_INVALID_DRIVE:
+ case ERROR_PATH_NOT_FOUND:
+ ret = ENOENT;
+ break;
+
+ case ERROR_NO_MORE_FILES:
+ case ERROR_TOO_MANY_OPEN_FILES:
+ ret = EMFILE;
+ break;
+
+ case ERROR_ACCESS_DENIED:
+ ret = EPERM;
+ break;
+
+ case ERROR_INVALID_HANDLE:
+ ret = EBADF;
+ break;
+
+ case ERROR_NOT_ENOUGH_MEMORY:
+ ret = ENOMEM;
+ break;
+
+ case ERROR_DISK_FULL:
+ ret = ENOSPC;
+
+ case ERROR_ARENA_TRASHED:
+ case ERROR_BAD_COMMAND:
+ case ERROR_BAD_ENVIRONMENT:
+ case ERROR_BAD_FORMAT:
+ case ERROR_GEN_FAILURE:
+ case ERROR_INVALID_ACCESS:
+ case ERROR_INVALID_BLOCK:
+ case ERROR_INVALID_DATA:
+ case ERROR_READ_FAULT:
+ case ERROR_WRITE_FAULT:
+ ret = EFAULT;
+ break;
+
+ case ERROR_FILE_EXISTS:
+ case ERROR_ALREADY_EXISTS:
+ ret = EEXIST;
+ break;
+
+ case ERROR_NOT_SAME_DEVICE:
+ ret = EXDEV;
+ break;
+
+ case ERROR_WRITE_PROTECT:
+ ret = EACCES;
+ break;
+
+ case ERROR_NOT_READY:
+ ret = EBUSY;
+ break;
+
+ case ERROR_LOCK_VIOLATION:
+ case ERROR_SHARING_VIOLATION:
+ ret = EBUSY;
+ break;
+
+ case ERROR_RETRY:
+ ret = EINTR;
+ break;
+
+ case 0:
+ ret = EFAULT;
+ break;
+
+ default:
+ ret = EIO; /* Generic error. */
+ break;
+ }
+
+ return (ret);
+}
diff --git a/storage/bdb/os_win32/os_fid.c b/storage/bdb/os_win32/os_fid.c
new file mode 100644
index 00000000000..1190ad26e81
--- /dev/null
+++ b/storage/bdb/os_win32/os_fid.c
@@ -0,0 +1,143 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_fid.c,v 11.15 2002/08/26 14:37:39 margo Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+#define SERIAL_INIT 0
+static u_int32_t fid_serial = SERIAL_INIT;
+
+/*
+ * __os_fileid --
+ * Return a unique identifier for a file.
+ */
int
__os_fileid(dbenv, fname, unique_okay, fidp)
	DB_ENV *dbenv;
	const char *fname;
	int unique_okay;
	u_int8_t *fidp;
{
	size_t i;
	u_int32_t tmp;
	u_int8_t *p;
	int ret;

	/*
	 * The documentation for GetFileInformationByHandle() states that the
	 * inode-type numbers are not constant between processes.  Actually,
	 * they are, they're the NTFS MFT indexes.  So, this works on NTFS,
	 * but perhaps not on other platforms, and perhaps not over a network.
	 * Can't think of a better solution right now.
	 */
	DB_FH fh;
	BY_HANDLE_FILE_INFORMATION fi;
	BOOL retval = FALSE;

	DB_ASSERT(fname != NULL);

	/* Clear the buffer. */
	memset(fidp, 0, DB_FILE_ID_LEN);

	/*
	 * Initialize/increment the serial number we use to help avoid
	 * fileid collisions.  Note that we don't bother with locking;
	 * it's unpleasant to do from down in here, and if we race on
	 * this no real harm will be done, since the finished fileid
	 * has so many other components.
	 *
	 * We increment by 100000 on each call as a simple way of
	 * randomizing; simply incrementing seems potentially less useful
	 * if pids are also simply incremented, since this is process-local
	 * and we may be one of a set of processes starting up.  100000
	 * pushes us out of pid space on most platforms, and has few
	 * interesting properties in base 2.
	 */
	if (fid_serial == SERIAL_INIT)
		__os_id(&fid_serial);
	else
		fid_serial += 100000;

	/*
	 * First we open the file, because we're not given a handle to it.
	 * If we can't open it, we're in trouble.
	 */
	if ((ret = __os_open(dbenv, fname, DB_OSO_RDONLY, _S_IREAD, &fh)) != 0)
		return (ret);

	/* File open, get its info; the handle is closed either way. */
	if ((retval = GetFileInformationByHandle(fh.handle, &fi)) == FALSE)
		ret = __os_win32_errno();
	__os_closehandle(dbenv, &fh);

	if (retval == FALSE)
		return (ret);

	/*
	 * We want the three 32-bit words which tell us the volume ID and
	 * the file ID.  We make a crude attempt to copy the bytes over to
	 * the callers buffer.
	 *
	 * We don't worry about byte sexing or the actual variable sizes.
	 *
	 * When this routine is called from the DB access methods, it's only
	 * called once -- whatever ID is generated when a database is created
	 * is stored in the database file's metadata, and that is what is
	 * saved in the mpool region's information to uniquely identify the
	 * file.
	 *
	 * When called from the mpool layer this routine will be called each
	 * time a new thread of control wants to share the file, which makes
	 * things tougher.  As far as byte sexing goes, since the mpool region
	 * lives on a single host, there's no issue of that -- the entire
	 * region is byte sex dependent.  As far as variable sizes go, we make
	 * the simplifying assumption that 32-bit and 64-bit processes will
	 * get the same 32-bit values if we truncate any returned 64-bit value
	 * to a 32-bit value.
	 */
	tmp = (u_int32_t)fi.nFileIndexLow;
	for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
		*fidp++ = *p++;
	tmp = (u_int32_t)fi.nFileIndexHigh;
	for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
		*fidp++ = *p++;

	if (unique_okay) {
		/*
		 * Use the system time to try to get a unique value
		 * within this process.  A millisecond counter
		 * overflows 32 bits in about 49 days.  So we use 8
		 * bytes, and don't bother with the volume ID, which
		 * is not very useful for our purposes.
		 */
		SYSTEMTIME st;

		GetSystemTime(&st);
		tmp = (st.wYear - 1900) * 12 + (st.wMonth - 1);
		for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
			*fidp++ = *p++;
		tmp = ((((st.wDay - 1) * 24 + st.wHour) * 60 +
		    st.wMinute) * 60 + st.wSecond) * 1000 +
		    st.wMilliseconds;
		for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
			*fidp++ = *p++;
		for (p = (u_int8_t *)&fid_serial, i = sizeof(u_int32_t);
		    i > 0; --i)
			*fidp++ = *p++;
	} else {
		/* Without uniqueness, include the volume serial number. */
		tmp = (u_int32_t)fi.dwVolumeSerialNumber;
		for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
			*fidp++ = *p++;
	}

	return (0);
}
diff --git a/storage/bdb/os_win32/os_fsync.c b/storage/bdb/os_win32/os_fsync.c
new file mode 100644
index 00000000000..6fd3e1dcdf4
--- /dev/null
+++ b/storage/bdb/os_win32/os_fsync.c
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_fsync.c,v 11.15 2002/07/12 18:56:54 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h> /* XXX: Required by __hp3000s900 */
+#include <unistd.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_fsync --
+ * Flush a file descriptor.
+ *
+ * PUBLIC: int __os_fsync __P((DB_ENV *, DB_FH *));
+ */
int
__os_fsync(dbenv, fhp)
	DB_ENV *dbenv;
	DB_FH *fhp;
{
	BOOL success;
	int ret;

	/*
	 * Do nothing if the file descriptor has been marked as not requiring
	 * any sync to disk.
	 */
	if (F_ISSET(fhp, DB_FH_NOSYNC))
		return (0);

	/* Use the application's fsync hook if set; retry on EINTR. */
	ret = 0;
	do {
		if (DB_GLOBAL(j_fsync) != NULL)
			success = (DB_GLOBAL(j_fsync)(fhp->fd) == 0);
		else {
			success = FlushFileBuffers(fhp->handle);
			if (!success)
				__os_set_errno(__os_win32_errno());
		}
	} while (!success && (ret = __os_get_errno()) == EINTR);

	if (ret != 0)
		__db_err(dbenv, "fsync %s", strerror(ret));
	return (ret);
}
diff --git a/storage/bdb/os_win32/os_handle.c b/storage/bdb/os_win32/os_handle.c
new file mode 100644
index 00000000000..7db9c3da977
--- /dev/null
+++ b/storage/bdb/os_win32/os_handle.c
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_handle.c,v 11.30 2002/07/12 18:56:54 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_openhandle --
+ * Open a file, using POSIX 1003.1 open flags.
+ *
+ * PUBLIC: int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+ */
+int
+__os_openhandle(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ int flags, mode;
+ DB_FH *fhp;
+{
+ int ret, nrepeat;
+
+ memset(fhp, 0, sizeof(*fhp));
+ fhp->handle = INVALID_HANDLE_VALUE;
+
+ /* If the application specified an interface, use it. */
+ if (DB_GLOBAL(j_open) != NULL) {
+ if ((fhp->fd = DB_GLOBAL(j_open)(name, flags, mode)) == -1)
+ return (__os_get_errno());
+ F_SET(fhp, DB_FH_VALID);
+ return (0);
+ }
+
+ for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
+ ret = 0;
+ fhp->fd = open(name, flags, mode);
+
+ if (fhp->fd == -1) {
+ /*
+ * If it's a "temporary" error, we retry up to 3 times,
+ * waiting up to 12 seconds. While it's not a problem
+ * if we can't open a database, an inability to open a
+ * log file is cause for serious dismay.
+ */
+ ret = __os_get_errno();
+ if (ret == ENFILE || ret == EMFILE || ret == ENOSPC) {
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ continue;
+ }
+
+ /*
+ * If it was an EINTR it's reasonable to retry
+ * immediately, and arbitrarily often.
+ */
+ if (ret == EINTR) {
+ --nrepeat;
+ continue;
+ }
+ } else {
+ F_SET(fhp, DB_FH_VALID);
+ }
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * __os_closehandle --
+ * Close a file.
+ *
+ * PUBLIC: int __os_closehandle __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_closehandle(dbenv, fhp)
+	DB_ENV *dbenv;
+	DB_FH *fhp;
+{
+	BOOL success;
+	int ret;
+
+	COMPQUIET(dbenv, NULL);
+	/* Don't close file descriptors that were never opened. */
+	DB_ASSERT(F_ISSET(fhp, DB_FH_VALID) &&
+	    ((fhp->fd != -1) || (fhp->handle != INVALID_HANDLE_VALUE)));
+
+
+	do {
+		ret = 0;	/* Reset: a retry that succeeds must return 0. */
+		if (DB_GLOBAL(j_close) != NULL)
+			success = (DB_GLOBAL(j_close)(fhp->fd) == 0);
+		else if (fhp->handle != INVALID_HANDLE_VALUE) {
+			success = CloseHandle(fhp->handle);
+			if (!success)
+				__os_set_errno(__os_win32_errno());
+		}
+		else
+			success = (close(fhp->fd) == 0);
+	} while (!success && (ret = __os_get_errno()) == EINTR);
+
+	/*
+	 * Smash the POSIX file descriptor -- it's never tested, but we want
+	 * to catch any mistakes.
+	 */
+	fhp->fd = -1;
+	fhp->handle = INVALID_HANDLE_VALUE;
+	F_CLR(fhp, DB_FH_VALID);
+
+	return (ret);
+}
diff --git a/storage/bdb/os_win32/os_map.c b/storage/bdb/os_win32/os_map.c
new file mode 100644
index 00000000000..1f16c9fead4
--- /dev/null
+++ b/storage/bdb/os_win32/os_map.c
@@ -0,0 +1,338 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_map.c,v 11.38 2002/09/10 02:35:48 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+static int __os_map
+ __P((DB_ENV *, char *, REGINFO *, DB_FH *, size_t, int, int, int, void **));
+static int __os_unique_name __P((char *, HANDLE, char *, size_t));
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region.
+ */
+int
+__os_r_sysattach(dbenv, infop, rp)
+	DB_ENV *dbenv;
+	REGINFO *infop;
+	REGION *rp;
+{
+	DB_FH fh;
+	int is_system, ret;
+
+	/*
+	 * Try to open/create the file. We DO NOT need to ensure that multiple
+	 * threads/processes attempting to simultaneously create the region are
+	 * properly ordered, our caller has already taken care of that.
+	 */
+	if ((ret = __os_open(dbenv, infop->name,
+	    DB_OSO_DIRECT |
+	    (F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE : 0),
+	    infop->mode, &fh)) != 0) {
+		__db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
+		return (ret);
+	}
+
+	/*
+	 * On Windows/9X, files that are opened by multiple processes do not
+	 * share data correctly. For this reason, the DB_SYSTEM_MEM flag is
+	 * implied for any application that does not specify the DB_PRIVATE
+	 * flag.
+	 */
+	is_system = F_ISSET(dbenv, DB_ENV_SYSTEM_MEM) ||
+	    (!F_ISSET(dbenv, DB_ENV_PRIVATE) && __os_is_winnt() == 0);
+
+	/*
+	 * Map the file in. If we're creating an in-system-memory region,
+	 * specify a segment ID (which is never used again) so that the
+	 * calling code writes out the REGENV_REF structure to the primary
+	 * environment file.
+	 */
+	ret = __os_map(dbenv, infop->name, infop, &fh, rp->size,
+	   1, is_system, 0, &infop->addr);
+	if (ret == 0 && is_system == 1)
+		rp->segid = 1;
+
+	(void)__os_closehandle(dbenv, &fh);
+
+	return (ret);
+}
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared memory region.
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ int ret, t_ret;
+
+ if (infop->wnt_handle != NULL) {
+ (void)CloseHandle(*((HANDLE*)(infop->wnt_handle)));
+ __os_free(dbenv, infop->wnt_handle);
+ }
+
+ ret = !UnmapViewOfFile(infop->addr) ? __os_win32_errno() : 0;
+ if (ret != 0)
+ __db_err(dbenv, "UnmapViewOfFile: %s", strerror(ret));
+
+ if (!F_ISSET(dbenv, DB_ENV_SYSTEM_MEM) && destroy) {
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, infop->name);
+ if ((t_ret = __os_unlink(dbenv, infop->name)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ return (ret);
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file.
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addr)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addr;
+{
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_map) != NULL)
+ return (DB_GLOBAL(j_map)(path, len, 0, is_rdonly, addr));
+
+ return (__os_map(dbenv, path, NULL, fhp, len, 0, 0, is_rdonly, addr));
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file.
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* If the user replaced the map call, call through their interface. */
+ if (DB_GLOBAL(j_unmap) != NULL)
+ return (DB_GLOBAL(j_unmap)(addr, len));
+
+ return (!UnmapViewOfFile(addr) ? __os_win32_errno() : 0);
+}
+
+/*
+ * __os_unique_name --
+ * Create a unique identifying name from a pathname (may be absolute or
+ * relative) and/or a file descriptor.
+ *
+ * The name returned must be unique (different files map to different
+ * names), and repeatable (same files, map to same names). It's not
+ * so easy to do by name. Should handle not only:
+ *
+ * foo.bar == ./foo.bar == c:/whatever_path/foo.bar
+ *
+ * but also understand that:
+ *
+ * foo.bar == Foo.Bar (FAT file system)
+ * foo.bar != Foo.Bar (NTFS)
+ *
+ * The best solution is to use the file index, found in the file
+ * information structure (similar to UNIX inode #).
+ *
+ * When a file is deleted, its file index may be reused,
+ * but if the unique name has not gone from its namespace,
+ * we may get a conflict. So to ensure some tie in to the
+ * original pathname, we also use the creation time and the
+ * file basename. This is not a perfect system, but it
+ * should work for all but anomalous test cases.
+ *
+ */
+static int
+__os_unique_name(orig_path, hfile, result_path, result_path_len)
+	char *orig_path, *result_path;
+	HANDLE hfile;
+	size_t result_path_len;
+{
+	BY_HANDLE_FILE_INFORMATION fileinfo;
+	char *basename, *p;
+
+	/*
+	 * In Windows, pathname components are delimited by '/' or '\', and
+	 * if neither is present, we need to strip off leading drive letter
+	 * (e.g. c:foo.txt).
+	 */
+	basename = strrchr(orig_path, '/');
+	p = strrchr(orig_path, '\\');
+	if (basename == NULL || (p != NULL && p > basename))
+		basename = p;
+	if (basename == NULL)
+		basename = strrchr(orig_path, ':');
+
+	if (basename == NULL)
+		basename = orig_path;
+	else
+		basename++;
+
+	if (!GetFileInformationByHandle(hfile, &fileinfo))
+		return (__os_win32_errno());
+
+	(void)snprintf(result_path, result_path_len,
+	    "__db_shmem.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%s",
+	    fileinfo.dwVolumeSerialNumber,
+	    fileinfo.nFileIndexHigh,
+	    fileinfo.nFileIndexLow,
+	    fileinfo.ftCreationTime.dwHighDateTime,
+	    fileinfo.ftCreationTime.dwLowDateTime,	/* low word, not high twice */
+	    basename);
+
+	return (0);
+}
+
+/*
+ * __os_map --
+ * The mmap(2) function for Windows.
+ */
+static int
+__os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
+	DB_ENV *dbenv;
+	REGINFO *infop;
+	char *path;
+	DB_FH *fhp;
+	int is_region, is_system, is_rdonly;
+	size_t len;
+	void **addr;
+{
+	HANDLE hMemory;
+	REGENV *renv;
+	int ret, use_pagefile;
+	char shmem_name[MAXPATHLEN];
+	void *pMemory;
+
+	ret = 0;
+	if (infop != NULL)
+		infop->wnt_handle = NULL;
+
+	use_pagefile = is_region && is_system;
+
+	/*
+	 * If creating a region in system space, get a matching name in the
+	 * paging file namespace.
+	 */
+	if (use_pagefile && (ret = __os_unique_name(
+	    path, fhp->handle, shmem_name, sizeof(shmem_name))) != 0)
+		return (ret);
+
+	/*
+	 * XXX
+	 * DB: We have not implemented copy-on-write here.
+	 *
+	 * XXX
+	 * DB: This code will fail if the library is ever compiled on a 64-bit
+	 * machine.
+	 *
+	 * XXX
+	 * If this is a region in system memory, let's try opening using the
+	 * OpenFileMapping() first. Why, oh why are we doing this?
+	 *
+	 * Well, we might be asking the OS for a handle to a pre-existing
+	 * memory section, or we might be the first to get here and want the
+	 * section created. CreateFileMapping() sounds like it will do both
+	 * jobs. But, not so. It seems to mess up making the commit charge to
+	 * the process. It thinks, incorrectly, that when we want to join a
+	 * previously existing section, that it should make a commit charge
+	 * for the whole section. In fact, there is no new committed memory
+	 * whatever. The call can fail if there is insufficient memory free
+	 * to handle the erroneous commit charge. So, we find that the bogus
+	 * commit is not made if we call OpenFileMapping(). So we do that
+	 * first, and only call CreateFileMapping() if we're really creating
+	 * the section.
+	 */
+	hMemory = NULL;
+	if (use_pagefile)
+		hMemory = OpenFileMapping(
+		    is_rdonly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS,
+		    0,
+		    shmem_name);
+
+	if (hMemory == NULL)
+		hMemory = CreateFileMapping(
+		    use_pagefile ? (HANDLE)-1 : fhp->handle,
+		    0,
+		    is_rdonly ? PAGE_READONLY : PAGE_READWRITE,
+		    0, (DWORD)len,
+		    use_pagefile ? shmem_name : NULL);
+	if (hMemory == NULL) {
+		ret = __os_win32_errno();
+		__db_err(dbenv, "CreateFileMapping: %s", strerror(ret));
+		return (ret);
+	}
+
+	pMemory = MapViewOfFile(hMemory,
+	    (is_rdonly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS), 0, 0, len);
+	if (pMemory == NULL) {
+		ret = __os_win32_errno();
+		__db_err(dbenv, "MapViewOfFile: %s", strerror(ret));
+		(void)CloseHandle(hMemory);	/* Don't leak the mapping. */
+		return (ret);
+	}
+	/*
+	 * XXX
+	 * It turns out that the kernel object underlying the named section
+	 * is reference counted, but that the call to MapViewOfFile() above
+	 * does NOT increment the reference count! So, if we close the handle
+	 * here, the kernel deletes the object from the kernel namespace.
+	 * When a second process comes along to join the region, the kernel
+	 * happily creates a new object with the same name, but completely
+	 * different identity. The two processes then have distinct isolated
+	 * mapped sections, not at all what was wanted. Not closing the handle
+	 * here fixes this problem. We carry the handle around in the region
+	 * structure so we can close it when unmap is called. Ignore malloc
+	 * errors, it just means we leak the memory.
+	 */
+	if (use_pagefile && infop != NULL) {
+		if (__os_malloc(dbenv,
+		    sizeof(HANDLE), &infop->wnt_handle) == 0)
+			memcpy(infop->wnt_handle, &hMemory, sizeof(HANDLE));
+	} else
+		CloseHandle(hMemory);
+
+	if (is_region) {
+		/*
+		 * XXX
+		 * Windows/95 zeroes anonymous memory regions at last close.
+		 * This means that the backing file can exist and reference
+		 * the region, but the region itself is no longer initialized.
+		 * If the caller is capable of creating the region, update
+		 * the REGINFO structure so that they do so.
+		 */
+		renv = (REGENV *)pMemory;
+		if (renv->magic == 0) {
+			if (F_ISSET(infop, REGION_CREATE_OK))
+				F_SET(infop, REGION_CREATE);
+			else {
+				(void)UnmapViewOfFile(pMemory);
+				pMemory = NULL;
+				ret = EAGAIN;
+			}
+		}
+	}
+
+	*addr = pMemory;
+	return (ret);
+}
diff --git a/storage/bdb/os_win32/os_open.c b/storage/bdb/os_win32/os_open.c
new file mode 100644
index 00000000000..c8bae54d585
--- /dev/null
+++ b/storage/bdb/os_win32/os_open.c
@@ -0,0 +1,217 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_open.c,v 11.21 2002/07/12 18:56:55 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_open --
+ * Open a file descriptor.
+ */
+int
+__os_open(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int32_t flags;
+ int mode;
+ DB_FH *fhp;
+{
+ DWORD bytesWritten;
+ u_int32_t log_size, pagesize, sectorsize;
+ int access, attr, oflags, share, createflag;
+ int ret, nrepeat;
+ char *drive, dbuf[4]; /* <letter><colon><slosh><nul> */
+
+#ifdef DIAGNOSTIC
+#define OKFLAGS \
+ (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_LOG | \
+ DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | DB_OSO_TEMP | \
+ DB_OSO_TRUNC)
+ if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0)
+ return (ret);
+#endif
+
+ /*
+ * The "public" interface to the __os_open routine passes around POSIX
+ * 1003.1 flags, not DB flags. If the user has defined their own open
+ * interface, use the POSIX flags.
+ */
+ if (DB_GLOBAL(j_open) != NULL) {
+ oflags = O_BINARY | O_NOINHERIT;
+
+ if (LF_ISSET(DB_OSO_CREATE))
+ oflags |= O_CREAT;
+
+ if (LF_ISSET(DB_OSO_EXCL))
+ oflags |= O_EXCL;
+
+ if (LF_ISSET(DB_OSO_RDONLY))
+ oflags |= O_RDONLY;
+ else
+ oflags |= O_RDWR;
+
+ if (LF_ISSET(DB_OSO_SEQ))
+ oflags |= _O_SEQUENTIAL;
+ else
+ oflags |= _O_RANDOM;
+
+ if (LF_ISSET(DB_OSO_TEMP))
+ oflags |= _O_TEMPORARY;
+
+ if (LF_ISSET(DB_OSO_TRUNC))
+ oflags |= O_TRUNC;
+
+ return (__os_openhandle(dbenv, name, oflags, mode, fhp));
+ }
+
+ ret = 0;
+
+ if (LF_ISSET(DB_OSO_LOG))
+ log_size = fhp->log_size; /* XXX: Gag. */
+
+ pagesize = fhp->pagesize;
+
+ memset(fhp, 0, sizeof(*fhp));
+ fhp->fd = -1;
+
+ /*
+ * Otherwise, use the Windows/32 CreateFile interface so that we can
+ * play magic games with log files to get data flush effects similar
+ * to the POSIX O_DSYNC flag.
+ *
+ * !!!
+ * We currently ignore the 'mode' argument. It would be possible
+ * to construct a set of security attributes that we could pass to
+ * CreateFile that would accurately represent the mode. In worst
+ * case, this would require looking up user and all group names and
+ * creating an entry for each. Alternatively, we could call the
+ * _chmod (partial emulation) function after file creation, although
+ * this leaves us with an obvious race. However, these efforts are
+ * largely meaningless on FAT, the most common file system, which
+ * only has a "readable" and "writeable" flag, applying to all users.
+ */
+ access = GENERIC_READ;
+ if (!LF_ISSET(DB_OSO_RDONLY))
+ access |= GENERIC_WRITE;
+
+ share = FILE_SHARE_READ | FILE_SHARE_WRITE;
+ attr = FILE_ATTRIBUTE_NORMAL;
+
+ /*
+ * Reproduce POSIX 1003.1 semantics: if O_CREATE and O_EXCL are both
+ * specified, fail, returning EEXIST, unless we create the file.
+ */
+ if (LF_ISSET(DB_OSO_CREATE) && LF_ISSET(DB_OSO_EXCL))
+ createflag = CREATE_NEW; /* create only if !exist*/
+ else if (!LF_ISSET(DB_OSO_CREATE) && LF_ISSET(DB_OSO_TRUNC))
+ createflag = TRUNCATE_EXISTING; /* truncate, fail if !exist */
+ else if (LF_ISSET(DB_OSO_TRUNC))
+ createflag = CREATE_ALWAYS; /* create and truncate */
+ else if (LF_ISSET(DB_OSO_CREATE))
+ createflag = OPEN_ALWAYS; /* open or create */
+ else
+ createflag = OPEN_EXISTING; /* open only if existing */
+
+ if (LF_ISSET(DB_OSO_LOG)) {
+ F_SET(fhp, DB_FH_NOSYNC);
+ attr |= FILE_FLAG_WRITE_THROUGH;
+ }
+
+ if (LF_ISSET(DB_OSO_SEQ))
+ attr |= FILE_FLAG_SEQUENTIAL_SCAN;
+ else
+ attr |= FILE_FLAG_RANDOM_ACCESS;
+
+ if (LF_ISSET(DB_OSO_TEMP))
+ attr |= FILE_FLAG_DELETE_ON_CLOSE;
+
+ /*
+ * We can turn filesystem buffering off if the page size is a
+ * multiple of the disk's sector size. To find the sector size,
+ * we call GetDiskFreeSpace, which expects a drive name like "d:\\"
+ * or NULL for the current disk (i.e., a relative path)
+ */
+ if (LF_ISSET(DB_OSO_DIRECT) && pagesize != 0 && name[0] != '\0') {
+ if (name[1] == ':') {
+ drive = dbuf;
+ snprintf(dbuf, sizeof(dbuf), "%c:\\", name[0]);
+ } else
+ drive = NULL;
+
+ if (GetDiskFreeSpace(drive, NULL, &sectorsize, NULL, NULL) &&
+ pagesize % sectorsize == 0)
+ attr |= FILE_FLAG_NO_BUFFERING;
+ }
+
+ for (nrepeat = 1;; ++nrepeat) {
+ fhp->handle =
+ CreateFile(name, access, share, NULL, createflag, attr, 0);
+ if (fhp->handle == INVALID_HANDLE_VALUE) {
+ /*
+ * If it's a "temporary" error, we retry up to 3 times,
+ * waiting up to 12 seconds. While it's not a problem
+ * if we can't open a database, an inability to open a
+ * log file is cause for serious dismay.
+ */
+ ret = __os_win32_errno();
+ if ((ret != ENFILE && ret != EMFILE && ret != ENOSPC) ||
+ nrepeat > 3)
+ goto err;
+
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ } else
+ break;
+ }
+
+ /*
+ * Special handling needed for log files. To get Windows to not update
+ * the MFT metadata on each write, extend the file to its maximum size.
+ * Windows will allocate all the data blocks and store them in the MFT
+ * (inode) area. In addition, flush the MFT area to disk.
+ * This strategy only works for Win/NT; Win/9X does not
+ * guarantee that the logs will be zero filled.
+ */
+ if (LF_ISSET(DB_OSO_LOG) && log_size != 0 && __os_is_winnt()) {
+ if (SetFilePointer(fhp->handle,
+ log_size - 1, NULL, FILE_BEGIN) == (DWORD)-1)
+ goto err;
+ if (WriteFile(fhp->handle, "\x00", 1, &bytesWritten, NULL) == 0)
+ goto err;
+ if (bytesWritten != 1)
+ goto err;
+ if (SetEndOfFile(fhp->handle) == 0)
+ goto err;
+ if (SetFilePointer(
+ fhp->handle, 0, NULL, FILE_BEGIN) == (DWORD)-1)
+ goto err;
+ if (FlushFileBuffers(fhp->handle) == 0)
+ goto err;
+ }
+
+ F_SET(fhp, DB_FH_VALID);
+ return (0);
+
+err: if (ret == 0)
+ ret = __os_win32_errno();
+ if (fhp->handle != INVALID_HANDLE_VALUE)
+ (void)CloseHandle(fhp->handle);
+ return (ret);
+}
diff --git a/storage/bdb/os_win32/os_rename.c b/storage/bdb/os_win32/os_rename.c
new file mode 100644
index 00000000000..67c3846649b
--- /dev/null
+++ b/storage/bdb/os_win32/os_rename.c
@@ -0,0 +1,77 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rename.c,v 1.12 2002/07/12 18:56:55 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_rename --
+ * Rename a file.
+ */
+int
+__os_rename(dbenv, oldname, newname, flags)
+ DB_ENV *dbenv;
+ const char *oldname, *newname;
+ u_int32_t flags;
+{
+ int ret;
+ char oldbuf[MAX_PATH], newbuf[MAX_PATH];
+
+ ret = 0;
+ if (DB_GLOBAL(j_rename) != NULL) {
+ if (DB_GLOBAL(j_rename)(oldname, newname) == -1)
+ ret = __os_get_errno();
+ goto done;
+ }
+
+ if (!MoveFile(oldname, newname))
+ ret = __os_win32_errno();
+
+ if (ret == EEXIST) {
+ ret = 0;
+ if (__os_is_winnt()) {
+ if (!MoveFileEx(
+ oldname, newname, MOVEFILE_REPLACE_EXISTING))
+ ret = __os_win32_errno();
+ } else {
+ /*
+ * There is no MoveFileEx for Win9x/Me, so we have to
+ * do the best we can.
+ */
+ LPTSTR FilePath;
+ if (!GetFullPathName(oldname, sizeof(oldbuf), oldbuf,
+ &FilePath) ||
+ !GetFullPathName(newname, sizeof(newbuf), newbuf,
+ &FilePath)) {
+ ret = __os_win32_errno();
+ goto done;
+ }
+
+ /*
+ * If the old and new names differ only in case, we're
+ * done.
+ */
+ if (strcasecmp(oldbuf, newbuf) == 0)
+ goto done;
+
+ (void)DeleteFile(newname);
+ if (!MoveFile(oldname, newname))
+ ret = __os_win32_errno();
+ }
+ }
+
+done: if (ret != 0 && flags == 0)
+ __db_err(dbenv,
+ "Rename %s %s: %s", oldname, newname, strerror(ret));
+
+ return (ret);
+}
diff --git a/storage/bdb/os_win32/os_rw.c b/storage/bdb/os_win32/os_rw.c
new file mode 100644
index 00000000000..63d1f715c53
--- /dev/null
+++ b/storage/bdb/os_win32/os_rw.c
@@ -0,0 +1,182 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rw.c,v 11.28 2002/08/06 04:56:19 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_io --
+ * Do an I/O.
+ *
+ * PUBLIC: int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+ */
+int
+__os_io(dbenv, db_iop, op, niop)
+ DB_ENV *dbenv;
+ DB_IO *db_iop;
+ int op;
+ size_t *niop;
+{
+ int ret;
+
+ if (__os_is_winnt()) {
+ ULONG64 off = (ULONG64)db_iop->pagesize * db_iop->pgno;
+ OVERLAPPED over;
+ DWORD nbytes;
+ over.Offset = (DWORD)(off & 0xffffffff);
+ over.OffsetHigh = (DWORD)(off >> 32);
+ over.hEvent = 0; /* we don't want asynchronous notifications */
+
+ switch (op) {
+ case DB_IO_READ:
+ if (DB_GLOBAL(j_read) != NULL)
+ goto slow;
+ if (!ReadFile(db_iop->fhp->handle,
+ db_iop->buf, (DWORD)db_iop->bytes, &nbytes, &over))
+ goto slow;
+ break;
+ case DB_IO_WRITE:
+ if (DB_GLOBAL(j_write) != NULL)
+ goto slow;
+ if (!WriteFile(db_iop->fhp->handle,
+ db_iop->buf, (DWORD)db_iop->bytes, &nbytes, &over))
+ goto slow;
+ break;
+ }
+ if (nbytes == db_iop->bytes) {
+ *niop = (size_t)nbytes;
+ return (0);
+ }
+ }
+
+slow: MUTEX_THREAD_LOCK(dbenv, db_iop->mutexp);
+
+ if ((ret = __os_seek(dbenv, db_iop->fhp,
+ db_iop->pagesize, db_iop->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ switch (op) {
+ case DB_IO_READ:
+ ret = __os_read(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ case DB_IO_WRITE:
+ ret = __os_write(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ }
+
+err: MUTEX_THREAD_UNLOCK(dbenv, db_iop->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __os_read --
+ * Read from a file handle.
+ *
+ * PUBLIC: int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_read(dbenv, fhp, addr, len, nrp)
+	DB_ENV *dbenv;
+	DB_FH *fhp;
+	void *addr;
+	size_t len;
+	size_t *nrp;
+{
+	size_t offset;
+	DWORD nr;
+	int ret;
+	BOOL success;
+	u_int8_t *taddr;
+
+	for (taddr = addr,
+	    offset = 0; offset < len; taddr += nr, offset += nr) {
+retry:		if (DB_GLOBAL(j_read) != NULL) {
+			nr = (DWORD)DB_GLOBAL(j_read)(fhp->fd,
+			    taddr, len - offset);
+			success = ((int)nr >= 0);	/* nr is unsigned; -1 means error. */
+		} else {
+			success = ReadFile(fhp->handle,
+			    taddr, (DWORD)(len - offset), &nr, NULL);
+			if (!success)
+				__os_set_errno(__os_win32_errno());
+		}
+
+		if (!success) {
+			if ((ret = __os_get_errno()) == EINTR)
+				goto retry;
+			__db_err(dbenv, "read: 0x%lx, %lu: %s",
+			    P_TO_ULONG(taddr),
+			    (u_long)(len - offset), strerror(ret));
+			return (ret);
+		}
+		if (nr == 0)
+			break;
+	}
+	*nrp = taddr - (u_int8_t *)addr;
+	return (0);
+}
+
+/*
+ * __os_write --
+ * Write to a file handle.
+ *
+ * PUBLIC: int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_write(dbenv, fhp, addr, len, nwp)
+	DB_ENV *dbenv;
+	DB_FH *fhp;
+	void *addr;
+	size_t len;
+	size_t *nwp;
+{
+	size_t offset;
+	DWORD nw;
+	int ret;
+	BOOL success;
+	u_int8_t *taddr;
+
+	for (taddr = addr,
+	    offset = 0; offset < len; taddr += nw, offset += nw) {
+retry:	if (DB_GLOBAL(j_write) != NULL) {
+			nw = (DWORD)DB_GLOBAL(j_write)(fhp->fd,
+			    taddr, len - offset);
+			success = ((int)nw >= 0);	/* nw is unsigned; -1 means error. */
+		} else {
+			success = WriteFile(fhp->handle,
+			    taddr, (DWORD)(len - offset), &nw, NULL);
+			if (!success)
+				__os_set_errno(__os_win32_errno());
+		}
+
+		if (!success) {
+			if ((ret = __os_get_errno()) == EINTR)
+				goto retry;
+			__db_err(dbenv, "write: 0x%lx, %lu: %s", P_TO_ULONG(taddr),
+			    (u_long)(len - offset), strerror(ret));
+			return (ret);
+		}
+	}
+
+	*nwp = len;
+	return (0);
+}
diff --git a/storage/bdb/os_win32/os_seek.c b/storage/bdb/os_win32/os_seek.c
new file mode 100644
index 00000000000..40140f51534
--- /dev/null
+++ b/storage/bdb/os_win32/os_seek.c
@@ -0,0 +1,88 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_seek.c,v 11.17 2002/08/06 04:56:20 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_seek --
+ * Seek to a page/byte offset in the file.
+ */
+int
+__os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t pgsize;
+ db_pgno_t pageno;
+ u_int32_t relative;
+ int isrewind;
+ DB_OS_SEEK db_whence;
+{
+ /* Yes, this really is how Microsoft have designed their API */
+ union {
+ __int64 bigint;
+ struct {
+ unsigned long low;
+ long high;
+ };
+ } offset;
+ int ret, whence;
+ DWORD from;
+
+ if (DB_GLOBAL(j_seek) != NULL) {
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case DB_OS_SEEK_END:
+ whence = SEEK_END;
+ break;
+ case DB_OS_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ ret = DB_GLOBAL(j_seek)(fhp->fd, pgsize, pageno,
+ relative, isrewind, whence);
+ } else {
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ from = FILE_CURRENT;
+ break;
+ case DB_OS_SEEK_END:
+ from = FILE_END;
+ break;
+ case DB_OS_SEEK_SET:
+ from = FILE_BEGIN;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ offset.bigint = (__int64)pgsize * pageno + relative;
+ if (isrewind)
+ offset.bigint = -offset.bigint;
+
+ ret = (SetFilePointer(fhp->handle,
+ offset.low, &offset.high, from) == (DWORD) - 1) ?
+ __os_win32_errno() : 0;
+ }
+
+ if (ret != 0)
+ __db_err(dbenv, "seek: %lu %d %d: %s",
+ (u_long)pgsize * pageno + relative,
+ isrewind, db_whence, strerror(ret));
+
+ return (ret);
+}
diff --git a/storage/bdb/os_win32/os_sleep.c b/storage/bdb/os_win32/os_sleep.c
new file mode 100644
index 00000000000..12b4a7dbc2d
--- /dev/null
+++ b/storage/bdb/os_win32/os_sleep.c
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_sleep.c,v 11.8 2002/07/12 18:56:56 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_sleep --
+ * Yield the processor for a period of time.
+ */
+int
+__os_sleep(dbenv, secs, usecs)
+ DB_ENV *dbenv;
+ u_long secs, usecs; /* Seconds and microseconds. */
+{
+ COMPQUIET(dbenv, NULL);
+
+ /* Don't require that the values be normalized. */
+ for (; usecs >= 1000000; ++secs, usecs -= 1000000)
+ ;
+
+ if (DB_GLOBAL(j_sleep) != NULL)
+ return (DB_GLOBAL(j_sleep)(secs, usecs));
+
+ /*
+ * It's important that we yield the processor here so that other
+ * processes or threads are permitted to run.
+ */
+ Sleep(secs * 1000 + usecs / 1000);
+ return (0);
+}
diff --git a/storage/bdb/os_win32/os_spin.c b/storage/bdb/os_win32/os_spin.c
new file mode 100644
index 00000000000..eb50b3b53ff
--- /dev/null
+++ b/storage/bdb/os_win32/os_spin.c
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_spin.c,v 11.11 2002/07/12 18:56:56 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_spin --
+ * Return the number of default spins before blocking.
+ */
+int
+__os_spin(dbenv)
+ DB_ENV *dbenv;
+{
+ SYSTEM_INFO SystemInfo;
+
+ /*
+ * If the application specified a value or we've already figured it
+ * out, return it.
+ */
+ if (dbenv->tas_spins != 0)
+ return (dbenv->tas_spins);
+
+ /* Get the number of processors */
+ GetSystemInfo(&SystemInfo);
+
+ /*
+ * Spin 50 times per processor -- we have anecdotal evidence that this
+ * is a reasonable value.
+ */
+ if (SystemInfo.dwNumberOfProcessors > 1)
+ dbenv->tas_spins = 50 * SystemInfo.dwNumberOfProcessors;
+ else
+ dbenv->tas_spins = 1;
+ return (dbenv->tas_spins);
+}
+
+/*
+ * __os_yield --
+ * Yield the processor.
+ */
+void
+__os_yield(dbenv, usecs)
+ DB_ENV *dbenv;
+ u_long usecs;
+{
+ if (DB_GLOBAL(j_yield) != NULL && DB_GLOBAL(j_yield)() == 0)
+ return;
+ __os_sleep(dbenv, 0, usecs);
+}
diff --git a/storage/bdb/os_win32/os_stat.c b/storage/bdb/os_win32/os_stat.c
new file mode 100644
index 00000000000..c1cba698bea
--- /dev/null
+++ b/storage/bdb/os_win32/os_stat.c
@@ -0,0 +1,100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_stat.c,v 11.22 2002/07/12 18:56:56 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_exists --
+ * Return if the file exists.
+ *
+ * PUBLIC: int __os_exists __P((const char *, int *));
+ */
+int
+__os_exists(path, isdirp)
+ const char *path;
+ int *isdirp;
+{
+ int ret;
+ DWORD attrs;
+
+ if (DB_GLOBAL(j_exists) != NULL)
+ return (DB_GLOBAL(j_exists)(path, isdirp));
+
+ ret = 0;
+ do {
+ attrs = GetFileAttributes(path);
+ if (attrs == (DWORD)-1)
+ ret = __os_win32_errno();
+ } while (ret == EINTR);
+
+ if (ret != 0)
+ return (ret);
+
+ if (isdirp != NULL)
+ *isdirp = (attrs & FILE_ATTRIBUTE_DIRECTORY);
+
+ return (0);
+}
+
+/*
+ * __os_ioinfo --
+ * Return file size and I/O size; abstracted to make it easier
+ * to replace.
+ *
+ * PUBLIC: int __os_ioinfo __P((DB_ENV *, const char *,
+ * PUBLIC: DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep)
+ DB_ENV *dbenv;
+ const char *path;
+ DB_FH *fhp;
+ u_int32_t *mbytesp, *bytesp, *iosizep;
+{
+ int ret;
+ BY_HANDLE_FILE_INFORMATION bhfi;
+ unsigned __int64 filesize;
+
+ if (DB_GLOBAL(j_ioinfo) != NULL)
+ return (DB_GLOBAL(j_ioinfo)(path,
+ fhp->fd, mbytesp, bytesp, iosizep));
+
+retry: if (!GetFileInformationByHandle(fhp->handle, &bhfi)) {
+ if ((ret = __os_win32_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv,
+ "GetFileInformationByHandle: %s", strerror(ret));
+ return (ret);
+ }
+
+ filesize = ((unsigned __int64)bhfi.nFileSizeHigh << 32) +
+ bhfi.nFileSizeLow;
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = (u_int32_t)(filesize / MEGABYTE);
+ if (bytesp != NULL)
+ *bytesp = (u_int32_t)(filesize % MEGABYTE);
+
+ /* The filesystem blocksize is not easily available. */
+ if (iosizep != NULL)
+ *iosizep = DB_DEF_IOSIZE;
+ return (0);
+}
diff --git a/storage/bdb/os_win32/os_type.c b/storage/bdb/os_win32/os_type.c
new file mode 100644
index 00000000000..583da0aaf1e
--- /dev/null
+++ b/storage/bdb/os_win32/os_type.c
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_type.c,v 11.6 2002/01/11 15:53:08 bostic Exp $";
+#endif /* not lint */
+
+/*
+ * __os_is_winnt --
+ * Return 1 if Windows/NT, otherwise 0.
+ *
+ * PUBLIC: int __os_is_winnt __P((void));
+ */
+int
+__os_is_winnt()
+{
+ static int __os_type = -1;
+
+ /*
+ * The value of __os_type is computed only once, and cached to
+ * avoid the overhead of repeated calls to GetVersion().
+ */
+ if (__os_type == -1) {
+ if ((GetVersion() & 0x80000000) == 0)
+ __os_type = 1;
+ else
+ __os_type = 0;
+ }
+ return (__os_type);
+}
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm
new file mode 100644
index 00000000000..c56390ba71f
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm
@@ -0,0 +1,1506 @@
+
+package BerkeleyDB;
+
+
+# Copyright (c) 1997-2002 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+#
+
+# The documentation for this module is at the bottom of this file,
+# after the line __END__.
+
+BEGIN { require 5.004_04 }
+
+use strict;
+use Carp;
+use vars qw($VERSION @ISA @EXPORT $AUTOLOAD
+ $use_XSLoader);
+
+$VERSION = '0.20';
+
+require Exporter;
+#require DynaLoader;
+require AutoLoader;
+
+BEGIN {
+ $use_XSLoader = 1 ;
+ { local $SIG{__DIE__} ; eval { require XSLoader } ; }
+
+ if ($@) {
+ $use_XSLoader = 0 ;
+ require DynaLoader;
+ @ISA = qw(DynaLoader);
+ }
+}
+
+@ISA = qw(Exporter DynaLoader);
+# Items to export into callers namespace by default. Note: do not export
+# names by default without a very good reason. Use EXPORT_OK instead.
+# Do not simply export all your public functions/methods/constants.
+
+# NOTE -- Do not add to @EXPORT directly. It is written by mkconsts
+@EXPORT = qw(
+ DB_AFTER
+ DB_AGGRESSIVE
+ DB_ALREADY_ABORTED
+ DB_APPEND
+ DB_APPLY_LOGREG
+ DB_APP_INIT
+ DB_ARCH_ABS
+ DB_ARCH_DATA
+ DB_ARCH_LOG
+ DB_AUTO_COMMIT
+ DB_BEFORE
+ DB_BROADCAST_EID
+ DB_BTREE
+ DB_BTREEMAGIC
+ DB_BTREEOLDVER
+ DB_BTREEVERSION
+ DB_CACHED_COUNTS
+ DB_CDB_ALLDB
+ DB_CHECKPOINT
+ DB_CHKSUM_SHA1
+ DB_CLIENT
+ DB_CL_WRITER
+ DB_COMMIT
+ DB_CONSUME
+ DB_CONSUME_WAIT
+ DB_CREATE
+ DB_CURLSN
+ DB_CURRENT
+ DB_CXX_NO_EXCEPTIONS
+ DB_DELETED
+ DB_DELIMITER
+ DB_DIRECT
+ DB_DIRECT_DB
+ DB_DIRECT_LOG
+ DB_DIRTY_READ
+ DB_DONOTINDEX
+ DB_DUP
+ DB_DUPCURSOR
+ DB_DUPSORT
+ DB_EID_BROADCAST
+ DB_EID_INVALID
+ DB_ENCRYPT
+ DB_ENCRYPT_AES
+ DB_ENV_APPINIT
+ DB_ENV_AUTO_COMMIT
+ DB_ENV_CDB
+ DB_ENV_CDB_ALLDB
+ DB_ENV_CREATE
+ DB_ENV_DBLOCAL
+ DB_ENV_DIRECT_DB
+ DB_ENV_DIRECT_LOG
+ DB_ENV_FATAL
+ DB_ENV_LOCKDOWN
+ DB_ENV_LOCKING
+ DB_ENV_LOGGING
+ DB_ENV_NOLOCKING
+ DB_ENV_NOMMAP
+ DB_ENV_NOPANIC
+ DB_ENV_OPEN_CALLED
+ DB_ENV_OVERWRITE
+ DB_ENV_PANIC_OK
+ DB_ENV_PRIVATE
+ DB_ENV_REGION_INIT
+ DB_ENV_REP_CLIENT
+ DB_ENV_REP_LOGSONLY
+ DB_ENV_REP_MASTER
+ DB_ENV_RPCCLIENT
+ DB_ENV_RPCCLIENT_GIVEN
+ DB_ENV_STANDALONE
+ DB_ENV_SYSTEM_MEM
+ DB_ENV_THREAD
+ DB_ENV_TXN
+ DB_ENV_TXN_NOSYNC
+ DB_ENV_TXN_WRITE_NOSYNC
+ DB_ENV_USER_ALLOC
+ DB_ENV_YIELDCPU
+ DB_EXCL
+ DB_EXTENT
+ DB_FAST_STAT
+ DB_FCNTL_LOCKING
+ DB_FILE_ID_LEN
+ DB_FIRST
+ DB_FIXEDLEN
+ DB_FLUSH
+ DB_FORCE
+ DB_GETREC
+ DB_GET_BOTH
+ DB_GET_BOTHC
+ DB_GET_BOTH_RANGE
+ DB_GET_RECNO
+ DB_HANDLE_LOCK
+ DB_HASH
+ DB_HASHMAGIC
+ DB_HASHOLDVER
+ DB_HASHVERSION
+ DB_INCOMPLETE
+ DB_INIT_CDB
+ DB_INIT_LOCK
+ DB_INIT_LOG
+ DB_INIT_MPOOL
+ DB_INIT_TXN
+ DB_INVALID_EID
+ DB_JAVA_CALLBACK
+ DB_JOINENV
+ DB_JOIN_ITEM
+ DB_JOIN_NOSORT
+ DB_KEYEMPTY
+ DB_KEYEXIST
+ DB_KEYFIRST
+ DB_KEYLAST
+ DB_LAST
+ DB_LOCKDOWN
+ DB_LOCKMAGIC
+ DB_LOCKVERSION
+ DB_LOCK_CONFLICT
+ DB_LOCK_DEADLOCK
+ DB_LOCK_DEFAULT
+ DB_LOCK_DUMP
+ DB_LOCK_EXPIRE
+ DB_LOCK_FREE_LOCKER
+ DB_LOCK_GET
+ DB_LOCK_GET_TIMEOUT
+ DB_LOCK_INHERIT
+ DB_LOCK_MAXLOCKS
+ DB_LOCK_MINLOCKS
+ DB_LOCK_MINWRITE
+ DB_LOCK_NORUN
+ DB_LOCK_NOTEXIST
+ DB_LOCK_NOTGRANTED
+ DB_LOCK_NOTHELD
+ DB_LOCK_NOWAIT
+ DB_LOCK_OLDEST
+ DB_LOCK_PUT
+ DB_LOCK_PUT_ALL
+ DB_LOCK_PUT_OBJ
+ DB_LOCK_PUT_READ
+ DB_LOCK_RANDOM
+ DB_LOCK_RECORD
+ DB_LOCK_REMOVE
+ DB_LOCK_RIW_N
+ DB_LOCK_RW_N
+ DB_LOCK_SET_TIMEOUT
+ DB_LOCK_SWITCH
+ DB_LOCK_TIMEOUT
+ DB_LOCK_TRADE
+ DB_LOCK_UPGRADE
+ DB_LOCK_UPGRADE_WRITE
+ DB_LOCK_YOUNGEST
+ DB_LOGC_BUF_SIZE
+ DB_LOGFILEID_INVALID
+ DB_LOGMAGIC
+ DB_LOGOLDVER
+ DB_LOGVERSION
+ DB_LOG_DISK
+ DB_LOG_LOCKED
+ DB_LOG_SILENT_ERR
+ DB_MAX_PAGES
+ DB_MAX_RECORDS
+ DB_MPOOL_CLEAN
+ DB_MPOOL_CREATE
+ DB_MPOOL_DIRTY
+ DB_MPOOL_DISCARD
+ DB_MPOOL_EXTENT
+ DB_MPOOL_LAST
+ DB_MPOOL_NEW
+ DB_MPOOL_NEW_GROUP
+ DB_MPOOL_PRIVATE
+ DB_MULTIPLE
+ DB_MULTIPLE_KEY
+ DB_MUTEXDEBUG
+ DB_MUTEXLOCKS
+ DB_NEEDSPLIT
+ DB_NEXT
+ DB_NEXT_DUP
+ DB_NEXT_NODUP
+ DB_NOCOPY
+ DB_NODUPDATA
+ DB_NOLOCKING
+ DB_NOMMAP
+ DB_NOORDERCHK
+ DB_NOOVERWRITE
+ DB_NOPANIC
+ DB_NORECURSE
+ DB_NOSERVER
+ DB_NOSERVER_HOME
+ DB_NOSERVER_ID
+ DB_NOSYNC
+ DB_NOTFOUND
+ DB_ODDFILESIZE
+ DB_OK_BTREE
+ DB_OK_HASH
+ DB_OK_QUEUE
+ DB_OK_RECNO
+ DB_OLD_VERSION
+ DB_OPEN_CALLED
+ DB_OPFLAGS_MASK
+ DB_ORDERCHKONLY
+ DB_OVERWRITE
+ DB_PAD
+ DB_PAGEYIELD
+ DB_PAGE_LOCK
+ DB_PAGE_NOTFOUND
+ DB_PANIC_ENVIRONMENT
+ DB_PERMANENT
+ DB_POSITION
+ DB_POSITIONI
+ DB_PREV
+ DB_PREV_NODUP
+ DB_PRINTABLE
+ DB_PRIORITY_DEFAULT
+ DB_PRIORITY_HIGH
+ DB_PRIORITY_LOW
+ DB_PRIORITY_VERY_HIGH
+ DB_PRIORITY_VERY_LOW
+ DB_PRIVATE
+ DB_PR_HEADERS
+ DB_PR_PAGE
+ DB_PR_RECOVERYTEST
+ DB_QAMMAGIC
+ DB_QAMOLDVER
+ DB_QAMVERSION
+ DB_QUEUE
+ DB_RDONLY
+ DB_RDWRMASTER
+ DB_RECNO
+ DB_RECNUM
+ DB_RECORDCOUNT
+ DB_RECORD_LOCK
+ DB_RECOVER
+ DB_RECOVER_FATAL
+ DB_REGION_ANON
+ DB_REGION_INIT
+ DB_REGION_MAGIC
+ DB_REGION_NAME
+ DB_REGISTERED
+ DB_RENAMEMAGIC
+ DB_RENUMBER
+ DB_REP_CLIENT
+ DB_REP_DUPMASTER
+ DB_REP_HOLDELECTION
+ DB_REP_LOGSONLY
+ DB_REP_MASTER
+ DB_REP_NEWMASTER
+ DB_REP_NEWSITE
+ DB_REP_OUTDATED
+ DB_REP_PERMANENT
+ DB_REP_UNAVAIL
+ DB_REVSPLITOFF
+ DB_RMW
+ DB_RPC_SERVERPROG
+ DB_RPC_SERVERVERS
+ DB_RUNRECOVERY
+ DB_SALVAGE
+ DB_SECONDARY_BAD
+ DB_SEQUENTIAL
+ DB_SET
+ DB_SET_LOCK_TIMEOUT
+ DB_SET_RANGE
+ DB_SET_RECNO
+ DB_SET_TXN_NOW
+ DB_SET_TXN_TIMEOUT
+ DB_SNAPSHOT
+ DB_STAT_CLEAR
+ DB_SURPRISE_KID
+ DB_SWAPBYTES
+ DB_SYSTEM_MEM
+ DB_TEMPORARY
+ DB_TEST_ELECTINIT
+ DB_TEST_ELECTSEND
+ DB_TEST_ELECTVOTE1
+ DB_TEST_ELECTVOTE2
+ DB_TEST_ELECTWAIT1
+ DB_TEST_ELECTWAIT2
+ DB_TEST_POSTDESTROY
+ DB_TEST_POSTEXTDELETE
+ DB_TEST_POSTEXTOPEN
+ DB_TEST_POSTEXTUNLINK
+ DB_TEST_POSTLOG
+ DB_TEST_POSTLOGMETA
+ DB_TEST_POSTOPEN
+ DB_TEST_POSTRENAME
+ DB_TEST_POSTSYNC
+ DB_TEST_PREDESTROY
+ DB_TEST_PREEXTDELETE
+ DB_TEST_PREEXTOPEN
+ DB_TEST_PREEXTUNLINK
+ DB_TEST_PREOPEN
+ DB_TEST_PRERENAME
+ DB_TEST_SUBDB_LOCKS
+ DB_THREAD
+ DB_TIMEOUT
+ DB_TRUNCATE
+ DB_TXNMAGIC
+ DB_TXNVERSION
+ DB_TXN_ABORT
+ DB_TXN_APPLY
+ DB_TXN_BACKWARD_ALLOC
+ DB_TXN_BACKWARD_ROLL
+ DB_TXN_CKP
+ DB_TXN_FORWARD_ROLL
+ DB_TXN_GETPGNOS
+ DB_TXN_LOCK
+ DB_TXN_LOCK_2PL
+ DB_TXN_LOCK_MASK
+ DB_TXN_LOCK_OPTIMIST
+ DB_TXN_LOCK_OPTIMISTIC
+ DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO
+ DB_TXN_LOG_UNDO
+ DB_TXN_LOG_UNDOREDO
+ DB_TXN_NOSYNC
+ DB_TXN_NOWAIT
+ DB_TXN_OPENFILES
+ DB_TXN_POPENFILES
+ DB_TXN_PRINT
+ DB_TXN_REDO
+ DB_TXN_SYNC
+ DB_TXN_UNDO
+ DB_TXN_WRITE_NOSYNC
+ DB_UNKNOWN
+ DB_UNRESOLVED_CHILD
+ DB_UPDATE_SECONDARY
+ DB_UPGRADE
+ DB_USE_ENVIRON
+ DB_USE_ENVIRON_ROOT
+ DB_VERB_CHKPOINT
+ DB_VERB_DEADLOCK
+ DB_VERB_RECOVERY
+ DB_VERB_REPLICATION
+ DB_VERB_WAITSFOR
+ DB_VERIFY
+ DB_VERIFY_BAD
+ DB_VERIFY_FATAL
+ DB_VERSION_MAJOR
+ DB_VERSION_MINOR
+ DB_VERSION_PATCH
+ DB_VERSION_STRING
+ DB_VRFY_FLAGMASK
+ DB_WRITECURSOR
+ DB_WRITELOCK
+ DB_WRITEOPEN
+ DB_WRNOSYNC
+ DB_XA_CREATE
+ DB_XIDDATASIZE
+ DB_YIELDCPU
+ );
+
+sub AUTOLOAD {
+ my($constname);
+ ($constname = $AUTOLOAD) =~ s/.*:://;
+ my ($error, $val) = constant($constname);
+ Carp::croak $error if $error;
+ no strict 'refs';
+ *{$AUTOLOAD} = sub { $val };
+ goto &{$AUTOLOAD};
+}
+
+#bootstrap BerkeleyDB $VERSION;
+if ($use_XSLoader)
+ { XSLoader::load("BerkeleyDB", $VERSION)}
+else
+ { bootstrap BerkeleyDB $VERSION }
+
+# Preloaded methods go here.
+
+
+sub ParseParameters($@)
+{
+ my ($default, @rest) = @_ ;
+ my (%got) = %$default ;
+ my (@Bad) ;
+ my ($key, $value) ;
+ my $sub = (caller(1))[3] ;
+ my %options = () ;
+ local ($Carp::CarpLevel) = 1 ;
+
+ # allow the options to be passed as a hash reference or
+ # as the complete hash.
+ if (@rest == 1) {
+
+ croak "$sub: parameter is not a reference to a hash"
+ if ref $rest[0] ne "HASH" ;
+
+ %options = %{ $rest[0] } ;
+ }
+ elsif (@rest >= 2) {
+ %options = @rest ;
+ }
+
+ while (($key, $value) = each %options)
+ {
+ $key =~ s/^-// ;
+
+ if (exists $default->{$key})
+ { $got{$key} = $value }
+ else
+ { push (@Bad, $key) }
+ }
+
+ if (@Bad) {
+ my ($bad) = join(", ", @Bad) ;
+ croak "unknown key value(s) @Bad" ;
+ }
+
+ return \%got ;
+}
+
+use UNIVERSAL qw( isa ) ;
+
+sub env_remove
+{
+ # Usage:
+ #
+ # $env = new BerkeleyDB::Env
+ # [ -Home => $path, ]
+ # [ -Config => { name => value, name => value }
+ # [ -Flags => DB_INIT_LOCK| ]
+ # ;
+
+ my $got = BerkeleyDB::ParseParameters({
+ Home => undef,
+ Flags => 0,
+ Config => undef,
+ }, @_) ;
+
+ if (defined $got->{Config}) {
+ croak("Config parameter must be a hash reference")
+ if ! ref $got->{Config} eq 'HASH' ;
+
+ @BerkeleyDB::a = () ;
+ my $k = "" ; my $v = "" ;
+ while (($k, $v) = each %{$got->{Config}}) {
+ push @BerkeleyDB::a, "$k\t$v" ;
+ }
+
+ $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef)
+ if @BerkeleyDB::a ;
+ }
+
+ return _env_remove($got) ;
+}
+
+sub db_remove
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ return _db_remove($got);
+}
+
+sub db_rename
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Newname => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ croak("Must specify a Subname")
+ if ! defined $got->{Subname} ;
+
+ croak("Must specify a Newname")
+ if ! defined $got->{Newname} ;
+
+ return _db_rename($got);
+}
+
+sub db_verify
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Outfile => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ return _db_verify($got);
+}
+
+package BerkeleyDB::Env ;
+
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+use vars qw( %valid_config_keys ) ;
+
+sub isaFilehandle
+{
+ my $fh = shift ;
+
+ return ((isa($fh,'GLOB') or isa(\$fh,'GLOB')) and defined fileno($fh) )
+
+}
+
+%valid_config_keys = map { $_, 1 } qw( DB_DATA_DIR DB_LOG_DIR DB_TEMP_DIR
+DB_TMP_DIR ) ;
+
+sub new
+{
+ # Usage:
+ #
+ # $env = new BerkeleyDB::Env
+ # [ -Home => $path, ]
+ # [ -Mode => mode, ]
+ # [ -Config => { name => value, name => value }
+ # [ -ErrFile => filename, ]
+ # [ -ErrPrefix => "string", ]
+ # [ -Flags => DB_INIT_LOCK| ]
+ # [ -Set_Flags => $flags,]
+ # [ -Cachesize => number ]
+ # [ -LockDetect => ]
+ # [ -Verbose => boolean ]
+ # ;
+
+ my $pkg = shift ;
+ my $got = BerkeleyDB::ParseParameters({
+ Home => undef,
+ Server => undef,
+ Mode => 0666,
+ ErrFile => undef,
+ ErrPrefix => undef,
+ Flags => 0,
+ SetFlags => 0,
+ Cachesize => 0,
+ LockDetect => 0,
+ Verbose => 0,
+ Config => undef,
+ }, @_) ;
+
+ if (defined $got->{ErrFile}) {
+ croak("ErrFile parameter must be a file name")
+ if ref $got->{ErrFile} ;
+ #if (!isaFilehandle($got->{ErrFile})) {
+ # my $handle = new IO::File ">$got->{ErrFile}"
+# or croak "Cannot open file $got->{ErrFile}: $!\n" ;
+# $got->{ErrFile} = $handle ;
+# }
+ }
+
+
+ my %config ;
+ if (defined $got->{Config}) {
+ croak("Config parameter must be a hash reference")
+ if ! ref $got->{Config} eq 'HASH' ;
+
+ %config = %{ $got->{Config} } ;
+ @BerkeleyDB::a = () ;
+ my $k = "" ; my $v = "" ;
+ while (($k, $v) = each %config) {
+ if ($BerkeleyDB::db_version >= 3.1 && ! $valid_config_keys{$k} ) {
+ $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ;
+ croak $BerkeleyDB::Error ;
+ }
+ push @BerkeleyDB::a, "$k\t$v" ;
+ }
+
+ $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef)
+ if @BerkeleyDB::a ;
+ }
+
+ my ($addr) = _db_appinit($pkg, $got) ;
+ my $obj ;
+ $obj = bless [$addr] , $pkg if $addr ;
+ if ($obj && $BerkeleyDB::db_version >= 3.1 && keys %config) {
+ my ($k, $v);
+ while (($k, $v) = each %config) {
+ if ($k eq 'DB_DATA_DIR')
+ { $obj->set_data_dir($v) }
+ elsif ($k eq 'DB_LOG_DIR')
+ { $obj->set_lg_dir($v) }
+ elsif ($k eq 'DB_TEMP_DIR' || $k eq 'DB_TMP_DIR')
+ { $obj->set_tmp_dir($v) }
+ else {
+ $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ;
+ croak $BerkeleyDB::Error
+ }
+ }
+ }
+ return $obj ;
+}
+
+
+sub TxnMgr
+{
+ my $env = shift ;
+ my ($addr) = $env->_TxnMgr() ;
+ my $obj ;
+ $obj = bless [$addr, $env] , "BerkeleyDB::TxnMgr" if $addr ;
+ return $obj ;
+}
+
+sub txn_begin
+{
+ my $env = shift ;
+ my ($addr) = $env->_txn_begin(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $env] , "BerkeleyDB::Txn" if $addr ;
+ return $obj ;
+}
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::Hash ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Hash specific
+ Ffactor => 0,
+ Nelem => 0,
+ Hash => undef,
+ DupCompare => undef,
+
+ # BerkeleyDB specific
+ ReadKey => undef,
+ WriteKey => undef,
+ ReadValue => undef,
+ WriteValue => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr) = _db_open_hash($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*TIEHASH = \&new ;
+
+
+package BerkeleyDB::Btree ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Btree specific
+ Minkey => 0,
+ Compare => undef,
+ DupCompare => undef,
+ Prefix => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr) = _db_open_btree($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Btree::TIEHASH = \&BerkeleyDB::Btree::new ;
+
+
+package BerkeleyDB::Recno ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Recno specific
+ Delim => undef,
+ Len => undef,
+ Pad => undef,
+ Source => undef,
+ ArrayBase => 1, # lowest index in array
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("Tie needs a reference to an array")
+ if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+
+ croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
+ if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
+
+
+ $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
+
+ my ($addr) = _db_open_recno($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Recno::TIEARRAY = \&BerkeleyDB::Recno::new ;
+*BerkeleyDB::Recno::db_stat = \&BerkeleyDB::Btree::db_stat ;
+
+package BerkeleyDB::Queue ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Queue specific
+ Len => undef,
+ Pad => undef,
+ ArrayBase => 1, # lowest index in array
+ ExtentSize => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("Tie needs a reference to an array")
+ if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+
+ croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
+ if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
+
+ $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
+
+ my ($addr) = _db_open_queue($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Queue::TIEARRAY = \&BerkeleyDB::Queue::new ;
+
+sub UNSHIFT
+{
+ my $self = shift;
+ croak "unshift is unsupported with Queue databases";
+}
+
+## package BerkeleyDB::Text ;
+##
+## use vars qw(@ISA) ;
+## @ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+## use UNIVERSAL qw( isa ) ;
+## use Carp ;
+##
+## sub new
+## {
+## my $self = shift ;
+## my $got = BerkeleyDB::ParseParameters(
+## {
+## # Generic Stuff
+## Filename => undef,
+## #Flags => BerkeleyDB::DB_CREATE(),
+## Flags => 0,
+## Property => 0,
+## Mode => 0666,
+## Cachesize => 0,
+## Lorder => 0,
+## Pagesize => 0,
+## Env => undef,
+## #Tie => undef,
+## Txn => undef,
+##
+## # Recno specific
+## Delim => undef,
+## Len => undef,
+## Pad => undef,
+## Btree => undef,
+## }, @_) ;
+##
+## croak("Env not of type BerkeleyDB::Env")
+## if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+##
+## croak("Txn not of type BerkeleyDB::Txn")
+## if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+##
+## croak("-Tie needs a reference to an array")
+## if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+##
+## # rearange for recno
+## $got->{Source} = $got->{Filename} if defined $got->{Filename} ;
+## delete $got->{Filename} ;
+## $got->{Fname} = $got->{Btree} if defined $got->{Btree} ;
+## return BerkeleyDB::Recno::_db_open_recno($self, $got);
+## }
+##
+## *BerkeleyDB::Text::TIEARRAY = \&BerkeleyDB::Text::new ;
+## *BerkeleyDB::Text::db_stat = \&BerkeleyDB::Btree::db_stat ;
+
+package BerkeleyDB::Unknown ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr, $type) = _db_open_unknown($got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr], "BerkeleyDB::$type" ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+
+package BerkeleyDB::_tiedHash ;
+
+use Carp ;
+
+#sub TIEHASH
+#{
+# my $self = shift ;
+# my $db_object = shift ;
+#
+#print "Tiehash REF=[$self] [" . (ref $self) . "]\n" ;
+#
+# return bless { Obj => $db_object}, $self ;
+#}
+
+sub Tie
+{
+ # Usage:
+ #
+ # $db->Tie \%hash ;
+ #
+
+ my $self = shift ;
+
+ #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
+
+ croak("usage \$x->Tie \\%hash\n") unless @_ ;
+ my $ref = shift ;
+
+ croak("Tie needs a reference to a hash")
+ if defined $ref and $ref !~ /HASH/ ;
+
+ #tie %{ $ref }, ref($self), $self ;
+ tie %{ $ref }, "BerkeleyDB::_tiedHash", $self ;
+ return undef ;
+}
+
+
+sub TIEHASH
+{
+ my $self = shift ;
+ my $db_object = shift ;
+ #return bless $db_object, 'BerkeleyDB::Common' ;
+ return $db_object ;
+}
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ $self->db_put($key, $value) ;
+}
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) ;
+
+ return $value ;
+}
+
+sub EXISTS
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) == 0 ;
+}
+
+sub DELETE
+{
+ my $self = shift ;
+ my $key = shift ;
+ $self->db_del($key) ;
+}
+
+sub CLEAR
+{
+ my $self = shift ;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ while ($cursor->c_get($key, $value, BerkeleyDB::DB_PREV()) == 0)
+ { $cursor->c_del() }
+ #1 while $cursor->c_del() == 0 ;
+ # cursor will self-destruct
+}
+
+#sub DESTROY
+#{
+# my $self = shift ;
+# print "BerkeleyDB::_tieHash::DESTROY\n" ;
+# $self->{Cursor}->c_close() if $self->{Cursor} ;
+#}
+
+package BerkeleyDB::_tiedArray ;
+
+use Carp ;
+
+sub Tie
+{
+ # Usage:
+ #
+ # $db->Tie \@array ;
+ #
+
+ my $self = shift ;
+
+ #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
+
+ croak("usage \$x->Tie \\%hash\n") unless @_ ;
+ my $ref = shift ;
+
+ croak("Tie needs a reference to an array")
+ if defined $ref and $ref !~ /ARRAY/ ;
+
+ #tie %{ $ref }, ref($self), $self ;
+ tie @{ $ref }, "BerkeleyDB::_tiedArray", $self ;
+ return undef ;
+}
+
+
+#sub TIEARRAY
+#{
+# my $self = shift ;
+# my $db_object = shift ;
+#
+#print "Tiearray REF=[$self] [" . (ref $self) . "]\n" ;
+#
+# return bless { Obj => $db_object}, $self ;
+#}
+
+sub TIEARRAY
+{
+ my $self = shift ;
+ my $db_object = shift ;
+ #return bless $db_object, 'BerkeleyDB::Common' ;
+ return $db_object ;
+}
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ $self->db_put($key, $value) ;
+}
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) ;
+
+ return $value ;
+}
+
+*CLEAR = \&BerkeleyDB::_tiedHash::CLEAR ;
+*FIRSTKEY = \&BerkeleyDB::_tiedHash::FIRSTKEY ;
+*NEXTKEY = \&BerkeleyDB::_tiedHash::NEXTKEY ;
+
+sub EXTEND {} # don't do anything with EXTEND
+
+
+sub SHIFT
+{
+ my $self = shift;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) != 0 ;
+ return undef if $cursor->c_del() != 0 ;
+
+ return $value ;
+}
+
+
+sub UNSHIFT
+{
+ my $self = shift;
+ if (@_)
+ {
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) ;
+ if ($status == 0)
+ {
+ foreach $value (reverse @_)
+ {
+ $key = 0 ;
+ $cursor->c_put($key, $value, BerkeleyDB::DB_BEFORE()) ;
+ }
+ }
+ elsif ($status == BerkeleyDB::DB_NOTFOUND())
+ {
+ $key = 0 ;
+ foreach $value (@_)
+ {
+ $self->db_put($key++, $value) ;
+ }
+ }
+ }
+}
+
+sub PUSH
+{
+ my $self = shift;
+ if (@_)
+ {
+ my ($key, $value) = (-1, 0) ;
+ my $cursor = $self->db_cursor() ;
+ my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) ;
+ if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND())
+ {
+ $key = -1 if $status != 0 and $self->type != BerkeleyDB::DB_RECNO() ;
+ foreach $value (@_)
+ {
+ ++ $key ;
+ $status = $self->db_put($key, $value) ;
+ }
+ }
+
+# can use this when DB_APPEND is fixed.
+# foreach $value (@_)
+# {
+# my $status = $cursor->c_put($key, $value, BerkeleyDB::DB_AFTER()) ;
+#print "[$status]\n" ;
+# }
+ }
+}
+
+sub POP
+{
+ my $self = shift;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) != 0 ;
+ return undef if $cursor->c_del() != 0 ;
+
+ return $value ;
+}
+
+sub SPLICE
+{
+ my $self = shift;
+ croak "SPLICE is not implemented yet" ;
+}
+
+*shift = \&SHIFT ;
+*unshift = \&UNSHIFT ;
+*push = \&PUSH ;
+*pop = \&POP ;
+*clear = \&CLEAR ;
+*length = \&FETCHSIZE ;
+
+sub STORESIZE
+{
+ croak "STORESIZE is not implemented yet" ;
+#print "STORESIZE @_\n" ;
+# my $self = shift;
+# my $length = shift ;
+# my $current_length = $self->FETCHSIZE() ;
+#print "length is $current_length\n";
+#
+# if ($length < $current_length) {
+#print "Make smaller $length < $current_length\n" ;
+# my $key ;
+# for ($key = $current_length - 1 ; $key >= $length ; -- $key)
+# { $self->db_del($key) }
+# }
+# elsif ($length > $current_length) {
+#print "Make larger $length > $current_length\n" ;
+# $self->db_put($length-1, "") ;
+# }
+# else { print "stay the same\n" }
+
+}
+
+
+
+#sub DESTROY
+#{
+# my $self = shift ;
+# print "BerkeleyDB::_tieArray::DESTROY\n" ;
+#}
+
+
+package BerkeleyDB::Common ;
+
+
+use Carp ;
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+sub Txn
+{
+ my $self = shift ;
+ my $txn = shift ;
+ #print "BerkeleyDB::Common::Txn db [$self] txn [$txn]\n" ;
+ if ($txn) {
+ $self->_Txn($txn) ;
+ push @{ $txn }, $self ;
+ }
+ else {
+ $self->_Txn() ;
+ }
+ #print "end BerkeleyDB::Common::Txn \n";
+}
+
+
+sub get_dup
+{
+ croak "Usage: \$db->get_dup(key [,flag])\n"
+ unless @_ == 2 or @_ == 3 ;
+
+ my $db = shift ;
+ my $key = shift ;
+ my $flag = shift ;
+ my $value = 0 ;
+ my $origkey = $key ;
+ my $wantarray = wantarray ;
+ my %values = () ;
+ my @values = () ;
+ my $counter = 0 ;
+ my $status = 0 ;
+ my $cursor = $db->db_cursor() ;
+
+ # iterate through the database until either EOF ($status == 0)
+ # or a different key is encountered ($key ne $origkey).
+ for ($status = $cursor->c_get($key, $value, BerkeleyDB::DB_SET()) ;
+ $status == 0 and $key eq $origkey ;
+ $status = $cursor->c_get($key, $value, BerkeleyDB::DB_NEXT()) ) {
+ # save the value or count number of matches
+ if ($wantarray) {
+ if ($flag)
+ { ++ $values{$value} }
+ else
+ { push (@values, $value) }
+ }
+ else
+ { ++ $counter }
+
+ }
+
+ return ($wantarray ? ($flag ? %values : @values) : $counter) ;
+}
+
+sub db_cursor
+{
+ my $db = shift ;
+ my ($addr) = $db->_db_cursor(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $db] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+sub db_join
+{
+ croak 'Usage: $db->BerkeleyDB::Common::db_join([cursors], flags=0)'
+ if @_ < 2 || @_ > 3 ;
+ my $db = shift ;
+ my ($addr) = $db->_db_join(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $db, $_[0]] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+package BerkeleyDB::Cursor ;
+
+sub c_close
+{
+ my $cursor = shift ;
+ $cursor->[1] = "" ;
+ return $cursor->_c_close() ;
+}
+
+sub c_dup
+{
+ my $cursor = shift ;
+ my ($addr) = $cursor->_c_dup(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $cursor->[1]] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::TxnMgr ;
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+sub txn_begin
+{
+ my $txnmgr = shift ;
+ my ($addr) = $txnmgr->_txn_begin(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $txnmgr] , "BerkeleyDB::Txn" if $addr ;
+ return $obj ;
+}
+
+package BerkeleyDB::Txn ;
+
+sub Txn
+{
+ my $self = shift ;
+ my $db ;
+ # keep a reference to each db in the txn object
+ foreach $db (@_) {
+ $db->_Txn($self) ;
+ push @{ $self}, $db ;
+ }
+}
+
+sub txn_commit
+{
+ my $self = shift ;
+ $self->disassociate() ;
+ my $status = $self->_txn_commit() ;
+ return $status ;
+}
+
+sub txn_abort
+{
+ my $self = shift ;
+ $self->disassociate() ;
+ my $status = $self->_txn_abort() ;
+ return $status ;
+}
+
+sub disassociate
+{
+ my $self = shift ;
+ my $db ;
+ while ( @{ $self } > 2) {
+ $db = pop @{ $self } ;
+ $db->Txn() ;
+ }
+ #print "end disassociate\n" ;
+}
+
+
+sub DESTROY
+{
+ my $self = shift ;
+
+ $self->disassociate() ;
+ # first close the close the transaction
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::Term ;
+
+END
+{
+ close_everything() ;
+}
+
+
+package BerkeleyDB ;
+
+
+
+# Autoload methods go after =cut, and are processed by the autosplit program.
+
+1;
+__END__
+
+
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod
new file mode 100644
index 00000000000..60f30e2abfb
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod
@@ -0,0 +1,1792 @@
+=head1 NAME
+
+BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4
+
+=head1 SYNOPSIS
+
+ use BerkeleyDB;
+
+ $env = new BerkeleyDB::Env [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
+ $db = new BerkeleyDB::Hash [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
+ $db = new BerkeleyDB::Btree [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Recno', [OPTIONS] ;
+ $db = new BerkeleyDB::Recno [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Queue', [OPTIONS] ;
+ $db = new BerkeleyDB::Queue [OPTIONS] ;
+
+ $db = new BerkeleyDB::Unknown [OPTIONS] ;
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
+ $hash{$key} = $value ;
+ $value = $hash{$key} ;
+ each %hash ;
+ keys %hash ;
+ values %hash ;
+
+ $status = $db->db_get()
+ $status = $db->db_put() ;
+ $status = $db->db_del() ;
+ $status = $db->db_sync() ;
+ $status = $db->db_close() ;
+ $status = $db->db_close() ;
+ $status = $db->db_pget()
+ $hash_ref = $db->db_stat() ;
+ $status = $db->db_key_range();
+ $type = $db->type() ;
+ $status = $db->status() ;
+ $boolean = $db->byteswapped() ;
+ $status = $db->truncate($count) ;
+
+ ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+ ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+ $cursor = $db->db_cursor([$flags]) ;
+ $newcursor = $cursor->c_dup([$flags]);
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_put() ;
+ $status = $cursor->c_del() ;
+ $status = $cursor->c_count() ;
+ $status = $cursor->c_pget() ;
+ $status = $cursor->status() ;
+ $status = $cursor->c_close() ;
+
+ $cursor = $db->db_join() ;
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_close() ;
+
+ $status = $env->txn_checkpoint()
+ $hash_ref = $env->txn_stat()
+ $status = $env->setmutexlocks()
+ $status = $env->set_flags()
+
+ $txn = $env->txn_begin() ;
+ $db->Txn($txn);
+ $txn->Txn($db1, $db2,...);
+ $status = $txn->txn_prepare()
+ $status = $txn->txn_commit()
+ $status = $txn->txn_abort()
+ $status = $txn->txn_id()
+ $status = $txn->txn_discard()
+
+ $status = $env->set_lg_dir();
+ $status = $env->set_lg_bsize();
+ $status = $env->set_lg_max();
+
+ $status = $env->set_data_dir() ;
+ $status = $env->set_tmp_dir() ;
+ $status = $env->set_verbose() ;
+
+ $BerkeleyDB::Error
+ $BerkeleyDB::db_version
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ # deprecated, but supported
+ $txn_mgr = $env->TxnMgr();
+ $status = $txn_mgr->txn_checkpoint()
+ $hash_ref = $txn_mgr->txn_stat()
+ $txn = $txn_mgr->txn_begin() ;
+
+=head1 DESCRIPTION
+
+B<NOTE: This document is still under construction. Expect it to be
+incomplete in places.>
+
+This Perl module provides an interface to most of the functionality
+available in Berkeley DB versions 2, 3 and 4. In general it is safe to assume
+that the interface provided here is identical to the Berkeley DB
+interface. The main changes have been to make the Berkeley DB API work
+in a Perl way. Note that if you are using Berkeley DB 2.x, the new
+features available in Berkeley DB 3.x or DB 4.x are not available via
+this module.
+
+The reader is expected to be familiar with the Berkeley DB
+documentation. Where the interface provided here is identical to the
+Berkeley DB library and the... TODO
+
+The B<db_appinit>, B<db_cursor>, B<db_open> and B<db_txn> man pages are
+particularly relevant.
+
+The interface to Berkeley DB is implemented with a number of Perl
+classes.
+
+=head1 ENV CLASS
+
+The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
+function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
+B<DBENV-E<gt>open> in Berkeley DB 3.x/4.x. Its purpose is to initialise a
+number of sub-systems that can then be used in a consistent way in all
+the databases opened within the environment.
+
+If you don't intend using transactions, locking or logging, then you
+shouldn't need to make use of B<BerkeleyDB::Env>.
+
+=head2 Synopsis
+
+ $env = new BerkeleyDB::Env
+ [ -Home => $path, ]
+ [ -Server => $name, ]
+ [ -CacheSize => $number, ]
+ [ -Config => { name => value, name => value }, ]
+ [ -ErrFile => filename, ]
+ [ -ErrPrefix => "string", ]
+ [ -Flags => number, ]
+ [ -SetFlags => bitmask, ]
+ [ -LockDetect => number, ]
+ [ -Verbose => boolean, ]
+
+=over 5
+
+All the parameters to the BerkeleyDB::Env constructor are optional.
+
+=item -Home
+
+If present, this parameter should point to an existing directory. Any
+files that I<aren't> specified with an absolute path in the sub-systems
+that are initialised by the BerkeleyDB::Env class will be assumed to
+live in the B<Home> directory.
+
+For example, in the code fragment below the database "fred.db" will be
+opened in the directory "/home/databases" because it was specified as a
+relative path, but "joe.db" will be opened in "/other" because it was
+part of an absolute path.
+
+ $env = new BerkeleyDB::Env
+ -Home => "/home/databases"
+ ...
+
+ $db1 = new BerkeleyDB::Hash
+ -Filename => "fred.db",
+ -Env => $env
+ ...
+
+ $db2 = new BerkeleyDB::Hash
+ -Filename => "/other/joe.db",
+ -Env => $env
+ ...
+
+=item -Server
+
+If present, this parameter should be the hostname of a server that is running
+the Berkeley DB RPC server. All databases will be accessed via the RPC server.
+
+=item -Cachesize
+
+If present, this parameter sets the size of the environment's shared memory
+buffer pool.
+
+=item -Config
+
+This is a variation on the C<-Home> parameter, but it allows finer
+control of where specific types of files will be stored.
+
+The parameter expects a reference to a hash. Valid keys are:
+B<DB_DATA_DIR>, B<DB_LOG_DIR> and B<DB_TMP_DIR>
+
+The code below shows an example of how it can be used.
+
+ $env = new BerkeleyDB::Env
+ -Config => { DB_DATA_DIR => "/home/databases",
+ DB_LOG_DIR => "/home/logs",
+ DB_TMP_DIR => "/home/tmp"
+ }
+ ...
+
+=item -ErrFile
+
+Expects a filename. Any errors generated internally by Berkeley DB will
+be logged to this file.
+
+=item -ErrPrefix
+
+Allows a prefix to be added to the error messages before they are sent
+to B<-ErrFile>.
+
+=item -Flags
+
+The B<Flags> parameter specifies both which sub-systems to initialise,
+as well as a number of environment-wide options.
+See the Berkeley DB documentation for more details of these options.
+
+Any of the following can be specified by OR'ing them:
+
+B<DB_CREATE>
+
+If any of the files specified do not already exist, create them.
+
+B<DB_INIT_CDB>
+
+Initialise the Concurrent Access Methods
+
+B<DB_INIT_LOCK>
+
+Initialise the Locking sub-system.
+
+B<DB_INIT_LOG>
+
+Initialise the Logging sub-system.
+
+B<DB_INIT_MPOOL>
+
+Initialise the ...
+
+B<DB_INIT_TXN>
+
+Initialise the ...
+
+B<DB_MPOOL_PRIVATE>
+
+Initialise the ...
+
+B<DB_INIT_MPOOL> is also specified.
+
+Initialise the ...
+
+B<DB_NOMMAP>
+
+Initialise the ...
+
+B<DB_RECOVER>
+
+
+
+B<DB_RECOVER_FATAL>
+
+B<DB_THREAD>
+
+B<DB_TXN_NOSYNC>
+
+B<DB_USE_ENVIRON>
+
+B<DB_USE_ENVIRON_ROOT>
+
+=item -SetFlags
+
+Calls ENV->set_flags with the supplied bitmask. Use this when you need to make
+use of DB_ENV->set_flags before DB_ENV->open is called.
+
+Only valid when Berkeley DB 3.x or better is used.
+
+=item -LockDetect
+
+Specifies what to do when a lock conflict occurs. The value should be one of
+
+B<DB_LOCK_DEFAULT>
+
+B<DB_LOCK_OLDEST>
+
+B<DB_LOCK_RANDOM>
+
+B<DB_LOCK_YOUNGEST>
+
+=item -Verbose
+
+Add extra debugging information to the messages sent to B<-ErrFile>.
+
+=back
+
+=head2 Methods
+
+The environment class has the following methods:
+
+=over 5
+
+=item $env->errPrefix("string") ;
+
+This method is identical to the B<-ErrPrefix> flag. It allows the
+error prefix string to be changed dynamically.
+
+=item $env->set_flags(bitmask, 1|0);
+
+=item $txn = $env->TxnMgr()
+
+Constructor for creating a B<TxnMgr> object.
+See L<"TRANSACTIONS"> for more details of using transactions.
+
+This method is deprecated. Access the transaction methods using the B<txn_>
+methods below from the environment object directly.
+
+=item $env->txn_begin()
+
+TODO
+
+=item $env->txn_stat()
+
+TODO
+
+=item $env->txn_checkpoint()
+
+TODO
+
+=item $env->status()
+
+Returns the status of the last BerkeleyDB::Env method.
+
+=item $env->setmutexlocks()
+
+Only available in Berkeley Db 3.0 or greater. Calls
+B<db_env_set_mutexlocks> when used with Berkeley DB 3.1.x. When used with
+Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
+
+=back
+
+=head2 Examples
+
+TODO.
+
+=head1 Global Classes
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
+=head1 THE DATABASE CLASSES
+
+B<BerkeleyDB> supports the following database formats:
+
+=over 5
+
+=item B<BerkeleyDB::Hash>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using B<BerkeleyDB::Hash> are not compatible with any
+of the other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most applications,
+is built into BerkeleyDB. If you do need to use your own hashing algorithm
+it is possible to write your own in Perl and have B<BerkeleyDB> use
+it instead.
+
+=item B<BerkeleyDB::Btree>
+
+The Btree format allows arbitrary key/value pairs to be stored in a
+B+tree.
+
+As with the B<BerkeleyDB::Hash> format, it is possible to provide a
+user defined Perl routine to perform the comparison of keys. By default,
+though, the keys are stored in lexical order.
+
+=item B<BerkeleyDB::Recno>
+
+TODO.
+
+
+=item B<BerkeleyDB::Queue>
+
+TODO.
+
+=item B<BerkeleyDB::Unknown>
+
+This isn't a database format at all. It is used when you want to open an
+existing Berkeley DB database without having to know what type it is.
+
+=back
+
+
+Each of the database formats described above is accessed via a
+corresponding B<BerkeleyDB> class. These will be described in turn in
+the next sections.
+
+=head1 BerkeleyDB::Hash
+
+Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Hash
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Hash',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+
+When the "tie" interface is used, reading from and writing to the database
+is achieved via the tied hash. In this case the database operates like
+a Perl associative array that happens to be stored on disk.
+
+In addition to the high-level tied hash interface, it is possible to
+make use of the underlying methods provided by Berkeley DB
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Hash> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Ffactor
+
+=item -Nelem
+
+See the Berkeley DB documentation for details of these options.
+
+=item -Hash
+
+Allows you to provide a user defined hash function. If not specified,
+a default hash function is used. Here is a template for a user-defined
+hash function
+
+ sub hash
+ {
+ my ($data) = shift ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Hash => \&hash,
+ ...
+
+See L<""> for an example.
+
+=item -DupCompare
+
+Used in conjunction with the B<DB_DUPSORT> flag.
+
+ sub compare
+ {
+ my ($key, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Property => DB_DUP|DB_DUPSORT,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+
+=head2 Methods
+
+B<BerkeleyDB::Hash> only supports the standard database methods.
+See L<COMMON DATABASE METHODS>.
+
+=head2 A Simple Tied Hash Example
+
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys retrieved from a
+Hash database are in an apparently random order.
+
+=head2 Another Simple Hash Example
+
+Do the same as the previous example but not using tie.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+=head2 Duplicate keys
+
+The code below is a variation on the examples above. This time the hash has
+been inverted. The key this time is colour and the value is the fruit name.
+The B<DB_DUP> flag has been specified to allow duplicates.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+here is the output:
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> banana
+ green -> apple
+
+=head2 Sorting Duplicate Keys
+
+In the previous example, when there were duplicate keys, the values are
+sorted in the order they are stored in. The code below is
+identical to the previous example except the B<DB_DUPSORT> flag is
+specified.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+Notice that in the output below the duplicate values are sorted.
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> apple
+ green -> banana
+
+=head2 Custom Sorting Duplicate Keys
+
+Another variation
+
+TODO
+
+=head2 Changing the hash
+
+TODO
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Btree
+
+Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+
+ $db = new BerkeleyDB::Btree
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Btree',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Btree> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item Minkey
+
+TODO
+
+=item Compare
+
+Allows you to override the default sort order used in the database. See
+L<"Changing the sort order"> for an example.
+
+ sub compare
+ {
+ my ($key, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Compare => \&compare,
+ ...
+
+=item Prefix
+
+ sub prefix
+ {
+ my ($key, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Prefix => \&prefix,
+ ...
+
+=item DupCompare
+
+ sub compare
+ {
+ my ($key, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+=head2 Methods
+
+B<BerkeleyDB::Btree> supports the following database methods.
+See also L<COMMON DATABASE METHODS>.
+
+All the methods below return 0 to indicate success.
+
+=over 5
+
+=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
+
+Given a key, C<$key>, this method returns the proportion of keys less than
+C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
+proportion greater than C<$key> in C<$greater>.
+
+The proportion is returned as a double in the range 0.0 to 1.0.
+
+=back
+
+=head2 A Simple Btree Example
+
+The code below is a simple example of using a btree database.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above. The keys have been sorted using
+Berkeley DB's default sorting algorithm.
+
+ Smith
+ Wall
+ mouse
+
+
+=head2 Changing the sort order
+
+It is possible to supply your own sorting algorithm if the one that Berkeley
+DB used isn't suitable. The code below is identical to the previous example
+except for the case insensitive compare function.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=back
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Recno
+
+Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Recno
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Recno',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+=head2 A Recno Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ untie @h ;
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head1 BerkeleyDB::Queue
+
+Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
+type B<DB_QUEUE> in Berkeley DB 3.x or greater. This database format
+isn't available if you use Berkeley DB 2.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Queue
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -ExtentSize => number, ]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Queue',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+
+
+=head1 BerkeleyDB::Unknown
+
+This class is used to open an existing database.
+
+Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
+Berkeley DB 3.x or greater.
+
+The constructor looks like this:
+
+ $db = new BerkeleyDB::Unknown
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+
+
+=head2 An example
+
+=head1 COMMON OPTIONS
+
+All database access class constructors support the common set of
+options defined below. All are optional.
+
+=over 5
+
+=item -Filename
+
+The database filename. If no filename is specified, a temporary file will
+be created and removed once the program terminates.
+
+=item -Subname
+
+Specifies the name of the sub-database to open.
+This option is only valid if you are using Berkeley DB 3.x or greater.
+
+=item -Flags
+
+Specify how the database will be opened/created. The valid flags are:
+
+B<DB_CREATE>
+
+Create any underlying files, as necessary. If the files do not already
+exist and the B<DB_CREATE> flag is not specified, the call will fail.
+
+B<DB_NOMMAP>
+
+Not supported by BerkeleyDB.
+
+B<DB_RDONLY>
+
+Opens the database in read-only mode.
+
+B<DB_THREAD>
+
+Not supported by BerkeleyDB.
+
+B<DB_TRUNCATE>
+
+If the database file already exists, remove all the data before
+opening it.
+
+=item -Mode
+
+Determines the file protection when the database is created. Defaults
+to 0666.
+
+=item -Cachesize
+
+=item -Lorder
+
+=item -Pagesize
+
+=item -Env
+
+When working under a Berkeley DB environment, this parameter
+
+Defaults to no environment.
+
+=item -Txn
+
+TODO.
+
+=back
+
+=head1 COMMON DATABASE METHODS
+
+All the database interfaces support the common set of methods defined
+below.
+
+All the methods below return 0 to indicate success.
+
+=head2 $status = $db->db_get($key, $value [, $flags])
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. If it exists, the value read from the database is
+returned in the C<$value> parameter.
+
+The B<$flags> parameter is optional. If present, it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_GET_BOTH>
+
+When the B<DB_GET_BOTH> flag is specified, B<db_get> checks for the
+existence of B<both> the C<$key> B<and> C<$value> in the database.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO
+
+=back
+
+
+=head2 $status = $db->db_put($key, $value [, $flags])
+
+Stores a key/value pair in the database.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_APPEND>
+
+This flag is only applicable when accessing a B<BerkeleyDB::Recno>
+database.
+
+TODO.
+
+
+=item B<DB_NOOVERWRITE>
+
+If this flag is specified and C<$key> already exists in the database,
+the call to B<db_put> will return B<DB_KEYEXIST>.
+
+=back
+
+=head2 $status = $db->db_del($key [, $flags])
+
+Deletes a key/value pair in the database associated with C<$key>.
+If duplicate keys are enabled in the database, B<db_del> will delete
+B<all> key/value pairs with key C<$key>.
+
+The B<$flags> parameter is optional and is currently unused.
+
+=head2 $status = $db->db_sync()
+
+If any parts of the database are in memory, write them to the database.
+
+=head2 $cursor = $db->db_cursor([$flags])
+
+Creates a cursor object. This is used to access the contents of the
+database sequentially. See L<CURSORS> for details of the methods
+available when working with cursors.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+
+TODO
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+TODO
+
+=head2 $db->byteswapped()
+
+TODO
+
+=head2 $db->type()
+
+Returns the type of the database. The possible return code are B<DB_HASH>
+for a B<BerkeleyDB::Hash> database, B<DB_BTREE> for a B<BerkeleyDB::Btree>
+database and B<DB_RECNO> for a B<BerkeleyDB::Recno> database. This method
+is typically used when a database has been opened with
+B<BerkeleyDB::Unknown>.
+
+=head2 $ref = $db->db_stat()
+
+Returns a reference to an associative array containing information about
+the database. The keys of the associative array correspond directly to the
+names of the fields defined in the Berkeley DB documentation. For example,
+in the DB documentation, the field B<bt_version> stores the version of the
+Btree database. Assuming you called B<db_stat> on a Btree database the
+equivalent field would be accessed as follows:
+
+ $version = $ref->{'bt_version'} ;
+
+If you are using Berkeley DB 3.x or better, this method will work with
+all database formats. When DB 2.x is used, it only works with
+B<BerkeleyDB::Btree>.
+
+=head2 $status = $db->status()
+
+Returns the status of the last C<$db> method called.
+
+=head2 $status = $db->truncate($count)
+
+Truncates the database and returns the number of records deleted
+in C<$count>.
+
+=head1 CURSORS
+
+A cursor is used whenever you want to access the contents of a database
+in sequential order.
+A cursor object is created with the C<db_cursor>
+
+A cursor object has the following methods available:
+
+=head2 $newcursor = $cursor->c_dup($flags)
+
+Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
+
+The C<$flags> parameter is optional and can take the following value:
+
+=over 5
+
+=item DB_POSITION
+
+When present this flag will position the new cursor at the same place as the
+existing cursor.
+
+=back
+
+=head2 $status = $cursor->c_get($key, $value, $flags)
+
+Reads a key/value pair from the database, returning the data in C<$key>
+and C<$value>. The key/value pair actually read is controlled by the
+C<$flags> parameter, which can take B<one> of the following values:
+
+=over 5
+
+=item B<DB_FIRST>
+
+Set the cursor to point to the first key/value pair in the
+database. Return the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_LAST>
+
+Set the cursor to point to the last key/value pair in the database. Return
+the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_NEXT>
+
+If the cursor is already pointing to a key/value pair, it will be
+incremented to point to the next key/value pair and return its contents.
+
+If the cursor isn't initialised, B<DB_NEXT> works just like B<DB_FIRST>.
+
+If the cursor is already positioned at the last key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_NEXT_DUP>
+
+This flag is only valid when duplicate keys have been enabled in
+a database.
+If the cursor is already pointing to a key/value pair and the key of
+the next key/value pair is identical, the cursor will be incremented to
+point to it and their contents returned.
+
+=item B<DB_PREV>
+
+If the cursor is already pointing to a key/value pair, it will be
+decremented to point to the previous key/value pair and return its
+contents.
+
+If the cursor isn't initialised, B<DB_PREV> works just like B<DB_LAST>.
+
+If the cursor is already positioned at the first key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_CURRENT>
+
+If the cursor has been set to point to a key/value pair, return their
+contents.
+If the key/value pair referenced by the cursor has been deleted, B<c_get>
+will return B<DB_KEYEMPTY>.
+
+=item B<DB_SET>
+
+Set the cursor to point to the key/value pair referenced by B<$key>
+and return the value in B<$value>.
+
+=item B<DB_SET_RANGE>
+
+This flag is a variation on the B<DB_SET> flag. As well as returning
+the value, it also returns the key, via B<$key>.
+When used with a B<BerkeleyDB::Btree> database the key matched by B<c_get>
+will be the shortest key (in length) which is greater than or equal to
+the key supplied, via B<$key>. This allows partial key searches.
+See ??? for an example of how to use this flag.
+
+=item B<DB_GET_BOTH>
+
+Another variation on B<DB_SET>. This one returns both the key and
+the value.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=item B<DB_GET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 $status = $cursor->c_put($key, $value, $flags)
+
+Stores the key/value pair in the database. The position that the data is
+stored in the database is controlled by the C<$flags> parameter, which
+must take B<one> of the following values:
+
+=over 5
+
+=item B<DB_AFTER>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately after the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+
+=item B<DB_BEFORE>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately before the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+=item B<DB_CURRENT>
+
+If the cursor has been initialised, replace the value of the key/value
+pair stored in the database with the contents of B<$value>.
+
+=item B<DB_KEYFIRST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the first entry in
+the duplicates for the particular key.
+
+=item B<DB_KEYLAST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the last entry in
+the duplicates for the particular key.
+
+=back
+
+=head2 $status = $cursor->c_del([$flags])
+
+This method deletes the key/value pair associated with the current cursor
+position. The cursor position will not be changed by this operation, so
+any subsequent cursor operation must first initialise the cursor to
+point to a valid key/value pair.
+
+If the key/value pair associated with the cursor have already been
+deleted, B<c_del> will return B<DB_KEYEMPTY>.
+
+The B<$flags> parameter is not used at present.
+
+=head2 $status = $cursor->c_count($cnt [, $flags])
+
+Stores the number of duplicates at the current cursor position in B<$cnt>.
+
+The B<$flags> parameter is not used at present. This method needs
+Berkeley DB 3.1 or better.
+
+=head2 $status = $cursor->status()
+
+Returns the status of the last cursor method as a dual type.
+
+=head2 Cursor Examples
+
+TODO
+
+Iterating from first to last, then in reverse.
+
+examples of each of the flags.
+
+=head1 JOIN
+
+Join support for BerkeleyDB is in progress. Watch this space.
+
+TODO
+
+=head1 TRANSACTIONS
+
+TODO.
+
+=head1 DBM Filters
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a DBM
+database. All of the database classes (BerkeleyDB::Hash,
+BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
+
+There are four methods associated with DBM Filters. All work
+identically, and each is used to install (or uninstall) a single DBM
+Filter. Each expects a single parameter, namely a reference to a sub.
+The only difference between them is the place that the filter is
+installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database that you need
+to share with a third-party C application. The C application assumes
+that I<all> keys and values are NULL terminated. Unfortunately when
+Perl writes to DBM databases it doesn't use NULL termination, so your
+Perl application will have to manage NULL termination itself. When you
+write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL terminations issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to all keys and values whenever you write to
+the database and have them removed when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 Using BerkeleyDB with MLDBM
+
+Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
+module. The code fragment below shows how to associate MLDBM with
+BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
+BerkeleyDB::Btree with BerkeleyDB::Hash.
+
+ use strict ;
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = 'testmldbm' ;
+ my %o ;
+
+ unlink $filename ;
+ tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open database '$filename: $!\n";
+
+See the MLDBM documentation for information on how to use the module
+and for details of its limitations.
+
+=head1 EXAMPLES
+
+TODO.
+
+=head1 HINTS & TIPS
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings
+are not. See L<An Example -- the NULL termination problem.> in the DBM
+FILTERS section for a generic way to work around this problem.
+
+
+=head2 The untie Gotcha
+
+TODO
+
+=head1 COMMON QUESTIONS
+
+This section attempts to answer some of the more common questions that
+I get asked.
+
+
+=head2 Relationship with DB_File
+
+Before Berkeley DB 2.x was written there was only one Perl module that
+interfaced to Berkeley DB. That module is called B<DB_File>. Although
+B<DB_File> can be built with Berkeley DB 1.x, 2.x, 3.x or 4.x, it only provides
+an interface to the functionality available in Berkeley DB 1.x. That
+means that it doesn't support transactions, locking or any of the other
+new features available in DB 2.x or better.
+
+=head2 How do I store Perl data structures with BerkeleyDB?
+
+See L<Using BerkeleyDB with MLDBM>.
+
+=head1 HISTORY
+
+See the Changes file.
+
+=head1 AVAILABILITY
+
+The most recent version of B<BerkeleyDB> can always be found
+on CPAN (see L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/BerkeleyDB>.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-2002 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<BerkeleyDB> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are a few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of Berkeley
+ DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of BerkeleyDB.
+See L<"AUTHOR"> for details.
+
+
+=head1 AUTHOR
+
+Paul Marquess E<lt>Paul.Marquess@btinternet.comE<gt>.
+
+Questions about Berkeley DB may be addressed to E<lt>db@sleepycat.comE<gt>.
+
+=head1 SEE ALSO
+
+perl(1), DB_File, Berkeley DB.
+
+=cut
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
new file mode 100644
index 00000000000..4a848f5388d
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
@@ -0,0 +1,1559 @@
+=head1 NAME
+
+BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4
+
+=head1 SYNOPSIS
+
+ use BerkeleyDB;
+
+ $env = new BerkeleyDB::Env [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
+ $db = new BerkeleyDB::Hash [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
+ $db = new BerkeleyDB::Btree [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Recno', [OPTIONS] ;
+ $db = new BerkeleyDB::Recno [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Queue', [OPTIONS] ;
+ $db = new BerkeleyDB::Queue [OPTIONS] ;
+
+ $db = new BerkeleyDB::Unknown [OPTIONS] ;
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
+ $hash{$key} = $value ;
+ $value = $hash{$key} ;
+ each %hash ;
+ keys %hash ;
+ values %hash ;
+
+ $status = $db->db_get()
+ $status = $db->db_put() ;
+ $status = $db->db_del() ;
+ $status = $db->db_sync() ;
+ $status = $db->db_close() ;
+ $status = $db->db_close() ;
+ $status = $db->db_pget()
+ $hash_ref = $db->db_stat() ;
+ $status = $db->db_key_range();
+ $type = $db->type() ;
+ $status = $db->status() ;
+ $boolean = $db->byteswapped() ;
+ $status = $db->truncate($count) ;
+
+ ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+ ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+ $cursor = $db->db_cursor([$flags]) ;
+ $newcursor = $cursor->c_dup([$flags]);
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_put() ;
+ $status = $cursor->c_del() ;
+ $status = $cursor->c_count() ;
+ $status = $cursor->c_pget() ;
+ $status = $cursor->status() ;
+ $status = $cursor->c_close() ;
+
+ $cursor = $db->db_join() ;
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_close() ;
+
+ $status = $env->txn_checkpoint()
+ $hash_ref = $env->txn_stat()
+ $status = $env->setmutexlocks()
+ $status = $env->set_flags()
+
+ $txn = $env->txn_begin() ;
+ $db->Txn($txn);
+ $txn->Txn($db1, $db2,...);
+ $status = $txn->txn_prepare()
+ $status = $txn->txn_commit()
+ $status = $txn->txn_abort()
+ $status = $txn->txn_id()
+ $status = $txn->txn_discard()
+
+ $status = $env->set_lg_dir();
+ $status = $env->set_lg_bsize();
+ $status = $env->set_lg_max();
+
+ $status = $env->set_data_dir() ;
+ $status = $env->set_tmp_dir() ;
+ $status = $env->set_verbose() ;
+
+ $BerkeleyDB::Error
+ $BerkeleyDB::db_version
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ # deprecated, but supported
+ $txn_mgr = $env->TxnMgr();
+ $status = $txn_mgr->txn_checkpoint()
+ $hash_ref = $txn_mgr->txn_stat()
+ $txn = $txn_mgr->txn_begin() ;
+
+=head1 DESCRIPTION
+
+B<NOTE: This document is still under construction. Expect it to be
+incomplete in places.>
+
+This Perl module provides an interface to most of the functionality
+available in Berkeley DB versions 2, 3 and 4. In general it is safe to assume
+that the interface provided here is identical to the Berkeley DB
+interface. The main changes have been to make the Berkeley DB API work
+in a Perl way. Note that if you are using Berkeley DB 2.x, the new
+features available in Berkeley DB 3.x or DB 4.x are not available via
+this module.
+
+The reader is expected to be familiar with the Berkeley DB
+documentation. Where the interface provided here is identical to the
+Berkeley DB library and the... TODO
+
+The B<db_appinit>, B<db_cursor>, B<db_open> and B<db_txn> man pages are
+particularly relevant.
+
+The interface to Berkeley DB is implemented with a number of Perl
+classes.
+
+=head1 ENV CLASS
+
+The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
+function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
+B<DBENV-E<gt>open> in Berkeley DB 3.x/4.x. Its purpose is to initialise a
+number of sub-systems that can then be used in a consistent way in all
+the databases you make use of the environment.
+
+If you don't intend using transactions, locking or logging, then you
+shouldn't need to make use of B<BerkeleyDB::Env>.
+
+=head2 Synopsis
+
+ $env = new BerkeleyDB::Env
+ [ -Home => $path, ]
+ [ -Server => $name, ]
+ [ -CacheSize => $number, ]
+ [ -Config => { name => value, name => value }, ]
+ [ -ErrFile => filename, ]
+ [ -ErrPrefix => "string", ]
+ [ -Flags => number, ]
+ [ -SetFlags => bitmask, ]
+ [ -LockDetect => number, ]
+ [ -Verbose => boolean, ]
+
+=over 5
+
+All the parameters to the BerkeleyDB::Env constructor are optional.
+
+=item -Home
+
+If present, this parameter should point to an existing directory. Any
+files that I<aren't> specified with an absolute path in the sub-systems
+that are initialised by the BerkeleyDB::Env class will be assumed to
+live in the B<Home> directory.
+
+For example, in the code fragment below the database "fred.db" will be
+opened in the directory "/home/databases" because it was specified as a
+relative path, but "joe.db" will be opened in "/other" because it was
+part of an absolute path.
+
+ $env = new BerkeleyDB::Env
+ -Home => "/home/databases"
+ ...
+
+ $db1 = new BerkeleyDB::Hash
+ -Filename = "fred.db",
+ -Env => $env
+ ...
+
+ $db2 = new BerkeleyDB::Hash
+ -Filename = "/other/joe.db",
+ -Env => $env
+ ...
+
+=item -Server
+
+If present, this parameter should be the hostname of a server that is running
+the Berkeley DB RPC server. All databases will be accessed via the RPC server.
+
+=item -Cachesize
+
+If present, this parameter sets the size of the environment's shared memory
+buffer pool.
+
+=item -Config
+
+This is a variation on the C<-Home> parameter, but it allows finer
+control of where specific types of files will be stored.
+
+The parameter expects a reference to a hash. Valid keys are:
+B<DB_DATA_DIR>, B<DB_LOG_DIR> and B<DB_TMP_DIR>
+
+The code below shows an example of how it can be used.
+
+ $env = new BerkeleyDB::Env
+ -Config => { DB_DATA_DIR => "/home/databases",
+ DB_LOG_DIR => "/home/logs",
+ DB_TMP_DIR => "/home/tmp"
+ }
+ ...
+
+=item -ErrFile
+
+Expects a filename. Any errors generated internally by Berkeley DB will
+be logged to this file.
+
+=item -ErrPrefix
+
+Allows a prefix to be added to the error messages before they are sent
+to B<-ErrFile>.
+
+=item -Flags
+
+The B<Flags> parameter specifies both which sub-systems to initialise,
+as well as a number of environment-wide options.
+See the Berkeley DB documentation for more details of these options.
+
+Any of the following can be specified by OR'ing them:
+
+B<DB_CREATE>
+
+If any of the files specified do not already exist, create them.
+
+B<DB_INIT_CDB>
+
+Initialise the Concurrent Access Methods
+
+B<DB_INIT_LOCK>
+
+Initialise the Locking sub-system.
+
+B<DB_INIT_LOG>
+
+Initialise the Logging sub-system.
+
+B<DB_INIT_MPOOL>
+
+Initialise the ...
+
+B<DB_INIT_TXN>
+
+Initialise the ...
+
+B<DB_MPOOL_PRIVATE>
+
+Initialise the ...
+
+B<DB_INIT_MPOOL> is also specified.
+
+Initialise the ...
+
+B<DB_NOMMAP>
+
+Initialise the ...
+
+B<DB_RECOVER>
+
+
+
+B<DB_RECOVER_FATAL>
+
+B<DB_THREAD>
+
+B<DB_TXN_NOSYNC>
+
+B<DB_USE_ENVIRON>
+
+B<DB_USE_ENVIRON_ROOT>
+
+=item -SetFlags
+
+Calls ENV->set_flags with the supplied bitmask. Use this when you need to make
+use of DB_ENV->set_flags before DB_ENV->open is called.
+
+Only valid when Berkeley DB 3.x or better is used.
+
+=item -LockDetect
+
+Specifies what to do when a lock conflict occurs. The value should be one of
+
+B<DB_LOCK_DEFAULT>
+
+B<DB_LOCK_OLDEST>
+
+B<DB_LOCK_RANDOM>
+
+B<DB_LOCK_YOUNGEST>
+
+=item -Verbose
+
+Add extra debugging information to the messages sent to B<-ErrFile>.
+
+=back
+
+=head2 Methods
+
+The environment class has the following methods:
+
+=over 5
+
+=item $env->errPrefix("string") ;
+
+This method is identical to the B<-ErrPrefix> flag. It allows the
+error prefix string to be changed dynamically.
+
+=item $env->set_flags(bitmask, 1|0);
+
+=item $txn = $env->TxnMgr()
+
+Constructor for creating a B<TxnMgr> object.
+See L<"TRANSACTIONS"> for more details of using transactions.
+
+This method is deprecated. Access the transaction methods using the B<txn_>
+methods below from the environment object directly.
+
+=item $env->txn_begin()
+
+TODO
+
+=item $env->txn_stat()
+
+TODO
+
+=item $env->txn_checkpoint()
+
+TODO
+
+=item $env->status()
+
+Returns the status of the last BerkeleyDB::Env method.
+
+=item $env->setmutexlocks()
+
+Only available in Berkeley Db 3.0 or greater. Calls
+B<db_env_set_mutexlocks> when used with Berkeley DB 3.1.x. When used with
+Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
+
+=back
+
+=head2 Examples
+
+TODO.
+
+=head1 Global Classes
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
+=head1 THE DATABASE CLASSES
+
+B<BerkeleyDB> supports the following database formats:
+
+=over 5
+
+=item B<BerkeleyDB::Hash>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using B<BerkeleyDB::Hash> are not compatible with any
+of the other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most applications,
+is built into BerkeleyDB. If you do need to use your own hashing algorithm
+it is possible to write your own in Perl and have B<BerkeleyDB> use
+it instead.
+
+=item B<BerkeleyDB::Btree>
+
+The Btree format allows arbitrary key/value pairs to be stored in a
+B+tree.
+
+As with the B<BerkeleyDB::Hash> format, it is possible to provide a
+user defined Perl routine to perform the comparison of keys. By default,
+though, the keys are stored in lexical order.
+
+=item B<BerkeleyDB::Recno>
+
+TODO.
+
+
+=item B<BerkeleyDB::Queue>
+
+TODO.
+
+=item B<BerkeleyDB::Unknown>
+
+This isn't a database format at all. It is used when you want to open an
+existing Berkeley DB database without having to know what type it is.
+
+=back
+
+
+Each of the database formats described above is accessed via a
+corresponding B<BerkeleyDB> class. These will be described in turn in
+the next sections.
+
+=head1 BerkeleyDB::Hash
+
+Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Hash
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Hash',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+
+When the "tie" interface is used, reading from and writing to the database
+is achieved via the tied hash. In this case the database operates like
+a Perl associative array that happens to be stored on disk.
+
+In addition to the high-level tied hash interface, it is possible to
+make use of the underlying methods provided by Berkeley DB
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Hash> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Ffactor
+
+=item -Nelem
+
+See the Berkeley DB documentation for details of these options.
+
+=item -Hash
+
+Allows you to provide a user defined hash function. If not specified,
+a default hash function is used. Here is a template for a user-defined
+hash function
+
+ sub hash
+ {
+ my ($data) = shift ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Hash => \&hash,
+ ...
+
+See L<""> for an example.
+
+=item -DupCompare
+
+Used in conjunction with the B<DB_DUPSORT> flag.
+
+ sub compare
+ {
+ my ($key, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Property => DB_DUP|DB_DUPSORT,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+
+=head2 Methods
+
+B<BerkeleyDB::Hash> only supports the standard database methods.
+See L<COMMON DATABASE METHODS>.
+
+=head2 A Simple Tied Hash Example
+
+## simpleHash
+
+here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys retrieved from a
+Hash database are in an apparently random order.
+
+=head2 Another Simple Hash Example
+
+Do the same as the previous example but not using tie.
+
+## simpleHash2
+
+=head2 Duplicate keys
+
+The code below is a variation on the examples above. This time the hash has
+been inverted. The key this time is colour and the value is the fruit name.
+The B<DB_DUP> flag has been specified to allow duplicates.
+
+##dupHash
+
+here is the output:
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> banana
+ green -> apple
+
+=head2 Sorting Duplicate Keys
+
+In the previous example, when there were duplicate keys, the values are
+sorted in the order they are stored in. The code below is
+identical to the previous example except the B<DB_DUPSORT> flag is
+specified.
+
+##dupSortHash
+
+Notice that in the output below the duplicate values are sorted.
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> apple
+ green -> banana
+
+=head2 Custom Sorting Duplicate Keys
+
+Another variation
+
+TODO
+
+=head2 Changing the hash
+
+TODO
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Btree
+
+Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+
+ $db = new BerkeleyDB::Btree
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Btree',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Btree> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item Minkey
+
+TODO
+
+=item Compare
+
+Allow you to override the default sort order used in the database. See
+L<"Changing the sort order"> for an example.
+
+ sub compare
+ {
+ my ($key, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Compare => \&compare,
+ ...
+
+=item Prefix
+
+ sub prefix
+ {
+ my ($key, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Prefix => \&prefix,
+	...
+
+=item DupCompare
+
+ sub compare
+ {
+ my ($key, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+=head2 Methods
+
+B<BerkeleyDB::Btree> supports the following database methods.
+See also L<COMMON DATABASE METHODS>.
+
+All the methods below return 0 to indicate success.
+
+=over 5
+
+=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
+
+Given a key, C<$key>, this method returns the proportion of keys less than
+C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
+proportion greater than C<$key> in C<$greater>.
+
+The proportion is returned as a double in the range 0.0 to 1.0.
+
+=back
+
+=head2 A Simple Btree Example
+
+The code below is a simple example of using a btree database.
+
+## btreeSimple
+
+Here is the output from the code above. The keys have been sorted using
+Berkeley DB's default sorting algorithm.
+
+ Smith
+ Wall
+ mouse
+
+
+=head2 Changing the sort order
+
+It is possible to supply your own sorting algorithm if the one that Berkeley
+DB used isn't suitable. The code below is identical to the previous example
+except for the case insensitive compare function.
+
+## btreeSortOrder
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=back
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Recno
+
+Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
+Berkeley DB 3.x or greater.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Recno
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Recno',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+=head2 A Recno Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+## simpleRecno
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head1 BerkeleyDB::Queue
+
+Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
+type B<DB_QUEUE> in Berkeley DB 3.x or greater. This database format
+isn't available if you use Berkeley DB 2.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Queue
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -ExtentSize => number, ]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Queue',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+
+
+=head1 BerkeleyDB::Unknown
+
+This class is used to open an existing database.
+
+Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
+Berkeley DB 3.x or greater.
+
+The constructor looks like this:
+
+ $db = new BerkeleyDB::Unknown
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+
+
+=head2 An example
+
+=head1 COMMON OPTIONS
+
+All database access class constructors support the common set of
+options defined below. All are optional.
+
+=over 5
+
+=item -Filename
+
+The database filename. If no filename is specified, a temporary file will
+be created and removed once the program terminates.
+
+=item -Subname
+
+Specifies the name of the sub-database to open.
+This option is only valid if you are using Berkeley DB 3.x or greater.
+
+=item -Flags
+
+Specify how the database will be opened/created. The valid flags are:
+
+B<DB_CREATE>
+
+Create any underlying files, as necessary. If the files do not already
+exist and the B<DB_CREATE> flag is not specified, the call will fail.
+
+B<DB_NOMMAP>
+
+Not supported by BerkeleyDB.
+
+B<DB_RDONLY>
+
+Opens the database in read-only mode.
+
+B<DB_THREAD>
+
+Not supported by BerkeleyDB.
+
+B<DB_TRUNCATE>
+
+If the database file already exists, remove all the data before
+opening it.
+
+=item -Mode
+
+Determines the file protection when the database is created. Defaults
+to 0666.
+
+=item -Cachesize
+
+=item -Lorder
+
+=item -Pagesize
+
+=item -Env
+
+When working under a Berkeley DB environment, this parameter is used
+to specify the C<BerkeleyDB::Env> object.
+
+Defaults to no environment.
+
+=item -Txn
+
+TODO.
+
+=back
+
+=head1 COMMON DATABASE METHODS
+
+All the database interfaces support the common set of methods defined
+below.
+
+All the methods below return 0 to indicate success.
+
+=head2 $status = $db->db_get($key, $value [, $flags])
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. If it exists, the value read from the database is
+returned in the C<$value> parameter.
+
+The B<$flags> parameter is optional. If present, it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_GET_BOTH>
+
+When the B<DB_GET_BOTH> flag is specified, B<db_get> checks for the
+existence of B<both> the C<$key> B<and> C<$value> in the database.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO
+
+=back
+
+
+=head2 $status = $db->db_put($key, $value [, $flags])
+
+Stores a key/value pair in the database.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_APPEND>
+
+This flag is only applicable when accessing a B<BerkeleyDB::Recno>
+database.
+
+TODO.
+
+
+=item B<DB_NOOVERWRITE>
+
+If this flag is specified and C<$key> already exists in the database,
+the call to B<db_put> will return B<DB_KEYEXIST>.
+
+=back
+
+=head2 $status = $db->db_del($key [, $flags])
+
+Deletes a key/value pair in the database associated with C<$key>.
+If duplicate keys are enabled in the database, B<db_del> will delete
+B<all> key/value pairs with key C<$key>.
+
+The B<$flags> parameter is optional and is currently unused.
+
+=head2 $status = $db->db_sync()
+
+If any parts of the database are in memory, write them to the database.
+
+=head2 $cursor = $db->db_cursor([$flags])
+
+Creates a cursor object. This is used to access the contents of the
+database sequentially. See L<CURSORS> for details of the methods
+available when working with cursors.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+
+TODO
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+TODO
+
+=head2 $db->byteswapped()
+
+TODO
+
+=head2 $db->type()
+
+Returns the type of the database. The possible return code are B<DB_HASH>
+for a B<BerkeleyDB::Hash> database, B<DB_BTREE> for a B<BerkeleyDB::Btree>
+database and B<DB_RECNO> for a B<BerkeleyDB::Recno> database. This method
+is typically used when a database has been opened with
+B<BerkeleyDB::Unknown>.
+
+=head2 $ref = $db->db_stat()
+
+Returns a reference to an associative array containing information about
+the database. The keys of the associative array correspond directly to the
+names of the fields defined in the Berkeley DB documentation. For example,
+in the DB documentation, the field B<bt_version> stores the version of the
+Btree database. Assuming you called B<db_stat> on a Btree database the
+equivalent field would be accessed as follows:
+
+ $version = $ref->{'bt_version'} ;
+
+If you are using Berkeley DB 3.x or better, this method will work with
+all database formats. When DB 2.x is used, it only works with
+B<BerkeleyDB::Btree>.
+
+=head2 $status = $db->status()
+
+Returns the status of the last C<$db> method called.
+
+=head2 $status = $db->truncate($count)
+
+Truncates the database and returns the number of records deleted
+in C<$count>.
+
+=head1 CURSORS
+
+A cursor is used whenever you want to access the contents of a database
+in sequential order.
+A cursor object is created with the C<db_cursor> method.
+
+A cursor object has the following methods available:
+
+=head2 $newcursor = $cursor->c_dup($flags)
+
+Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
+
+The C<$flags> parameter is optional and can take the following value:
+
+=over 5
+
+=item DB_POSITION
+
+When present this flag will position the new cursor at the same place as the
+existing cursor.
+
+=back
+
+=head2 $status = $cursor->c_get($key, $value, $flags)
+
+Reads a key/value pair from the database, returning the data in C<$key>
+and C<$value>. The key/value pair actually read is controlled by the
+C<$flags> parameter, which can take B<one> of the following values:
+
+=over 5
+
+=item B<DB_FIRST>
+
+Set the cursor to point to the first key/value pair in the
+database. Return the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_LAST>
+
+Set the cursor to point to the last key/value pair in the database. Return
+the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_NEXT>
+
+If the cursor is already pointing to a key/value pair, it will be
+incremented to point to the next key/value pair and return its contents.
+
+If the cursor isn't initialised, B<DB_NEXT> works just like B<DB_FIRST>.
+
+If the cursor is already positioned at the last key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_NEXT_DUP>
+
+This flag is only valid when duplicate keys have been enabled in
+a database.
+If the cursor is already pointing to a key/value pair and the key of
+the next key/value pair is identical, the cursor will be incremented to
+point to it and their contents returned.
+
+=item B<DB_PREV>
+
+If the cursor is already pointing to a key/value pair, it will be
+decremented to point to the previous key/value pair and return its
+contents.
+
+If the cursor isn't initialised, B<DB_PREV> works just like B<DB_LAST>.
+
+If the cursor is already positioned at the first key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_CURRENT>
+
+If the cursor has been set to point to a key/value pair, return their
+contents.
+If the key/value pair referenced by the cursor has been deleted, B<c_get>
+will return B<DB_KEYEMPTY>.
+
+=item B<DB_SET>
+
+Set the cursor to point to the key/value pair referenced by B<$key>
+and return the value in B<$value>.
+
+=item B<DB_SET_RANGE>
+
+This flag is a variation on the B<DB_SET> flag. As well as returning
+the value, it also returns the key, via B<$key>.
+When used with a B<BerkeleyDB::Btree> database the key matched by B<c_get>
+will be the shortest key (in length) which is greater than or equal to
+the key supplied, via B<$key>. This allows partial key searches.
+See ??? for an example of how to use this flag.
+
+=item B<DB_GET_BOTH>
+
+Another variation on B<DB_SET>. This one returns both the key and
+the value.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=item B<DB_GET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 $status = $cursor->c_put($key, $value, $flags)
+
+Stores the key/value pair in the database. The position that the data is
+stored in the database is controlled by the C<$flags> parameter, which
+must take B<one> of the following values:
+
+=over 5
+
+=item B<DB_AFTER>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately after the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+
+=item B<DB_BEFORE>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately before the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+=item B<DB_CURRENT>
+
+If the cursor has been initialised, replace the value of the key/value
+pair stored in the database with the contents of B<$value>.
+
+=item B<DB_KEYFIRST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the first entry in
+the duplicates for the particular key.
+
+=item B<DB_KEYLAST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the last entry in
+the duplicates for the particular key.
+
+=back
+
+=head2 $status = $cursor->c_del([$flags])
+
+This method deletes the key/value pair associated with the current cursor
+position. The cursor position will not be changed by this operation, so
+any subsequent cursor operation must first initialise the cursor to
+point to a valid key/value pair.
+
+If the key/value pair associated with the cursor have already been
+deleted, B<c_del> will return B<DB_KEYEMPTY>.
+
+The B<$flags> parameter is not used at present.
+
+=head2 $status = $cursor->c_count($cnt [, $flags])
+
+Stores the number of duplicates at the current cursor position in B<$cnt>.
+
+The B<$flags> parameter is not used at present. This method needs
+Berkeley DB 3.1 or better.
+
+=head2 $status = $cursor->status()
+
+Returns the status of the last cursor method as a dual type.
+
+=head2 Cursor Examples
+
+TODO
+
+Iterating from first to last, then in reverse.
+
+examples of each of the flags.
+
+=head1 JOIN
+
+Join support for BerkeleyDB is in progress. Watch this space.
+
+TODO
+
+=head1 TRANSACTIONS
+
+TODO.
+
+=head1 DBM Filters
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a DBM
+database. All of the database classes (BerkeleyDB::Hash,
+BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
+
+There are four methods associated with DBM Filters. All work
+identically, and each is used to install (or uninstall) a single DBM
+Filter. Each expects a single parameter, namely a reference to a sub.
+The only difference between them is the place that the filter is
+installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database that you need
+to share with a third-party C application. The C application assumes
+that I<all> keys and values are NULL terminated. Unfortunately when
+Perl writes to DBM databases it doesn't use NULL termination, so your
+Perl application will have to manage NULL termination itself. When you
+write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL terminations issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to all keys and values whenever you write to
+the database and have them removed when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+## nullFilter
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+## intFilter
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 Using BerkeleyDB with MLDBM
+
+Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
+module. The code fragment below shows how to associate MLDBM with
+BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
+BerkeleyDB::Btree with BerkeleyDB::Hash.
+
+ use strict ;
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = 'testmldbm' ;
+ my %o ;
+
+ unlink $filename ;
+ tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open database '$filename': $!\n";
+
+See the MLDBM documentation for information on how to use the module
+and for details of its limitations.
+
+=head1 EXAMPLES
+
+TODO.
+
+=head1 HINTS & TIPS
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings
+are not. See L<An Example -- the NULL termination problem.> in the DBM
+FILTERS section for a generic way to work around this problem.
+
+
+=head2 The untie Gotcha
+
+TODO
+
+=head1 COMMON QUESTIONS
+
+This section attempts to answer some of the more common questions that
+I get asked.
+
+
+=head2 Relationship with DB_File
+
+Before Berkeley DB 2.x was written there was only one Perl module that
+interfaced to Berkeley DB. That module is called B<DB_File>. Although
+B<DB_File> can be built with Berkeley DB 1.x, 2.x, 3.x or 4.x, it only provides
+an interface to the functionality available in Berkeley DB 1.x. That
+means that it doesn't support transactions, locking or any of the other
+new features available in DB 2.x or better.
+
+=head2 How do I store Perl data structures with BerkeleyDB?
+
+See L<Using BerkeleyDB with MLDBM>.
+
+=head1 HISTORY
+
+See the Changes file.
+
+=head1 AVAILABILITY
+
+The most recent version of B<BerkeleyDB> can always be found
+on CPAN (see L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/BerkeleyDB>.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-2002 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<BerkeleyDB> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of Berkeley
+ DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of BerkeleyDB.
+See L<"AUTHOR"> for details.
+
+
+=head1 AUTHOR
+
+Paul Marquess E<lt>Paul.Marquess@btinternet.comE<gt>.
+
+Questions about Berkeley DB may be addressed to E<lt>db@sleepycat.comE<gt>.
+
+=head1 SEE ALSO
+
+perl(1), DB_File, Berkeley DB.
+
+=cut
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs
new file mode 100644
index 00000000000..531b38a655f
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs
@@ -0,0 +1,3643 @@
+/*
+
+ BerkeleyDB.xs -- Perl 5 interface to Berkeley DB version 2 & 3
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1997-2002 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Please refer to the COPYRIGHT section in
+
+ Changes:
+ 0.01 - First Alpha Release
+ 0.02 -
+
+*/
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PERL_POLLUTE
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+#include "ppport.h"
+
+
+/* XSUB.h defines a macro called abort */
+/* This clashes with the txn abort method in Berkeley DB 4.x */
+/* This is a problem with ActivePerl (at least) */
+
+#ifdef _WIN32
+# ifdef abort
+# undef abort
+# endif
+# ifdef fopen
+# undef fopen
+# endif
+# ifdef fclose
+# undef fclose
+# endif
+#endif
+
+/* Since we are building against Berkeley DB, we prefer the <sys/cdefs.h>
+ * __attribute__ (which will shortly be #included by <db.h>) to any
+ * previously defined __attribute__, for example from GNUC or Perl. */
+
+#undef __attribute__
+
+#ifdef USE_PERLIO
+# define GetFILEptr(sv) PerlIO_findFILE(IoOFP(sv_2io(sv)))
+#else
+# define GetFILEptr(sv) IoOFP(sv_2io(sv))
+#endif
+
+#include <db.h>
+
+/* Check the version of Berkeley DB */
+
+#ifndef DB_VERSION_MAJOR
+#ifdef HASHMAGIC
+#error db.h is from Berkeley DB 1.x - need at least Berkeley DB 2.6.4
+#else
+#error db.h is not for Berkeley DB at all.
+#endif
+#endif
+
+#if (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6) ||\
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 6 && DB_VERSION_PATCH < 4)
+# error db.h is from Berkeley DB 2.0-2.5 - need at least Berkeley DB 2.6.4
+#endif
+
+
+#if (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0)
+# define IS_DB_3_0_x
+#endif
+
+#if DB_VERSION_MAJOR >= 3
+# define AT_LEAST_DB_3
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_3_1
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
+# define AT_LEAST_DB_3_2
+#endif
+
+#if DB_VERSION_MAJOR > 3 || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 2) ||\
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 2 && DB_VERSION_PATCH >= 6)
+# define AT_LEAST_DB_3_2_6
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 3)
+# define AT_LEAST_DB_3_3
+#endif
+
+#if DB_VERSION_MAJOR >= 4
+# define AT_LEAST_DB_4
+#endif
+
+#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_4_1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#define DBM_FILTERING
+#define STRICT_CLOSE
+/* #define ALLOW_RECNO_OFFSET */
+/* #define TRACE */
+
+#if DB_VERSION_MAJOR == 2 && ! defined(DB_LOCK_DEADLOCK)
+# define DB_LOCK_DEADLOCK EAGAIN
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#if DB_VERSION_MAJOR == 2
+# define DB_QUEUE 4
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#ifdef AT_LEAST_DB_3_2
+# define DB_callback DB * db,
+#else
+# define DB_callback
+#endif
+
+#if DB_VERSION_MAJOR > 2
+typedef struct {
+ int db_lorder;
+ size_t db_cachesize;
+ size_t db_pagesize;
+
+
+ void *(*db_malloc) __P((size_t));
+ int (*dup_compare)
+ __P((DB_callback const DBT *, const DBT *));
+
+ u_int32_t bt_maxkey;
+ u_int32_t bt_minkey;
+ int (*bt_compare)
+ __P((DB_callback const DBT *, const DBT *));
+ size_t (*bt_prefix)
+ __P((DB_callback const DBT *, const DBT *));
+
+ u_int32_t h_ffactor;
+ u_int32_t h_nelem;
+ u_int32_t (*h_hash)
+ __P((DB_callback const void *, u_int32_t));
+
+ int re_pad;
+ int re_delim;
+ u_int32_t re_len;
+ char *re_source;
+
+#define DB_DELIMITER 0x0001
+#define DB_FIXEDLEN 0x0008
+#define DB_PAD 0x0010
+ u_int32_t flags;
+ u_int32_t q_extentsize;
+} DB_INFO ;
+
+#endif /* DB_VERSION_MAJOR > 2 */
+
+typedef struct {
+ int Status ;
+ /* char ErrBuff[1000] ; */
+ SV * ErrPrefix ;
+ FILE * ErrHandle ;
+ DB_ENV * Env ;
+ int open_dbs ;
+ int TxnMgrStatus ;
+ int active ;
+ bool txn_enabled ;
+ } BerkeleyDB_ENV_type ;
+
+
+typedef struct {
+ DBTYPE type ;
+ bool recno_or_queue ;
+ char * filename ;
+ BerkeleyDB_ENV_type * parent_env ;
+ DB * dbp ;
+ SV * compare ;
+ bool in_compare ;
+ SV * dup_compare ;
+ bool in_dup_compare ;
+ SV * prefix ;
+ bool in_prefix ;
+ SV * hash ;
+ bool in_hash ;
+#ifdef AT_LEAST_DB_3_3
+ SV * associated ;
+ bool secondary_db ;
+#endif
+ int Status ;
+ DB_INFO * info ;
+ DBC * cursor ;
+ DB_TXN * txn ;
+ int open_cursors ;
+ u_int32_t partial ;
+ u_int32_t dlen ;
+ u_int32_t doff ;
+ int active ;
+#ifdef ALLOW_RECNO_OFFSET
+ int array_base ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif
+ } BerkeleyDB_type;
+
+
+typedef struct {
+ DBTYPE type ;
+ bool recno_or_queue ;
+ char * filename ;
+ DB * dbp ;
+ SV * compare ;
+ SV * dup_compare ;
+ SV * prefix ;
+ SV * hash ;
+#ifdef AT_LEAST_DB_3_3
+ SV * associated ;
+ bool secondary_db ;
+#endif
+ int Status ;
+ DB_INFO * info ;
+ DBC * cursor ;
+ DB_TXN * txn ;
+ BerkeleyDB_type * parent_db ;
+ u_int32_t partial ;
+ u_int32_t dlen ;
+ u_int32_t doff ;
+ int active ;
+#ifdef ALLOW_RECNO_OFFSET
+ int array_base ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif
+ } BerkeleyDB_Cursor_type;
+
+typedef struct {
+ BerkeleyDB_ENV_type * env ;
+ } BerkeleyDB_TxnMgr_type ;
+
+#if 1
+typedef struct {
+ int Status ;
+ DB_TXN * txn ;
+ int active ;
+ } BerkeleyDB_Txn_type ;
+#else
+typedef DB_TXN BerkeleyDB_Txn_type ;
+#endif
+
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env ;
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env__Raw ;
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env__Inner ;
+typedef BerkeleyDB_type * BerkeleyDB ;
+typedef void * BerkeleyDB__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Common ;
+typedef BerkeleyDB_type * BerkeleyDB__Common__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Common__Inner ;
+typedef BerkeleyDB_type * BerkeleyDB__Hash ;
+typedef BerkeleyDB_type * BerkeleyDB__Hash__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Btree ;
+typedef BerkeleyDB_type * BerkeleyDB__Btree__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Recno ;
+typedef BerkeleyDB_type * BerkeleyDB__Recno__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Queue ;
+typedef BerkeleyDB_type * BerkeleyDB__Queue__Raw ;
+typedef BerkeleyDB_Cursor_type BerkeleyDB__Cursor_type ;
+typedef BerkeleyDB_Cursor_type * BerkeleyDB__Cursor ;
+typedef BerkeleyDB_Cursor_type * BerkeleyDB__Cursor__Raw ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Raw ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Inner ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn__Raw ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn__Inner ;
+#if 0
+typedef DB_LOG * BerkeleyDB__Log ;
+typedef DB_LOCKTAB * BerkeleyDB__Lock ;
+#endif
+typedef DBT DBTKEY ;
+typedef DBT DBT_OPT ;
+typedef DBT DBT_B ;
+typedef DBT DBTKEY_B ;
+typedef DBT DBTVALUE ;
+typedef void * PV_or_NULL ;
+typedef PerlIO * IO_or_NULL ;
+typedef int DualType ;
+
+static void
+hash_delete(char * hash, char * key);
+
+#ifdef TRACE
+# define Trace(x) printf x
+#else
+# define Trace(x)
+#endif
+
+#ifdef ALLOW_RECNO_OFFSET
+# define RECNO_BASE db->array_base
+#else
+# define RECNO_BASE 1
+#endif
+
+#if DB_VERSION_MAJOR == 2
+# define flagSet_DB2(i, f) i |= f
+#else
+# define flagSet_DB2(i, f)
+#endif
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define flagSet(bitmask) (flags & (bitmask))
+#else
+# define flagSet(bitmask) ((flags & DB_OPFLAGS_MASK) == (bitmask))
+#endif
+
+#if DB_VERSION_MAJOR == 2
+# define BackRef internal
+#else
+# if DB_VERSION_MAJOR == 3 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0)
+# define BackRef cj_internal
+# else
+# define BackRef api_internal
+# endif
+#endif
+
+#define ERR_BUFF "BerkeleyDB::Error"
+
+#define ZMALLOC(to, typ) ((to = (typ *)safemalloc(sizeof(typ))), \
+ Zero(to,1,typ))
+
+#define DBT_clear(x) Zero(&x, 1, DBT) ;
+
+#if 1
+#define getInnerObject(x) (*av_fetch((AV*)SvRV(x), 0, FALSE))
+#else
+#define getInnerObject(x) ((SV*)SvRV(sv))
+#endif
+
+#define my_sv_setpvn(sv, d, s) (s ? sv_setpvn(sv, d, s) : sv_setpv(sv, "") )
+
+#define SetValue_iv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = SvIV(sv)
+#define SetValue_io(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = GetFILEptr(sv)
+#define SetValue_sv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = sv
+#define SetValue_pv(i, k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = (t)SvPV(sv,PL_na)
+#define SetValue_pvx(i, k, t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = (t)SvPVX(sv)
+#define SetValue_ov(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ IV tmp = SvIV(getInnerObject(sv)) ; \
+ i = INT2PTR(t, tmp) ; \
+ }
+
+#define SetValue_ovx(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ HV * hv = (HV *)GetInternalObject(sv); \
+ SV ** svp = hv_fetch(hv, "db", 2, FALSE);\
+ IV tmp = SvIV(*svp); \
+ i = INT2PTR(t, tmp) ; \
+ }
+
+#define SetValue_ovX(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ IV tmp = SvIV(GetInternalObject(sv));\
+ i = INT2PTR(t, tmp) ; \
+ }
+
+#define LastDBerror DB_RUNRECOVERY
+
+#define setDUALerrno(var, err) \
+ sv_setnv(var, (double)err) ; \
+ sv_setpv(var, ((err) ? db_strerror(err) : "")) ;\
+ SvNOK_on(var);
+
+/* OutputValue: on success (RETVAL == 0) copy a data DBT into SV "arg"
+ * and run the fetch-value DBM filter, if any. */
+#define OutputValue(arg, name) \
+        { if (RETVAL == 0) { \
+              my_sv_setpvn(arg, name.data, name.size) ; \
+              DBM_ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+          } \
+        }
+
+/* OutputValue_B: as OutputValue, but when a btree was queried with
+ * DB_GET_RECNO the DBT holds a record number, returned as an integer
+ * (adjusted by RECNO_BASE so perl sees 0-based indices). */
+#define OutputValue_B(arg, name) \
+        { if (RETVAL == 0) { \
+              if (db->type == DB_BTREE && \
+                  flagSet(DB_GET_RECNO)){ \
+                  sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
+              } \
+              else { \
+                  my_sv_setpvn(arg, name.data, name.size) ; \
+              } \
+              DBM_ckFilter(arg, filter_fetch_value, "filter_fetch_value"); \
+          } \
+        }
+
+/* OutputKey: on success copy a key DBT into "arg"; recno/queue keys are
+ * record numbers and are returned as integers. */
+#define OutputKey(arg, name) \
+        { if (RETVAL == 0) \
+          { \
+              if (!db->recno_or_queue) { \
+                  my_sv_setpvn(arg, name.data, name.size); \
+              } \
+              else \
+                  sv_setiv(arg, (I32)*(I32*)name.data - RECNO_BASE); \
+              DBM_ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+          } \
+        }
+
+/* OutputKey_B: key variant that also handles btree DB_GET_RECNO. */
+#define OutputKey_B(arg, name) \
+        { if (RETVAL == 0) \
+          { \
+              if (db->recno_or_queue || \
+                  (db->type == DB_BTREE && \
+                   flagSet(DB_GET_RECNO))){ \
+                  sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
+              } \
+              else { \
+                  my_sv_setpvn(arg, name.data, name.size); \
+              } \
+              DBM_ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+          } \
+        }
+
+/* Propagate the handle's partial-record settings into a DBT. */
+#define SetPartial(data,db) \
+        data.flags = db->partial ; \
+        data.dlen = db->dlen ; \
+        data.doff = db->doff ;
+
+/* Die (via softCrash) if a wrapper object has already been closed. */
+#define ckActive(active, type) \
+    { \
+        if (!active) \
+            softCrash("%s is already closed", type) ; \
+    }
+
+#define ckActive_Environment(a) ckActive(a, "Environment")
+#define ckActive_TxnMgr(a) ckActive(a, "Transaction Manager")
+#define ckActive_Transaction(a) ckActive(a, "Transaction")
+#define ckActive_Database(a) ckActive(a, "Database")
+#define ckActive_Cursor(a) ckActive(a, "Cursor")
+
+/* Internal Global Data */
+static db_recno_t Value ;
+static db_recno_t zero = 0 ;
+/* CurrentDB: the handle whose perl callbacks (compare/prefix/hash) are in
+ * effect.  File-scope mutable state — NOTE(review): not thread-safe;
+ * presumably acceptable because perl callbacks run single-threaded. */
+static BerkeleyDB CurrentDB ;
+
+static DBTKEY empty ;
+#if 0
+static char ErrBuff[1000] ;
+#endif
+
+#ifdef AT_LEAST_DB_3_3
+# if PERL_REVISION == 5 && PERL_VERSION <= 4
+
+/* saferealloc in perl5.004 will croak if it is given a NULL pointer*/
+/* Wrapper that gives saferealloc the C realloc() NULL-pointer semantics
+ * Berkeley DB's set_alloc() hook expects. */
+void *
+MyRealloc(void * ptr, size_t size)
+{
+    if (ptr == NULL )
+        return safemalloc(size) ;
+    else
+        return saferealloc(ptr, size) ;
+}
+
+# else
+# define MyRealloc saferealloc
+# endif
+#endif
+
+/* Duplicate a NUL-terminated string into memory obtained from
+ * safemalloc().  Returns NULL when given NULL; otherwise the caller owns
+ * the copy and must release it with Safefree(). */
+static char *
+my_strdup(const char *s)
+{
+    if (s == NULL)
+        return NULL ;
+
+    {
+        /* Allocate strlen(s) + 1 bytes and copy the terminator too.  The
+         * previous code sized and copied only strlen(s) bytes, yielding an
+         * unterminated copy and a one-byte overread in every later use. */
+        MEM_SIZE l = strlen(s) + 1;
+        char *s1 = (char *)safemalloc(l);
+
+        Copy(s, s1, (MEM_SIZE)l, char);
+        return s1;
+    }
+}
+
+#if DB_VERSION_MAJOR == 2
+/* Berkeley DB 2.x has no db_strerror(); provide an equivalent that maps
+ * positive values through Strerror() and the DB_* negatives to fixed
+ * message strings.  Returns "" for 0. */
+static char *
+db_strerror(int err)
+{
+    if (err == 0)
+        return "" ;
+
+    if (err > 0)
+        return Strerror(err) ;
+
+    switch (err) {
+    case DB_INCOMPLETE:
+        return ("DB_INCOMPLETE: Sync was unable to complete");
+    case DB_KEYEMPTY:
+        return ("DB_KEYEMPTY: Non-existent key/data pair");
+    case DB_KEYEXIST:
+        return ("DB_KEYEXIST: Key/data pair already exists");
+    case DB_LOCK_DEADLOCK:
+        return (
+            "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock");
+    case DB_LOCK_NOTGRANTED:
+        return ("DB_LOCK_NOTGRANTED: Lock not granted");
+    case DB_LOCK_NOTHELD:
+        return ("DB_LOCK_NOTHELD: Lock not held by locker");
+    case DB_NOTFOUND:
+        return ("DB_NOTFOUND: No matching key/data pair found");
+    case DB_RUNRECOVERY:
+        return ("DB_RUNRECOVERY: Fatal error, run database recovery");
+    default:
+        return "Unknown Error" ;
+
+    }
+}
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#ifdef TRACE
+#if DB_VERSION_MAJOR > 2
+/* Debug-build helper: format "err: message" and append the contents of
+ * $BerkeleyDB::Error when set.  Returns a static buffer, so each call
+ * overwrites the previous result.  NOTE(review): the strcat of the perl
+ * error scalar is unbounded against the 1000-byte buffer — acceptable
+ * only because this is TRACE-build-only diagnostics. */
+static char *
+my_db_strerror(int err)
+{
+    static char buffer[1000] ;
+    SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+    sprintf(buffer, "%d: %s", err, db_strerror(err)) ;
+    if (err && sv) {
+        strcat(buffer, ", ") ;
+        strcat(buffer, SvPVX(sv)) ;
+    }
+    return buffer;
+}
+#endif
+#endif
+
+/* Interpreter-shutdown sweep: walk the BerkeleyDB::Term::* registries and
+ * release every live Berkeley DB resource in dependency order —
+ * transactions (aborted), then cursors, then databases, then
+ * environments.  Each wrapper is marked inactive whether or not it was
+ * still open, so later destructors become no-ops. */
+static void
+close_everything(void)
+{
+    dTHR;
+    Trace(("close_everything\n")) ;
+    /* Abort All Transactions */
+    {
+        BerkeleyDB__Txn__Raw tid ;
+        HE * he ;
+        I32 len ;
+        HV * hv = perl_get_hv("BerkeleyDB::Term::Txn", TRUE);
+        int all = 0 ;
+        int closed = 0 ;
+        (void)hv_iterinit(hv) ;
+        Trace(("BerkeleyDB::Term::close_all_txns dirty=%d\n", PL_dirty)) ;
+        while ( (he = hv_iternext(hv)) ) {
+            /* The hash key bytes are the wrapper pointer itself. */
+            tid = * (BerkeleyDB__Txn__Raw *) hv_iterkey(he, &len) ;
+            Trace((" Aborting Transaction [%d] in [%d] Active [%d]\n", tid->txn, tid, tid->active));
+            if (tid->active) {
+#ifdef AT_LEAST_DB_4
+                tid->txn->abort(tid->txn) ;
+#else
+                txn_abort(tid->txn);
+#endif
+                ++ closed ;
+            }
+            tid->active = FALSE ;
+            ++ all ;
+        }
+        Trace(("End of BerkeleyDB::Term::close_all_txns aborted %d of %d transactios\n",closed, all)) ;
+    }
+
+    /* Close All Cursors */
+    {
+        BerkeleyDB__Cursor db ;
+        HE * he ;
+        I32 len ;
+        HV * hv = perl_get_hv("BerkeleyDB::Term::Cursor", TRUE);
+        int all = 0 ;
+        int closed = 0 ;
+        (void) hv_iterinit(hv) ;
+        Trace(("BerkeleyDB::Term::close_all_cursors \n")) ;
+        while ( (he = hv_iternext(hv)) ) {
+            db = * (BerkeleyDB__Cursor*) hv_iterkey(he, &len) ;
+            Trace((" Closing Cursor [%d] in [%d] Active [%d]\n", db->cursor, db, db->active));
+            if (db->active) {
+                ((db->cursor)->c_close)(db->cursor) ;
+                ++ closed ;
+            }
+            db->active = FALSE ;
+            ++ all ;
+        }
+        Trace(("End of BerkeleyDB::Term::close_all_cursors closed %d of %d cursors\n",closed, all)) ;
+    }
+
+    /* Close All Databases */
+    {
+        BerkeleyDB db ;
+        HE * he ;
+        I32 len ;
+        HV * hv = perl_get_hv("BerkeleyDB::Term::Db", TRUE);
+        int all = 0 ;
+        int closed = 0 ;
+        (void)hv_iterinit(hv) ;
+        Trace(("BerkeleyDB::Term::close_all_dbs\n" )) ;
+        while ( (he = hv_iternext(hv)) ) {
+            db = * (BerkeleyDB*) hv_iterkey(he, &len) ;
+            Trace((" Closing Database [%d] in [%d] Active [%d]\n", db->dbp, db, db->active));
+            if (db->active) {
+                (db->dbp->close)(db->dbp, 0) ;
+                ++ closed ;
+            }
+            db->active = FALSE ;
+            ++ all ;
+        }
+        Trace(("End of BerkeleyDB::Term::close_all_dbs closed %d of %d dbs\n",closed, all)) ;
+    }
+
+    /* Close All Environments */
+    {
+        BerkeleyDB__Env env ;
+        HE * he ;
+        I32 len ;
+        HV * hv = perl_get_hv("BerkeleyDB::Term::Env", TRUE);
+        int all = 0 ;
+        int closed = 0 ;
+        (void)hv_iterinit(hv) ;
+        Trace(("BerkeleyDB::Term::close_all_envs\n")) ;
+        while ( (he = hv_iternext(hv)) ) {
+            env = * (BerkeleyDB__Env*) hv_iterkey(he, &len) ;
+            Trace((" Closing Environment [%d] in [%d] Active [%d]\n", env->Env, env, env->active));
+            if (env->active) {
+#if DB_VERSION_MAJOR == 2
+                db_appexit(env->Env) ;
+#else
+                (env->Env->close)(env->Env, 0) ;
+#endif
+                ++ closed ;
+            }
+            env->active = FALSE ;
+            ++ all ;
+        }
+        Trace(("End of BerkeleyDB::Term::close_all_envs closed %d of %d dbs\n",closed, all)) ;
+    }
+
+    Trace(("end close_everything\n")) ;
+
+}
+
+/* Destructor for a database wrapper: close the underlying DB (skipped
+ * during global destruction, when Berkeley DB structures may already be
+ * gone), drop every callback/filter SV reference the wrapper holds,
+ * deregister it from BerkeleyDB::Term::Db, and free the struct. */
+static void
+destroyDB(BerkeleyDB db)
+{
+    dTHR;
+    if (! PL_dirty && db->active) {
+        /* NOTE(review): open_cursors is decremented here before closing
+         * the DB — looks like bookkeeping for the close; confirm the
+         * intended invariant against the struct's other users. */
+        -- db->open_cursors ;
+        ((db->dbp)->close)(db->dbp, 0) ;
+    }
+    if (db->hash)
+        SvREFCNT_dec(db->hash) ;
+    if (db->compare)
+        SvREFCNT_dec(db->compare) ;
+    if (db->dup_compare)
+        SvREFCNT_dec(db->dup_compare) ;
+#ifdef AT_LEAST_DB_3_3
+    if (db->associated && !db->secondary_db)
+        SvREFCNT_dec(db->associated) ;
+#endif
+    if (db->prefix)
+        SvREFCNT_dec(db->prefix) ;
+#ifdef DBM_FILTERING
+    if (db->filter_fetch_key)
+        SvREFCNT_dec(db->filter_fetch_key) ;
+    if (db->filter_store_key)
+        SvREFCNT_dec(db->filter_store_key) ;
+    if (db->filter_fetch_value)
+        SvREFCNT_dec(db->filter_fetch_value) ;
+    if (db->filter_store_value)
+        SvREFCNT_dec(db->filter_store_value) ;
+#endif
+    hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
+    if (db->filename)
+        Safefree(db->filename) ;
+    Safefree(db) ;
+}
+
+/* Raise a perl exception with a "BerkeleyDB Aborting: " banner prepended
+ * to the printf-style message.  Declared to return int so it can be used
+ * in expression contexts, but croak() never returns. */
+static int
+softCrash(const char *pat, ...)
+{
+    char buffer1 [500] ;
+    char buffer2 [500] ;
+    va_list args;
+    va_start(args, pat);
+
+    Trace(("softCrash: %s\n", pat)) ;
+
+#define ABORT_PREFIX "BerkeleyDB Aborting: "
+
+    /* Build "<banner><pat>" as the format string, then expand the
+     * caller's arguments.  Both steps are bounded now: the previous
+     * strcat/vsprintf combination could overflow these fixed 500-byte
+     * stack buffers on a long message. */
+    snprintf(buffer1, sizeof(buffer1), "%s%s", ABORT_PREFIX, pat) ;
+    vsnprintf(buffer2, sizeof(buffer2), buffer1, args) ;
+
+    /* Clean up the va_list BEFORE croak(), which longjmps back into the
+     * interpreter — the old va_end after croak() was unreachable. */
+    va_end(args);
+
+    croak(buffer2);
+
+    /* NOTREACHED */
+    return 1 ;
+}
+
+
+/* Return the record number of the last entry in a recno/queue database —
+ * i.e. the perl array length — by positioning a cursor at DB_LAST.
+ * Returns 0 for an empty database or when no cursor could be created. */
+static I32
+GetArrayLength(BerkeleyDB db)
+{
+    DBT key ;
+    DBT value ;
+    int RETVAL = 0 ;
+    DBC * cursor ;
+
+    DBT_clear(key) ;
+    DBT_clear(value) ;
+    /* The cursor() method lost/gained a flags argument across versions. */
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+    if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor) == 0 )
+#else
+    if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor, 0) == 0 )
+#endif
+    {
+        RETVAL = cursor->c_get(cursor, &key, &value, DB_LAST) ;
+        /* On success RETVAL is recycled to hold the recno key itself. */
+        if (RETVAL == 0)
+            RETVAL = *(I32 *)key.data ;
+        else /* No key means empty file */
+            RETVAL = 0 ;
+        cursor->c_close(cursor) ;
+    }
+
+    Trace(("GetArrayLength got %d\n", RETVAL)) ;
+    return ((I32)RETVAL) ;
+}
+
+/* GetRecnoKey: map a perl-side index to a Berkeley DB record number.
+ * Two richer implementations (negative-index support, ALLOW_RECNO_OFFSET
+ * range checking) are kept below under "#if 0" for reference; the live
+ * definition is the trivial "+ RECNO_BASE" macro at the end. */
+#if 0
+
+#define GetRecnoKey(db, value) _GetRecnoKey(db, value)
+
+static db_recno_t
+_GetRecnoKey(BerkeleyDB db, I32 value)
+{
+    Trace(("GetRecnoKey start value = %d\n", value)) ;
+    if (db->recno_or_queue && value < 0) {
+        /* Get the length of the array */
+        I32 length = GetArrayLength(db) ;
+
+        /* check for attempt to write before start of array */
+        if (length + value + RECNO_BASE <= 0)
+            softCrash("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
+
+        value = length + value + RECNO_BASE ;
+    }
+    else
+        ++ value ;
+
+    Trace(("GetRecnoKey end value = %d\n", value)) ;
+
+    return value ;
+}
+
+#else /* ! 0 */
+
+#if 0
+#ifdef ALLOW_RECNO_OFFSET
+#define GetRecnoKey(db, value) _GetRecnoKey(db, value)
+
+static db_recno_t
+_GetRecnoKey(BerkeleyDB db, I32 value)
+{
+    if (value + RECNO_BASE < 1)
+        softCrash("key value %d < base (%d)", (value), RECNO_BASE?0:1) ;
+    return value + RECNO_BASE ;
+}
+
+#else
+#endif /* ALLOW_RECNO_OFFSET */
+#endif /* 0 */
+
+/* Live definition: offset the index by RECNO_BASE, no bounds checking. */
+#define GetRecnoKey(db, value) ((value) + RECNO_BASE )
+
+#endif /* 0 */
+
+/* GetInternalObject (currently compiled out): recover the inner object
+ * behind a tied hash/array reference by following its 'P'/'q' magic. */
+#if 0
+static SV *
+GetInternalObject(SV * sv)
+{
+    SV * info = (SV*) NULL ;
+    SV * s ;
+    MAGIC * mg ;
+
+    Trace(("in GetInternalObject %d\n", sv)) ;
+    if (sv == NULL || !SvROK(sv))
+        return NULL ;
+
+    s = SvRV(sv) ;
+    if (SvMAGICAL(s))
+    {
+        if (SvTYPE(s) == SVt_PVHV || SvTYPE(s) == SVt_PVAV)
+            mg = mg_find(s, 'P') ;
+        else
+            mg = mg_find(s, 'q') ;
+
+        /* all this testing is probably overkill, but till I know more
+           about global destruction it stays.
+        */
+        /* if (mg && mg->mg_obj && SvRV(mg->mg_obj) && SvPVX(SvRV(mg->mg_obj))) */
+        if (mg && mg->mg_obj && SvRV(mg->mg_obj) )
+            info = SvRV(mg->mg_obj) ;
+        else
+            info = s ;
+    }
+
+    Trace(("end of GetInternalObject %d\n", info)) ;
+    return info ;
+}
+#endif
+
+/* Berkeley DB btree comparison callback: pushes the two keys onto the
+ * perl stack and calls the user's compare sub registered on CurrentDB.
+ * NOTE(review): unlike dup_compare below there is no NULL check on
+ * CurrentDB here — presumably btree compares can only fire while a
+ * CurrentDB is set; confirm before relying on it. */
+static int
+btree_compare(DB_callback const DBT * key1, const DBT * key2 )
+{
+    dSP ;
+    char * data1, * data2 ;
+    int retval ;
+    int count ;
+    /* Preserve CurrentDB across the perl callback, which may itself
+     * perform database operations that reassign it. */
+    BerkeleyDB keepDB = CurrentDB ;
+
+    data1 = (char*) key1->data ;
+    data2 = (char*) key2->data ;
+
+#ifndef newSVpvn
+    /* As newSVpv will assume that the data pointer is a null terminated C
+       string if the size parameter is 0, make sure that data points to an
+       empty string if the length is 0
+    */
+    if (key1->size == 0)
+        data1 = "" ;
+    if (key2->size == 0)
+        data2 = "" ;
+#endif
+
+    ENTER ;
+    SAVETMPS;
+
+    PUSHMARK(SP) ;
+    EXTEND(SP,2) ;
+    PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+    PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+    PUTBACK ;
+
+    count = perl_call_sv(CurrentDB->compare, G_SCALAR);
+
+    SPAGAIN ;
+
+    if (count != 1)
+        softCrash ("in btree_compare - expected 1 return value from compare sub, got %d", count) ;
+
+    retval = POPi ;
+
+    PUTBACK ;
+    FREETMPS ;
+    LEAVE ;
+    CurrentDB = keepDB ;
+    return (retval) ;
+
+}
+
+/* Berkeley DB duplicate-comparison callback: like btree_compare, but for
+ * sorted-duplicate ordering, dispatching to CurrentDB->dup_compare.
+ * Crashes deliberately if no CurrentDB or no callback is registered. */
+static int
+dup_compare(DB_callback const DBT * key1, const DBT * key2 )
+{
+    dSP ;
+    char * data1, * data2 ;
+    int retval ;
+    int count ;
+    /* Preserve CurrentDB across the perl callback. */
+    BerkeleyDB keepDB = CurrentDB ;
+
+    Trace(("In dup_compare \n")) ;
+    if (!CurrentDB)
+        softCrash("Internal Error - No CurrentDB in dup_compare") ;
+    if (CurrentDB->dup_compare == NULL)
+        softCrash("in dup_compare: no callback specified for database '%s'", CurrentDB->filename) ;
+
+    data1 = (char*) key1->data ;
+    data2 = (char*) key2->data ;
+
+#ifndef newSVpvn
+    /* As newSVpv will assume that the data pointer is a null terminated C
+       string if the size parameter is 0, make sure that data points to an
+       empty string if the length is 0
+    */
+    if (key1->size == 0)
+        data1 = "" ;
+    if (key2->size == 0)
+        data2 = "" ;
+#endif
+
+    ENTER ;
+    SAVETMPS;
+
+    PUSHMARK(SP) ;
+    EXTEND(SP,2) ;
+    PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+    PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+    PUTBACK ;
+
+    count = perl_call_sv(CurrentDB->dup_compare, G_SCALAR);
+
+    SPAGAIN ;
+
+    if (count != 1)
+        softCrash ("dup_compare: expected 1 return value from compare sub, got %d", count) ;
+
+    retval = POPi ;
+
+    PUTBACK ;
+    FREETMPS ;
+    LEAVE ;
+    CurrentDB = keepDB ;
+    return (retval) ;
+
+}
+
+/* Berkeley DB btree prefix callback: calls the user's prefix sub on the
+ * two keys and returns its integer result (the number of bytes needed to
+ * distinguish them, per the set_bt_prefix contract). */
+static size_t
+btree_prefix(DB_callback const DBT * key1, const DBT * key2 )
+{
+    dSP ;
+    char * data1, * data2 ;
+    int retval ;
+    int count ;
+    /* Preserve CurrentDB across the perl callback. */
+    BerkeleyDB keepDB = CurrentDB ;
+
+    data1 = (char*) key1->data ;
+    data2 = (char*) key2->data ;
+
+#ifndef newSVpvn
+    /* As newSVpv will assume that the data pointer is a null terminated C
+       string if the size parameter is 0, make sure that data points to an
+       empty string if the length is 0
+    */
+    if (key1->size == 0)
+        data1 = "" ;
+    if (key2->size == 0)
+        data2 = "" ;
+#endif
+
+    ENTER ;
+    SAVETMPS;
+
+    PUSHMARK(SP) ;
+    EXTEND(SP,2) ;
+    PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+    PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+    PUTBACK ;
+
+    count = perl_call_sv(CurrentDB->prefix, G_SCALAR);
+
+    SPAGAIN ;
+
+    if (count != 1)
+        softCrash ("btree_prefix: expected 1 return value from prefix sub, got %d", count) ;
+
+    retval = POPi ;
+
+    PUTBACK ;
+    FREETMPS ;
+    LEAVE ;
+    CurrentDB = keepDB ;
+
+    return (retval) ;
+}
+
+/* Berkeley DB hash-function callback: passes the raw bytes to the user's
+ * hash sub registered on CurrentDB and returns its integer result. */
+static u_int32_t
+hash_cb(DB_callback const void * data, u_int32_t size)
+{
+    dSP ;
+    int retval ;
+    int count ;
+    /* Preserve CurrentDB across the perl callback. */
+    BerkeleyDB keepDB = CurrentDB ;
+
+#ifndef newSVpvn
+    /* Old perls: newSVpv treats size==0 as "NUL-terminated string". */
+    if (size == 0)
+        data = "" ;
+#endif
+
+    ENTER ;
+    SAVETMPS;
+
+    PUSHMARK(SP) ;
+
+    XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
+    PUTBACK ;
+
+    count = perl_call_sv(CurrentDB->hash, G_SCALAR);
+
+    SPAGAIN ;
+
+    if (count != 1)
+        softCrash ("hash_cb: expected 1 return value from hash sub, got %d", count) ;
+
+    retval = POPi ;
+
+    PUTBACK ;
+    FREETMPS ;
+    LEAVE ;
+    CurrentDB = keepDB ;
+
+    return (retval) ;
+}
+
+#ifdef AT_LEAST_DB_3_3
+
+/* Secondary-index callback for DB->associate: calls the user's perl sub
+ * with (pkey, pdata, skey_sv) and copies whatever the sub left in
+ * skey_sv into *skey as the secondary key.  The "db" parameter arrives
+ * hidden inside the DB_callback macro; its BackRef member recovers the
+ * perl-side wrapper.  Returns the sub's integer result (0 to index,
+ * DB_DONOTINDEX to skip, per the associate contract). */
+static int
+associate_cb(DB_callback const DBT * pkey, const DBT * pdata, DBT * skey)
+{
+    dSP ;
+    char * pk_dat, * pd_dat, *sk_dat ;
+    int retval ;
+    int count ;
+    SV * skey_SV ;
+
+    Trace(("In associate_cb \n")) ;
+    if (((BerkeleyDB)db->BackRef)->associated == NULL){
+        Trace(("No Callback registered\n")) ;
+        return EINVAL ;
+    }
+
+    skey_SV = newSVpv("",0);
+
+
+    pk_dat = (char*) pkey->data ;
+    pd_dat = (char*) pdata->data ;
+
+#ifndef newSVpvn
+    /* As newSVpv will assume that the data pointer is a null terminated C
+       string if the size parameter is 0, make sure that data points to an
+       empty string if the length is 0
+    */
+    if (pkey->size == 0)
+        pk_dat = "" ;
+    if (pdata->size == 0)
+        pd_dat = "" ;
+#endif
+
+    ENTER ;
+    SAVETMPS;
+
+    PUSHMARK(SP) ;
+    EXTEND(SP,2) ;
+    PUSHs(sv_2mortal(newSVpvn(pk_dat,pkey->size)));
+    PUSHs(sv_2mortal(newSVpvn(pd_dat,pdata->size)));
+    PUSHs(sv_2mortal(skey_SV));
+    PUTBACK ;
+
+    Trace(("calling associated cb\n"));
+    count = perl_call_sv(((BerkeleyDB)db->BackRef)->associated, G_SCALAR);
+    Trace(("called associated cb\n"));
+
+    SPAGAIN ;
+
+    if (count != 1)
+        softCrash ("associate: expected 1 return value from prefix sub, got %d", count) ;
+
+    retval = POPi ;
+
+    PUTBACK ;
+
+    /* retrieve the secondary key */
+    /* DB_DBT_APPMALLOC tells Berkeley DB this buffer is ours to free. */
+    DBT_clear(*skey);
+    skey->flags = DB_DBT_APPMALLOC;
+    skey->size = SvCUR(skey_SV);
+    skey->data = (char*)safemalloc(skey->size);
+    memcpy(skey->data, SvPVX(skey_SV), skey->size);
+    Trace(("key is %d -- %.*s\n", skey->size, skey->size, skey->data));
+
+    FREETMPS ;
+    LEAVE ;
+
+    return (retval) ;
+}
+
+#endif /* AT_LEAST_DB_3_3 */
+
+/* Berkeley DB error-callback: copy the library's error text (with
+ * optional prefix) into the perl scalar $BerkeleyDB::Error so perl code
+ * can inspect it.  The "#if 0" section is a retired static-buffer
+ * implementation kept for reference; it compiles away entirely, so the
+ * declaration that follows it is still legal C89. */
+static void
+db_errcall_cb(const char * db_errpfx, char * buffer)
+{
+#if 0
+
+    if (db_errpfx == NULL)
+        db_errpfx = "" ;
+    if (buffer == NULL )
+        buffer = "" ;
+    ErrBuff[0] = '\0';
+    if (strlen(db_errpfx) + strlen(buffer) + 3 <= 1000) {
+        if (*db_errpfx != '\0') {
+            strcat(ErrBuff, db_errpfx) ;
+            strcat(ErrBuff, ": ") ;
+        }
+        strcat(ErrBuff, buffer) ;
+    }
+
+#endif
+
+    SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+    if (sv) {
+        if (db_errpfx)
+            sv_setpvf(sv, "%s: %s", db_errpfx, buffer) ;
+        else
+            sv_setpv(sv, buffer) ;
+    }
+}
+
+/* Look up "key" in "hash"; return the stored SV, or NULL when the key is
+ * absent or holds an undefined value. */
+static SV *
+readHash(HV * hash, char * key)
+{
+    SV ** entry = hv_fetch(hash, key, strlen(key), FALSE);
+    return (entry && SvOK(*entry)) ? *entry : NULL ;
+}
+
+/* Remove from the named perl hash the entry whose key is the POINTER
+ * VALUE itself — the raw bytes of "key", not the string it points to.
+ * Counterpart of hash_store_iv below. */
+static void
+hash_delete(char * hash, char * key)
+{
+    HV * hv = perl_get_hv(hash, TRUE);
+    (void) hv_delete(hv, (char*)&key, sizeof(key), G_DISCARD);
+}
+
+/* Register "key" (keyed by its pointer bytes, matching hash_delete) in
+ * the named perl hash with an integer value. */
+static void
+hash_store_iv(char * hash, char * key, IV value)
+{
+    HV * hv = perl_get_hv(hash, TRUE);
+    (void)hv_store(hv, (char*)&key, sizeof(key), newSViv(value), 0);
+    /* printf("hv_store returned %d\n", ret) ; */
+}
+
+/* Store an integer under a string key in an HV (ordinary string keying,
+ * unlike the pointer-keyed hash_store_iv above). */
+static void
+hv_store_iv(HV * hash, char * key, IV value)
+{
+    hv_store(hash, key, strlen(key), newSViv(value), 0);
+}
+
+/* Common body for the BerkeleyDB::Btree/Hash/Recno/Queue constructors:
+ * create a DB handle, apply each tuning value present in "info", open the
+ * database file (optionally inside transaction "txn"), and complete the
+ * perl-side wrapper "db".  Returns "db" on success; on failure destroys
+ * the wrapper and returns NULL.  Heavily #ifdef'd because the DB->open
+ * signature and the set_* configuration API changed across DB 2/3/4. */
+static BerkeleyDB
+my_db_open(
+    BerkeleyDB    db ,
+    SV *          ref,
+    SV *          ref_dbenv ,
+    BerkeleyDB__Env dbenv ,
+    BerkeleyDB__Txn txn,
+    const char *  file,
+    const char *  subname,
+    DBTYPE        type,
+    int           flags,
+    int           mode,
+    DB_INFO *     info
+    )
+{
+    DB_ENV *    env    = NULL ;
+    BerkeleyDB  RETVAL = NULL ;
+    DB *        dbp ;
+    int         Status ;
+    DB_TXN*     txnid = NULL ;
+
+    Trace(("_db_open(dbenv[%p] ref_dbenv [%p] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
+        dbenv, ref_dbenv, file, subname, type, flags, mode)) ;
+
+    /* Make the handle visible to the btree/hash/dup callbacks fired
+     * during open (e.g. while verifying an existing file). */
+    CurrentDB = db ;
+    if (dbenv)
+        env = dbenv->Env ;
+
+    if (txn)
+        txnid = txn->txn;
+
+    Trace(("_db_open(dbenv[%p] ref_dbenv [%p] txn [%p] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
+        dbenv, ref_dbenv, txn, file, subname, type, flags, mode)) ;
+
+#if DB_VERSION_MAJOR == 2
+    if (subname)
+        softCrash("Subname needs Berkeley DB 3 or better") ;
+#endif
+
+#if DB_VERSION_MAJOR > 2
+    Status = db_create(&dbp, env, 0) ;
+    Trace(("db_create returned %s\n", my_db_strerror(Status))) ;
+    if (Status)
+        return RETVAL ;
+
+#ifdef AT_LEAST_DB_3_3
+    /* Standalone handles route allocation and errors through perl. */
+    if (! env) {
+        dbp->set_alloc(dbp, safemalloc, MyRealloc, safefree) ;
+        dbp->set_errcall(dbp, db_errcall_cb) ;
+    }
+#endif
+
+    if (info->re_source) {
+        Status = dbp->set_re_source(dbp, info->re_source) ;
+        Trace(("set_re_source [%s] returned %s\n",
+            info->re_source, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->db_cachesize) {
+        Status = dbp->set_cachesize(dbp, 0, info->db_cachesize, 0) ;
+        Trace(("set_cachesize [%d] returned %s\n",
+            info->db_cachesize, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->db_lorder) {
+        Status = dbp->set_lorder(dbp, info->db_lorder) ;
+        Trace(("set_lorder [%d] returned %s\n",
+            info->db_lorder, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->db_pagesize) {
+        Status = dbp->set_pagesize(dbp, info->db_pagesize) ;
+        Trace(("set_pagesize [%d] returned %s\n",
+            info->db_pagesize, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->h_ffactor) {
+        Status = dbp->set_h_ffactor(dbp, info->h_ffactor) ;
+        Trace(("set_h_ffactor [%d] returned %s\n",
+            info->h_ffactor, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->h_nelem) {
+        Status = dbp->set_h_nelem(dbp, info->h_nelem) ;
+        Trace(("set_h_nelem [%d] returned %s\n",
+            info->h_nelem, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->bt_minkey) {
+        Status = dbp->set_bt_minkey(dbp, info->bt_minkey) ;
+        Trace(("set_bt_minkey [%d] returned %s\n",
+            info->bt_minkey, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->bt_compare) {
+        Status = dbp->set_bt_compare(dbp, info->bt_compare) ;
+        Trace(("set_bt_compare [%p] returned %s\n",
+            info->bt_compare, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->h_hash) {
+        Status = dbp->set_h_hash(dbp, info->h_hash) ;
+        Trace(("set_h_hash [%d] returned %s\n",
+            info->h_hash, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->dup_compare) {
+        Status = dbp->set_dup_compare(dbp, info->dup_compare) ;
+        Trace(("set_dup_compare [%d] returned %s\n",
+            info->dup_compare, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->bt_prefix) {
+        Status = dbp->set_bt_prefix(dbp, info->bt_prefix) ;
+        Trace(("set_bt_prefix [%d] returned %s\n",
+            info->bt_prefix, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->re_len) {
+        Status = dbp->set_re_len(dbp, info->re_len) ;
+        Trace(("set_re_len [%d] returned %s\n",
+            info->re_len, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->re_delim) {
+        Status = dbp->set_re_delim(dbp, info->re_delim) ;
+        Trace(("set_re_delim [%d] returned %s\n",
+            info->re_delim, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->re_pad) {
+        Status = dbp->set_re_pad(dbp, info->re_pad) ;
+        Trace(("set_re_pad [%d] returned %s\n",
+            info->re_pad, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->flags) {
+        Status = dbp->set_flags(dbp, info->flags) ;
+        Trace(("set_flags [%d] returned %s\n",
+            info->flags, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+    }
+
+    if (info->q_extentsize) {
+#ifdef AT_LEAST_DB_3_2
+        Status = dbp->set_q_extentsize(dbp, info->q_extentsize) ;
+        /* Fixed copy-paste: this trace previously claimed "set_flags"
+         * and logged info->flags instead of the extent size. */
+        Trace(("set_q_extentsize [%d] returned %s\n",
+            info->q_extentsize, my_db_strerror(Status)));
+        if (Status)
+            return RETVAL ;
+#else
+        softCrash("-ExtentSize needs at least Berkeley DB 3.2.x") ;
+#endif
+    }
+
+    /* DB 4.1 added the transaction argument to DB->open. */
+#ifdef AT_LEAST_DB_4_1
+    if ((Status = (dbp->open)(dbp, txnid, file, subname, type, flags, mode)) == 0) {
+#else
+    if ((Status = (dbp->open)(dbp, file, subname, type, flags, mode)) == 0) {
+#endif /* AT_LEAST_DB_4_1 */
+#else /* DB_VERSION_MAJOR == 2 */
+    if ((Status = db_open(file, type, flags, mode, env, info, &dbp)) == 0) {
+#endif /* DB_VERSION_MAJOR == 2 */
+
+        Trace(("db_opened ok\n"));
+#ifdef AT_LEAST_DB_3_3
+        dbp->BackRef = db;
+#endif
+        RETVAL = db ;
+        RETVAL->dbp  = dbp ;
+        RETVAL->txn  = txnid ;
+#if DB_VERSION_MAJOR == 2
+        RETVAL->type = dbp->type ;
+#else /* DB_VERSION_MAJOR > 2 */
+#ifdef AT_LEAST_DB_3_3
+        dbp->get_type(dbp, &RETVAL->type) ;
+#else /* DB 3.0 -> 3.2 */
+        RETVAL->type = dbp->get_type(dbp) ;
+#endif
+#endif /* DB_VERSION_MAJOR > 2 */
+        RETVAL->recno_or_queue = (RETVAL->type == DB_RECNO ||
+                                  RETVAL->type == DB_QUEUE) ;
+        RETVAL->filename = my_strdup(file) ;
+        RETVAL->Status = Status ;
+        RETVAL->active = TRUE ;
+        hash_store_iv("BerkeleyDB::Term::Db", (char *)RETVAL, 1) ;
+        Trace((" storing %p %p in BerkeleyDB::Term::Db\n", RETVAL, dbp)) ;
+        if (dbenv) {
+            RETVAL->parent_env = dbenv ;
+            dbenv->Status = Status ;
+            ++ dbenv->open_dbs ;
+        }
+    }
+    else {
+#if DB_VERSION_MAJOR > 2
+        (dbp->close)(dbp, 0) ;
+#endif
+        destroyDB(db) ;
+        Trace(("db open returned %s\n", my_db_strerror(Status))) ;
+    }
+
+    return RETVAL ;
+}
+
+
+#include "constants.h"
+
+MODULE = BerkeleyDB        PACKAGE = BerkeleyDB        PREFIX = env_
+
+INCLUDE: constants.xs
+
+/* Map the env_db_version XSUB onto db_version(); maj/min/patch are
+ * in/out parameters filled by the library, RETVAL is the version text. */
+#define env_db_version(maj, min, patch) db_version(&maj, &min, &patch)
+char *
+env_db_version(maj, min, patch)
+    int maj
+    int min
+    int patch
+    OUTPUT:
+        RETVAL
+        maj
+        min
+        patch
+
+int
+db_value_set(value, which)
+    int value
+    int which
+    NOT_IMPLEMENTED_YET
+
+
+DualType
+_db_remove(ref)
+    SV *    ref
+    CODE:
+    {
+        /* BerkeleyDB::db_remove: delete a database (or sub-database) by
+         * name.  Options come in as a hash-ref: Filename, Subname, Flags,
+         * Env.  Requires DB 3.x+, which introduced DB->remove. */
+#if DB_VERSION_MAJOR == 2
+        softCrash("BerkeleyDB::db_remove needs Berkeley DB 3.x or better") ;
+#else
+        HV *        hash ;
+        DB *        dbp ;
+        SV *        sv ;
+        const char *    db = NULL ;
+        const char *    subdb = NULL ;
+        BerkeleyDB__Env    env = NULL ;
+        DB_ENV *    dbenv = NULL ;
+        u_int32_t    flags = 0 ;
+
+        hash = (HV*) SvRV(ref) ;
+        SetValue_pv(db, "Filename", char *) ;
+        SetValue_pv(subdb, "Subname", char *) ;
+        SetValue_iv(flags, "Flags") ;
+        SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+        if (env)
+            dbenv = env->Env ;
+        /* DB->remove consumes the handle, so no explicit close needed. */
+        RETVAL = db_create(&dbp, dbenv, 0) ;
+        if (RETVAL == 0) {
+            RETVAL = dbp->remove(dbp, db, subdb, flags) ;
+        }
+#endif
+    }
+    OUTPUT:
+        RETVAL
+
+DualType
+_db_verify(ref)
+    SV *    ref
+    CODE:
+    {
+        /* BerkeleyDB::db_verify: run DB->verify on a database file,
+         * optionally dumping salvage output to "Outfile".  Options hash:
+         * Filename, Subname, Outfile, Flags, Env.  Needs DB 3.1+. */
+#ifndef AT_LEAST_DB_3_1
+        softCrash("BerkeleyDB::db_verify needs Berkeley DB 3.1.x or better") ;
+#else
+        HV *        hash ;
+        DB *        dbp ;
+        SV *        sv ;
+        const char *    db = NULL ;
+        const char *    subdb = NULL ;
+        const char *    outfile = NULL ;
+        FILE *        ofh = NULL;
+        BerkeleyDB__Env    env = NULL ;
+        DB_ENV *    dbenv = NULL ;
+        u_int32_t    flags = 0 ;
+
+        hash = (HV*) SvRV(ref) ;
+        SetValue_pv(db, "Filename", char *) ;
+        SetValue_pv(subdb, "Subname", char *) ;
+        SetValue_pv(outfile, "Outfile", char *) ;
+        SetValue_iv(flags, "Flags") ;
+        SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+        RETVAL = 0;
+        if (outfile){
+            ofh = fopen(outfile, "w");
+            if (! ofh)
+                RETVAL = errno;
+        }
+        if (! RETVAL) {
+            if (env)
+                dbenv = env->Env ;
+            /* DB->verify consumes the handle regardless of outcome. */
+            RETVAL = db_create(&dbp, dbenv, 0) ;
+            if (RETVAL == 0) {
+                RETVAL = dbp->verify(dbp, db, subdb, ofh, flags) ;
+            }
+            if (outfile)
+                fclose(ofh);
+        }
+#endif
+    }
+    OUTPUT:
+        RETVAL
+
+DualType
+_db_rename(ref)
+    SV *    ref
+    CODE:
+    {
+        /* BerkeleyDB::db_rename: rename a database or sub-database.
+         * Options hash: Filename, Subname, Newname, Flags, Env.
+         * Needs DB 3.1+, which introduced DB->rename. */
+#ifndef AT_LEAST_DB_3_1
+        softCrash("BerkeleyDB::db_rename needs Berkeley DB 3.1.x or better") ;
+#else
+        HV *        hash ;
+        DB *        dbp ;
+        SV *        sv ;
+        const char *    db = NULL ;
+        const char *    subdb = NULL ;
+        const char *    newname = NULL ;
+        BerkeleyDB__Env    env = NULL ;
+        DB_ENV *    dbenv = NULL ;
+        u_int32_t    flags = 0 ;
+
+        hash = (HV*) SvRV(ref) ;
+        SetValue_pv(db, "Filename", char *) ;
+        SetValue_pv(subdb, "Subname", char *) ;
+        SetValue_pv(newname, "Newname", char *) ;
+        SetValue_iv(flags, "Flags") ;
+        SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+        if (env)
+            dbenv = env->Env ;
+        /* DB->rename consumes the handle. */
+        RETVAL = db_create(&dbp, dbenv, 0) ;
+        if (RETVAL == 0) {
+            RETVAL = dbp->rename(dbp, db, subdb, newname, flags) ;
+        }
+#endif
+    }
+    OUTPUT:
+        RETVAL
+
+MODULE = BerkeleyDB::Env        PACKAGE = BerkeleyDB::Env PREFIX = env_
+
+
+BerkeleyDB::Env::Raw
+_db_appinit(self, ref)
+    char *    self
+    SV *    ref
+    CODE:
+    {
+        /* BerkeleyDB::Env constructor.  Reads the options hash (Home,
+         * Config, ErrPrefix, ErrFile, Flags, SetFlags, Server, Cachesize,
+         * LockDetect, Mode, Verbose), then creates and opens a DB_ENV.
+         * Two code paths: DB 2.x uses db_appinit() on a hand-allocated
+         * DB_ENV; DB 3.x+ uses db_env_create() plus the set_* methods.
+         * On success the wrapper is registered in BerkeleyDB::Term::Env;
+         * on failure everything is torn down and NULL is returned. */
+        HV *    hash ;
+        SV *    sv ;
+        char *    home = NULL ;
+        char *     errfile = NULL ;
+        char *     server = NULL ;
+        char **    config = NULL ;
+        int    flags = 0 ;
+        int    setflags = 0 ;
+        int    cachesize = 0 ;
+        int    lk_detect = 0 ;
+        SV *    errprefix = NULL;
+        DB_ENV *    env ;
+        int status ;
+
+        Trace(("in _db_appinit [%s] %d\n", self, ref)) ;
+        hash = (HV*) SvRV(ref) ;
+        SetValue_pv(home,      "Home", char *) ;
+        SetValue_pv(config,    "Config", char **) ;
+        SetValue_sv(errprefix, "ErrPrefix") ;
+        SetValue_iv(flags,     "Flags") ;
+        SetValue_iv(setflags,  "SetFlags") ;
+        SetValue_pv(server,    "Server", char *) ;
+        SetValue_iv(cachesize, "Cachesize") ;
+        SetValue_iv(lk_detect, "LockDetect") ;
+#ifndef AT_LEAST_DB_3_2
+        if (setflags)
+            softCrash("-SetFlags needs Berkeley DB 3.x or better") ;
+#endif /* ! AT_LEAST_DB_3 */
+#ifndef AT_LEAST_DB_3_1
+        if (server)
+            softCrash("-Server needs Berkeley DB 3.1 or better") ;
+#endif /* ! AT_LEAST_DB_3_1 */
+        Trace(("_db_appinit(config=[%d], home=[%s],errprefix=[%s],flags=[%d]\n",
+            config, home, errprefix, flags)) ;
+#ifdef TRACE
+        if (config) {
+            int i ;
+            for (i = 0 ; i < 10 ; ++ i) {
+                if (config[i] == NULL) {
+                    printf("    End\n") ;
+                    break ;
+                }
+                printf("    config = [%s]\n", config[i]) ;
+            }
+        }
+#endif /* TRACE */
+        ZMALLOC(RETVAL, BerkeleyDB_ENV_type) ;
+        if (flags & DB_INIT_TXN)
+            RETVAL->txn_enabled = TRUE ;
+#if DB_VERSION_MAJOR == 2
+        ZMALLOC(RETVAL->Env, DB_ENV) ;
+        env = RETVAL->Env ;
+        {
+            /* Take a copy of the error prefix */
+            if (errprefix) {
+                Trace(("copying errprefix\n" )) ;
+                RETVAL->ErrPrefix = newSVsv(errprefix) ;
+                SvPOK_only(RETVAL->ErrPrefix) ;
+            }
+            if (RETVAL->ErrPrefix)
+                RETVAL->Env->db_errpfx = SvPVX(RETVAL->ErrPrefix) ;
+
+            SetValue_pv(errfile, "ErrFile", char *) ;
+            if (errfile) {
+                RETVAL->ErrHandle = env->db_errfile = fopen(errfile, "w");
+                if (RETVAL->ErrHandle == NULL)
+                    croak("Cannot open file %s: %s\n", errfile, Strerror(errno));
+            }
+            SetValue_iv(env->db_verbose, "Verbose") ;
+            env->db_errcall = db_errcall_cb ;
+            RETVAL->active = TRUE ;
+            status = db_appinit(home, config, env, flags) ;
+            Trace(("  status = %d env %d Env %d\n", status, RETVAL, env)) ;
+            if (status == 0)
+                hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
+            else {
+                if (RETVAL->ErrHandle)
+                    fclose(RETVAL->ErrHandle) ;
+                if (RETVAL->ErrPrefix)
+                    SvREFCNT_dec(RETVAL->ErrPrefix) ;
+                Safefree(RETVAL->Env) ;
+                Safefree(RETVAL) ;
+                RETVAL = NULL ;
+            }
+        }
+#else /* DB_VERSION_MAJOR > 2 */
+#ifndef AT_LEAST_DB_3_1
+#    define DB_CLIENT    0
+#endif
+        status = db_env_create(&RETVAL->Env, server ? DB_CLIENT : 0) ;
+        Trace(("db_env_create flags = %d returned %s\n", flags,
+            my_db_strerror(status))) ;
+        env = RETVAL->Env ;
+#ifdef AT_LEAST_DB_3_3
+        env->set_alloc(env, safemalloc, MyRealloc, safefree) ;
+#endif
+        if (status == 0 && cachesize) {
+            status = env->set_cachesize(env, 0, cachesize, 0) ;
+            Trace(("set_cachesize [%d] returned %s\n",
+                cachesize, my_db_strerror(status)));
+        }
+
+        if (status == 0 && lk_detect) {
+            status = env->set_lk_detect(env, lk_detect) ;
+            Trace(("set_lk_detect [%d] returned %s\n",
+                lk_detect, my_db_strerror(status)));
+        }
+#ifdef AT_LEAST_DB_4
+        /* set the server */
+        if (server && status == 0)
+        {
+            status = env->set_rpc_server(env, NULL, server, 0, 0, 0);
+            Trace(("ENV->set_rpc_server server = %s returned %s\n", server,
+                my_db_strerror(status))) ;
+        }
+#else
+#    if defined(AT_LEAST_DB_3_1) && ! defined(AT_LEAST_DB_4)
+        /* set the server */
+        if (server && status == 0)
+        {
+            status = env->set_server(env, server, 0, 0, 0);
+            Trace(("ENV->set_server server = %s returned %s\n", server,
+                my_db_strerror(status))) ;
+        }
+#    endif
+#endif
+#ifdef AT_LEAST_DB_3_2
+        if (setflags && status == 0)
+        {
+            status = env->set_flags(env, setflags, 1);
+            Trace(("ENV->set_flags value = %d returned %s\n", setflags,
+                my_db_strerror(status))) ;
+        }
+#endif
+        if (status == 0)
+        {
+            int     mode = 0 ;
+            /* Take a copy of the error prefix */
+            if (errprefix) {
+                Trace(("copying errprefix\n" )) ;
+                RETVAL->ErrPrefix = newSVsv(errprefix) ;
+                SvPOK_only(RETVAL->ErrPrefix) ;
+            }
+            if (RETVAL->ErrPrefix)
+                env->set_errpfx(env, SvPVX(RETVAL->ErrPrefix)) ;
+
+            SetValue_pv(errfile, "ErrFile", char *) ;
+            if (errfile) {
+                RETVAL->ErrHandle = fopen(errfile, "w");
+                if (RETVAL->ErrHandle == NULL)
+                    croak("Cannot open file %s: %s\n", errfile, Strerror(errno));
+                env->set_errfile(env, RETVAL->ErrHandle) ;
+            }
+
+            SetValue_iv(mode, "Mode") ;
+            env->set_errcall(env, db_errcall_cb) ;
+            RETVAL->active = TRUE ;
+            /* DB 3.0 took a config array; later versions dropped it. */
+#ifdef IS_DB_3_0_x
+            status = (env->open)(env, home, config, flags, mode) ;
+#else /* > 3.0 */
+            status = (env->open)(env, home, flags, mode) ;
+#endif
+            Trace(("ENV->open returned %s\n", my_db_strerror(status))) ;
+        }
+
+        if (status == 0)
+            hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
+        else {
+            (env->close)(env, 0) ;
+            if (RETVAL->ErrHandle)
+                fclose(RETVAL->ErrHandle) ;
+            if (RETVAL->ErrPrefix)
+                SvREFCNT_dec(RETVAL->ErrPrefix) ;
+            Safefree(RETVAL) ;
+            RETVAL = NULL ;
+        }
+#endif /* DB_VERSION_MAJOR > 2 */
+    }
+    OUTPUT:
+        RETVAL
+
+void
+log_archive(env, flags=0)
+    u_int32_t    flags
+    BerkeleyDB::Env env
+    PPCODE:
+    {
+        /* Return the list of log file names from log_archive() on the
+         * perl stack.  The call signature moved from a free function to
+         * an env method (and dropped the allocator arg) across DB 3/4. */
+        char ** list;
+        char ** file;
+        AV * av;
+#ifndef AT_LEAST_DB_3
+        softCrash("log_archive needs at least Berkeley DB 3.x.x");
+#else
+#  ifdef AT_LEAST_DB_4
+        env->Status = env->Env->log_archive(env->Env, &list, flags) ;
+#  else
+#    ifdef AT_LEAST_DB_3_3
+        env->Status = log_archive(env->Env, &list, flags) ;
+#    else
+        env->Status = log_archive(env->Env, &list, flags, safemalloc) ;
+#    endif
+#  endif
+        if (env->Status == 0 && list != NULL)
+        {
+            for (file = list; *file != NULL; ++file)
+            {
+                XPUSHs(sv_2mortal(newSVpv(*file, 0))) ;
+            }
+            /* The name array is one allocation owned by the caller. */
+            safefree(list);
+        }
+#endif
+    }
+
+BerkeleyDB::Txn::Raw
+_txn_begin(env, pid=NULL, flags=0)
+    u_int32_t    flags
+    BerkeleyDB::Env    env
+    BerkeleyDB::Txn    pid
+    CODE:
+    {
+        /* Start a (possibly nested) transaction in "env"; "pid" is the
+         * optional parent.  Returns a new wrapper registered in
+         * BerkeleyDB::Term::Txn, or NULL on failure (status is left in
+         * env->TxnMgrStatus). */
+        DB_TXN *txn ;
+        DB_TXN *p_id = NULL ;
+        Trace(("txn_begin pid %d, flags %d\n", pid, flags)) ;
+#if DB_VERSION_MAJOR == 2
+        if (env->Env->tx_info == NULL)
+            softCrash("Transaction Manager not enabled") ;
+#endif
+        if (!env->txn_enabled)
+            softCrash("Transaction Manager not enabled") ;
+        if (pid)
+            p_id = pid->txn ;
+        env->TxnMgrStatus =
+#if DB_VERSION_MAJOR == 2
+            txn_begin(env->Env->tx_info, p_id, &txn) ;
+#else
+#  ifdef AT_LEAST_DB_4
+            env->Env->txn_begin(env->Env, p_id, &txn, flags) ;
+#  else
+            txn_begin(env->Env, p_id, &txn, flags) ;
+#  endif
+#endif
+        if (env->TxnMgrStatus == 0) {
+            ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
+            RETVAL->txn  = txn ;
+            RETVAL->active = TRUE ;
+            Trace(("_txn_begin created txn [%p] in [%p]\n", txn, RETVAL));
+            hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
+        }
+        else
+            RETVAL = NULL ;
+    }
+    OUTPUT:
+        RETVAL
+
+
+# Version-dispatch wrapper for txn_checkpoint: the function moved onto
+# the DB_ENV handle in DB 4.x and its argument list changed across
+# 2.x/3.0/3.1.  The XSUB below has no CODE section, so xsubpp calls the
+# macro of the same name with the declared parameters.
+#if DB_VERSION_MAJOR == 2
+# define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env->tx_info, k, m)
+#else /* DB 3.0 or better */
+# ifdef AT_LEAST_DB_4
+# define env_txn_checkpoint(e,k,m,f) e->Env->txn_checkpoint(e->Env, k, m, f)
+# else
+# ifdef AT_LEAST_DB_3_1
+# define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env, k, m, 0)
+# else
+# define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env, k, m)
+# endif
+# endif
+#endif
+DualType
+env_txn_checkpoint(env, kbyte, min, flags=0)
+ BerkeleyDB::Env env
+ long kbyte
+ long min
+ u_int32_t flags
+
+# Return the transaction-manager statistics as a hash reference, or
+# undef on failure.  The stat buffer is allocated by the library and
+# released with safefree once the values are copied out.
+HV *
+txn_stat(env)
+ BerkeleyDB::Env env
+ HV * RETVAL = NULL ;
+ CODE:
+ {
+ DB_TXN_STAT * stat ;
+#ifdef AT_LEAST_DB_4
+ if(env->Env->txn_stat(env->Env, &stat, 0) == 0) {
+#else
+# ifdef AT_LEAST_DB_3_3
+ if(txn_stat(env->Env, &stat) == 0) {
+# else
+# if DB_VERSION_MAJOR == 2
+ if(txn_stat(env->Env->tx_info, &stat, safemalloc) == 0) {
+# else
+ if(txn_stat(env->Env, &stat, safemalloc) == 0) {
+# endif
+# endif
+#endif
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
+ hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
+ hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
+ hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
+ hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
+ hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
+ hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
+ hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
+ hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
+ hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
+#endif
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+#define EnDis(x) ((x) ? "Enabled" : "Disabled")
+# Debug helper that would dump the environment internals; the entire
+# body is compiled out with '#if 0' because it reads DB 2.x-era DB_ENV
+# struct fields, so at present this XSUB is a no-op.
+void
+printEnv(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+#if 0
+ printf("env [0x%X]\n", env) ;
+ printf(" ErrPrefix [%s]\n", env->ErrPrefix
+ ? SvPVX(env->ErrPrefix) : 0) ;
+ printf(" DB_ENV\n") ;
+ printf(" db_lorder [%d]\n", env->Env.db_lorder) ;
+ printf(" db_home [%s]\n", env->Env.db_home) ;
+ printf(" db_data_dir [%s]\n", env->Env.db_data_dir) ;
+ printf(" db_log_dir [%s]\n", env->Env.db_log_dir) ;
+ printf(" db_tmp_dir [%s]\n", env->Env.db_tmp_dir) ;
+ printf(" lk_info [%s]\n", EnDis(env->Env.lk_info)) ;
+ printf(" lk_max [%d]\n", env->Env.lk_max) ;
+ printf(" lg_info [%s]\n", EnDis(env->Env.lg_info)) ;
+ printf(" lg_max [%d]\n", env->Env.lg_max) ;
+ printf(" mp_info [%s]\n", EnDis(env->Env.mp_info)) ;
+ printf(" mp_size [%d]\n", env->Env.mp_size) ;
+ printf(" tx_info [%s]\n", EnDis(env->Env.tx_info)) ;
+ printf(" tx_max [%d]\n", env->Env.tx_max) ;
+ printf(" flags [%d]\n", env->Env.flags) ;
+ printf("\n") ;
+#endif
+
+# Set the error-message prefix for this environment and return the
+# previous prefix (undef on the first call).  The prefix SV is kept
+# alive in env->ErrPrefix because the library stores only a pointer to
+# its string buffer.
+SV *
+errPrefix(env, prefix)
+ BerkeleyDB::Env env
+ SV * prefix
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+ if (env->ErrPrefix) {
+ RETVAL = newSVsv(env->ErrPrefix) ;
+ SvPOK_only(RETVAL) ;
+ sv_setsv(env->ErrPrefix, prefix) ;
+ }
+ else {
+ RETVAL = NULL ;
+ env->ErrPrefix = newSVsv(prefix) ;
+ }
+ SvPOK_only(env->ErrPrefix) ;
+#if DB_VERSION_MAJOR == 2
+ env->Env->db_errpfx = SvPVX(env->ErrPrefix) ;
+#else
+ env->Env->set_errpfx(env->Env, SvPVX(env->ErrPrefix)) ;
+#endif
+ OUTPUT:
+ RETVAL
+
+# Return the status code of the last operation on this environment.
+DualType
+status(env)
+ BerkeleyDB::Env env
+ CODE:
+ RETVAL = env->Status ;
+ OUTPUT:
+ RETVAL
+
+# Close the environment (also reachable as $env->close via the ALIAS);
+# marks the wrapper inactive and removes it from the termination hash.
+# With STRICT_CLOSE, refuses while databases are still open.
+DualType
+db_appexit(env)
+ BerkeleyDB::Env env
+ ALIAS: close =1
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+#ifdef STRICT_CLOSE
+ if (env->open_dbs)
+ softCrash("attempted to close an environment with %d open database(s)",
+ env->open_dbs) ;
+#endif /* STRICT_CLOSE */
+#if DB_VERSION_MAJOR == 2
+ RETVAL = db_appexit(env->Env) ;
+#else
+ RETVAL = (env->Env->close)(env->Env, 0) ;
+#endif
+ env->active = FALSE ;
+ hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
+ OUTPUT:
+ RETVAL
+
+
+void
+_DESTROY(env)
+ BerkeleyDB::Env env
+ int RETVAL = 0 ;
+ CODE:
+ Trace(("In BerkeleyDB::Env::DESTROY\n"));
+ Trace((" env %ld Env %ld dirty %d\n", env, &env->Env, PL_dirty)) ;
+ if (env->active)
+#if DB_VERSION_MAJOR == 2
+ db_appexit(env->Env) ;
+#else
+ (env->Env->close)(env->Env, 0) ;
+#endif
+ if (env->ErrHandle)
+ fclose(env->ErrHandle) ;
+ if (env->ErrPrefix)
+ SvREFCNT_dec(env->ErrPrefix) ;
+#if DB_VERSION_MAJOR == 2
+ Safefree(env->Env) ;
+#endif
+ Safefree(env) ;
+ hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
+ Trace(("End of BerkeleyDB::Env::DESTROY %d\n", RETVAL)) ;
+
+# Create a transaction-manager wrapper for this environment; requires
+# that the environment was opened with transactions enabled.
+BerkeleyDB::TxnMgr::Raw
+_TxnMgr(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ if (!env->txn_enabled)
+ softCrash("Transaction Manager not enabled") ;
+ CODE:
+ ZMALLOC(RETVAL, BerkeleyDB_TxnMgr_type) ;
+ RETVAL->env = env ;
+ /* hash_store_iv("BerkeleyDB::Term::TxnMgr", (char *)txn, 1) ; */
+ OUTPUT:
+ RETVAL
+
+# The following thin setters forward to the matching DB_ENV->set_* call
+# and record the result in env->Status.  Each softCrash()es when built
+# against a Berkeley DB release that lacks the call.
+# NOTE(review): they all guard with ckActive_Database() although the
+# handle is an environment — presumably an intentional reuse of the
+# same active-flag check; confirm against the macro definitions.
+
+# Set the directory that holds the log files (DB 3.1+).
+int
+set_lg_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_lg_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+# Set the in-memory log buffer size (DB 3.0.55+).
+int
+set_lg_bsize(env, bsize)
+ BerkeleyDB::Env env
+ u_int32_t bsize
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_lg_bsize needs Berkeley DB 3.0.55 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_bsize(env->Env, bsize);
+#endif
+ OUTPUT:
+ RETVAL
+
+# Set the maximum size of a single log file (DB 3.0.55+).
+int
+set_lg_max(env, lg_max)
+ BerkeleyDB::Env env
+ u_int32_t lg_max
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_lg_max needs Berkeley DB 3.0.55 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_max(env->Env, lg_max);
+#endif
+ OUTPUT:
+ RETVAL
+
+# Add a directory to search for database files (DB 3.1+).
+int
+set_data_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_data_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_data_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+# Set the directory for temporary files (DB 3.1+).
+int
+set_tmp_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_tmp_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_tmp_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_mutexlocks(env, do_lock)
+ BerkeleyDB::Env env
+ int do_lock
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_setmutexlocks needs Berkeley DB 3.0 or better") ;
+#else
+# ifdef AT_LEAST_DB_4
+ RETVAL = env->Status = env->Env->set_flags(env->Env, DB_NOLOCKING, do_lock);
+# else
+# if defined(AT_LEAST_DB_3_2_6) || defined(IS_DB_3_0_x)
+ RETVAL = env->Status = env->Env->set_mutexlocks(env->Env, do_lock);
+# else /* DB 3.1 or 3.2.3 */
+ RETVAL = env->Status = db_env_set_mutexlocks(do_lock);
+# endif
+# endif
+#endif
+ OUTPUT:
+ RETVAL
+
+# Toggle a verbose-diagnostics category on or off (DB 3.x+).
+int
+set_verbose(env, which, onoff)
+ BerkeleyDB::Env env
+ u_int32_t which
+ int onoff
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_verbose needs Berkeley DB 3.x or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_verbose(env->Env, which, onoff);
+#endif
+ OUTPUT:
+ RETVAL
+
+# Set or clear environment behaviour flags (DB 3.2+).
+int
+set_flags(env, flags, onoff)
+ BerkeleyDB::Env env
+ u_int32_t flags
+ int onoff
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_2
+ softCrash("$env->set_flags needs Berkeley DB 3.2.x or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_flags(env->Env, flags, onoff);
+#endif
+ OUTPUT:
+ RETVAL
+
+MODULE = BerkeleyDB::Term PACKAGE = BerkeleyDB::Term
+
+# Close every still-open handle tracked in the Term hashes (invoked at
+# interpreter shutdown).  Implementation is supplied elsewhere.
+void
+close_everything()
+
+# safeCroak is implemented by expanding to softCrash(); the macro IS the
+# XSUB body.
+#define safeCroak(string) softCrash(string)
+void
+safeCroak(string)
+ char * string
+
+MODULE = BerkeleyDB::Hash PACKAGE = BerkeleyDB::Hash PREFIX = hash_
+
+# Open (or create) a DB_HASH database.  'ref' is a hashref of named
+# options (Filename, Subname, Txn, Env, Flags, Mode, Cachesize, ...);
+# the SetValue_* macros read 'hash' and may set 'sv' as a side effect.
+# Returns the raw wrapper from my_db_open, or NULL on failure.
+BerkeleyDB::Hash::Raw
+_db_open_hash(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
+
+ Trace(("_db_open_hash start\n")) ;
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char *) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.h_ffactor, "Ffactor") ;
+ SetValue_iv(info.h_nelem, "Nelem") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+ /* optional user-supplied hash callback, kept alive on the wrapper */
+ if ((sv = readHash(hash, "Hash")) && sv != &PL_sv_undef) {
+ info.h_hash = hash_cb ;
+ db->hash = newSVsv(sv) ;
+ }
+ /* DB_DUPSORT was introduced in DB 2.5.9 */
+ if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
+#ifdef DB_DUPSORT
+ info.dup_compare = dup_compare ;
+ db->dup_compare = newSVsv(sv) ;
+ info.flags |= DB_DUP|DB_DUPSORT ;
+#else
+ croak("DupCompare needs Berkeley DB 2.5.9 or later") ;
+#endif
+ }
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_HASH, flags, mode, &info) ;
+ Trace(("_db_open_hash end\n")) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+# Return DB_HASH_STAT statistics for a hash database as a hash
+# reference, or undef on failure.  Not available for DB 2.x.
+HV *
+db_stat(db, flags=0)
+ int flags
+ BerkeleyDB::Common db
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("$db->db_stat for a Hash needs Berkeley DB 3.x or better") ;
+#else
+ DB_HASH_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "hash_magic", stat->hash_magic) ;
+ hv_store_iv(RETVAL, "hash_version", stat->hash_version);
+ hv_store_iv(RETVAL, "hash_pagesize", stat->hash_pagesize);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "hash_nkeys", stat->hash_nkeys);
+ hv_store_iv(RETVAL, "hash_ndata", stat->hash_ndata);
+#else
+ hv_store_iv(RETVAL, "hash_nrecs", stat->hash_nrecs);
+#endif
+#ifndef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "hash_nelem", stat->hash_nelem);
+#endif
+ hv_store_iv(RETVAL, "hash_ffactor", stat->hash_ffactor);
+ hv_store_iv(RETVAL, "hash_buckets", stat->hash_buckets);
+ hv_store_iv(RETVAL, "hash_free", stat->hash_free);
+ hv_store_iv(RETVAL, "hash_bfree", stat->hash_bfree);
+ hv_store_iv(RETVAL, "hash_bigpages", stat->hash_bigpages);
+ hv_store_iv(RETVAL, "hash_big_bfree", stat->hash_big_bfree);
+ hv_store_iv(RETVAL, "hash_overflows", stat->hash_overflows);
+ hv_store_iv(RETVAL, "hash_ovfl_free", stat->hash_ovfl_free);
+ hv_store_iv(RETVAL, "hash_dup", stat->hash_dup);
+ hv_store_iv(RETVAL, "hash_dup_free", stat->hash_dup_free);
+#if DB_VERSION_MAJOR >= 3
+ hv_store_iv(RETVAL, "hash_metaflags", stat->hash_metaflags);
+#endif
+ safefree(stat) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Unknown PACKAGE = BerkeleyDB::Unknown PREFIX = hash_
+
+# Open an existing database of unknown type (DB_UNKNOWN lets the
+# library detect it).  Pushes the wrapper pointer and, on success, the
+# detected type name ("Btree"/"Hash"/"Recno") onto the return stack.
+void
+_db_open_unknown(ref)
+ SV * ref
+ PPCODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB RETVAL ;
+ BerkeleyDB__Txn txn = NULL ;
+ /* indexed by the detected DBTYPE value */
+ static char * Names[] = {"", "Btree", "Hash", "Recno"} ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char *) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.h_ffactor, "Ffactor") ;
+ SetValue_iv(info.h_nelem, "Nelem") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_UNKNOWN, flags, mode, &info) ;
+ XPUSHs(sv_2mortal(newSViv(PTR2IV(RETVAL))));
+ if (RETVAL)
+ XPUSHs(sv_2mortal(newSVpv(Names[RETVAL->type], 0))) ;
+ else
+ XPUSHs(sv_2mortal(newSViv((IV)NULL)));
+ }
+
+
+
+MODULE = BerkeleyDB::Btree PACKAGE = BerkeleyDB::Btree PREFIX = btree_
+
+# Open (or create) a DB_BTREE database from a hashref of named options.
+# Optional Compare/DupCompare/Prefix coderefs are installed as the
+# btree comparison callbacks and kept alive on the wrapper.
+BerkeleyDB::Btree::Raw
+_db_open_btree(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
+
+ Trace(("In _db_open_btree\n"));
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char*) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+ if ((sv = readHash(hash, "Compare")) && sv != &PL_sv_undef) {
+ Trace((" Parsed Compare callback\n"));
+ info.bt_compare = btree_compare ;
+ db->compare = newSVsv(sv) ;
+ }
+ /* DB_DUPSORT was introduced in DB 2.5.9 */
+ if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
+#ifdef DB_DUPSORT
+ Trace((" Parsed DupCompare callback\n"));
+ info.dup_compare = dup_compare ;
+ db->dup_compare = newSVsv(sv) ;
+ info.flags |= DB_DUP|DB_DUPSORT ;
+#else
+ softCrash("DupCompare needs Berkeley DB 2.5.9 or later") ;
+#endif
+ }
+ if ((sv = readHash(hash, "Prefix")) && sv != &PL_sv_undef) {
+ Trace((" Parsed Prefix callback\n"));
+ info.bt_prefix = btree_prefix ;
+ db->prefix = newSVsv(sv) ;
+ }
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_BTREE, flags, mode, &info) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+# Return DB_BTREE_STAT statistics for a btree database as a hash
+# reference, or undef on failure.
+HV *
+db_stat(db, flags=0)
+ int flags
+ BerkeleyDB::Common db
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+ DB_BTREE_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "bt_magic", stat->bt_magic);
+ hv_store_iv(RETVAL, "bt_version", stat->bt_version);
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "bt_metaflags", stat->bt_metaflags) ;
+ hv_store_iv(RETVAL, "bt_flags", stat->bt_metaflags) ; /* NOTE(review): "bt_flags" filled from bt_metaflags — presumably a DB2 compat alias; confirm */
+#else
+ hv_store_iv(RETVAL, "bt_flags", stat->bt_flags) ;
+#endif
+ hv_store_iv(RETVAL, "bt_maxkey", stat->bt_maxkey) ;
+ hv_store_iv(RETVAL, "bt_minkey", stat->bt_minkey);
+ hv_store_iv(RETVAL, "bt_re_len", stat->bt_re_len);
+ hv_store_iv(RETVAL, "bt_re_pad", stat->bt_re_pad);
+ hv_store_iv(RETVAL, "bt_pagesize", stat->bt_pagesize);
+ hv_store_iv(RETVAL, "bt_levels", stat->bt_levels);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "bt_nkeys", stat->bt_nkeys);
+ hv_store_iv(RETVAL, "bt_ndata", stat->bt_ndata);
+#else
+ hv_store_iv(RETVAL, "bt_nrecs", stat->bt_nrecs);
+#endif
+ hv_store_iv(RETVAL, "bt_int_pg", stat->bt_int_pg);
+ hv_store_iv(RETVAL, "bt_leaf_pg", stat->bt_leaf_pg);
+ hv_store_iv(RETVAL, "bt_dup_pg", stat->bt_dup_pg);
+ hv_store_iv(RETVAL, "bt_over_pg", stat->bt_over_pg);
+ hv_store_iv(RETVAL, "bt_free", stat->bt_free);
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+ hv_store_iv(RETVAL, "bt_freed", stat->bt_freed);
+ hv_store_iv(RETVAL, "bt_pfxsaved", stat->bt_pfxsaved);
+ hv_store_iv(RETVAL, "bt_split", stat->bt_split);
+ hv_store_iv(RETVAL, "bt_rootsplit", stat->bt_rootsplit);
+ hv_store_iv(RETVAL, "bt_fastsplit", stat->bt_fastsplit);
+ hv_store_iv(RETVAL, "bt_added", stat->bt_added);
+ hv_store_iv(RETVAL, "bt_deleted", stat->bt_deleted);
+ hv_store_iv(RETVAL, "bt_get", stat->bt_get);
+ hv_store_iv(RETVAL, "bt_cache_hit", stat->bt_cache_hit);
+ hv_store_iv(RETVAL, "bt_cache_miss", stat->bt_cache_miss);
+#endif
+ hv_store_iv(RETVAL, "bt_int_pgfree", stat->bt_int_pgfree);
+ hv_store_iv(RETVAL, "bt_leaf_pgfree", stat->bt_leaf_pgfree);
+ hv_store_iv(RETVAL, "bt_dup_pgfree", stat->bt_dup_pgfree);
+ hv_store_iv(RETVAL, "bt_over_pgfree", stat->bt_over_pgfree);
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Recno PACKAGE = BerkeleyDB::Recno PREFIX = recno_
+
+# Open (or create) a DB_RECNO database.  Len/Delim/Pad options set the
+# fixed-length / delimiter / pad-byte record flags.
+# NOTE(review): the filename option key is "Fname" here, unlike
+# "Filename" used by the hash/btree openers — confirm callers.
+BerkeleyDB::Recno::Raw
+_db_open_recno(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Fname", char*) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+
+ SetValue_iv(info.flags, "Property") ;
+ SetValue_pv(info.re_source, "Source", char*) ;
+ if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
+ info.re_len = SvIV(sv) ; ;
+ flagSet_DB2(info.flags, DB_FIXEDLEN) ;
+ }
+ if ((sv = readHash(hash, "Delim")) && sv != &PL_sv_undef) {
+ info.re_delim = SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ; ;
+ flagSet_DB2(info.flags, DB_DELIMITER) ;
+ }
+ if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
+ info.re_pad = (u_int32_t)SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ; ;
+ flagSet_DB2(info.flags, DB_PAD) ;
+ }
+ ZMALLOC(db, BerkeleyDB_type) ;
+#ifdef ALLOW_RECNO_OFFSET
+ SetValue_iv(db->array_base, "ArrayBase") ;
+ db->array_base = (db->array_base == 0 ? 1 : 0) ;
+#endif /* ALLOW_RECNO_OFFSET */
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_RECNO, flags, mode, &info) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Queue PACKAGE = BerkeleyDB::Queue PREFIX = recno_
+
+# Open (or create) a DB_QUEUE database (requires Berkeley DB 3.0+).
+# Mirrors the recno opener but adds the ExtentSize option.
+BerkeleyDB::Queue::Raw
+_db_open_queue(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3
+ softCrash("BerkeleyDB::Queue needs Berkeley DB 3.0.x or better");
+#else
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Fname", char*) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+ SetValue_iv(info.q_extentsize, "ExtentSize") ;
+
+
+ SetValue_iv(info.flags, "Property") ;
+ if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
+ info.re_len = SvIV(sv) ; ;
+ flagSet_DB2(info.flags, DB_FIXEDLEN) ;
+ }
+ if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
+ info.re_pad = (u_int32_t)SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ; ;
+ flagSet_DB2(info.flags, DB_PAD) ;
+ }
+ ZMALLOC(db, BerkeleyDB_type) ;
+#ifdef ALLOW_RECNO_OFFSET
+ SetValue_iv(db->array_base, "ArrayBase") ;
+ db->array_base = (db->array_base == 0 ? 1 : 0) ;
+#endif /* ALLOW_RECNO_OFFSET */
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_QUEUE, flags, mode, &info) ;
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+# Return DB_QUEUE_STAT statistics for a queue database as a hash
+# reference, or undef on failure.  Not available for DB 2.x.
+HV *
+db_stat(db, flags=0)
+ int flags
+ BerkeleyDB::Common db
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("$db->db_stat for a Queue needs Berkeley DB 3.x or better") ;
+#else /* Berkeley DB 3, or better */
+ DB_QUEUE_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "qs_magic", stat->qs_magic) ;
+ hv_store_iv(RETVAL, "qs_version", stat->qs_version);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "qs_nkeys", stat->qs_nkeys);
+ hv_store_iv(RETVAL, "qs_ndata", stat->qs_ndata);
+#else
+ hv_store_iv(RETVAL, "qs_nrecs", stat->qs_nrecs);
+#endif
+ hv_store_iv(RETVAL, "qs_pages", stat->qs_pages);
+ hv_store_iv(RETVAL, "qs_pagesize", stat->qs_pagesize);
+ hv_store_iv(RETVAL, "qs_pgfree", stat->qs_pgfree);
+ hv_store_iv(RETVAL, "qs_re_len", stat->qs_re_len);
+ hv_store_iv(RETVAL, "qs_re_pad", stat->qs_re_pad);
+#ifdef AT_LEAST_DB_3_2
+#else
+ hv_store_iv(RETVAL, "qs_start", stat->qs_start);
+#endif
+ hv_store_iv(RETVAL, "qs_first_recno", stat->qs_first_recno);
+ hv_store_iv(RETVAL, "qs_cur_recno", stat->qs_cur_recno);
+#if DB_VERSION_MAJOR >= 3
+ hv_store_iv(RETVAL, "qs_metaflags", stat->qs_metaflags);
+#endif
+ safefree(stat) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Common PACKAGE = BerkeleyDB::Common PREFIX = dab_
+
+
+# Close a database handle, mark the wrapper inactive, unregister it
+# from the termination hash, and decrement the owning environment's
+# open-database count.  With STRICT_CLOSE, refuses while a transaction
+# or cursors are still outstanding.
+DualType
+db_close(db,flags=0)
+ int flags
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ CODE:
+ Trace(("BerkeleyDB::Common::db_close %d\n", db));
+#ifdef STRICT_CLOSE
+ if (db->txn)
+ softCrash("attempted to close a database while a transaction was still open") ;
+ if (db->open_cursors)
+ softCrash("attempted to close a database with %d open cursor(s)",
+ db->open_cursors) ;
+#endif /* STRICT_CLOSE */
+ RETVAL = db->Status = ((db->dbp)->close)(db->dbp, flags) ;
+ if (db->parent_env && db->parent_env->open_dbs)
+ -- db->parent_env->open_dbs ;
+ db->active = FALSE ;
+ hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
+ -- db->open_cursors ; /* NOTE(review): unconditional decrement can drive the count negative — confirm intent */
+ Trace(("end of BerkeleyDB::Common::db_close\n"));
+ OUTPUT:
+ RETVAL
+
+# Destructor for database wrappers; actual teardown (close, callback
+# release, free) is delegated to destroyDB().
+void
+dab__DESTROY(db)
+ BerkeleyDB::Common db
+ CODE:
+ CurrentDB = db ;
+ Trace(("In BerkeleyDB::Common::_DESTROY db %d dirty=%d\n", db, PL_dirty)) ;
+ destroyDB(db) ;
+ Trace(("End of BerkeleyDB::Common::DESTROY \n")) ;
+
+# db_cursor: DB->cursor gained a flags argument in DB 2.6.
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+#define db_cursor(db, txn, cur,flags) ((db->dbp)->cursor)(db->dbp, txn, cur)
+#else
+#define db_cursor(db, txn, cur,flags) ((db->dbp)->cursor)(db->dbp, txn, cur,flags)
+#endif
+# Create a cursor on the database and return a wrapper that copies the
+# database's settings (callbacks, partial-record state, filters, ...).
+# Returns NULL (undef) on failure; bumps db->open_cursors on success.
+BerkeleyDB::Cursor::Raw
+_db_cursor(db, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Common db
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+ DBC * cursor ;
+ CurrentDB = db ;
+ if ((db->Status = db_cursor(db, db->txn, &cursor, flags)) == 0){
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->open_cursors ++ ;
+ RETVAL->parent_db = db ;
+ RETVAL->cursor = cursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->txn = db->txn ;
+ RETVAL->type = db->type ;
+ RETVAL->recno_or_queue = db->recno_or_queue ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+#ifdef AT_LEAST_DB_3_3
+ RETVAL->associated = db->associated ;
+ RETVAL->secondary_db = db->secondary_db;
+#endif
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+# Build a join cursor over an array-ref of existing cursors (DB 2.5.2+).
+# A NULL-terminated DBC* array is assembled from the Perl cursor
+# objects, handed to DB->join, and freed before returning.
+BerkeleyDB::Cursor::Raw
+_db_join(db, cursors, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Common db
+ AV * cursors
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2 && (DB_VERSION_MINOR < 5 || (DB_VERSION_MINOR == 5 && DB_VERSION_PATCH < 2))
+ softCrash("join needs Berkeley DB 2.5.2 or later") ;
+#else /* Berkeley DB >= 2.5.2 */
+ DBC * join_cursor ;
+ DBC ** cursor_list ;
+ I32 count = av_len(cursors) + 1 ;
+ int i ;
+ CurrentDB = db ;
+ if (count < 1 )
+ softCrash("db_join: No cursors in parameter list") ;
+ /* +1 for the NULL terminator DB->join expects */
+ cursor_list = (DBC **)safemalloc(sizeof(DBC*) * (count + 1));
+ for (i = 0 ; i < count ; ++i) {
+ SV * obj = (SV*) * av_fetch(cursors, i, FALSE) ;
+ IV tmp = SvIV(getInnerObject(obj)) ;
+ BerkeleyDB__Cursor cur = INT2PTR(BerkeleyDB__Cursor, tmp);
+ cursor_list[i] = cur->cursor ;
+ }
+ cursor_list[i] = NULL ;
+#if DB_VERSION_MAJOR == 2
+ if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, flags, &join_cursor)) == 0){
+#else
+ if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, &join_cursor, flags)) == 0){
+#endif
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->open_cursors ++ ;
+ RETVAL->parent_db = db ;
+ RETVAL->cursor = join_cursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+#ifdef AT_LEAST_DB_3_3
+ RETVAL->associated = db->associated ;
+ RETVAL->secondary_db = db->secondary_db;
+#endif
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
+ }
+ safefree(cursor_list) ;
+#endif /* Berkeley DB >= 2.5.2 */
+ }
+ OUTPUT:
+ RETVAL
+
+# Return 1 when recno offsetting is in effect, else 0.
+int
+ArrayOffset(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL = db->array_base ? 0 : 1 ;
+#else
+ RETVAL = 0 ;
+#endif /* ALLOW_RECNO_OFFSET */
+ OUTPUT:
+ RETVAL
+
+# Return the database type code stored on the wrapper.
+int
+type(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ RETVAL = db->type ;
+ OUTPUT:
+ RETVAL
+
+# Report whether the underlying file uses a foreign byte order; the
+# access method changed in DB 2.5, 3.x and 3.3.
+int
+byteswapped(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+ softCrash("byteswapped needs Berkeley DB 2.5 or later") ;
+#else
+#if DB_VERSION_MAJOR == 2
+ RETVAL = db->dbp->byteswapped ;
+#else
+#ifdef AT_LEAST_DB_3_3
+ db->dbp->get_byteswapped(db->dbp, &RETVAL) ;
+#else
+ RETVAL = db->dbp->get_byteswapped(db->dbp) ;
+#endif
+#endif
+#endif
+ OUTPUT:
+ RETVAL
+
+# Return the status code of the last operation on this database.
+DualType
+status(db)
+ BerkeleyDB::Common db
+ CODE:
+ RETVAL = db->Status ;
+ OUTPUT:
+ RETVAL
+
+#ifdef DBM_FILTERING
+
+# Get/set one of the four DBM filter callbacks: each XSUB returns the
+# previous filter (undef if none), clears it when passed undef, and
+# otherwise installs the given coderef.
+# NOTE(review): the setFilter macro below appears unused — the XSUBs
+# call DBM_setFilter, presumably defined elsewhere; confirm.
+#define setFilter(ftype) \
+ { \
+ if (db->ftype) \
+ RETVAL = sv_mortalcopy(db->ftype) ; \
+ ST(0) = RETVAL ; \
+ if (db->ftype && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db->ftype) ; \
+ db->ftype = NULL ; \
+ } \
+ else if (code) { \
+ if (db->ftype) \
+ sv_setsv(db->ftype, code) ; \
+ else \
+ db->ftype = newSVsv(code) ; \
+ } \
+ }
+
+
+SV *
+filter_fetch_key(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_fetch_key, code) ;
+
+SV *
+filter_store_key(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_store_key, code) ;
+
+SV *
+filter_fetch_value(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_fetch_value, code) ;
+
+SV *
+filter_store_value(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_store_value, code) ;
+
+#endif /* DBM_FILTERING */
+
+# Enable partial-record retrieval at (offset, length); in list context,
+# first returns the previous (partial-on, doff, dlen) triple.
+void
+partial_set(db, offset, length)
+ BerkeleyDB::Common db
+ u_int32_t offset
+ u_int32_t length
+ INIT:
+ ckActive_Database(db->active) ;
+ PPCODE:
+ if (GIMME == G_ARRAY) {
+ XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
+ XPUSHs(sv_2mortal(newSViv(db->doff))) ;
+ XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
+ }
+ db->partial = DB_DBT_PARTIAL ;
+ db->doff = offset ;
+ db->dlen = length ;
+
+
+# Disable partial-record retrieval; in list context, first returns the
+# previous (partial-on, doff, dlen) triple.
+void
+partial_clear(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ PPCODE:
+ if (GIMME == G_ARRAY) {
+ XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
+ XPUSHs(sv_2mortal(newSViv(db->doff))) ;
+ XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
+ }
+ db->partial =
+ db->doff =
+ db->dlen = 0 ;
+
+
+# Delete a key.  The XSUB has no CODE section: xsubpp calls the db_del
+# macro, which runs DB->del under the wrapper's current transaction and
+# records the result in db->Status.
+#define db_del(db, key, flags) \
+ (db->Status = ((db->dbp)->del)(db->dbp, db->txn, &key, flags))
+DualType
+db_del(db, key, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY key
+ INIT:
+ Trace(("db_del db[%p] in [%p] txn[%p] key[%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, flags)) ;
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+
+
+# writeToKey: true when the requested flags make DB->get write back
+# into the key DBT, so the Perl key argument must be updated on return.
+#ifdef AT_LEAST_DB_3
+# ifdef AT_LEAST_DB_3_2
+# define writeToKey() (flagSet(DB_CONSUME)||flagSet(DB_CONSUME_WAIT)||flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
+# else
+# define writeToKey() (flagSet(DB_CONSUME)||flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
+# endif
+#else
+#define writeToKey() (flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
+#endif
+# Fetch the value for a key under the wrapper's current transaction,
+# honouring any partial-record settings; 'data' is written in place.
+#define db_get(db, key, data, flags) \
+ (db->Status = ((db->dbp)->get)(db->dbp, db->txn, &key, &data, flags))
+DualType
+db_get(db, key, data, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY_B key
+ DBT_OPT data
+ CODE:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ SetPartial(data,db) ;
+ Trace(("db_get db[%p] in [%p] txn[%p] key [%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, flags)) ;
+ RETVAL = db_get(db, key, data, flags);
+ Trace((" RETVAL %d\n", RETVAL));
+ OUTPUT:
+ RETVAL
+ key if (writeToKey()) OutputKey(ST(1), key) ;
+ data
+
+# Fetch through a secondary index: returns the primary key in 'pkey'
+# alongside the data (DB 3.3+ only).
+#define db_pget(db, key, pkey, data, flags) \
+ (db->Status = ((db->dbp)->pget)(db->dbp, db->txn, &key, &pkey, &data, flags))
+DualType
+db_pget(db, key, pkey, data, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY_B key
+ DBTKEY_B pkey = NO_INIT
+ DBT_OPT data
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("db_pget needs at least Berkeley DB 3.3");
+#else
+ Trace(("db_pget db [%p] in [%p] txn [%p] flags [%d]\n", db->dbp, db, db->txn, flags)) ;
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ SetPartial(data,db) ;
+ DBT_clear(pkey);
+ RETVAL = db_pget(db, key, pkey, data, flags);
+ Trace((" RETVAL %d\n", RETVAL));
+#endif
+ OUTPUT:
+ RETVAL
+ key if (writeToKey()) OutputKey(ST(1), key) ;
+ pkey
+ data
+
+# Store a key/value pair under the wrapper's current transaction; with
+# DB_APPEND the allocated record number is written back into the key.
+#define db_put(db,key,data,flag) \
+ (db->Status = (db->dbp->put)(db->dbp,db->txn,&key,&data,flag))
+DualType
+db_put(db, key, data, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY key
+ DBT data
+ CODE:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ /* SetPartial(data,db) ; */
+ Trace(("db_put db[%p] in [%p] txn[%p] key[%.*s] data [%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, data.size, data.data, flags)) ;
+ RETVAL = db_put(db, key, data, flags);
+ Trace((" RETVAL %d\n", RETVAL));
+ OUTPUT:
+ RETVAL
+ key if (flagSet(DB_APPEND)) OutputKey(ST(1), key) ;
+
+#define db_key_range(db, key, range, flags) \
+ (db->Status = ((db->dbp)->key_range)(db->dbp, db->txn, &key, &range, flags))
+DualType
+db_key_range(db, key, less, equal, greater, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Common db
+ DBTKEY_B key
+ double less = 0.0 ;
+ double equal = 0.0 ;
+ double greater = 0.0 ;
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3_1
+ softCrash("key_range needs Berkeley DB 3.1.x or later") ;
+#else
+ DB_KEY_RANGE range ;
+ range.less = range.equal = range.greater = 0.0 ;
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ RETVAL = db_key_range(db, key, range, flags);
+ if (RETVAL == 0) {
+ less = range.less ;
+ equal = range.equal;
+ greater = range.greater;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+ less
+ equal
+ greater
+
+
+#define db_fd(d, x) (db->Status = (db->dbp->fd)(db->dbp, &x))
+DualType
+db_fd(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ CurrentDB = db ;
+ db_fd(db, RETVAL) ;
+ OUTPUT:
+ RETVAL
+
+
+#define db_sync(db, fl) (db->Status = (db->dbp->sync)(db->dbp, fl))
+DualType
+db_sync(db, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+
+void
+_Txn(db, txn=NULL)
+ BerkeleyDB::Common db
+ BerkeleyDB::Txn txn
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ if (txn) {
+ Trace(("_Txn[%p] in[%p] active [%d]\n", txn->txn, txn, txn->active));
+ ckActive_Transaction(txn->active) ;
+ db->txn = txn->txn ;
+ }
+ else {
+ Trace(("_Txn[undef] \n"));
+ db->txn = NULL ;
+ }
+
+
+#define db_truncate(db, countp, flags) \
+ (db->Status = ((db->dbp)->truncate)(db->dbp, db->txn, &countp, flags))
+DualType
+truncate(db, countp, flags=0)
+ BerkeleyDB::Common db
+ u_int32_t countp
+ u_int32_t flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("truncate needs Berkeley DB 3.3 or later") ;
+#else
+ CurrentDB = db ;
+ RETVAL = db_truncate(db, countp, flags);
+#endif
+ OUTPUT:
+ RETVAL
+ countp
+
+#ifdef AT_LEAST_DB_4_1
+# define db_associate(db, sec, cb, flags)\
+ (db->Status = ((db->dbp)->associate)(db->dbp, NULL, sec->dbp, &cb, flags))
+#else
+# define db_associate(db, sec, cb, flags)\
+ (db->Status = ((db->dbp)->associate)(db->dbp, sec->dbp, &cb, flags))
+#endif
+DualType
+associate(db, secondary, callback, flags=0)
+ BerkeleyDB::Common db
+ BerkeleyDB::Common secondary
+ SV* callback
+ u_int32_t flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("associate needs Berkeley DB 3.3 or later") ;
+#else
+ CurrentDB = db ;
+ /* db->associated = newSVsv(callback) ; */
+ secondary->associated = newSVsv(callback) ;
+ /* secondary->dbp->app_private = secondary->associated ; */
+ secondary->secondary_db = TRUE;
+ RETVAL = db_associate(db, secondary, associate_cb, flags);
+#endif
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Cursor PACKAGE = BerkeleyDB::Cursor PREFIX = cu_
+
+BerkeleyDB::Cursor::Raw
+_c_dup(db, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Cursor db
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3
+ softCrash("c_dup needs at least Berkeley DB 3.0.x");
+#else
+ DBC * newcursor ;
+ db->Status = ((db->cursor)->c_dup)(db->cursor, &newcursor, flags) ;
+ if (db->Status == 0){
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->parent_db->open_cursors ++ ;
+ RETVAL->parent_db = db->parent_db ;
+ RETVAL->cursor = newcursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->recno_or_queue = db->recno_or_queue ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+#ifdef AT_LEAST_DB_3_3
+ RETVAL->associated = db->associated ;
+#endif
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif /* DBM_FILTERING */
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+DualType
+_c_close(db)
+ BerkeleyDB::Cursor db
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
+ CODE:
+ RETVAL = db->Status =
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->active = FALSE ;
+ if (db->parent_db->open_cursors)
+ -- db->parent_db->open_cursors ;
+ OUTPUT:
+ RETVAL
+
+void
+_DESTROY(db)
+ BerkeleyDB::Cursor db
+ CODE:
+ CurrentDB = db->parent_db ;
+ Trace(("In BerkeleyDB::Cursor::_DESTROY db %d dirty=%d active=%d\n", db, PL_dirty, db->active));
+ hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
+ if (db->active)
+ ((db->cursor)->c_close)(db->cursor) ;
+ if (db->parent_db->open_cursors)
+ -- db->parent_db->open_cursors ;
+ Safefree(db->filename) ;
+ Safefree(db) ;
+ Trace(("End of BerkeleyDB::Cursor::_DESTROY\n")) ;
+
+DualType
+status(db)
+ BerkeleyDB::Cursor db
+ CODE:
+ RETVAL = db->Status ;
+ OUTPUT:
+ RETVAL
+
+
+#define cu_c_del(c,f) (c->Status = ((c->cursor)->c_del)(c->cursor,f))
+DualType
+cu_c_del(db, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ OUTPUT:
+ RETVAL
+
+
+#define cu_c_get(c,k,d,f) (c->Status = (c->cursor->c_get)(c->cursor,&k,&d,f))
+DualType
+cu_c_get(db, key, data, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ DBTKEY_B key
+ DBT_B data
+ INIT:
+ Trace(("c_get db [%p] in [%p] flags [%d]\n", db->dbp, db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ SetPartial(data,db) ;
+ Trace(("c_get end\n")) ;
+ OUTPUT:
+ RETVAL
+ key
+ data if (! flagSet(DB_JOIN_ITEM)) OutputValue_B(ST(2), data) ;
+
+#define cu_c_pget(c,k,p,d,f) (c->Status = (c->secondary_db ? (c->cursor->c_pget)(c->cursor,&k,&p,&d,f) : EINVAL))
+DualType
+cu_c_pget(db, key, pkey, data, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ DBTKEY_B key
+ DBTKEY_B pkey = NO_INIT
+ DBT_B data
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("db_c_pget needs at least Berkeley DB 3.3");
+#else
+ Trace(("c_pget db [%d] flags [%d]\n", db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ SetPartial(data,db) ;
+ DBT_clear(pkey);
+ RETVAL = cu_c_pget(db, key, pkey, data, flags);
+ Trace(("c_pget end\n")) ;
+#endif
+ OUTPUT:
+ RETVAL
+ key
+ pkey
+ data if (! flagSet(DB_JOIN_ITEM)) OutputValue_B(ST(2), data) ;
+
+
+
+#define cu_c_put(c,k,d,f) (c->Status = (c->cursor->c_put)(c->cursor,&k,&d,f))
+DualType
+cu_c_put(db, key, data, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ DBTKEY key
+ DBT data
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ /* SetPartial(data,db) ; */
+ OUTPUT:
+ RETVAL
+
+#define cu_c_count(c,p,f) (c->Status = (c->cursor->c_count)(c->cursor,&p,f))
+DualType
+cu_c_count(db, count, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ u_int32_t count = NO_INIT
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("c_count needs at least Berkeley DB 3.1.x");
+#else
+ Trace(("c_get count [%d] flags [%d]\n", db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ RETVAL = cu_c_count(db, count, flags) ;
+ Trace((" c_count got %d duplicates\n", count)) ;
+#endif
+ OUTPUT:
+ RETVAL
+ count
+
+MODULE = BerkeleyDB::TxnMgr PACKAGE = BerkeleyDB::TxnMgr PREFIX = xx_
+
+BerkeleyDB::Txn::Raw
+_txn_begin(txnmgr, pid=NULL, flags=0)
+ u_int32_t flags
+ BerkeleyDB::TxnMgr txnmgr
+ BerkeleyDB::Txn pid
+ CODE:
+ {
+ DB_TXN *txn ;
+ DB_TXN *p_id = NULL ;
+#if DB_VERSION_MAJOR == 2
+ if (txnmgr->env->Env->tx_info == NULL)
+ softCrash("Transaction Manager not enabled") ;
+#endif
+ if (pid)
+ p_id = pid->txn ;
+ txnmgr->env->TxnMgrStatus =
+#if DB_VERSION_MAJOR == 2
+ txn_begin(txnmgr->env->Env->tx_info, p_id, &txn) ;
+#else
+# ifdef AT_LEAST_DB_4
+ txnmgr->env->Env->txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
+# else
+ txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
+# endif
+#endif
+ if (txnmgr->env->TxnMgrStatus == 0) {
+ ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
+ RETVAL->txn = txn ;
+ RETVAL->active = TRUE ;
+ Trace(("_txn_begin created txn [%d] in [%d]\n", txn, RETVAL));
+ hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
+ }
+ else
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+DualType
+status(mgr)
+ BerkeleyDB::TxnMgr mgr
+ CODE:
+ RETVAL = mgr->env->TxnMgrStatus ;
+ OUTPUT:
+ RETVAL
+
+
+void
+_DESTROY(mgr)
+ BerkeleyDB::TxnMgr mgr
+ CODE:
+ Trace(("In BerkeleyDB::TxnMgr::DESTROY dirty=%d\n", PL_dirty)) ;
+ Safefree(mgr) ;
+ Trace(("End of BerkeleyDB::TxnMgr::DESTROY\n")) ;
+
+DualType
+txn_close(txnp)
+ BerkeleyDB::TxnMgr txnp
+ NOT_IMPLEMENTED_YET
+
+
+#if DB_VERSION_MAJOR == 2
+# define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env->tx_info, k, m)
+#else
+# ifdef AT_LEAST_DB_4
+# define xx_txn_checkpoint(e,k,m,f) e->env->Env->txn_checkpoint(e->env->Env, k, m, f)
+# else
+# ifdef AT_LEAST_DB_3_1
+# define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env, k, m, 0)
+# else
+# define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env, k, m)
+# endif
+# endif
+#endif
+DualType
+xx_txn_checkpoint(txnp, kbyte, min, flags=0)
+ BerkeleyDB::TxnMgr txnp
+ long kbyte
+ long min
+ u_int32_t flags
+
+HV *
+txn_stat(txnp)
+ BerkeleyDB::TxnMgr txnp
+ HV * RETVAL = NULL ;
+ CODE:
+ {
+ DB_TXN_STAT * stat ;
+#ifdef AT_LEAST_DB_4
+ if(txnp->env->Env->txn_stat(txnp->env->Env, &stat, 0) == 0) {
+#else
+# ifdef AT_LEAST_DB_3_3
+ if(txn_stat(txnp->env->Env, &stat) == 0) {
+# else
+# if DB_VERSION_MAJOR == 2
+ if(txn_stat(txnp->env->Env->tx_info, &stat, safemalloc) == 0) {
+# else
+ if(txn_stat(txnp->env->Env, &stat, safemalloc) == 0) {
+# endif
+# endif
+#endif
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
+ hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
+ hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
+ hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
+ hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
+ hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
+ hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
+ hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
+ hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
+ hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
+#endif
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+
+BerkeleyDB::TxnMgr
+txn_open(dir, flags, mode, dbenv)
+ int flags
+ const char * dir
+ int mode
+ BerkeleyDB::Env dbenv
+ NOT_IMPLEMENTED_YET
+
+
+MODULE = BerkeleyDB::Txn PACKAGE = BerkeleyDB::Txn PREFIX = xx_
+
+DualType
+status(tid)
+ BerkeleyDB::Txn tid
+ CODE:
+ RETVAL = tid->Status ;
+ OUTPUT:
+ RETVAL
+
+int
+_DESTROY(tid)
+ BerkeleyDB::Txn tid
+ CODE:
+ Trace(("In BerkeleyDB::Txn::_DESTROY txn [%d] active [%d] dirty=%d\n", tid->txn, tid->active, PL_dirty)) ;
+ if (tid->active)
+#ifdef AT_LEAST_DB_4
+ tid->txn->abort(tid->txn) ;
+#else
+ txn_abort(tid->txn) ;
+#endif
+ RETVAL = (int)tid ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ Safefree(tid) ;
+ Trace(("End of BerkeleyDB::Txn::DESTROY\n")) ;
+ OUTPUT:
+ RETVAL
+
+#define xx_txn_unlink(d,f,e) txn_unlink(d,f,&(e->Env))
+DualType
+xx_txn_unlink(dir, force, dbenv)
+ const char * dir
+ int force
+ BerkeleyDB::Env dbenv
+ NOT_IMPLEMENTED_YET
+
+#ifdef AT_LEAST_DB_4
+# define xx_txn_prepare(t) (t->Status = t->txn->prepare(t->txn, 0))
+#else
+# ifdef AT_LEAST_DB_3_3
+# define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn, 0))
+# else
+# define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn))
+# endif
+#endif
+DualType
+xx_txn_prepare(tid)
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+
+#ifdef AT_LEAST_DB_4
+# define _txn_commit(t,flags) (t->Status = t->txn->commit(t->txn, flags))
+#else
+# if DB_VERSION_MAJOR == 2
+# define _txn_commit(t,flags) (t->Status = txn_commit(t->txn))
+# else
+# define _txn_commit(t, flags) (t->Status = txn_commit(t->txn, flags))
+# endif
+#endif
+DualType
+_txn_commit(tid, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ tid->active = FALSE ;
+
+#ifdef AT_LEAST_DB_4
+# define _txn_abort(t) (t->Status = t->txn->abort(t->txn))
+#else
+# define _txn_abort(t) (t->Status = txn_abort(t->txn))
+#endif
+DualType
+_txn_abort(tid)
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ tid->active = FALSE ;
+
+#ifdef AT_LEAST_DB_4
+# define _txn_discard(t,f) (t->Status = t->txn->discard(t->txn, f))
+#else
+# ifdef AT_LEAST_DB_3_3_4
+# define _txn_discard(t,f) (t->Status = txn_discard(t->txn, f))
+# else
+# define _txn_discard(t,f) (int)softCrash("txn_discard needs Berkeley DB 3.3.4 or better") ;
+# endif
+#endif
+DualType
+_txn_discard(tid, flags=0)
+ BerkeleyDB::Txn tid
+ u_int32_t flags
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ tid->active = FALSE ;
+
+#ifdef AT_LEAST_DB_4
+# define xx_txn_id(t) t->txn->id(t->txn)
+#else
+# define xx_txn_id(t) txn_id(t->txn)
+#endif
+u_int32_t
+xx_txn_id(tid)
+ BerkeleyDB::Txn tid
+
+MODULE = BerkeleyDB::_tiedHash PACKAGE = BerkeleyDB::_tiedHash
+
+int
+FIRSTKEY(db)
+ BerkeleyDB::Common db
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ DBC * cursor ;
+
+ /*
+ TODO!
+ set partial value to 0 - to eliminate the retrieval of
+ the value need to store any existing partial settings &
+ restore at the end.
+
+ */
+ CurrentDB = db ;
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ /* If necessary create a cursor for FIRSTKEY/NEXTKEY use */
+ if (!db->cursor &&
+ (db->Status = db_cursor(db, db->txn, &cursor, 0)) == 0 )
+ db->cursor = cursor ;
+
+ if (db->cursor)
+ RETVAL = (db->Status) =
+ ((db->cursor)->c_get)(db->cursor, &key, &value, DB_FIRST);
+ else
+ RETVAL = db->Status ;
+ /* check for end of cursor */
+ if (RETVAL == DB_NOTFOUND) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->cursor = NULL ;
+ }
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key)
+ }
+
+
+
+int
+NEXTKEY(db, key)
+ BerkeleyDB::Common db
+ DBTKEY key = NO_INIT
+ CODE:
+ {
+ DBT value ;
+
+ CurrentDB = db ;
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ key.flags = 0 ;
+ RETVAL = (db->Status) =
+ ((db->cursor)->c_get)(db->cursor, &key, &value, DB_NEXT);
+
+ /* check for end of cursor */
+ if (RETVAL == DB_NOTFOUND) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->cursor = NULL ;
+ }
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key)
+ }
+
+MODULE = BerkeleyDB::_tiedArray PACKAGE = BerkeleyDB::_tiedArray
+
+I32
+FETCHSIZE(db)
+ BerkeleyDB::Common db
+ CODE:
+ CurrentDB = db ;
+ RETVAL = GetArrayLength(db) ;
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB PACKAGE = BerkeleyDB
+
+BOOT:
+ {
+ SV * sv_err = perl_get_sv(ERR_BUFF, GV_ADD|GV_ADDMULTI) ;
+ SV * version_sv = perl_get_sv("BerkeleyDB::db_version", GV_ADD|GV_ADDMULTI) ;
+ SV * ver_sv = perl_get_sv("BerkeleyDB::db_ver", GV_ADD|GV_ADDMULTI) ;
+ int Major, Minor, Patch ;
+ (void)db_version(&Major, &Minor, &Patch) ;
+ /* Check that the versions of db.h and libdb.a are the same */
+ if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR
+ || Patch != DB_VERSION_PATCH)
+ croak("\nBerkeleyDB needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ Major, Minor, Patch) ;
+
+ if (Major < 2 || (Major == 2 && Minor < 6))
+ {
+ croak("BerkeleyDB needs Berkeley DB 2.6 or greater. This is %d.%d.%d\n",
+ Major, Minor, Patch) ;
+ }
+ sv_setpvf(version_sv, "%d.%d", Major, Minor) ;
+ sv_setpvf(ver_sv, "%d.%03d%03d", Major, Minor, Patch) ;
+ sv_setpv(sv_err, "");
+
+ DBT_clear(empty) ;
+ empty.data = &zero ;
+ empty.size = sizeof(db_recno_t) ;
+ empty.flags = 0 ;
+
+ }
+
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
new file mode 100644
index 00000000000..ba9a9c0085d
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
@@ -0,0 +1,8 @@
+
+package BerkeleyDB::Btree ;
+
+# This file is only used for MLDBM
+
+use BerkeleyDB ;
+
+1 ;
diff --git a/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
new file mode 100644
index 00000000000..8e7bc7e78c7
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
@@ -0,0 +1,8 @@
+
+package BerkeleyDB::Hash ;
+
+# This file is only used for MLDBM
+
+use BerkeleyDB ;
+
+1 ;
diff --git a/storage/bdb/perl/BerkeleyDB/Changes b/storage/bdb/perl/BerkeleyDB/Changes
new file mode 100644
index 00000000000..cbeb1a34d73
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/Changes
@@ -0,0 +1,167 @@
+Revision history for Perl extension BerkeleyDB.
+
+0.20 2nd September 2002
+
+ * More support for building with Berkeley DB 4.1.x
+ * db->get & db->pget used the wrong output macro for DBM filters
+ bug spotted by Aaron Ross.
+ * db_join didn't keep a reference to the cursors it was joining.
+ Spotted by Winton Davies.
+
+0.19 5th June 2002
+ * Removed the targets that used mkconsts from Makefile.PL. They relied
+ on a module that is not available in all versions of Perl.
+ * added support for env->set_verbose
+ * added support for db->truncate
+ * added support for db->rename via BerkeleyDB::db_rename
+ * added support for db->verify via BerkeleyDB::db_verify
+ * added support for db->associate, db->pget & cursor->c_pget
+ * Builds with Berkeley DB 4.1.x
+
+
+0.18 6th January 2002
+ * Dropped support for ErrFile as a file handle. It was proving too
+ difficult to get at the underlying FILE * in XS.
+ Reported by Jonas Smedegaard (Debian powerpc) & Kenneth Olwing (Win32)
+ * Fixed problem with abort macro in XSUB.h clashing with txn abort
+ method in Berkeley DB 4.x -- patch supplied by Kenneth Olwing.
+ * DB->set_alloc was getting called too late in BerkeleyDB.xs.
+ This was causing problems with ActivePerl -- problem reported
+ by Kenneth Olwing.
+ * When opening a queue, the Len property set the DB_PAD flag.
+ Should have been DB_FIXEDLEN. Fix provided by Kenneth Olwing.
+ * Test harness fixes from Kenneth Olwing.
+
+0.17 23 September 2001
+ * Fixed a bug in BerkeleyDB::Recno - reported by Niklas Paulsson.
+ * Added log_archive - patch supplied by Benjamin Holzman
+ * Added txn_discard
+ * Builds with Berkeley DB 4.0.x
+
+0.16 1 August 2001
+ * added support for Berkeley DB 3.3.x (but no support for any of the
+ new features just yet)
+
+0.15 26 April 2001
+ * Fixed a bug in the processing of the flags options in
+ db_key_range.
+ * added support for set_lg_max & set_lg_bsize
+ * allow DB_TMP_DIR and DB_TEMP_DIR
+ * the -Filename parameter to BerkeleyDB::Queue didn't work.
+ * added symbol DB_CONSUME_WAIT
+
+0.14 21st January 2001
+ * Silenced the warnings when build with a 64-bit Perl.
+ * Can now build with DB 3.2.3h (part of MySQL). The test harness
+ takes an age to do the queue test, but it does eventually pass.
+ * Mentioned the problems that occur when perl is built with sfio.
+
+0.13 15th January 2001
+ * Added support to allow this module to build with Berkeley DB 3.2
+ * Updated dbinfo to support Berkeley DB 3.1 & 3.2 file format
+ changes.
+ * Documented the Solaris 2.7 core dump problem in README.
+ * Tidied up the test harness to fix a problem on Solaris where the
+ "fred" directory wasn't being deleted when it should have been.
+ * two calls to "open" clashed with a win32 macro.
+ * size argument for hash_cb is different for Berkeley DB 3.x
+ * Documented the issue of building on Linux.
+ * Added -Server, -CacheSize & -LockDetect options
+ [original patch supplied by Graham Barr]
+ * Added support for set_mutexlocks, c_count, set_q_extentsize,
+ key_range, c_dup
+ * Dropped the "attempted to close a Cursor with an open transaction"
+ error in c_close. The correct behaviour is that the cursor
+ should be closed before committing/aborting the transaction.
+
+0.12 2nd August 2000
+ * Serious bug with get fixed. Spotted by Sleepycat.
+ * Added hints file for Solaris & Irix (courtesy of Albert Chin-A-Young)
+
+0.11 4th June 2000
+ * When built with Berkeley Db 3.x there can be a clash with the close
+ macro.
+ * Typo in the definition of DB_WRITECURSOR
+ * The flags parameter wasn't getting sent to db_cursor
+ * Plugged small memory leak in db_cursor (DESTROY wasn't freeing
+ memory)
+ * Can be built with Berkeley DB 3.1
+
+0.10 8th December 1999
+ * The DESTROY method was missing for BerkeleyDB::Env. This resulted in
+ a memory leak. Fixed.
+ * If opening an environment or database failed, there was a small
+ memory leak. This has been fixed.
+ * With a thread-enabled Perl it could core when a database was closed.
+ Problem traced to the strdup function.
+
+0.09 29th November 1999
+ * the queue.t & subdb.t test harnesses were outputting a few
+ spurious warnings. This has been fixed.
+
+0.08 28th November 1999
+ * More documentation updates
+ * Changed reference to files in /tmp in examples.t
+ * Fixed a typo in softCrash that caused problems when building
+ with a thread-enabled Perl.
+ * BerkeleyDB::Error wasn't initialised properly.
+ * ANSI-ified all the static C functions in BerkeleyDB.xs
+ * Added support for the following DB 3.x features:
+ + The Queue database type
+ + db_remove
+ + subdatabases
+ + db_stat for Hash & Queue
+
+0.07 21st September 1999
+ * Numerous small bug fixes.
+ * Added support for sorting duplicate values DB_DUPSORT.
+ * Added support for DB_GET_BOTH & DB_NEXT_DUP.
+ * Added get_dup (from DB_File).
+ * beefed up the documentation.
+ * Forgot to add the DB_INIT_CDB in BerkeleyDB.pm in previous release.
+ * Merged the DBM Filter code from DB_File into BerkeleyDB.
+ * Fixed a nasty bug where a closed transaction was still used
+ with db_put, db_get etc.
+ * Added logic to gracefully close everything whenever a fatal error
+ happens. Previously the plug was just pulled.
+ * It is now a fatal error to explicitly close an environment if there
+ is still an open database; a database when there are open cursors or
+ an open transaction; and a cursor if there is an open transaction.
+ Using object destruction doesn't have this issue, as object
+ references will ensure everything gets closed in the correct order.
+ * The BOOT code now checks that the version of db.h & libdb are the
+ same - this seems to be a common problem on Linux.
+ * MLDBM support added.
+ * Support for the new join cursor added.
+ * Builds with Berkeley DB 3.x
+ * Updated dbinfo for Berkeley DB 3.x file formats.
+ * Deprecated the TxnMgr class. As with Berkeley DB version 3,
+ txn_begin etc are now accessed via the environment object.
+
+0.06 19 December 1998
+ * Minor modifications to get the module to build with DB 2.6.x
+ * Added support for DB 2.6.x's Concurrent Access Method, DB_INIT_CDB.
+
+0.05 9 November 1998
+ * Added a note to README about how to build Berkeley DB 2.x
+ when using HP-UX.
+ * Minor modifications to get the module to build with DB 2.5.x
+
+0.04 19 May 1998
+ * Define DEFSV & SAVE_DEFSV if not already defined. This allows
+ the module to be built with Perl 5.004_04.
+
+0.03 5 May 1998
+ * fixed db_get with DB_SET_RECNO
+ * fixed c_get with DB_SET_RECNO and DB_GET_RECNO
+ * implemented BerkeleyDB::Unknown
+ * implemented BerkeleyDB::Recno, including push, pop etc
+ modified the txn support.
+
+0.02 30 October 1997
+ * renamed module to BerkeleyDB
+ * fixed a few bugs & added more tests
+
+0.01 23 October 1997
+ * first alpha release as BerkDB.
+
diff --git a/storage/bdb/perl/BerkeleyDB/MANIFEST b/storage/bdb/perl/BerkeleyDB/MANIFEST
new file mode 100644
index 00000000000..7da51ef7d7c
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/MANIFEST
@@ -0,0 +1,56 @@
+BerkeleyDB.pm
+BerkeleyDB.pod
+BerkeleyDB.pod.P
+BerkeleyDB.xs
+BerkeleyDB/Btree.pm
+BerkeleyDB/Hash.pm
+Changes
+config.in
+constants.h
+constants.xs
+dbinfo
+hints/dec_osf.pl
+hints/solaris.pl
+hints/irix_6_5.pl
+Makefile.PL
+MANIFEST
+mkconsts
+mkpod
+ppport.h
+README
+t/btree.t
+t/db-3.0.t
+t/db-3.1.t
+t/db-3.2.t
+t/db-3.3.t
+t/destroy.t
+t/env.t
+t/examples.t
+t/examples.t.T
+t/examples3.t
+t/examples3.t.T
+t/filter.t
+t/hash.t
+t/join.t
+t/mldbm.t
+t/queue.t
+t/recno.t
+t/strict.t
+t/subdb.t
+t/txn.t
+t/unknown.t
+t/util.pm
+Todo
+typemap
+patches/5.004
+patches/5.004_01
+patches/5.004_02
+patches/5.004_03
+patches/5.004_04
+patches/5.004_05
+patches/5.005
+patches/5.005_01
+patches/5.005_02
+patches/5.005_03
+patches/5.6.0
+scan
diff --git a/storage/bdb/perl/BerkeleyDB/Makefile.PL b/storage/bdb/perl/BerkeleyDB/Makefile.PL
new file mode 100644
index 00000000000..86da9a845af
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/Makefile.PL
@@ -0,0 +1,123 @@
+#! perl -w
+
+# It should not be necessary to edit this file. The configuration for
+# BerkeleyDB is controlled from the file config.in
+
+
+BEGIN { die "BerkeleyDB needs Perl 5.004_04 or greater" if $] < 5.004_04 ; }
+
+use strict ;
+use ExtUtils::MakeMaker ;
+use Config ;
+
+# Check for the presence of sfio
+if ($Config{'d_sfio'}) {
+ print <<EOM;
+
+WARNING: Perl seems to have been built with SFIO support enabled.
+ Please read the SFIO Notes in the README file.
+
+EOM
+}
+
+my $LIB_DIR ;
+my $INC_DIR ;
+my $DB_NAME ;
+my $LIBS ;
+
+ParseCONFIG() ;
+
+if (defined $DB_NAME)
+ { $LIBS = $DB_NAME }
+else {
+ if ($^O eq 'MSWin32')
+ { $LIBS = '-llibdb' }
+ else
+ { $LIBS = '-ldb' }
+}
+
+# OS2 is a special case, so check for it now.
+my $OS2 = "" ;
+$OS2 = "-DOS2" if $^O eq 'os2' ;
+
+WriteMakefile(
+ NAME => 'BerkeleyDB',
+ LIBS => ["-L${LIB_DIR} $LIBS"],
+ #MAN3PODS => {}, # Pods will be built by installman.
+ INC => "-I$INC_DIR",
+ VERSION_FROM => 'BerkeleyDB.pm',
+ XSPROTOARG => '-noprototypes',
+ DEFINE => "$OS2",
+ #'macro' => { INSTALLDIRS => 'perl' },
+ 'dist' => {COMPRESS=>'gzip', SUFFIX=>'gz'},
+ ($] >= 5.005
+ ? (ABSTRACT_FROM => 'BerkeleyDB.pod',
+ AUTHOR => 'Paul Marquess <Paul.Marquess@btinternet.com>')
+ : ()
+ ),
+ );
+
+
+sub MY::postamble {
+ '
+$(NAME).pod: $(NAME).pod.P t/examples.t.T t/examples3.t.T mkpod
+ perl ./mkpod
+
+$(NAME).xs: typemap
+ $(TOUCH) $(NAME).xs
+
+Makefile: config.in
+
+
+' ;
+}
+
+sub ParseCONFIG
+{
+ my ($k, $v) ;
+ my @badkey = () ;
+ my %Info = () ;
+ my @Options = qw( INCLUDE LIB DBNAME ) ;
+ my %ValidOption = map {$_, 1} @Options ;
+ my %Parsed = %ValidOption ;
+ my $CONFIG = 'config.in' ;
+
+ print "Parsing $CONFIG...\n" ;
+
+ # DBNAME is optional, so pretend it has been parsed.
+ delete $Parsed{'DBNAME'} ;
+
+ open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
+ while (<F>) {
+ s/^\s*|\s*$//g ;
+ next if /^\s*$/ or /^\s*#/ ;
+ s/\s*#\s*$// ;
+
+ ($k, $v) = split(/\s+=\s+/, $_, 2) ;
+ $k = uc $k ;
+ if ($ValidOption{$k}) {
+ delete $Parsed{$k} ;
+ $Info{$k} = $v ;
+ }
+ else {
+ push(@badkey, $k) ;
+ }
+ }
+ close F ;
+
+ print "Unknown keys in $CONFIG ignored [@badkey]\n"
+ if @badkey ;
+
+ # check parsed values
+ my @missing = () ;
+ die "The following keys are missing from $CONFIG file: [@missing]\n"
+ if @missing = keys %Parsed ;
+
+ $INC_DIR = $ENV{'BERKELEYDB_INCLUDE'} || $Info{'INCLUDE'} ;
+ $LIB_DIR = $ENV{'BERKELEYDB_LIB'} || $Info{'LIB'} ;
+ $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
+ print "Looks Good.\n" ;
+
+}
+
+# end of file Makefile.PL
diff --git a/storage/bdb/perl/BerkeleyDB/README b/storage/bdb/perl/BerkeleyDB/README
new file mode 100644
index 00000000000..a600e313193
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/README
@@ -0,0 +1,484 @@
+ BerkeleyDB
+
+ Version 0.20
+
+ 2nd Sept 2002
+
+ Copyright (c) 1997-2002 Paul Marquess. All rights reserved. This
+ program is free software; you can redistribute it and/or modify
+ it under the same terms as Perl itself.
+
+
+DESCRIPTION
+-----------
+
+BerkeleyDB is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 2 or greater. (Note: if
+you want to use version 1 of Berkeley DB with Perl you need the DB_File
+module).
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. BerkeleyDB provides an interface to all
+four of the database types (hash, btree, queue and recno) currently
+supported by Berkeley DB.
+
+For further details see the documentation in the file BerkeleyDB.pod.
+
+PREREQUISITES
+-------------
+
+Before you can build BerkeleyDB you need to have the following
+installed on your system:
+
+ * Perl 5.004_04 or greater.
+
+ * Berkeley DB Version 2.6.4 or greater
+
+ The official web site for Berkeley DB is http://www.sleepycat.com
+
+ The latest version of Berkeley DB is always available there. It
+ is recommended that you use the most recent version available at
+ the Sleepycat site.
+
+ The one exception to this advice is where you want to use BerkeleyDB
+ to access database files created by a third-party application,
+ like Sendmail. In these cases you must build BerkeleyDB with a
+ compatible version of Berkeley DB.
+
+
+BUILDING THE MODULE
+-------------------
+
+Assuming you have met all the prerequisites, building the module should
+be relatively straightforward.
+
+Step 1 : If you are running Solaris 2.5, 2.7 or HP-UX 10 read either
+ the Solaris Notes or HP-UX Notes sections below.
+ If you are running Linux please read the Linux Notes section
+ before proceeding.
+
+
+Step 2 : Edit the file config.in to suit you local installation.
+ Instructions are given in the file.
+
+Step 3 : Build and test the module using this sequence of commands:
+
+ perl Makefile.PL
+ make
+ make test
+
+INSTALLATION
+------------
+
+ make install
+
+TROUBLESHOOTING
+===============
+
+Here are some of the problems that people encounter when building BerkeleyDB.
+
+Missing db.h or libdb.a
+-----------------------
+
+If you get an error like this:
+
+ cc -c -I./libraries/ -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ BerkeleyDB.xs:52: db.h: No such file or directory
+
+or this:
+
+ cc -c -I./libraries/2.7.5 -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ LD_RUN_PATH="/lib" cc -o blib/arch/auto/BerkeleyDB/BerkeleyDB.so -shared
+ -L/usr/local/lib BerkeleyDB.o
+ -L/home/paul/perl/ext/BerkDB/BerkeleyDB/libraries -ldb
+ ld: cannot open -ldb: No such file or directory
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB installed, but it isn't in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables to point
+ to the directories where libdb.a and db.h are installed.
+
+#error db.h is not for Berkeley DB at all.
+------------------------------------------
+
+If you get the error above when building this module it means that there
+is a file called "db.h" on your system that isn't the one that comes
+with Berkeley DB.
+
+Options:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. Edit config.in and make sure the INCLUDE variable points to the
+ directory where the Berkeley DB file db.h is installed.
+
+ 3. If option 2 doesn't work, try temporarily renaming the db.h file
+ that is causing the error.
+
+#error db.h is for Berkeley DB 1.x - need at least Berkeley DB 2.6.4
+--------------------------------------------------------------------
+
+The error above will occur if there is a copy of the Berkeley DB 1.x
+file db.h on your system.
+
+This error will happen when
+
+ 1. you only have Berkeley DB version 1 on your system.
+ Solution: get & install a newer version of Berkeley DB.
+
+ 2. you have both version 1 and a later version of Berkeley DB
+ installed on your system. When building BerkeleyDB it attempts to
+ use the db.h for Berkeley DB version 1.
+ Solution: Edit config.in and set the LIB and INCLUDE variables
+ to point to the directories where libdb.a and db.h are
+ installed.
+
+
+#error db.h is for Berkeley DB 2.0-2.5 - need at least Berkeley DB 2.6.4
+------------------------------------------------------------------------
+
+The error above will occur if there is a copy of the file db.h for
+Berkeley DB 2.0 to 2.5 on your system.
+
+This symptom can imply:
+
+ 1. You don't have a new enough version of Berkeley DB.
+ Solution: get & install a newer version of Berkeley DB.
+
+ 2. You have the correct version of Berkeley DB installed, but it isn't
+ in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables
+ to point to the directories where libdb.a and db.h are
+ installed.
+
+Undefined Symbol: txn_stat
+--------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
+ -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
+ -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
+ $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: txn_stat
+ at /usr/local/lib/perl5/5.00503/i586-linux/DynaLoader.pm line 169.
+ ...
+
+This error usually happens when you have both version 1 and a newer version
+of Berkeley DB installed on your system. BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 2/3/4 and the version 1
+library. Unfortunately the two versions aren't compatible with each
+other. BerkeleyDB can only be built with Berkeley DB version 2, 3 or 4.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+Undefined Symbol: db_appinit
+----------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux
+ -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: db_appinit
+ at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm
+ ...
+
+
+This error usually happens when you have both version 2 and version
+3 of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 2 and the version 3
+library. Unfortunately the two versions aren't compatible with each
+other.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+Undefined Symbol: db_create
+---------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux
+ -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: db_create
+ at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm
+ ...
+
+This error usually happens when you have both version 2 and version
+3 of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 3 and the version 2
+library. Unfortunately the two versions aren't compatible with each
+other.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+
+Incompatible versions of db.h and libdb
+---------------------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
+ -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
+ -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
+ $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............
+ BerkeleyDB needs compatible versions of libdb & db.h
+ you have db.h version 2.6.4 and libdb version 2.7.5
+ BEGIN failed--compilation aborted at t/btree.t line 25.
+ dubious
+ Test returned status 255 (wstat 65280, 0xff00)
+ ...
+
+Another variation on the theme of having two versions of Berkeley DB on
+your system.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+ If you are running Linux, please read the Linux Notes section below.
+
+
+Linux Notes
+-----------
+
+Newer versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
+that has version 2.x of Berkeley DB linked into it. This makes it
+difficult to build this module with anything other than the version of
+Berkeley DB that shipped with your Linux release. If you do try to use
+a different version of Berkeley DB you will most likely get the error
+described in the "Incompatible versions of db.h and libdb" section of
+this file.
+
+To make matters worse, prior to Perl 5.6.1, the perl binary itself
+*always* included the Berkeley DB library.
+
+If you want to use a newer version of Berkeley DB with this module, the
+easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
+(or better).
+
+There are two approaches you can use to get older versions of Perl to
+work with specific versions of Berkeley DB. Both have their advantages
+and disadvantages.
+
+The first approach will only work when you want to build a version of
+Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
+Berkeley DB 2.x, you must use the next approach. This approach involves
+rebuilding your existing version of Perl after applying an unofficial
+patch. The "patches" directory in this module's source distribution
+contains a number of patch files. There is one patch file for every
+stable version of Perl since 5.004. Apply the appropriate patch to your
+Perl source tree before re-building and installing Perl from scratch.
+For example, assuming you are in the top-level source directory for
+Perl 5.6.0, the command below will apply the necessary patch. Remember
+to replace the path shown below with one that points to this module's
+patches directory.
+
+ patch -p1 -N </path/to/BerkeleyDB/patches/5.6.0
+
+Now rebuild & install perl. You should now have a perl binary that can
+be used to build this module. Follow the instructions in "BUILDING THE
+MODULE", remembering to set the INCLUDE and LIB variables in config.in.
+
+
+The second approach will work with Berkeley DB 2.x or better.
+Start by building Berkeley DB as a shared library. This is from
+the Berkeley DB build instructions:
+
+ Building Shared Libraries for the GNU GCC compiler
+
+ If you're using gcc and there's no better shared library example for
+ your architecture, the following shared library build procedure will
+ probably work.
+
+ Add the -fpic option to the CFLAGS value in the Makefile.
+
+ Rebuild all of your .o files. This will create a Berkeley DB library
+ that contains .o files with PIC code. To build the shared library,
+ then take the following steps in the library build directory:
+
+ % mkdir tmp
+ % cd tmp
+ % ar xv ../libdb.a
+ % gcc -shared -o libdb.so *.o
+ % mv libdb.so ..
+ % cd ..
+ % rm -rf tmp
+
+ Note, you may have to change the gcc line depending on the
+ requirements of your system.
+
+ The file libdb.so is your shared library
+
+Once you have built libdb.so, you will need to store it somewhere safe.
+
+ cp libdb.so /usr/local/BerkeleyDB/lib
+
+If you now set the LD_PRELOAD environment variable to point to this
+shared library, Perl will use it instead of the version of Berkeley DB
+that shipped with your Linux distribution.
+
+ export LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so
+
+Finally follow the instructions in "BUILDING THE MODULE" to build,
+test and install this module. Don't forget to set the INCLUDE and LIB
+variables in config.in.
+
+Remember, you will need to have the LD_PRELOAD variable set anytime you
+want to use Perl with Berkeley DB. Also note that if you have LD_PRELOAD
+permanently set it will affect ALL commands you execute. This may be a
+problem if you run any commands that access a database created by the
+version of Berkeley DB that shipped with your Linux distribution.
+
+
+
+Solaris 2.5 Notes
+-----------------
+
+If you are running Solaris 2.5, and you get this error when you run the
+BerkeleyDB test harness:
+
+ libc internal error: _rmutex_unlock: rmutex not held.
+
+you probably need to install a Sun patch. It has been reported that
+Sun patch 103187-25 (or later revisions) fixes this problem.
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
+
+Solaris 2.7 Notes
+-----------------
+
+If you are running Solaris 2.7 and all the tests in the test harness
+generate a core dump, try applying Sun patch 106980-09 (or better).
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
+
+
+HP-UX Notes
+-----------
+
+Some people running HP-UX 10 have reported getting an error like this
+when building this module with the native HP-UX compiler.
+
+ ld: (Warning) At least one PA 2.0 object file (BerkeleyDB.o) was detected.
+ The linked output may not run on a PA 1.x system.
+ ld: Invalid loader fixup for symbol "$000000A5".
+
+If this is the case for you, Berkeley DB needs to be recompiled with
+the +z or +Z option and the resulting library placed in a .sl file. The
+following steps should do the trick:
+
+ 1: Configure the Berkeley DB distribution with the +z or +Z C compiler
+ flag:
+
+ env "CFLAGS=+z" ../dist/configure ...
+
+ 2: Edit the Berkeley DB Makefile and change:
+
+ "libdb= libdb.a" to "libdb= libdb.sl".
+
+ 3: Build and install the Berkeley DB distribution as usual.
+
+
+
+FEEDBACK
+--------
+
+How to report a problem with BerkeleyDB.
+
+To help me help you, I need the following information:
+
+ 1. The version of Perl and the operating system name and version you
+ are running. The complete output from running "perl -V" will tell
+ me all I need to know.
+ If your perl does not understand the "-V" option, it is too old.
+ BerkeleyDB needs Perl version 5.004_04 or better.
+
+ 2. The version of BerkeleyDB you have. If you have successfully
+ installed BerkeleyDB, this one-liner will tell you:
+
+ perl -MBerkeleyDB -e 'print qq{BerkeleyDB ver $BerkeleyDB::VERSION\n}'
+
+ If you haven't installed BerkeleyDB then search BerkeleyDB.pm for a
+ line like this:
+
+ $VERSION = "1.20" ;
+
+ 3. The version of Berkeley DB you have installed. If you have
+ successfully installed BerkeleyDB, this one-liner will tell you:
+
+ perl -MBerkeleyDB -e 'print BerkeleyDB::DB_VERSION_STRING.qq{\n}'
+
+ If you haven't installed BerkeleyDB then search db.h for a line
+ like this:
+
+ #define DB_VERSION_STRING
+
+ 4. If you are having problems building BerkeleyDB, send me a complete
+ log of what happened.
+
+ 5. Now the difficult one. If you think you have found a bug in
+ BerkeleyDB and you want me to fix it, you will *greatly* enhance
+ the chances of me being able to track it down by sending me a small
+ self-contained Perl script that illustrates the problem you are
+ encountering. Include a summary of what you think the problem is
+ and a log of what happens when you run the script, in case I can't
+ reproduce your problem on my system. If possible, don't have the
+ script dependent on an existing 20Meg database. If the script you
+ send me can create the database itself then that is preferred.
+
+ I realise that in some cases this is easier said than done, so if
+ you can only reproduce the problem in your existing script, then
+ you can post me that if you want. Just don't expect me to find your
+ problem in a hurry, or at all. :-)
+
+
+CHANGES
+-------
+
+See the Changes file.
+
+Paul Marquess <Paul.Marquess@btinternet.com>
+
diff --git a/storage/bdb/perl/BerkeleyDB/Todo b/storage/bdb/perl/BerkeleyDB/Todo
new file mode 100644
index 00000000000..12d53bcf91c
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/Todo
@@ -0,0 +1,57 @@
+
+ * Proper documentation.
+
+ * address or document the "close all cursors if you encounter an error"
+
+ * Change the $BerkeleyDB::Error to store the info in the db object,
+ if possible.
+
+ * $BerkeleyDB::db_version is documented. &db_version isn't.
+
+ * migrate perl code into the .xs file where necessary
+
+ * convert as many of the DB examples files to BerkeleyDB format.
+
+ * add a method to the DB object to allow access to the environment (if there
+ actually is one).
+
+
+Possibles
+
+ * use '~' magic to store the inner data.
+
+ * for the get stuff zap the value to undef if it doesn't find the
+ key. This may be more intuitive for those folks who are used to
+ the $hash{key} interface.
+
+ * Text interface? This can be done as via Recno
+
+ * allow recno to allow base offset for arrays to be either 0 or 1.
+
+ * when duplicate keys are enabled, allow db_put($key, [$val1, $val2,...])
+
+
+2.x -> 3.x Upgrade
+==================
+
+Environment Verbose
+Env->open mode
+DB cache size extra parameter
+DB->open subdatabases Done
+An empty environment causes DB->open to fail
+where is __db.001 coming from? db_remove seems to create it. Bug in 3.0.55
+Change db_strerror for 0 to ""? Done
+Queue Done
+db_stat for Hash & Queue Done
+No TxnMgr
+DB->remove
+ENV->remove
+ENV->set_verbose
+upgrade
+
+ $env = BerkeleyDB::Env::Create
+ $env = create BerkeleyDB::Env
+ $status = $env->open()
+
+ $db = BerkeleyDB::Hash::Create
+ $status = $db->open()
diff --git a/storage/bdb/perl/BerkeleyDB/config.in b/storage/bdb/perl/BerkeleyDB/config.in
new file mode 100644
index 00000000000..fd1bb1caede
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/config.in
@@ -0,0 +1,43 @@
+# Filename: config.in
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+
+# 1. Where is the file db.h?
+#
+# Change the path below to point to the directory where db.h is
+# installed on your system.
+
+INCLUDE = /usr/local/include
+#INCLUDE = /usr/local/BerkeleyDB/include
+
+# 2. Where is libdb?
+#
+# Change the path below to point to the directory where libdb is
+# installed on your system.
+
+LIB = /usr/local/lib
+#LIB = /usr/local/BerkeleyDB/lib
+
+# 3. Is the library called libdb?
+#
+# If you have copies of both 1.x and 2.x Berkeley DB installed on
+# your system it can sometimes be tricky to make sure you are using
+# the correct one. Renaming one (or creating a symbolic link) to
+# include the version number of the library can help.
+#
+# For example, if you have Berkeley DB 2.6.4 you could rename the
+# Berkeley DB library from libdb.a to libdb-2.6.4.a and change the
+# DBNAME line below to look like this:
+#
+# DBNAME = -ldb-2.6.4
+#
+# Note: If you are building this module with Win32, -llibdb will be
+# used by default.
+#
+# If you have changed the name of the library, uncomment the line
+# below (by removing the leading #) and edit the line to use the name
+# you have picked.
+
+#DBNAME = -ldb-3.0
+
+# end of file config.in
diff --git a/storage/bdb/perl/BerkeleyDB/constants.h b/storage/bdb/perl/BerkeleyDB/constants.h
new file mode 100644
index 00000000000..d86cef15513
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/constants.h
@@ -0,0 +1,4046 @@
+#define PERL_constant_NOTFOUND 1
+#define PERL_constant_NOTDEF 2
+#define PERL_constant_ISIV 3
+#define PERL_constant_ISNO 4
+#define PERL_constant_ISNV 5
+#define PERL_constant_ISPV 6
+#define PERL_constant_ISPVN 7
+#define PERL_constant_ISSV 8
+#define PERL_constant_ISUNDEF 9
+#define PERL_constant_ISUV 10
+#define PERL_constant_ISYES 11
+
+#ifndef NVTYPE
+typedef double NV; /* 5.6 and later define NVTYPE, and typedef NV to it. */
+#endif
+#ifndef aTHX_
+#define aTHX_ /* 5.6 or later define this for threading support. */
+#endif
+#ifndef pTHX_
+#define pTHX_ /* 5.6 or later define this for threading support. */
+#endif
+
+static int
+constant_6 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_DUP DB_PAD DB_RMW DB_SET */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'D':
+ if (memEQ(name, "DB_DUP", 6)) {
+ /* ^ */
+#ifdef DB_DUP
+ *iv_return = DB_DUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_PAD", 6)) {
+ /* ^ */
+#ifdef DB_PAD
+ *iv_return = DB_PAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_RMW", 6)) {
+ /* ^ */
+#ifdef DB_RMW
+ *iv_return = DB_RMW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_SET", 6)) {
+ /* ^ */
+#ifdef DB_SET
+ *iv_return = DB_SET;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_7 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_EXCL DB_HASH DB_LAST DB_NEXT DB_PREV */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'E':
+ if (memEQ(name, "DB_EXCL", 7)) {
+ /* ^ */
+#ifdef DB_EXCL
+ *iv_return = DB_EXCL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_HASH", 7)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_HASH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_LAST", 7)) {
+ /* ^ */
+#ifdef DB_LAST
+ *iv_return = DB_LAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_NEXT", 7)) {
+ /* ^ */
+#ifdef DB_NEXT
+ *iv_return = DB_NEXT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_PREV", 7)) {
+ /* ^ */
+#ifdef DB_PREV
+ *iv_return = DB_PREV;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_8 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_AFTER DB_BTREE DB_FIRST DB_FLUSH DB_FORCE DB_QUEUE DB_RECNO */
+ /* Offset 4 gives the best switch position. */
+ switch (name[4]) {
+ case 'E':
+ if (memEQ(name, "DB_RECNO", 8)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_AFTER", 8)) {
+ /* ^ */
+#ifdef DB_AFTER
+ *iv_return = DB_AFTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_FIRST", 8)) {
+ /* ^ */
+#ifdef DB_FIRST
+ *iv_return = DB_FIRST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_FLUSH", 8)) {
+ /* ^ */
+#ifdef DB_FLUSH
+ *iv_return = DB_FLUSH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_FORCE", 8)) {
+ /* ^ */
+#ifdef DB_FORCE
+ *iv_return = DB_FORCE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_BTREE", 8)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_BTREE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_QUEUE", 8)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 55)
+ *iv_return = DB_QUEUE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_9 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_APPEND DB_BEFORE DB_CLIENT DB_COMMIT DB_CREATE DB_CURLSN DB_DIRECT
+ DB_EXTENT DB_GETREC DB_NOCOPY DB_NOMMAP DB_NOSYNC DB_RDONLY DB_RECNUM
+ DB_THREAD DB_VERIFY */
+ /* Offset 7 gives the best switch position. */
+ switch (name[7]) {
+ case 'A':
+ if (memEQ(name, "DB_NOMMAP", 9)) {
+ /* ^ */
+#ifdef DB_NOMMAP
+ *iv_return = DB_NOMMAP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_THREAD", 9)) {
+ /* ^ */
+#ifdef DB_THREAD
+ *iv_return = DB_THREAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_DIRECT", 9)) {
+ /* ^ */
+#ifdef DB_DIRECT
+ *iv_return = DB_DIRECT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_GETREC", 9)) {
+ /* ^ */
+#ifdef DB_GETREC
+ *iv_return = DB_GETREC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_VERIFY", 9)) {
+ /* ^ */
+#ifdef DB_VERIFY
+ *iv_return = DB_VERIFY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_COMMIT", 9)) {
+ /* ^ */
+#ifdef DB_COMMIT
+ *iv_return = DB_COMMIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_RDONLY", 9)) {
+ /* ^ */
+#ifdef DB_RDONLY
+ *iv_return = DB_RDONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_APPEND", 9)) {
+ /* ^ */
+#ifdef DB_APPEND
+ *iv_return = DB_APPEND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_CLIENT", 9)) {
+ /* ^ */
+#ifdef DB_CLIENT
+ *iv_return = DB_CLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_EXTENT", 9)) {
+ /* ^ */
+#ifdef DB_EXTENT
+ *iv_return = DB_EXTENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOSYNC", 9)) {
+ /* ^ */
+#ifdef DB_NOSYNC
+ *iv_return = DB_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_NOCOPY", 9)) {
+ /* ^ */
+#ifdef DB_NOCOPY
+ *iv_return = DB_NOCOPY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_BEFORE", 9)) {
+ /* ^ */
+#ifdef DB_BEFORE
+ *iv_return = DB_BEFORE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_CURLSN", 9)) {
+ /* ^ */
+#ifdef DB_CURLSN
+ *iv_return = DB_CURLSN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_CREATE", 9)) {
+ /* ^ */
+#ifdef DB_CREATE
+ *iv_return = DB_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_RECNUM", 9)) {
+ /* ^ */
+#ifdef DB_RECNUM
+ *iv_return = DB_RECNUM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+/* constant_10: resolve a 10-character Berkeley DB constant name to its
+   integer value.  Machine-generated Perl XS glue (ExtUtils::Constant).
+   name      - candidate constant name, known by the caller to be 10 chars.
+   iv_return - out-parameter receiving the constant's value on success.
+   Returns PERL_constant_ISIV when the name matches a macro defined in this
+   DB build, PERL_constant_NOTDEF when the name is recognised but the macro
+   is not defined, and PERL_constant_NOTFOUND when the name is unknown. */
+static int
+constant_10 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_CONSUME DB_CURRENT DB_DELETED DB_DUPSORT DB_ENCRYPT DB_ENV_CDB
+ DB_ENV_TXN DB_JOINENV DB_KEYLAST DB_NOPANIC DB_OK_HASH DB_PRIVATE
+ DB_PR_PAGE DB_RECOVER DB_SALVAGE DB_TIMEOUT DB_TXN_CKP DB_UNKNOWN
+ DB_UPGRADE */
+ /* Offset 8 gives the best switch position. */
+ /* Dispatch on the single most-discriminating character, then confirm the
+ whole candidate with memEQ before testing macro availability. */
+ switch (name[8]) {
+ case 'D':
+ if (memEQ(name, "DB_ENV_CDB", 10)) {
+ /* ^ */
+#ifdef DB_ENV_CDB
+ *iv_return = DB_ENV_CDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_UPGRADE", 10)) {
+ /* ^ */
+#ifdef DB_UPGRADE
+ *iv_return = DB_UPGRADE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_DELETED", 10)) {
+ /* ^ */
+#ifdef DB_DELETED
+ *iv_return = DB_DELETED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RECOVER", 10)) {
+ /* ^ */
+#ifdef DB_RECOVER
+ *iv_return = DB_RECOVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_PR_PAGE", 10)) {
+ /* ^ */
+#ifdef DB_PR_PAGE
+ *iv_return = DB_PR_PAGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SALVAGE", 10)) {
+ /* ^ */
+#ifdef DB_SALVAGE
+ *iv_return = DB_SALVAGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_NOPANIC", 10)) {
+ /* ^ */
+#ifdef DB_NOPANIC
+ *iv_return = DB_NOPANIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'K':
+ if (memEQ(name, "DB_TXN_CKP", 10)) {
+ /* ^ */
+#ifdef DB_TXN_CKP
+ *iv_return = DB_TXN_CKP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_CONSUME", 10)) {
+ /* ^ */
+#ifdef DB_CONSUME
+ *iv_return = DB_CONSUME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_CURRENT", 10)) {
+ /* ^ */
+#ifdef DB_CURRENT
+ *iv_return = DB_CURRENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_JOINENV", 10)) {
+ /* ^ */
+#ifdef DB_JOINENV
+ *iv_return = DB_JOINENV;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENCRYPT", 10)) {
+ /* ^ */
+#ifdef DB_ENCRYPT
+ *iv_return = DB_ENCRYPT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_DUPSORT", 10)) {
+ /* ^ */
+#ifdef DB_DUPSORT
+ *iv_return = DB_DUPSORT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_KEYLAST", 10)) {
+ /* ^ */
+#ifdef DB_KEYLAST
+ *iv_return = DB_KEYLAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OK_HASH", 10)) {
+ /* ^ */
+#ifdef DB_OK_HASH
+ *iv_return = DB_OK_HASH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_PRIVATE", 10)) {
+ /* ^ */
+#ifdef DB_PRIVATE
+ *iv_return = DB_PRIVATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_TIMEOUT", 10)) {
+ /* ^ */
+#ifdef DB_TIMEOUT
+ *iv_return = DB_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_UNKNOWN", 10)) {
+ /* ^ */
+ /* DB_UNKNOWN is an enum, not a macro, so a version-range check is used
+ instead of #ifdef; this range covers every DB release (>= 2.0.0). */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_UNKNOWN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_ENV_TXN", 10)) {
+ /* ^ */
+#ifdef DB_ENV_TXN
+ *iv_return = DB_ENV_TXN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+/* constant_11: resolve an 11-character Berkeley DB constant name to its
+   integer value.  Machine-generated Perl XS glue (ExtUtils::Constant).
+   name      - candidate constant name, known by the caller to be 11 chars.
+   iv_return - out-parameter receiving the constant's value on success.
+   Returns PERL_constant_ISIV on a defined match, PERL_constant_NOTDEF for a
+   known name whose macro is absent from this DB build, and
+   PERL_constant_NOTFOUND otherwise. */
+static int
+constant_11 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_APP_INIT DB_ARCH_ABS DB_ARCH_LOG DB_FIXEDLEN DB_GET_BOTH DB_INIT_CDB
+ DB_INIT_LOG DB_INIT_TXN DB_KEYEMPTY DB_KEYEXIST DB_KEYFIRST DB_LOCKDOWN
+ DB_LOCK_GET DB_LOCK_PUT DB_LOGMAGIC DB_LOG_DISK DB_MULTIPLE DB_NEXT_DUP
+ DB_NOSERVER DB_NOTFOUND DB_OK_BTREE DB_OK_QUEUE DB_OK_RECNO DB_POSITION
+ DB_QAMMAGIC DB_RENUMBER DB_SNAPSHOT DB_TRUNCATE DB_TXNMAGIC DB_TXN_LOCK
+ DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO DB_WRNOSYNC DB_YIELDCPU */
+ /* Offset 8 gives the best switch position. */
+ /* Dispatch on the most-discriminating character, then confirm the full
+ name with memEQ before testing macro availability. */
+ switch (name[8]) {
+ case 'A':
+ if (memEQ(name, "DB_ARCH_ABS", 11)) {
+ /* ^ */
+#ifdef DB_ARCH_ABS
+ *iv_return = DB_ARCH_ABS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TRUNCATE", 11)) {
+ /* ^ */
+#ifdef DB_TRUNCATE
+ *iv_return = DB_TRUNCATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'B':
+ if (memEQ(name, "DB_RENUMBER", 11)) {
+ /* ^ */
+#ifdef DB_RENUMBER
+ *iv_return = DB_RENUMBER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_INIT_CDB", 11)) {
+ /* ^ */
+#ifdef DB_INIT_CDB
+ *iv_return = DB_INIT_CDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OK_RECNO", 11)) {
+ /* ^ */
+#ifdef DB_OK_RECNO
+ *iv_return = DB_OK_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_YIELDCPU", 11)) {
+ /* ^ */
+#ifdef DB_YIELDCPU
+ *iv_return = DB_YIELDCPU;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'D':
+ if (memEQ(name, "DB_NEXT_DUP", 11)) {
+ /* ^ */
+#ifdef DB_NEXT_DUP
+ *iv_return = DB_NEXT_DUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_OK_QUEUE", 11)) {
+ /* ^ */
+#ifdef DB_OK_QUEUE
+ *iv_return = DB_OK_QUEUE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_REDO", 11)) {
+ /* ^ */
+#ifdef DB_TXN_REDO
+ *iv_return = DB_TXN_REDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_LOCK_GET", 11)) {
+ /* ^ */
+ /* DB_LOCK_GET is an enum member, so use a version-range check (always
+ true for DB >= 2.0.0) instead of #ifdef. */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_GET;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOGMAGIC", 11)) {
+ /* ^ */
+#ifdef DB_LOGMAGIC
+ *iv_return = DB_LOGMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_QAMMAGIC", 11)) {
+ /* ^ */
+#ifdef DB_QAMMAGIC
+ *iv_return = DB_QAMMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXNMAGIC", 11)) {
+ /* ^ */
+#ifdef DB_TXNMAGIC
+ *iv_return = DB_TXNMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_SNAPSHOT", 11)) {
+ /* ^ */
+#ifdef DB_SNAPSHOT
+ *iv_return = DB_SNAPSHOT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_KEYEXIST", 11)) {
+ /* ^ */
+#ifdef DB_KEYEXIST
+ *iv_return = DB_KEYEXIST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOG_DISK", 11)) {
+ /* ^ */
+#ifdef DB_LOG_DISK
+ *iv_return = DB_LOG_DISK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_POSITION", 11)) {
+ /* ^ */
+#ifdef DB_POSITION
+ *iv_return = DB_POSITION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ARCH_LOG", 11)) {
+ /* ^ */
+#ifdef DB_ARCH_LOG
+ *iv_return = DB_ARCH_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_FIXEDLEN", 11)) {
+ /* ^ */
+#ifdef DB_FIXEDLEN
+ *iv_return = DB_FIXEDLEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_INIT_LOG", 11)) {
+ /* ^ */
+#ifdef DB_INIT_LOG
+ *iv_return = DB_INIT_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_APP_INIT", 11)) {
+ /* ^ */
+#ifdef DB_APP_INIT
+ *iv_return = DB_APP_INIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_UNDO", 11)) {
+ /* ^ */
+#ifdef DB_TXN_UNDO
+ *iv_return = DB_TXN_UNDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_GET_BOTH", 11)) {
+ /* ^ */
+#ifdef DB_GET_BOTH
+ *iv_return = DB_GET_BOTH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCKDOWN", 11)) {
+ /* ^ */
+#ifdef DB_LOCKDOWN
+ *iv_return = DB_LOCKDOWN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOCK", 11)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK
+ *iv_return = DB_TXN_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_KEYEMPTY", 11)) {
+ /* ^ */
+#ifdef DB_KEYEMPTY
+ *iv_return = DB_KEYEMPTY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT", 11)) {
+ /* ^ */
+ /* DB_LOCK_PUT is an enum member, so use a version-range check (always
+ true for DB >= 2.0.0) instead of #ifdef. */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_PUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MULTIPLE", 11)) {
+ /* ^ */
+#ifdef DB_MULTIPLE
+ *iv_return = DB_MULTIPLE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_KEYFIRST", 11)) {
+ /* ^ */
+#ifdef DB_KEYFIRST
+ *iv_return = DB_KEYFIRST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OK_BTREE", 11)) {
+ /* ^ */
+#ifdef DB_OK_BTREE
+ *iv_return = DB_OK_BTREE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_INIT_TXN", 11)) {
+ /* ^ */
+#ifdef DB_INIT_TXN
+ *iv_return = DB_INIT_TXN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_NOTFOUND", 11)) {
+ /* ^ */
+#ifdef DB_NOTFOUND
+ *iv_return = DB_NOTFOUND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_NOSERVER", 11)) {
+ /* ^ */
+#ifdef DB_NOSERVER
+ *iv_return = DB_NOSERVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_TXN_SYNC", 11)) {
+ /* ^ */
+#ifdef DB_TXN_SYNC
+ *iv_return = DB_TXN_SYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_WRNOSYNC", 11)) {
+ /* ^ */
+#ifdef DB_WRNOSYNC
+ *iv_return = DB_WRNOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+/* constant_12: resolve a 12-character Berkeley DB constant name to its
+   integer value.  Machine-generated Perl XS glue (ExtUtils::Constant).
+   name      - candidate constant name, known by the caller to be 12 chars.
+   iv_return - out-parameter receiving the constant's value on success.
+   Returns PERL_constant_ISIV on a defined match, PERL_constant_NOTDEF for a
+   known name whose macro is absent from this DB build, and
+   PERL_constant_NOTFOUND otherwise. */
+static int
+constant_12 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ARCH_DATA DB_CDB_ALLDB DB_CL_WRITER DB_DELIMITER DB_DIRECT_DB
+ DB_DUPCURSOR DB_ENV_FATAL DB_FAST_STAT DB_GET_BOTHC DB_GET_RECNO
+ DB_HASHMAGIC DB_INIT_LOCK DB_JOIN_ITEM DB_LOCKMAGIC DB_LOCK_DUMP
+ DB_LOCK_RW_N DB_LOGOLDVER DB_MAX_PAGES DB_MPOOL_NEW DB_NEEDSPLIT
+ DB_NODUPDATA DB_NOLOCKING DB_NORECURSE DB_OVERWRITE DB_PAGEYIELD
+ DB_PAGE_LOCK DB_PERMANENT DB_POSITIONI DB_PRINTABLE DB_QAMOLDVER
+ DB_SET_RANGE DB_SET_RECNO DB_SWAPBYTES DB_TEMPORARY DB_TXN_ABORT
+ DB_TXN_APPLY DB_TXN_PRINT DB_WRITELOCK DB_WRITEOPEN DB_XA_CREATE */
+ /* Offset 3 gives the best switch position. */
+ /* Dispatch on the most-discriminating character, then confirm the full
+ name with memEQ before testing macro availability. */
+ switch (name[3]) {
+ case 'A':
+ if (memEQ(name, "DB_ARCH_DATA", 12)) {
+ /* ^ */
+#ifdef DB_ARCH_DATA
+ *iv_return = DB_ARCH_DATA;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_CDB_ALLDB", 12)) {
+ /* ^ */
+#ifdef DB_CDB_ALLDB
+ *iv_return = DB_CDB_ALLDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_CL_WRITER", 12)) {
+ /* ^ */
+#ifdef DB_CL_WRITER
+ *iv_return = DB_CL_WRITER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'D':
+ if (memEQ(name, "DB_DELIMITER", 12)) {
+ /* ^ */
+#ifdef DB_DELIMITER
+ *iv_return = DB_DELIMITER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DIRECT_DB", 12)) {
+ /* ^ */
+#ifdef DB_DIRECT_DB
+ *iv_return = DB_DIRECT_DB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DUPCURSOR", 12)) {
+ /* ^ */
+#ifdef DB_DUPCURSOR
+ *iv_return = DB_DUPCURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_ENV_FATAL", 12)) {
+ /* ^ */
+#ifdef DB_ENV_FATAL
+ *iv_return = DB_ENV_FATAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_FAST_STAT", 12)) {
+ /* ^ */
+#ifdef DB_FAST_STAT
+ *iv_return = DB_FAST_STAT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_GET_BOTHC", 12)) {
+ /* ^ */
+#ifdef DB_GET_BOTHC
+ *iv_return = DB_GET_BOTHC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_GET_RECNO", 12)) {
+ /* ^ */
+#ifdef DB_GET_RECNO
+ *iv_return = DB_GET_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_HASHMAGIC", 12)) {
+ /* ^ */
+#ifdef DB_HASHMAGIC
+ *iv_return = DB_HASHMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_INIT_LOCK", 12)) {
+ /* ^ */
+#ifdef DB_INIT_LOCK
+ *iv_return = DB_INIT_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'J':
+ if (memEQ(name, "DB_JOIN_ITEM", 12)) {
+ /* ^ */
+#ifdef DB_JOIN_ITEM
+ *iv_return = DB_JOIN_ITEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_LOCKMAGIC", 12)) {
+ /* ^ */
+#ifdef DB_LOCKMAGIC
+ *iv_return = DB_LOCKMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_DUMP", 12)) {
+ /* ^ */
+ /* DB_LOCK_DUMP is an enum member, so use a version-range check (always
+ true for DB >= 2.0.0) instead of #ifdef. */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_DUMP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_RW_N", 12)) {
+ /* ^ */
+#ifdef DB_LOCK_RW_N
+ *iv_return = DB_LOCK_RW_N;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOGOLDVER", 12)) {
+ /* ^ */
+#ifdef DB_LOGOLDVER
+ *iv_return = DB_LOGOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_MAX_PAGES", 12)) {
+ /* ^ */
+#ifdef DB_MAX_PAGES
+ *iv_return = DB_MAX_PAGES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_NEW", 12)) {
+ /* ^ */
+#ifdef DB_MPOOL_NEW
+ *iv_return = DB_MPOOL_NEW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_NEEDSPLIT", 12)) {
+ /* ^ */
+#ifdef DB_NEEDSPLIT
+ *iv_return = DB_NEEDSPLIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NODUPDATA", 12)) {
+ /* ^ */
+#ifdef DB_NODUPDATA
+ *iv_return = DB_NODUPDATA;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOLOCKING", 12)) {
+ /* ^ */
+#ifdef DB_NOLOCKING
+ *iv_return = DB_NOLOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NORECURSE", 12)) {
+ /* ^ */
+#ifdef DB_NORECURSE
+ *iv_return = DB_NORECURSE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_OVERWRITE", 12)) {
+ /* ^ */
+#ifdef DB_OVERWRITE
+ *iv_return = DB_OVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_PAGEYIELD", 12)) {
+ /* ^ */
+#ifdef DB_PAGEYIELD
+ *iv_return = DB_PAGEYIELD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PAGE_LOCK", 12)) {
+ /* ^ */
+#ifdef DB_PAGE_LOCK
+ *iv_return = DB_PAGE_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PERMANENT", 12)) {
+ /* ^ */
+#ifdef DB_PERMANENT
+ *iv_return = DB_PERMANENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_POSITIONI", 12)) {
+ /* ^ */
+#ifdef DB_POSITIONI
+ *iv_return = DB_POSITIONI;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PRINTABLE", 12)) {
+ /* ^ */
+#ifdef DB_PRINTABLE
+ *iv_return = DB_PRINTABLE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Q':
+ if (memEQ(name, "DB_QAMOLDVER", 12)) {
+ /* ^ */
+#ifdef DB_QAMOLDVER
+ *iv_return = DB_QAMOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_SET_RANGE", 12)) {
+ /* ^ */
+#ifdef DB_SET_RANGE
+ *iv_return = DB_SET_RANGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SET_RECNO", 12)) {
+ /* ^ */
+#ifdef DB_SET_RECNO
+ *iv_return = DB_SET_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SWAPBYTES", 12)) {
+ /* ^ */
+#ifdef DB_SWAPBYTES
+ *iv_return = DB_SWAPBYTES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_TEMPORARY", 12)) {
+ /* ^ */
+#ifdef DB_TEMPORARY
+ *iv_return = DB_TEMPORARY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_ABORT", 12)) {
+ /* ^ */
+ /* Enum member first available in DB 3.1.12; gate on version. */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_ABORT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_APPLY", 12)) {
+ /* ^ */
+ /* Enum member first available in DB 4.0.7; gate on version. */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_TXN_APPLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_PRINT", 12)) {
+ /* ^ */
+ /* Enum member first available in DB 4.1.17; gate on version. */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_TXN_PRINT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_WRITELOCK", 12)) {
+ /* ^ */
+#ifdef DB_WRITELOCK
+ *iv_return = DB_WRITELOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_WRITEOPEN", 12)) {
+ /* ^ */
+#ifdef DB_WRITEOPEN
+ *iv_return = DB_WRITEOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_XA_CREATE", 12)) {
+ /* ^ */
+#ifdef DB_XA_CREATE
+ *iv_return = DB_XA_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+/* constant_13: resolve a 13-character Berkeley DB constant name to its
+   integer value.  Machine-generated Perl XS glue (ExtUtils::Constant).
+   name      - candidate constant name, known by the caller to be 13 chars.
+   iv_return - out-parameter receiving the constant's value on success.
+   Returns PERL_constant_ISIV on a defined match, PERL_constant_NOTDEF for a
+   known name whose macro is absent from this DB build, and
+   PERL_constant_NOTFOUND otherwise. */
+static int
+constant_13 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_AGGRESSIVE DB_BTREEMAGIC DB_CHECKPOINT DB_DIRECT_LOG DB_DIRTY_READ
+ DB_DONOTINDEX DB_ENV_CREATE DB_ENV_NOMMAP DB_ENV_THREAD DB_HASHOLDVER
+ DB_INCOMPLETE DB_INIT_MPOOL DB_LOCK_NORUN DB_LOCK_RIW_N DB_LOCK_TRADE
+ DB_LOGVERSION DB_LOG_LOCKED DB_MPOOL_LAST DB_MUTEXDEBUG DB_MUTEXLOCKS
+ DB_NEXT_NODUP DB_NOORDERCHK DB_PREV_NODUP DB_PR_HEADERS DB_QAMVERSION
+ DB_RDWRMASTER DB_REGISTERED DB_REP_CLIENT DB_REP_MASTER DB_SEQUENTIAL
+ DB_STAT_CLEAR DB_SYSTEM_MEM DB_TXNVERSION DB_TXN_NOSYNC DB_TXN_NOWAIT
+ DB_VERIFY_BAD */
+ /* Offset 5 gives the best switch position. */
+ /* Dispatch on the most-discriminating character, then confirm the full
+ name with memEQ before testing macro availability. */
+ switch (name[5]) {
+ case 'A':
+ if (memEQ(name, "DB_STAT_CLEAR", 13)) {
+ /* ^ */
+#ifdef DB_STAT_CLEAR
+ *iv_return = DB_STAT_CLEAR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_INCOMPLETE", 13)) {
+ /* ^ */
+#ifdef DB_INCOMPLETE
+ *iv_return = DB_INCOMPLETE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_NORUN", 13)) {
+ /* ^ */
+#ifdef DB_LOCK_NORUN
+ *iv_return = DB_LOCK_NORUN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_RIW_N", 13)) {
+ /* ^ */
+#ifdef DB_LOCK_RIW_N
+ *iv_return = DB_LOCK_RIW_N;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_TRADE", 13)) {
+ /* ^ */
+ /* Enum member first available in DB 4.1.17; gate on version. */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_LOCK_TRADE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_CHECKPOINT", 13)) {
+ /* ^ */
+#ifdef DB_CHECKPOINT
+ *iv_return = DB_CHECKPOINT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PREV_NODUP", 13)) {
+ /* ^ */
+#ifdef DB_PREV_NODUP
+ *iv_return = DB_PREV_NODUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_AGGRESSIVE", 13)) {
+ /* ^ */
+#ifdef DB_AGGRESSIVE
+ *iv_return = DB_AGGRESSIVE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOGVERSION", 13)) {
+ /* ^ */
+#ifdef DB_LOGVERSION
+ *iv_return = DB_LOGVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOG_LOCKED", 13)) {
+ /* ^ */
+#ifdef DB_LOG_LOCKED
+ *iv_return = DB_LOG_LOCKED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGISTERED", 13)) {
+ /* ^ */
+#ifdef DB_REGISTERED
+ *iv_return = DB_REGISTERED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_INIT_MPOOL", 13)) {
+ /* ^ */
+#ifdef DB_INIT_MPOOL
+ *iv_return = DB_INIT_MPOOL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_QAMVERSION", 13)) {
+ /* ^ */
+#ifdef DB_QAMVERSION
+ *iv_return = DB_QAMVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_DONOTINDEX", 13)) {
+ /* ^ */
+#ifdef DB_DONOTINDEX
+ *iv_return = DB_DONOTINDEX;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXNVERSION", 13)) {
+ /* ^ */
+#ifdef DB_TXNVERSION
+ *iv_return = DB_TXNVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_NOSYNC", 13)) {
+ /* ^ */
+#ifdef DB_TXN_NOSYNC
+ *iv_return = DB_TXN_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_NOWAIT", 13)) {
+ /* ^ */
+#ifdef DB_TXN_NOWAIT
+ *iv_return = DB_TXN_NOWAIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_MPOOL_LAST", 13)) {
+ /* ^ */
+#ifdef DB_MPOOL_LAST
+ *iv_return = DB_MPOOL_LAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOORDERCHK", 13)) {
+ /* ^ */
+#ifdef DB_NOORDERCHK
+ *iv_return = DB_NOORDERCHK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_REP_CLIENT", 13)) {
+ /* ^ */
+#ifdef DB_REP_CLIENT
+ *iv_return = DB_REP_CLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_MASTER", 13)) {
+ /* ^ */
+#ifdef DB_REP_MASTER
+ *iv_return = DB_REP_MASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Q':
+ if (memEQ(name, "DB_SEQUENTIAL", 13)) {
+ /* ^ */
+#ifdef DB_SEQUENTIAL
+ *iv_return = DB_SEQUENTIAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_BTREEMAGIC", 13)) {
+ /* ^ */
+#ifdef DB_BTREEMAGIC
+ *iv_return = DB_BTREEMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DIRECT_LOG", 13)) {
+ /* ^ */
+#ifdef DB_DIRECT_LOG
+ *iv_return = DB_DIRECT_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DIRTY_READ", 13)) {
+ /* ^ */
+#ifdef DB_DIRTY_READ
+ *iv_return = DB_DIRTY_READ;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERIFY_BAD", 13)) {
+ /* ^ */
+#ifdef DB_VERIFY_BAD
+ *iv_return = DB_VERIFY_BAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_HASHOLDVER", 13)) {
+ /* ^ */
+#ifdef DB_HASHOLDVER
+ *iv_return = DB_HASHOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SYSTEM_MEM", 13)) {
+ /* ^ */
+#ifdef DB_SYSTEM_MEM
+ *iv_return = DB_SYSTEM_MEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_MUTEXDEBUG", 13)) {
+ /* ^ */
+#ifdef DB_MUTEXDEBUG
+ *iv_return = DB_MUTEXDEBUG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MUTEXLOCKS", 13)) {
+ /* ^ */
+#ifdef DB_MUTEXLOCKS
+ *iv_return = DB_MUTEXLOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_ENV_CREATE", 13)) {
+ /* ^ */
+#ifdef DB_ENV_CREATE
+ *iv_return = DB_ENV_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_NOMMAP", 13)) {
+ /* ^ */
+#ifdef DB_ENV_NOMMAP
+ *iv_return = DB_ENV_NOMMAP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_THREAD", 13)) {
+ /* ^ */
+#ifdef DB_ENV_THREAD
+ *iv_return = DB_ENV_THREAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_RDWRMASTER", 13)) {
+ /* ^ */
+#ifdef DB_RDWRMASTER
+ *iv_return = DB_RDWRMASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_NEXT_NODUP", 13)) {
+ /* ^ */
+#ifdef DB_NEXT_NODUP
+ *iv_return = DB_NEXT_NODUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_PR_HEADERS", 13)) {
+ /* ^ */
+#ifdef DB_PR_HEADERS
+ *iv_return = DB_PR_HEADERS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_14 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_AUTO_COMMIT DB_BTREEOLDVER DB_CHKSUM_SHA1 DB_EID_INVALID DB_ENCRYPT_AES
+ DB_ENV_APPINIT DB_ENV_DBLOCAL DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_NOPANIC
+ DB_ENV_PRIVATE DB_FILE_ID_LEN DB_HANDLE_LOCK DB_HASHVERSION DB_INVALID_EID
+ DB_JOIN_NOSORT DB_LOCKVERSION DB_LOCK_EXPIRE DB_LOCK_NOWAIT DB_LOCK_OLDEST
+ DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_SWITCH DB_MAX_RECORDS
+ DB_MPOOL_CLEAN DB_MPOOL_DIRTY DB_NOOVERWRITE DB_NOSERVER_ID DB_ODDFILESIZE
+ DB_OLD_VERSION DB_OPEN_CALLED DB_RECORDCOUNT DB_RECORD_LOCK DB_REGION_ANON
+ DB_REGION_INIT DB_REGION_NAME DB_RENAMEMAGIC DB_REP_NEWSITE DB_REP_UNAVAIL
+ DB_REVSPLITOFF DB_RUNRECOVERY DB_SET_TXN_NOW DB_USE_ENVIRON DB_WRITECURSOR
+ DB_XIDDATASIZE */
+ /* Offset 9 gives the best switch position. */
+ switch (name[9]) {
+ case 'A':
+ if (memEQ(name, "DB_LOCK_RANDOM", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_RANDOM
+ *iv_return = DB_LOCK_RANDOM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OPEN_CALLED", 14)) {
+ /* ^ */
+#ifdef DB_OPEN_CALLED
+ *iv_return = DB_OPEN_CALLED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_UNAVAIL", 14)) {
+ /* ^ */
+#ifdef DB_REP_UNAVAIL
+ *iv_return = DB_REP_UNAVAIL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_XIDDATASIZE", 14)) {
+ /* ^ */
+#ifdef DB_XIDDATASIZE
+ *iv_return = DB_XIDDATASIZE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_ENV_LOCKING", 14)) {
+ /* ^ */
+#ifdef DB_ENV_LOCKING
+ *iv_return = DB_ENV_LOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MAX_RECORDS", 14)) {
+ /* ^ */
+#ifdef DB_MAX_RECORDS
+ *iv_return = DB_MAX_RECORDS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_CLEAN", 14)) {
+ /* ^ */
+#ifdef DB_MPOOL_CLEAN
+ *iv_return = DB_MPOOL_CLEAN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RECORDCOUNT", 14)) {
+ /* ^ */
+#ifdef DB_RECORDCOUNT
+ *iv_return = DB_RECORDCOUNT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'D':
+ if (memEQ(name, "DB_FILE_ID_LEN", 14)) {
+ /* ^ */
+#ifdef DB_FILE_ID_LEN
+ *iv_return = DB_FILE_ID_LEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_INVALID_EID", 14)) {
+ /* ^ */
+#ifdef DB_INVALID_EID
+ *iv_return = DB_INVALID_EID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_DIRTY", 14)) {
+ /* ^ */
+#ifdef DB_MPOOL_DIRTY
+ *iv_return = DB_MPOOL_DIRTY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_LOCK_RECORD", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_RECORD
+ *iv_return = DB_LOCK_RECORD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_REMOVE", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_REMOVE
+ *iv_return = DB_LOCK_REMOVE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOSERVER_ID", 14)) {
+ /* ^ */
+#ifdef DB_NOSERVER_ID
+ *iv_return = DB_NOSERVER_ID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ODDFILESIZE", 14)) {
+ /* ^ */
+#ifdef DB_ODDFILESIZE
+ *iv_return = DB_ODDFILESIZE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_ENV_LOGGING", 14)) {
+ /* ^ */
+#ifdef DB_ENV_LOGGING
+ *iv_return = DB_ENV_LOGGING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_ENV_PRIVATE", 14)) {
+ /* ^ */
+#ifdef DB_ENV_PRIVATE
+ *iv_return = DB_ENV_PRIVATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REVSPLITOFF", 14)) {
+ /* ^ */
+#ifdef DB_REVSPLITOFF
+ *iv_return = DB_REVSPLITOFF;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_BTREEOLDVER", 14)) {
+ /* ^ */
+#ifdef DB_BTREEOLDVER
+ *iv_return = DB_BTREEOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_DBLOCAL", 14)) {
+ /* ^ */
+#ifdef DB_ENV_DBLOCAL
+ *iv_return = DB_ENV_DBLOCAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_OLDEST", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_OLDEST
+ *iv_return = DB_LOCK_OLDEST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_RENAMEMAGIC", 14)) {
+ /* ^ */
+#ifdef DB_RENAMEMAGIC
+ *iv_return = DB_RENAMEMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_SET_TXN_NOW", 14)) {
+ /* ^ */
+#ifdef DB_SET_TXN_NOW
+ *iv_return = DB_SET_TXN_NOW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_AUTO_COMMIT", 14)) {
+ /* ^ */
+#ifdef DB_AUTO_COMMIT
+ *iv_return = DB_AUTO_COMMIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_JOIN_NOSORT", 14)) {
+ /* ^ */
+#ifdef DB_JOIN_NOSORT
+ *iv_return = DB_JOIN_NOSORT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_NOWAIT", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_NOWAIT
+ *iv_return = DB_LOCK_NOWAIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RUNRECOVERY", 14)) {
+ /* ^ */
+#ifdef DB_RUNRECOVERY
+ *iv_return = DB_RUNRECOVERY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENV_APPINIT", 14)) {
+ /* ^ */
+#ifdef DB_ENV_APPINIT
+ *iv_return = DB_ENV_APPINIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_NOPANIC", 14)) {
+ /* ^ */
+#ifdef DB_ENV_NOPANIC
+ *iv_return = DB_ENV_NOPANIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_HASHVERSION", 14)) {
+ /* ^ */
+#ifdef DB_HASHVERSION
+ *iv_return = DB_HASHVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCKVERSION", 14)) {
+ /* ^ */
+#ifdef DB_LOCKVERSION
+ *iv_return = DB_LOCKVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OLD_VERSION", 14)) {
+ /* ^ */
+#ifdef DB_OLD_VERSION
+ *iv_return = DB_OLD_VERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_ENCRYPT_AES", 14)) {
+ /* ^ */
+#ifdef DB_ENCRYPT_AES
+ *iv_return = DB_ENCRYPT_AES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_WRITECURSOR", 14)) {
+ /* ^ */
+#ifdef DB_WRITECURSOR
+ *iv_return = DB_WRITECURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_EID_INVALID", 14)) {
+ /* ^ */
+#ifdef DB_EID_INVALID
+ *iv_return = DB_EID_INVALID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_USE_ENVIRON", 14)) {
+ /* ^ */
+#ifdef DB_USE_ENVIRON
+ *iv_return = DB_USE_ENVIRON;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_LOCK_SWITCH", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_SWITCH
+ *iv_return = DB_LOCK_SWITCH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOOVERWRITE", 14)) {
+ /* ^ */
+#ifdef DB_NOOVERWRITE
+ *iv_return = DB_NOOVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_NEWSITE", 14)) {
+ /* ^ */
+#ifdef DB_REP_NEWSITE
+ *iv_return = DB_REP_NEWSITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_LOCK_EXPIRE", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_EXPIRE
+ *iv_return = DB_LOCK_EXPIRE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_CHKSUM_SHA1", 14)) {
+ /* ^ */
+#ifdef DB_CHKSUM_SHA1
+ *iv_return = DB_CHKSUM_SHA1;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_HANDLE_LOCK", 14)) {
+ /* ^ */
+#ifdef DB_HANDLE_LOCK
+ *iv_return = DB_HANDLE_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RECORD_LOCK", 14)) {
+ /* ^ */
+#ifdef DB_RECORD_LOCK
+ *iv_return = DB_RECORD_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_ANON", 14)) {
+ /* ^ */
+#ifdef DB_REGION_ANON
+ *iv_return = DB_REGION_ANON;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_INIT", 14)) {
+ /* ^ */
+#ifdef DB_REGION_INIT
+ *iv_return = DB_REGION_INIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_NAME", 14)) {
+ /* ^ */
+#ifdef DB_REGION_NAME
+ *iv_return = DB_REGION_NAME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+/* constant_15: generated by ExtUtils::Constant — maps a 15-character Berkeley DB
+   constant name to its integer value.  The caller is expected to dispatch here
+   only for names of length 15 (dispatcher not visible in this hunk — TODO confirm).
+   Returns PERL_constant_ISIV with *iv_return set, PERL_constant_NOTDEF when the
+   macro is not defined by the db.h in use, or PERL_constant_NOTFOUND.  The
+   `/* ^ *​/` lines are generator markers; they presumably point at the switch
+   character (alignment not verifiable here — whitespace was collapsed). */
+static int
+constant_15 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_APPLY_LOGREG DB_BTREEVERSION DB_CONSUME_WAIT DB_ENV_LOCKDOWN
+ DB_ENV_PANIC_OK DB_ENV_YIELDCPU DB_LOCK_DEFAULT DB_LOCK_INHERIT
+ DB_LOCK_NOTHELD DB_LOCK_PUT_ALL DB_LOCK_PUT_OBJ DB_LOCK_TIMEOUT
+ DB_LOCK_UPGRADE DB_MPOOL_CREATE DB_MPOOL_EXTENT DB_MULTIPLE_KEY
+ DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_PRIORITY_LOW DB_REGION_MAGIC
+ DB_REP_LOGSONLY DB_REP_OUTDATED DB_SURPRISE_KID DB_TEST_POSTLOG
+ DB_TEST_PREOPEN DB_TXN_GETPGNOS DB_TXN_LOCK_2PL DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO DB_TXN_LOG_UNDO DB_VERIFY_FATAL */
+ /* Offset 10 gives the best switch position. */
+ /* Switch on the most-discriminating character, then memEQ the full name. */
+ switch (name[10]) {
+ case 'D':
+ if (memEQ(name, "DB_REP_OUTDATED", 15)) {
+ /* ^ */
+#ifdef DB_REP_OUTDATED
+ *iv_return = DB_REP_OUTDATED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_MULTIPLE_KEY", 15)) {
+ /* ^ */
+#ifdef DB_MULTIPLE_KEY
+ *iv_return = DB_MULTIPLE_KEY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SURPRISE_KID", 15)) {
+ /* ^ */
+#ifdef DB_SURPRISE_KID
+ *iv_return = DB_SURPRISE_KID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_PREOPEN", 15)) {
+ /* ^ */
+#ifdef DB_TEST_PREOPEN
+ *iv_return = DB_TEST_PREOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_LOCK_DEFAULT", 15)) {
+ /* ^ */
+#ifdef DB_LOCK_DEFAULT
+ *iv_return = DB_LOCK_DEFAULT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERIFY_FATAL", 15)) {
+ /* ^ */
+#ifdef DB_VERIFY_FATAL
+ *iv_return = DB_VERIFY_FATAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_LOCK_UPGRADE", 15)) {
+ /* ^ */
+#ifdef DB_LOCK_UPGRADE
+ *iv_return = DB_LOCK_UPGRADE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_LOCK_INHERIT", 15)) {
+ /* ^ */
+ /* Enum member, not a macro: gate on the DB version that introduced it
+ (>= 2.7.1) instead of #ifdef. */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 7) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 7 && \
+ DB_VERSION_PATCH >= 1)
+ *iv_return = DB_LOCK_INHERIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_ENV_PANIC_OK", 15)) {
+ /* ^ */
+#ifdef DB_ENV_PANIC_OK
+ *iv_return = DB_ENV_PANIC_OK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'K':
+ if (memEQ(name, "DB_ENV_LOCKDOWN", 15)) {
+ /* ^ */
+#ifdef DB_ENV_LOCKDOWN
+ *iv_return = DB_ENV_LOCKDOWN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ORDERCHKONLY", 15)) {
+ /* ^ */
+#ifdef DB_ORDERCHKONLY
+ *iv_return = DB_ORDERCHKONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOCK_2PL", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_2PL
+ *iv_return = DB_TXN_LOCK_2PL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ENV_YIELDCPU", 15)) {
+ /* ^ */
+#ifdef DB_ENV_YIELDCPU
+ *iv_return = DB_ENV_YIELDCPU;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_LOCK_TIMEOUT", 15)) {
+ /* ^ */
+ /* Version-gated: present from DB 4.0.7 onward. */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_LOCK_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_MAGIC", 15)) {
+ /* ^ */
+#ifdef DB_REGION_MAGIC
+ *iv_return = DB_REGION_MAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_APPLY_LOGREG", 15)) {
+ /* ^ */
+#ifdef DB_APPLY_LOGREG
+ *iv_return = DB_APPLY_LOGREG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_TXN_GETPGNOS", 15)) {
+ /* ^ */
+ /* Version-gated: present from DB 4.1.17 onward. */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_TXN_GETPGNOS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_BTREEVERSION", 15)) {
+ /* ^ */
+#ifdef DB_BTREEVERSION
+ *iv_return = DB_BTREEVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_CREATE", 15)) {
+ /* ^ */
+#ifdef DB_MPOOL_CREATE
+ *iv_return = DB_MPOOL_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_REP_LOGSONLY", 15)) {
+ /* ^ */
+#ifdef DB_REP_LOGSONLY
+ *iv_return = DB_REP_LOGSONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTLOG", 15)) {
+ /* ^ */
+#ifdef DB_TEST_POSTLOG
+ *iv_return = DB_TEST_POSTLOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_LOCK_NOTHELD", 15)) {
+ /* ^ */
+#ifdef DB_LOCK_NOTHELD
+ *iv_return = DB_LOCK_NOTHELD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT_ALL", 15)) {
+ /* ^ */
+ /* Enum member: gated on DB >= 2.0.0 rather than #ifdef. */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_PUT_ALL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT_OBJ", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_PUT_OBJ;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_MPOOL_EXTENT", 15)) {
+ /* ^ */
+#ifdef DB_MPOOL_EXTENT
+ *iv_return = DB_MPOOL_EXTENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_PRIORITY_LOW", 15)) {
+ /* ^ */
+ /* Version-gated: present from DB 4.1.17 onward. */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_LOW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_CONSUME_WAIT", 15)) {
+ /* ^ */
+#ifdef DB_CONSUME_WAIT
+ *iv_return = DB_CONSUME_WAIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OPFLAGS_MASK", 15)) {
+ /* ^ */
+#ifdef DB_OPFLAGS_MASK
+ *iv_return = DB_OPFLAGS_MASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOG_MASK", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_MASK
+ *iv_return = DB_TXN_LOG_MASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOG_REDO", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_REDO
+ *iv_return = DB_TXN_LOG_REDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOG_UNDO", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_UNDO
+ *iv_return = DB_TXN_LOG_UNDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+/* constant_16: generated by ExtUtils::Constant — maps a 16-character Berkeley DB
+   constant name to its integer value.  Presumably called only for names of
+   length 16 (the length dispatcher is outside this hunk — TODO confirm).
+   Returns PERL_constant_ISIV (value in *iv_return), PERL_constant_NOTDEF if the
+   constant is absent from the db.h being built against, or
+   PERL_constant_NOTFOUND for an unknown name. */
+static int
+constant_16 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_BROADCAST_EID DB_CACHED_COUNTS DB_EID_BROADCAST DB_ENV_CDB_ALLDB
+ DB_ENV_DIRECT_DB DB_ENV_NOLOCKING DB_ENV_OVERWRITE DB_ENV_RPCCLIENT
+ DB_FCNTL_LOCKING DB_JAVA_CALLBACK DB_LOCK_CONFLICT DB_LOCK_DEADLOCK
+ DB_LOCK_MAXLOCKS DB_LOCK_MINLOCKS DB_LOCK_MINWRITE DB_LOCK_NOTEXIST
+ DB_LOCK_PUT_READ DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE DB_MPOOL_DISCARD
+ DB_MPOOL_PRIVATE DB_NOSERVER_HOME DB_PAGE_NOTFOUND DB_PRIORITY_HIGH
+ DB_RECOVER_FATAL DB_REP_DUPMASTER DB_REP_NEWMASTER DB_REP_PERMANENT
+ DB_SECONDARY_BAD DB_TEST_POSTOPEN DB_TEST_POSTSYNC DB_TXN_LOCK_MASK
+ DB_TXN_OPENFILES DB_VERB_CHKPOINT DB_VERB_DEADLOCK DB_VERB_RECOVERY
+ DB_VERB_WAITSFOR DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH
+ DB_VRFY_FLAGMASK */
+ /* Offset 12 gives the best switch position. */
+ /* Switch on the most-discriminating character, then memEQ the full name. */
+ switch (name[12]) {
+ case 'A':
+ if (memEQ(name, "DB_RECOVER_FATAL", 16)) {
+ /* ^ */
+#ifdef DB_RECOVER_FATAL
+ *iv_return = DB_RECOVER_FATAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERSION_MAJOR", 16)) {
+ /* ^ */
+#ifdef DB_VERSION_MAJOR
+ *iv_return = DB_VERSION_MAJOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERSION_PATCH", 16)) {
+ /* ^ */
+#ifdef DB_VERSION_PATCH
+ *iv_return = DB_VERSION_PATCH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'B':
+ if (memEQ(name, "DB_JAVA_CALLBACK", 16)) {
+ /* ^ */
+#ifdef DB_JAVA_CALLBACK
+ *iv_return = DB_JAVA_CALLBACK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_EID_BROADCAST", 16)) {
+ /* ^ */
+#ifdef DB_EID_BROADCAST
+ *iv_return = DB_EID_BROADCAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_DISCARD", 16)) {
+ /* ^ */
+#ifdef DB_MPOOL_DISCARD
+ *iv_return = DB_MPOOL_DISCARD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_LOCK_YOUNGEST", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_YOUNGEST
+ *iv_return = DB_LOCK_YOUNGEST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_NOSERVER_HOME", 16)) {
+ /* ^ */
+#ifdef DB_NOSERVER_HOME
+ *iv_return = DB_NOSERVER_HOME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PRIORITY_HIGH", 16)) {
+ /* ^ */
+ /* Enum member, not a macro: gate on DB >= 4.1.17 instead of #ifdef. */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_HIGH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_ENV_RPCCLIENT", 16)) {
+ /* ^ */
+#ifdef DB_ENV_RPCCLIENT
+ *iv_return = DB_ENV_RPCCLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_OPENFILES", 16)) {
+ /* ^ */
+ /* Version-gated: present from DB 3.1.12 onward. */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_OPENFILES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERSION_MINOR", 16)) {
+ /* ^ */
+#ifdef DB_VERSION_MINOR
+ *iv_return = DB_VERSION_MINOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'K':
+ if (memEQ(name, "DB_ENV_NOLOCKING", 16)) {
+ /* ^ */
+#ifdef DB_ENV_NOLOCKING
+ *iv_return = DB_ENV_NOLOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_FCNTL_LOCKING", 16)) {
+ /* ^ */
+#ifdef DB_FCNTL_LOCKING
+ *iv_return = DB_FCNTL_LOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ENV_CDB_ALLDB", 16)) {
+ /* ^ */
+#ifdef DB_ENV_CDB_ALLDB
+ *iv_return = DB_ENV_CDB_ALLDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_CONFLICT", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_CONFLICT
+ *iv_return = DB_LOCK_CONFLICT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_DEADLOCK", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_DEADLOCK
+ *iv_return = DB_LOCK_DEADLOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_DEADLOCK", 16)) {
+ /* ^ */
+#ifdef DB_VERB_DEADLOCK
+ *iv_return = DB_VERB_DEADLOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_TXN_LOCK_MASK", 16)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_MASK
+ *iv_return = DB_TXN_LOCK_MASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VRFY_FLAGMASK", 16)) {
+ /* ^ */
+#ifdef DB_VRFY_FLAGMASK
+ *iv_return = DB_VRFY_FLAGMASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_REP_PERMANENT", 16)) {
+ /* ^ */
+#ifdef DB_REP_PERMANENT
+ *iv_return = DB_REP_PERMANENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_LOCK_MAXLOCKS", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_MAXLOCKS
+ *iv_return = DB_LOCK_MAXLOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_MINLOCKS", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_MINLOCKS
+ *iv_return = DB_LOCK_MINLOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PAGE_NOTFOUND", 16)) {
+ /* ^ */
+#ifdef DB_PAGE_NOTFOUND
+ *iv_return = DB_PAGE_NOTFOUND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTOPEN", 16)) {
+ /* ^ */
+#ifdef DB_TEST_POSTOPEN
+ *iv_return = DB_TEST_POSTOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_CHKPOINT", 16)) {
+ /* ^ */
+#ifdef DB_VERB_CHKPOINT
+ *iv_return = DB_VERB_CHKPOINT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_ENV_OVERWRITE", 16)) {
+ /* ^ */
+#ifdef DB_ENV_OVERWRITE
+ *iv_return = DB_ENV_OVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_MINWRITE", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_MINWRITE
+ *iv_return = DB_LOCK_MINWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT_READ", 16)) {
+ /* ^ */
+ /* Enum member: gated on DB >= 4.0.7 instead of #ifdef. */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_LOCK_PUT_READ;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_LOGC_BUF_SIZE", 16)) {
+ /* ^ */
+#ifdef DB_LOGC_BUF_SIZE
+ *iv_return = DB_LOGC_BUF_SIZE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_DUPMASTER", 16)) {
+ /* ^ */
+#ifdef DB_REP_DUPMASTER
+ *iv_return = DB_REP_DUPMASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_NEWMASTER", 16)) {
+ /* ^ */
+#ifdef DB_REP_NEWMASTER
+ *iv_return = DB_REP_NEWMASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTSYNC", 16)) {
+ /* ^ */
+#ifdef DB_TEST_POSTSYNC
+ *iv_return = DB_TEST_POSTSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_WAITSFOR", 16)) {
+ /* ^ */
+#ifdef DB_VERB_WAITSFOR
+ *iv_return = DB_VERB_WAITSFOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_ENV_DIRECT_DB", 16)) {
+ /* ^ */
+#ifdef DB_ENV_DIRECT_DB
+ *iv_return = DB_ENV_DIRECT_DB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_CACHED_COUNTS", 16)) {
+ /* ^ */
+#ifdef DB_CACHED_COUNTS
+ *iv_return = DB_CACHED_COUNTS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_MPOOL_PRIVATE", 16)) {
+ /* ^ */
+#ifdef DB_MPOOL_PRIVATE
+ *iv_return = DB_MPOOL_PRIVATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_RECOVERY", 16)) {
+ /* ^ */
+#ifdef DB_VERB_RECOVERY
+ *iv_return = DB_VERB_RECOVERY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_LOCK_NOTEXIST", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_NOTEXIST
+ *iv_return = DB_LOCK_NOTEXIST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_BROADCAST_EID", 16)) {
+ /* ^ */
+#ifdef DB_BROADCAST_EID
+ *iv_return = DB_BROADCAST_EID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SECONDARY_BAD", 16)) {
+ /* ^ */
+#ifdef DB_SECONDARY_BAD
+ *iv_return = DB_SECONDARY_BAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+/* constant_17: generated by ExtUtils::Constant — maps a 17-character Berkeley DB
+   constant name to its value.  Unlike the pure-IV siblings this one also takes
+   pv_return, because DB_VERSION_STRING is a string constant: for it the function
+   returns PERL_constant_ISPV with *pv_return set.  All other matches return
+   PERL_constant_ISIV (*iv_return set), PERL_constant_NOTDEF when the macro is
+   not defined, or PERL_constant_NOTFOUND for an unknown name. */
+static int
+constant_17 (pTHX_ const char *name, IV *iv_return, const char **pv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ENV_DIRECT_LOG DB_ENV_REP_CLIENT DB_ENV_REP_MASTER DB_ENV_STANDALONE
+ DB_ENV_SYSTEM_MEM DB_ENV_TXN_NOSYNC DB_ENV_USER_ALLOC DB_GET_BOTH_RANGE
+ DB_LOG_SILENT_ERR DB_RPC_SERVERPROG DB_RPC_SERVERVERS DB_TEST_ELECTINIT
+ DB_TEST_ELECTSEND DB_TEST_PRERENAME DB_TXN_POPENFILES DB_VERSION_STRING */
+ /* Offset 14 gives the best switch position. */
+ /* Switch on the most-discriminating character, then memEQ the full name. */
+ switch (name[14]) {
+ case 'A':
+ if (memEQ(name, "DB_TEST_PRERENAME", 17)) {
+ /* ^ */
+#ifdef DB_TEST_PRERENAME
+ *iv_return = DB_TEST_PRERENAME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_ENV_REP_CLIENT", 17)) {
+ /* ^ */
+#ifdef DB_ENV_REP_CLIENT
+ *iv_return = DB_ENV_REP_CLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOG_SILENT_ERR", 17)) {
+ /* ^ */
+#ifdef DB_LOG_SILENT_ERR
+ *iv_return = DB_LOG_SILENT_ERR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RPC_SERVERVERS", 17)) {
+ /* ^ */
+#ifdef DB_RPC_SERVERVERS
+ *iv_return = DB_RPC_SERVERVERS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTSEND", 17)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTSEND
+ *iv_return = DB_TEST_ELECTSEND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_VERSION_STRING", 17)) {
+ /* ^ */
+ /* String constant: returned via pv_return, not iv_return. */
+#ifdef DB_VERSION_STRING
+ *pv_return = DB_VERSION_STRING;
+ return PERL_constant_ISPV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ENV_DIRECT_LOG", 17)) {
+ /* ^ */
+#ifdef DB_ENV_DIRECT_LOG
+ *iv_return = DB_ENV_DIRECT_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_USER_ALLOC", 17)) {
+ /* ^ */
+#ifdef DB_ENV_USER_ALLOC
+ *iv_return = DB_ENV_USER_ALLOC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_POPENFILES", 17)) {
+ /* ^ */
+ /* Version-gated: present from DB 3.3.4 onward. */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \
+ DB_VERSION_PATCH >= 4)
+ *iv_return = DB_TXN_POPENFILES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_ENV_SYSTEM_MEM", 17)) {
+ /* ^ */
+#ifdef DB_ENV_SYSTEM_MEM
+ *iv_return = DB_ENV_SYSTEM_MEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_GET_BOTH_RANGE", 17)) {
+ /* ^ */
+#ifdef DB_GET_BOTH_RANGE
+ *iv_return = DB_GET_BOTH_RANGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTINIT", 17)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTINIT
+ *iv_return = DB_TEST_ELECTINIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_ENV_STANDALONE", 17)) {
+ /* ^ */
+#ifdef DB_ENV_STANDALONE
+ *iv_return = DB_ENV_STANDALONE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_RPC_SERVERPROG", 17)) {
+ /* ^ */
+#ifdef DB_RPC_SERVERPROG
+ *iv_return = DB_RPC_SERVERPROG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_ENV_REP_MASTER", 17)) {
+ /* ^ */
+#ifdef DB_ENV_REP_MASTER
+ *iv_return = DB_ENV_REP_MASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_ENV_TXN_NOSYNC", 17)) {
+ /* ^ */
+#ifdef DB_ENV_TXN_NOSYNC
+ *iv_return = DB_ENV_TXN_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+/* constant_18: generated by ExtUtils::Constant — maps an 18-character Berkeley DB
+   constant name to its integer value.  Presumably dispatched to by name length
+   (dispatcher not visible in this hunk — TODO confirm).  Returns
+   PERL_constant_ISIV with *iv_return set, PERL_constant_NOTDEF when the macro
+   is not defined by the db.h in use, or PERL_constant_NOTFOUND. */
+static int
+constant_18 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ALREADY_ABORTED DB_ENV_AUTO_COMMIT DB_ENV_OPEN_CALLED
+ DB_ENV_REGION_INIT DB_LOCK_NOTGRANTED DB_MPOOL_NEW_GROUP
+ DB_PR_RECOVERYTEST DB_SET_TXN_TIMEOUT DB_TEST_ELECTVOTE1
+ DB_TEST_ELECTVOTE2 DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2
+ DB_TEST_POSTRENAME DB_TEST_PREDESTROY DB_TEST_PREEXTOPEN */
+ /* Offset 13 gives the best switch position. */
+ /* Switch on the most-discriminating character, then memEQ the full name. */
+ switch (name[13]) {
+ case 'A':
+ if (memEQ(name, "DB_ENV_OPEN_CALLED", 18)) {
+ /* ^ */
+#ifdef DB_ENV_OPEN_CALLED
+ *iv_return = DB_ENV_OPEN_CALLED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_NOTGRANTED", 18)) {
+ /* ^ */
+#ifdef DB_LOCK_NOTGRANTED
+ *iv_return = DB_LOCK_NOTGRANTED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_TEST_POSTRENAME", 18)) {
+ /* ^ */
+#ifdef DB_TEST_POSTRENAME
+ *iv_return = DB_TEST_POSTRENAME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_MPOOL_NEW_GROUP", 18)) {
+ /* ^ */
+#ifdef DB_MPOOL_NEW_GROUP
+ *iv_return = DB_MPOOL_NEW_GROUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_SET_TXN_TIMEOUT", 18)) {
+ /* ^ */
+#ifdef DB_SET_TXN_TIMEOUT
+ *iv_return = DB_SET_TXN_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_ALREADY_ABORTED", 18)) {
+ /* ^ */
+#ifdef DB_ALREADY_ABORTED
+ *iv_return = DB_ALREADY_ABORTED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_AUTO_COMMIT", 18)) {
+ /* ^ */
+#ifdef DB_ENV_AUTO_COMMIT
+ *iv_return = DB_ENV_AUTO_COMMIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_TEST_PREDESTROY", 18)) {
+ /* ^ */
+#ifdef DB_TEST_PREDESTROY
+ *iv_return = DB_TEST_PREDESTROY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_TEST_PREEXTOPEN", 18)) {
+ /* ^ */
+#ifdef DB_TEST_PREEXTOPEN
+ *iv_return = DB_TEST_PREEXTOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_TEST_ELECTVOTE1", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTVOTE1
+ *iv_return = DB_TEST_ELECTVOTE1;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTVOTE2", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTVOTE2
+ *iv_return = DB_TEST_ELECTVOTE2;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_TEST_ELECTWAIT1", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTWAIT1
+ *iv_return = DB_TEST_ELECTWAIT1;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTWAIT2", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTWAIT2
+ *iv_return = DB_TEST_ELECTWAIT2;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_PR_RECOVERYTEST", 18)) {
+ /* ^ */
+#ifdef DB_PR_RECOVERYTEST
+ *iv_return = DB_PR_RECOVERYTEST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_ENV_REGION_INIT", 18)) {
+ /* ^ */
+#ifdef DB_ENV_REGION_INIT
+ *iv_return = DB_ENV_REGION_INIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_19 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ENV_REP_LOGSONLY DB_LOCK_FREE_LOCKER DB_LOCK_GET_TIMEOUT
+ DB_LOCK_SET_TIMEOUT DB_PRIORITY_DEFAULT DB_REP_HOLDELECTION
+ DB_SET_LOCK_TIMEOUT DB_TEST_POSTDESTROY DB_TEST_POSTEXTOPEN
+ DB_TEST_POSTLOGMETA DB_TEST_SUBDB_LOCKS DB_TXN_FORWARD_ROLL
+ DB_TXN_LOG_UNDOREDO DB_TXN_WRITE_NOSYNC DB_UNRESOLVED_CHILD
+ DB_UPDATE_SECONDARY DB_USE_ENVIRON_ROOT DB_VERB_REPLICATION */
+ /* Offset 9 gives the best switch position. */
+ switch (name[9]) {
+ case 'C':
+ if (memEQ(name, "DB_SET_LOCK_TIMEOUT", 19)) {
+ /* ^ */
+#ifdef DB_SET_LOCK_TIMEOUT
+ *iv_return = DB_SET_LOCK_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_LOCK_GET_TIMEOUT", 19)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_LOCK_GET_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_SET_TIMEOUT", 19)) {
+ /* ^ */
+#ifdef DB_LOCK_SET_TIMEOUT
+ *iv_return = DB_LOCK_SET_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_REPLICATION", 19)) {
+ /* ^ */
+#ifdef DB_VERB_REPLICATION
+ *iv_return = DB_VERB_REPLICATION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_TXN_LOG_UNDOREDO", 19)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_UNDOREDO
+ *iv_return = DB_TXN_LOG_UNDOREDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_TXN_WRITE_NOSYNC", 19)) {
+ /* ^ */
+#ifdef DB_TXN_WRITE_NOSYNC
+ *iv_return = DB_TXN_WRITE_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_REP_HOLDELECTION", 19)) {
+ /* ^ */
+#ifdef DB_REP_HOLDELECTION
+ *iv_return = DB_REP_HOLDELECTION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_UNRESOLVED_CHILD", 19)) {
+ /* ^ */
+#ifdef DB_UNRESOLVED_CHILD
+ *iv_return = DB_UNRESOLVED_CHILD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_TEST_POSTDESTROY", 19)) {
+ /* ^ */
+#ifdef DB_TEST_POSTDESTROY
+ *iv_return = DB_TEST_POSTDESTROY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTEXTOPEN", 19)) {
+ /* ^ */
+#ifdef DB_TEST_POSTEXTOPEN
+ *iv_return = DB_TEST_POSTEXTOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTLOGMETA", 19)) {
+ /* ^ */
+#ifdef DB_TEST_POSTLOGMETA
+ *iv_return = DB_TEST_POSTLOGMETA;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENV_REP_LOGSONLY", 19)) {
+ /* ^ */
+#ifdef DB_ENV_REP_LOGSONLY
+ *iv_return = DB_ENV_REP_LOGSONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_LOCK_FREE_LOCKER", 19)) {
+ /* ^ */
+#ifdef DB_LOCK_FREE_LOCKER
+ *iv_return = DB_LOCK_FREE_LOCKER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_FORWARD_ROLL", 19)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_FORWARD_ROLL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_PRIORITY_DEFAULT", 19)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_DEFAULT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_TEST_SUBDB_LOCKS", 19)) {
+ /* ^ */
+#ifdef DB_TEST_SUBDB_LOCKS
+ *iv_return = DB_TEST_SUBDB_LOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_USE_ENVIRON_ROOT", 19)) {
+ /* ^ */
+#ifdef DB_USE_ENVIRON_ROOT
+ *iv_return = DB_USE_ENVIRON_ROOT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_UPDATE_SECONDARY", 19)) {
+ /* ^ */
+#ifdef DB_UPDATE_SECONDARY
+ *iv_return = DB_UPDATE_SECONDARY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_20 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_CXX_NO_EXCEPTIONS DB_LOGFILEID_INVALID DB_PANIC_ENVIRONMENT
+ DB_PRIORITY_VERY_LOW DB_TEST_PREEXTDELETE DB_TEST_PREEXTUNLINK
+ DB_TXN_BACKWARD_ROLL DB_TXN_LOCK_OPTIMIST */
+ /* Offset 14 gives the best switch position. */
+ switch (name[14]) {
+ case 'D':
+ if (memEQ(name, "DB_TEST_PREEXTDELETE", 20)) {
+ /* ^ */
+#ifdef DB_TEST_PREEXTDELETE
+ *iv_return = DB_TEST_PREEXTDELETE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_BACKWARD_ROLL", 20)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_BACKWARD_ROLL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_LOGFILEID_INVALID", 20)) {
+ /* ^ */
+#ifdef DB_LOGFILEID_INVALID
+ *iv_return = DB_LOGFILEID_INVALID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_PANIC_ENVIRONMENT", 20)) {
+ /* ^ */
+#ifdef DB_PANIC_ENVIRONMENT
+ *iv_return = DB_PANIC_ENVIRONMENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_CXX_NO_EXCEPTIONS", 20)) {
+ /* ^ */
+#ifdef DB_CXX_NO_EXCEPTIONS
+ *iv_return = DB_CXX_NO_EXCEPTIONS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_PRIORITY_VERY_LOW", 20)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_VERY_LOW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_TXN_LOCK_OPTIMIST", 20)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_OPTIMIST
+ *iv_return = DB_TXN_LOCK_OPTIMIST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_TEST_PREEXTUNLINK", 20)) {
+ /* ^ */
+#ifdef DB_TEST_PREEXTUNLINK
+ *iv_return = DB_TEST_PREEXTUNLINK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_21 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_LOCK_UPGRADE_WRITE DB_PRIORITY_VERY_HIGH DB_TEST_POSTEXTDELETE
+ DB_TEST_POSTEXTUNLINK DB_TXN_BACKWARD_ALLOC */
+ /* Offset 16 gives the best switch position. */
+ switch (name[16]) {
+ case 'A':
+ if (memEQ(name, "DB_TXN_BACKWARD_ALLOC", 21)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_TXN_BACKWARD_ALLOC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_TEST_POSTEXTDELETE", 21)) {
+ /* ^ */
+#ifdef DB_TEST_POSTEXTDELETE
+ *iv_return = DB_TEST_POSTEXTDELETE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_TEST_POSTEXTUNLINK", 21)) {
+ /* ^ */
+#ifdef DB_TEST_POSTEXTUNLINK
+ *iv_return = DB_TEST_POSTEXTUNLINK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_LOCK_UPGRADE_WRITE", 21)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \
+ DB_VERSION_PATCH >= 4)
+ *iv_return = DB_LOCK_UPGRADE_WRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_PRIORITY_VERY_HIGH", 21)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_VERY_HIGH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant (pTHX_ const char *name, STRLEN len, IV *iv_return, const char **pv_return) {
+ /* Initially switch on the length of the name. */
+ /* When generated this function returned values for the list of names given
+ in this section of perl code. Rather than manually editing these functions
+ to add or remove constants, which would result in this comment and section
+ of code becoming inaccurate, we recommend that you edit this section of
+ code, and use it to regenerate a new set of constant functions which you
+ then use to replace the originals.
+
+ Regenerate these constant functions by feeding this entire source file to
+ perl -x
+
+#!/home/paul/perl/install/redhat6.1/5.8.0/bin/perl5.8.0 -w
+use ExtUtils::Constant qw (constant_types C_constant XS_constant);
+
+my $types = {map {($_, 1)} qw(IV PV)};
+my @names = (qw(DB_AFTER DB_AGGRESSIVE DB_ALREADY_ABORTED DB_APPEND
+ DB_APPLY_LOGREG DB_APP_INIT DB_ARCH_ABS DB_ARCH_DATA DB_ARCH_LOG
+ DB_AUTO_COMMIT DB_BEFORE DB_BROADCAST_EID DB_BTREEMAGIC
+ DB_BTREEOLDVER DB_BTREEVERSION DB_CACHED_COUNTS DB_CDB_ALLDB
+ DB_CHECKPOINT DB_CHKSUM_SHA1 DB_CLIENT DB_CL_WRITER DB_COMMIT
+ DB_CONSUME DB_CONSUME_WAIT DB_CREATE DB_CURLSN DB_CURRENT
+ DB_CXX_NO_EXCEPTIONS DB_DELETED DB_DELIMITER DB_DIRECT
+ DB_DIRECT_DB DB_DIRECT_LOG DB_DIRTY_READ DB_DONOTINDEX DB_DUP
+ DB_DUPCURSOR DB_DUPSORT DB_EID_BROADCAST DB_EID_INVALID
+ DB_ENCRYPT DB_ENCRYPT_AES DB_ENV_APPINIT DB_ENV_AUTO_COMMIT
+ DB_ENV_CDB DB_ENV_CDB_ALLDB DB_ENV_CREATE DB_ENV_DBLOCAL
+ DB_ENV_DIRECT_DB DB_ENV_DIRECT_LOG DB_ENV_FATAL DB_ENV_LOCKDOWN
+ DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_NOLOCKING DB_ENV_NOMMAP
+ DB_ENV_NOPANIC DB_ENV_OPEN_CALLED DB_ENV_OVERWRITE
+ DB_ENV_PANIC_OK DB_ENV_PRIVATE DB_ENV_REGION_INIT
+ DB_ENV_REP_CLIENT DB_ENV_REP_LOGSONLY DB_ENV_REP_MASTER
+ DB_ENV_RPCCLIENT DB_ENV_RPCCLIENT_GIVEN DB_ENV_STANDALONE
+ DB_ENV_SYSTEM_MEM DB_ENV_THREAD DB_ENV_TXN DB_ENV_TXN_NOSYNC
+ DB_ENV_TXN_WRITE_NOSYNC DB_ENV_USER_ALLOC DB_ENV_YIELDCPU
+ DB_EXCL DB_EXTENT DB_FAST_STAT DB_FCNTL_LOCKING DB_FILE_ID_LEN
+ DB_FIRST DB_FIXEDLEN DB_FLUSH DB_FORCE DB_GETREC DB_GET_BOTH
+ DB_GET_BOTHC DB_GET_BOTH_RANGE DB_GET_RECNO DB_HANDLE_LOCK
+ DB_HASHMAGIC DB_HASHOLDVER DB_HASHVERSION DB_INCOMPLETE
+ DB_INIT_CDB DB_INIT_LOCK DB_INIT_LOG DB_INIT_MPOOL DB_INIT_TXN
+ DB_INVALID_EID DB_JAVA_CALLBACK DB_JOINENV DB_JOIN_ITEM
+ DB_JOIN_NOSORT DB_KEYEMPTY DB_KEYEXIST DB_KEYFIRST DB_KEYLAST
+ DB_LAST DB_LOCKDOWN DB_LOCKMAGIC DB_LOCKVERSION DB_LOCK_CONFLICT
+ DB_LOCK_DEADLOCK DB_LOCK_DEFAULT DB_LOCK_EXPIRE
+ DB_LOCK_FREE_LOCKER DB_LOCK_MAXLOCKS DB_LOCK_MINLOCKS
+ DB_LOCK_MINWRITE DB_LOCK_NORUN DB_LOCK_NOTEXIST
+ DB_LOCK_NOTGRANTED DB_LOCK_NOTHELD DB_LOCK_NOWAIT DB_LOCK_OLDEST
+ DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_RIW_N
+ DB_LOCK_RW_N DB_LOCK_SET_TIMEOUT DB_LOCK_SWITCH DB_LOCK_UPGRADE
+ DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE DB_LOGFILEID_INVALID
+ DB_LOGMAGIC DB_LOGOLDVER DB_LOGVERSION DB_LOG_DISK DB_LOG_LOCKED
+ DB_LOG_SILENT_ERR DB_MAX_PAGES DB_MAX_RECORDS DB_MPOOL_CLEAN
+ DB_MPOOL_CREATE DB_MPOOL_DIRTY DB_MPOOL_DISCARD DB_MPOOL_EXTENT
+ DB_MPOOL_LAST DB_MPOOL_NEW DB_MPOOL_NEW_GROUP DB_MPOOL_PRIVATE
+ DB_MULTIPLE DB_MULTIPLE_KEY DB_MUTEXDEBUG DB_MUTEXLOCKS
+ DB_NEEDSPLIT DB_NEXT DB_NEXT_DUP DB_NEXT_NODUP DB_NOCOPY
+ DB_NODUPDATA DB_NOLOCKING DB_NOMMAP DB_NOORDERCHK DB_NOOVERWRITE
+ DB_NOPANIC DB_NORECURSE DB_NOSERVER DB_NOSERVER_HOME
+ DB_NOSERVER_ID DB_NOSYNC DB_NOTFOUND DB_ODDFILESIZE DB_OK_BTREE
+ DB_OK_HASH DB_OK_QUEUE DB_OK_RECNO DB_OLD_VERSION DB_OPEN_CALLED
+ DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_OVERWRITE DB_PAD DB_PAGEYIELD
+ DB_PAGE_LOCK DB_PAGE_NOTFOUND DB_PANIC_ENVIRONMENT DB_PERMANENT
+ DB_POSITION DB_POSITIONI DB_PREV DB_PREV_NODUP DB_PRINTABLE
+ DB_PRIVATE DB_PR_HEADERS DB_PR_PAGE DB_PR_RECOVERYTEST
+ DB_QAMMAGIC DB_QAMOLDVER DB_QAMVERSION DB_RDONLY DB_RDWRMASTER
+ DB_RECNUM DB_RECORDCOUNT DB_RECORD_LOCK DB_RECOVER
+ DB_RECOVER_FATAL DB_REGION_ANON DB_REGION_INIT DB_REGION_MAGIC
+ DB_REGION_NAME DB_REGISTERED DB_RENAMEMAGIC DB_RENUMBER
+ DB_REP_CLIENT DB_REP_DUPMASTER DB_REP_HOLDELECTION
+ DB_REP_LOGSONLY DB_REP_MASTER DB_REP_NEWMASTER DB_REP_NEWSITE
+ DB_REP_OUTDATED DB_REP_PERMANENT DB_REP_UNAVAIL DB_REVSPLITOFF
+ DB_RMW DB_RPC_SERVERPROG DB_RPC_SERVERVERS DB_RUNRECOVERY
+ DB_SALVAGE DB_SECONDARY_BAD DB_SEQUENTIAL DB_SET
+ DB_SET_LOCK_TIMEOUT DB_SET_RANGE DB_SET_RECNO DB_SET_TXN_NOW
+ DB_SET_TXN_TIMEOUT DB_SNAPSHOT DB_STAT_CLEAR DB_SURPRISE_KID
+ DB_SWAPBYTES DB_SYSTEM_MEM DB_TEMPORARY DB_TEST_ELECTINIT
+ DB_TEST_ELECTSEND DB_TEST_ELECTVOTE1 DB_TEST_ELECTVOTE2
+ DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2 DB_TEST_POSTDESTROY
+ DB_TEST_POSTEXTDELETE DB_TEST_POSTEXTOPEN DB_TEST_POSTEXTUNLINK
+ DB_TEST_POSTLOG DB_TEST_POSTLOGMETA DB_TEST_POSTOPEN
+ DB_TEST_POSTRENAME DB_TEST_POSTSYNC DB_TEST_PREDESTROY
+ DB_TEST_PREEXTDELETE DB_TEST_PREEXTOPEN DB_TEST_PREEXTUNLINK
+ DB_TEST_PREOPEN DB_TEST_PRERENAME DB_TEST_SUBDB_LOCKS DB_THREAD
+ DB_TIMEOUT DB_TRUNCATE DB_TXNMAGIC DB_TXNVERSION DB_TXN_CKP
+ DB_TXN_LOCK DB_TXN_LOCK_2PL DB_TXN_LOCK_MASK
+ DB_TXN_LOCK_OPTIMIST DB_TXN_LOCK_OPTIMISTIC DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO DB_TXN_LOG_UNDO DB_TXN_LOG_UNDOREDO
+ DB_TXN_NOSYNC DB_TXN_NOWAIT DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO
+ DB_TXN_WRITE_NOSYNC DB_UNRESOLVED_CHILD DB_UPDATE_SECONDARY
+ DB_UPGRADE DB_USE_ENVIRON DB_USE_ENVIRON_ROOT DB_VERB_CHKPOINT
+ DB_VERB_DEADLOCK DB_VERB_RECOVERY DB_VERB_REPLICATION
+ DB_VERB_WAITSFOR DB_VERIFY DB_VERIFY_BAD DB_VERIFY_FATAL
+ DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH
+ DB_VRFY_FLAGMASK DB_WRITECURSOR DB_WRITELOCK DB_WRITEOPEN
+ DB_WRNOSYNC DB_XA_CREATE DB_XIDDATASIZE DB_YIELDCPU),
+ {name=>"DB_BTREE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_HASH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_DUMP", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_GET", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_GET_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_LOCK_INHERIT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 7) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 7 && \\\n DB_VERSION_PATCH >= 1)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT_ALL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT_OBJ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT_READ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_LOCK_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_LOCK_TRADE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_LOCK_UPGRADE_WRITE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n DB_VERSION_PATCH >= 4)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_DEFAULT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_VERY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_VERY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_QUEUE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 55)\n", "#endif\n"]},
+ {name=>"DB_RECNO", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_TXN_ABORT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_APPLY", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_TXN_BACKWARD_ALLOC", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_TXN_BACKWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_FORWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_GETPGNOS", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_TXN_OPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_POPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n DB_VERSION_PATCH >= 4)\n", "#endif\n"]},
+ {name=>"DB_TXN_PRINT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_UNKNOWN", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_VERSION_STRING", type=>"PV"});
+
+print constant_types(); # macro defs
+foreach (C_constant ("BerkeleyDB", 'constant', 'IV', $types, undef, 3, @names) ) {
+ print $_, "\n"; # C constant subs
+}
+print "#### XS Section:\n";
+print XS_constant ("BerkeleyDB", $types);
+__END__
+ */
+
+ switch (len) {
+ case 6:
+ return constant_6 (aTHX_ name, iv_return);
+ break;
+ case 7:
+ return constant_7 (aTHX_ name, iv_return);
+ break;
+ case 8:
+ return constant_8 (aTHX_ name, iv_return);
+ break;
+ case 9:
+ return constant_9 (aTHX_ name, iv_return);
+ break;
+ case 10:
+ return constant_10 (aTHX_ name, iv_return);
+ break;
+ case 11:
+ return constant_11 (aTHX_ name, iv_return);
+ break;
+ case 12:
+ return constant_12 (aTHX_ name, iv_return);
+ break;
+ case 13:
+ return constant_13 (aTHX_ name, iv_return);
+ break;
+ case 14:
+ return constant_14 (aTHX_ name, iv_return);
+ break;
+ case 15:
+ return constant_15 (aTHX_ name, iv_return);
+ break;
+ case 16:
+ return constant_16 (aTHX_ name, iv_return);
+ break;
+ case 17:
+ return constant_17 (aTHX_ name, iv_return, pv_return);
+ break;
+ case 18:
+ return constant_18 (aTHX_ name, iv_return);
+ break;
+ case 19:
+ return constant_19 (aTHX_ name, iv_return);
+ break;
+ case 20:
+ return constant_20 (aTHX_ name, iv_return);
+ break;
+ case 21:
+ return constant_21 (aTHX_ name, iv_return);
+ break;
+ case 22:
+ /* Names all of length 22. */
+ /* DB_ENV_RPCCLIENT_GIVEN DB_TXN_LOCK_OPTIMISTIC */
+ /* Offset 8 gives the best switch position. */
+ switch (name[8]) {
+ case 'O':
+ if (memEQ(name, "DB_TXN_LOCK_OPTIMISTIC", 22)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_OPTIMISTIC
+ *iv_return = DB_TXN_LOCK_OPTIMISTIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENV_RPCCLIENT_GIVEN", 22)) {
+ /* ^ */
+#ifdef DB_ENV_RPCCLIENT_GIVEN
+ *iv_return = DB_ENV_RPCCLIENT_GIVEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ break;
+ case 23:
+ if (memEQ(name, "DB_ENV_TXN_WRITE_NOSYNC", 23)) {
+#ifdef DB_ENV_TXN_WRITE_NOSYNC
+ *iv_return = DB_ENV_TXN_WRITE_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
diff --git a/storage/bdb/perl/BerkeleyDB/constants.xs b/storage/bdb/perl/BerkeleyDB/constants.xs
new file mode 100644
index 00000000000..1b2c8b2c3c8
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/constants.xs
@@ -0,0 +1,87 @@
+void
+constant(sv)
+ PREINIT:
+#ifdef dXSTARG
+ dXSTARG; /* Faster if we have it. */
+#else
+ dTARGET;
+#endif
+ STRLEN len;
+ int type;
+ IV iv;
+ /* NV nv; Uncomment this if you need to return NVs */
+ const char *pv;
+ INPUT:
+ SV * sv;
+ const char * s = SvPV(sv, len);
+ PPCODE:
+ /* Change this to constant(aTHX_ s, len, &iv, &nv);
+ if you need to return both NVs and IVs */
+ type = constant(aTHX_ s, len, &iv, &pv);
+ /* Return 1 or 2 items. First is error message, or undef if no error.
+ Second, if present, is found value */
+ switch (type) {
+ case PERL_constant_NOTFOUND:
+ sv = sv_2mortal(newSVpvf("%s is not a valid BerkeleyDB macro", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_NOTDEF:
+ sv = sv_2mortal(newSVpvf(
+ "Your vendor has not defined BerkeleyDB macro %s, used", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_ISIV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHi(iv);
+ break;
+ /* Uncomment this if you need to return NOs
+ case PERL_constant_ISNO:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_no);
+ break; */
+ /* Uncomment this if you need to return NVs
+ case PERL_constant_ISNV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHn(nv);
+ break; */
+ case PERL_constant_ISPV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, strlen(pv));
+ break;
+ /* Uncomment this if you need to return PVNs
+ case PERL_constant_ISPVN:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, iv);
+ break; */
+ /* Uncomment this if you need to return SVs
+ case PERL_constant_ISSV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(sv);
+ break; */
+ /* Uncomment this if you need to return UNDEFs
+ case PERL_constant_ISUNDEF:
+ break; */
+ /* Uncomment this if you need to return UVs
+ case PERL_constant_ISUV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHu((UV)iv);
+ break; */
+ /* Uncomment this if you need to return YESs
+ case PERL_constant_ISYES:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_yes);
+ break; */
+ default:
+ sv = sv_2mortal(newSVpvf(
+ "Unexpected return type %d while processing BerkeleyDB macro %s, used",
+ type, s));
+ PUSHs(sv);
+ }
diff --git a/storage/bdb/perl/BerkeleyDB/dbinfo b/storage/bdb/perl/BerkeleyDB/dbinfo
new file mode 100755
index 00000000000..af2c45facf5
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/dbinfo
@@ -0,0 +1,112 @@
+#!/usr/local/bin/perl
+
+# Name: dbinfo -- identify berkeley DB version used to create
+# a database file
+#
+# Author: Paul Marquess <Paul.Marquess@btinternet.com>
+# Version: 1.03
+# Date 17th September 2000
+#
+# Copyright (c) 1998-2002 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+# Todo: Print more stats on a db file, e.g. no of records
+# add log/txn/lock files
+
+use strict ;
+
+my %Data =
+ (
+ 0x053162 => {
+ Type => "Btree",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "Unknown (older than 1.71)",
+ 3 => "1.71 -> 1.85, 1.86",
+ 4 => "Unknown",
+ 5 => "2.0.0 -> 2.3.0",
+ 6 => "2.3.1 -> 2.7.7",
+ 7 => "3.0.x",
+ 8 => "3.1.x -> 4.0.x",
+ 9 => "4.1.x or greater",
+ }
+ },
+ 0x061561 => {
+ Type => "Hash",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "1.71 -> 1.85",
+ 3 => "1.86",
+ 4 => "2.0.0 -> 2.1.0",
+ 5 => "2.2.6 -> 2.7.7",
+ 6 => "3.0.x",
+ 7 => "3.1.x -> 4.0.x",
+ 8 => "4.1.x or greater",
+ }
+ },
+ 0x042253 => {
+ Type => "Queue",
+ Versions =>
+ {
+ 1 => "3.0.x",
+ 2 => "3.1.x",
+ 3 => "3.2.x -> 4.0.x",
+ 4 => "4.1.x or greater",
+ }
+ },
+ ) ;
+
+die "Usage: dbinfo file\n" unless @ARGV == 1 ;
+
+print "testing file $ARGV[0]...\n\n" ;
+open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
+
+my $buff ;
+read F, $buff, 20 ;
+
+my (@info) = unpack("NNNNN", $buff) ;
+my (@info1) = unpack("VVVVV", $buff) ;
+my ($magic, $version, $endian) ;
+
+if ($Data{$info[0]}) # first try DB 1.x format
+{
+ $magic = $info[0] ;
+ $version = $info[1] ;
+ $endian = "Unknown" ;
+}
+elsif ($Data{$info[3]}) # next DB 2.x big endian
+{
+ $magic = $info[3] ;
+ $version = $info[4] ;
+ $endian = "Big Endian" ;
+}
+elsif ($Data{$info1[3]}) # next DB 2.x little endian
+{
+ $magic = $info1[3] ;
+ $version = $info1[4] ;
+ $endian = "Little Endian" ;
+}
+else
+ { die "not a Berkeley DB database file.\n" }
+
+my $type = $Data{$magic} ;
+$magic = sprintf "%06X", $magic ;
+
+my $ver_string = "Unknown" ;
+$ver_string = $type->{Versions}{$version}
+ if defined $type->{Versions}{$version} ;
+
+print <<EOM ;
+File Type: Berkeley DB $type->{Type} file.
+File Version ID: $version
+Built with Berkeley DB: $ver_string
+Byte Order: $endian
+Magic: $magic
+EOM
+
+close F ;
+
+exit ;
diff --git a/storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl b/storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl
new file mode 100644
index 00000000000..6d7faeed2e2
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lpthreads" ];
diff --git a/storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl b/storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
new file mode 100644
index 00000000000..b531673e6e0
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lthread" ];
diff --git a/storage/bdb/perl/BerkeleyDB/hints/solaris.pl b/storage/bdb/perl/BerkeleyDB/hints/solaris.pl
new file mode 100644
index 00000000000..ddd941d634a
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/hints/solaris.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lmt" ];
diff --git a/storage/bdb/perl/BerkeleyDB/mkconsts b/storage/bdb/perl/BerkeleyDB/mkconsts
new file mode 100644
index 00000000000..7e0964333cc
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/mkconsts
@@ -0,0 +1,770 @@
+#!/usr/bin/perl
+
+use ExtUtils::Constant qw(WriteConstants);
+
+use constant DEFINE => 'define' ;
+use constant STRING => 'string' ;
+use constant IGNORE => 'ignore' ;
+
+%constants = (
+
+ #########
+ # 2.0.0
+ #########
+
+ DBM_INSERT => IGNORE,
+ DBM_REPLACE => IGNORE,
+ DBM_SUFFIX => IGNORE,
+ DB_AFTER => DEFINE,
+ DB_AM_DUP => IGNORE,
+ DB_AM_INMEM => IGNORE,
+ DB_AM_LOCKING => IGNORE,
+ DB_AM_LOGGING => IGNORE,
+ DB_AM_MLOCAL => IGNORE,
+ DB_AM_PGDEF => IGNORE,
+ DB_AM_RDONLY => IGNORE,
+ DB_AM_RECOVER => IGNORE,
+ DB_AM_SWAP => IGNORE,
+ DB_AM_TXN => IGNORE,
+ DB_APP_INIT => DEFINE,
+ DB_BEFORE => DEFINE,
+ DB_BTREEMAGIC => DEFINE,
+ DB_BTREEVERSION => DEFINE,
+ DB_BT_DELIMITER => IGNORE,
+ DB_BT_EOF => IGNORE,
+ DB_BT_FIXEDLEN => IGNORE,
+ DB_BT_PAD => IGNORE,
+ DB_BT_SNAPSHOT => IGNORE,
+ DB_CHECKPOINT => DEFINE,
+ DB_CREATE => DEFINE,
+ DB_CURRENT => DEFINE,
+ DB_DBT_INTERNAL => IGNORE,
+ DB_DBT_MALLOC => IGNORE,
+ DB_DBT_PARTIAL => IGNORE,
+ DB_DBT_USERMEM => IGNORE,
+ DB_DELETED => DEFINE,
+ DB_DELIMITER => DEFINE,
+ DB_DUP => DEFINE,
+ DB_EXCL => DEFINE,
+ DB_FIRST => DEFINE,
+ DB_FIXEDLEN => DEFINE,
+ DB_FLUSH => DEFINE,
+ DB_HASHMAGIC => DEFINE,
+ DB_HASHVERSION => DEFINE,
+ DB_HS_DIRTYMETA => IGNORE,
+ DB_INCOMPLETE => DEFINE,
+ DB_INIT_LOCK => DEFINE,
+ DB_INIT_LOG => DEFINE,
+ DB_INIT_MPOOL => DEFINE,
+ DB_INIT_TXN => DEFINE,
+ DB_KEYEXIST => DEFINE,
+ DB_KEYFIRST => DEFINE,
+ DB_KEYLAST => DEFINE,
+ DB_LAST => DEFINE,
+ DB_LOCKMAGIC => DEFINE,
+ DB_LOCKVERSION => DEFINE,
+ DB_LOCK_DEADLOCK => DEFINE,
+ DB_LOCK_NOTGRANTED => DEFINE,
+ DB_LOCK_NOTHELD => DEFINE,
+ DB_LOCK_NOWAIT => DEFINE,
+ DB_LOCK_RIW_N => DEFINE,
+ DB_LOCK_RW_N => DEFINE,
+ DB_LOGMAGIC => DEFINE,
+ DB_LOGVERSION => DEFINE,
+ DB_MAX_PAGES => DEFINE,
+ DB_MAX_RECORDS => DEFINE,
+ DB_MPOOL_CLEAN => DEFINE,
+ DB_MPOOL_CREATE => DEFINE,
+ DB_MPOOL_DIRTY => DEFINE,
+ DB_MPOOL_DISCARD => DEFINE,
+ DB_MPOOL_LAST => DEFINE,
+ DB_MPOOL_NEW => DEFINE,
+ DB_MPOOL_PRIVATE => DEFINE,
+ DB_MUTEXDEBUG => DEFINE,
+ DB_NEEDSPLIT => DEFINE,
+ DB_NEXT => DEFINE,
+ DB_NOOVERWRITE => DEFINE,
+ DB_NORECURSE => DEFINE,
+ DB_NOSYNC => DEFINE,
+ DB_NOTFOUND => DEFINE,
+ DB_PAD => DEFINE,
+ DB_PREV => DEFINE,
+ DB_RDONLY => DEFINE,
+ DB_REGISTERED => DEFINE,
+ DB_RE_MODIFIED => IGNORE,
+ DB_SET => DEFINE,
+ DB_SET_RANGE => DEFINE,
+ DB_SNAPSHOT => DEFINE,
+ DB_SWAPBYTES => DEFINE,
+ DB_TRUNCATE => DEFINE,
+ DB_TXNMAGIC => DEFINE,
+ DB_TXNVERSION => DEFINE,
+ DB_TXN_BACKWARD_ROLL => DEFINE,
+ DB_TXN_FORWARD_ROLL => DEFINE,
+ DB_TXN_LOCK_2PL => DEFINE,
+ DB_TXN_LOCK_MASK => DEFINE,
+ DB_TXN_LOCK_OPTIMISTIC => DEFINE,
+ DB_TXN_LOG_MASK => DEFINE,
+ DB_TXN_LOG_REDO => DEFINE,
+ DB_TXN_LOG_UNDO => DEFINE,
+ DB_TXN_LOG_UNDOREDO => DEFINE,
+ DB_TXN_OPENFILES => DEFINE,
+ DB_TXN_REDO => DEFINE,
+ DB_TXN_UNDO => DEFINE,
+ DB_USE_ENVIRON => DEFINE,
+ DB_USE_ENVIRON_ROOT => DEFINE,
+ DB_VERSION_MAJOR => DEFINE,
+ DB_VERSION_MINOR => DEFINE,
+ DB_VERSION_PATCH => DEFINE,
+ DB_VERSION_STRING => STRING,
+ _DB_H_ => IGNORE,
+ __BIT_TYPES_DEFINED__ => IGNORE,
+ const => IGNORE,
+
+ # enum DBTYPE
+ DB_BTREE => '2.0.0',
+ DB_HASH => '2.0.0',
+ DB_RECNO => '2.0.0',
+ DB_UNKNOWN => '2.0.0',
+
+ # enum db_lockop_t
+ DB_LOCK_DUMP => '2.0.0',
+ DB_LOCK_GET => '2.0.0',
+ DB_LOCK_PUT => '2.0.0',
+ DB_LOCK_PUT_ALL => '2.0.0',
+ DB_LOCK_PUT_OBJ => '2.0.0',
+
+ # enum db_lockmode_t
+ DB_LOCK_NG => IGNORE, # 2.0.0
+ DB_LOCK_READ => IGNORE, # 2.0.0
+ DB_LOCK_WRITE => IGNORE, # 2.0.0
+ DB_LOCK_IREAD => IGNORE, # 2.0.0
+ DB_LOCK_IWRITE => IGNORE, # 2.0.0
+ DB_LOCK_IWR => IGNORE, # 2.0.0
+
+ # enum ACTION
+ FIND => IGNORE, # 2.0.0
+ ENTER => IGNORE, # 2.0.0
+
+ #########
+ # 2.0.3
+ #########
+
+ DB_SEQUENTIAL => DEFINE,
+ DB_TEMPORARY => DEFINE,
+
+ #########
+ # 2.1.0
+ #########
+
+ DB_NOMMAP => DEFINE,
+
+ #########
+ # 2.2.6
+ #########
+
+ DB_AM_THREAD => IGNORE,
+ DB_ARCH_ABS => DEFINE,
+ DB_ARCH_DATA => DEFINE,
+ DB_ARCH_LOG => DEFINE,
+ DB_LOCK_CONFLICT => DEFINE,
+ DB_LOCK_DEFAULT => DEFINE,
+ DB_LOCK_NORUN => DEFINE,
+ DB_LOCK_OLDEST => DEFINE,
+ DB_LOCK_RANDOM => DEFINE,
+ DB_LOCK_YOUNGEST => DEFINE,
+ DB_RECOVER => DEFINE,
+ DB_RECOVER_FATAL => DEFINE,
+ DB_THREAD => DEFINE,
+ DB_TXN_NOSYNC => DEFINE,
+
+ #########
+ # 2.3.0
+ #########
+
+ DB_BTREEOLDVER => DEFINE,
+ DB_BT_RECNUM => IGNORE,
+ DB_FILE_ID_LEN => DEFINE,
+ DB_GETREC => DEFINE,
+ DB_HASHOLDVER => DEFINE,
+ DB_KEYEMPTY => DEFINE,
+ DB_LOGOLDVER => DEFINE,
+ DB_RECNUM => DEFINE,
+ DB_RECORDCOUNT => DEFINE,
+ DB_RENUMBER => DEFINE,
+ DB_RE_DELIMITER => IGNORE,
+ DB_RE_FIXEDLEN => IGNORE,
+ DB_RE_PAD => IGNORE,
+ DB_RE_RENUMBER => IGNORE,
+ DB_RE_SNAPSHOT => IGNORE,
+
+ #########
+ # 2.3.1
+ #########
+
+ DB_GET_RECNO => DEFINE,
+ DB_SET_RECNO => DEFINE,
+
+ #########
+ # 2.3.3
+ #########
+
+ DB_APPEND => DEFINE,
+
+ #########
+ # 2.3.6
+ #########
+
+ DB_TXN_CKP => DEFINE,
+
+ #########
+ # 2.3.11
+ #########
+
+ DB_ENV_APPINIT => DEFINE,
+ DB_ENV_STANDALONE => DEFINE,
+ DB_ENV_THREAD => DEFINE,
+
+ #########
+ # 2.3.12
+ #########
+
+ DB_FUNC_CALLOC => IGNORE,
+ DB_FUNC_CLOSE => IGNORE,
+ DB_FUNC_DIRFREE => IGNORE,
+ DB_FUNC_DIRLIST => IGNORE,
+ DB_FUNC_EXISTS => IGNORE,
+ DB_FUNC_FREE => IGNORE,
+ DB_FUNC_FSYNC => IGNORE,
+ DB_FUNC_IOINFO => IGNORE,
+ DB_FUNC_MALLOC => IGNORE,
+ DB_FUNC_MAP => IGNORE,
+ DB_FUNC_OPEN => IGNORE,
+ DB_FUNC_READ => IGNORE,
+ DB_FUNC_REALLOC => IGNORE,
+ DB_FUNC_SEEK => IGNORE,
+ DB_FUNC_SLEEP => IGNORE,
+ DB_FUNC_STRDUP => IGNORE,
+ DB_FUNC_UNLINK => IGNORE,
+ DB_FUNC_UNMAP => IGNORE,
+ DB_FUNC_WRITE => IGNORE,
+ DB_FUNC_YIELD => IGNORE,
+
+ #########
+ # 2.3.14
+ #########
+
+ DB_TSL_SPINS => IGNORE,
+
+ #########
+ # 2.3.16
+ #########
+
+ DB_DBM_HSEARCH => IGNORE,
+ firstkey => IGNORE,
+ hdestroy => IGNORE,
+
+ #########
+ # 2.4.10
+ #########
+
+ DB_CURLSN => DEFINE,
+ DB_FUNC_RUNLINK => IGNORE,
+ DB_REGION_ANON => DEFINE,
+ DB_REGION_INIT => DEFINE,
+ DB_REGION_NAME => DEFINE,
+ DB_TXN_LOCK_OPTIMIST => DEFINE,
+ __CURRENTLY_UNUSED => IGNORE,
+
+ # enum db_status_t
+ DB_LSTAT_ABORTED => IGNORE, # 2.4.10
+ DB_LSTAT_ERR => IGNORE, # 2.4.10
+ DB_LSTAT_FREE => IGNORE, # 2.4.10
+ DB_LSTAT_HELD => IGNORE, # 2.4.10
+ DB_LSTAT_NOGRANT => IGNORE, # 2.4.10
+ DB_LSTAT_PENDING => IGNORE, # 2.4.10
+ DB_LSTAT_WAITING => IGNORE, # 2.4.10
+
+ #########
+ # 2.4.14
+ #########
+
+ DB_MUTEXLOCKS => DEFINE,
+ DB_PAGEYIELD => DEFINE,
+ __UNUSED_100 => IGNORE,
+ __UNUSED_4000 => IGNORE,
+
+ #########
+ # 2.5.2
+ #########
+
+ DBC_CONTINUE => IGNORE,
+ DBC_KEYSET => IGNORE,
+ DBC_RECOVER => IGNORE,
+ DBC_RMW => IGNORE,
+ DB_DBM_ERROR => IGNORE,
+ DB_GET_BOTH => DEFINE,
+ DB_NEXT_DUP => DEFINE,
+ DB_OPFLAGS_MASK => DEFINE,
+ DB_RMW => DEFINE,
+ DB_RUNRECOVERY => DEFINE,
+ dbmclose => IGNORE,
+
+ #########
+ # 2.5.9
+ #########
+
+ DB_DUPSORT => DEFINE,
+ DB_JOIN_ITEM => DEFINE,
+
+ #########
+ # 2.6.4
+ #########
+
+ DBC_WRITER => IGNORE,
+ DB_AM_CDB => IGNORE,
+ DB_ENV_CDB => DEFINE,
+ DB_INIT_CDB => DEFINE,
+ DB_LOCK_UPGRADE => DEFINE,
+ DB_WRITELOCK => DEFINE,
+
+ #########
+ # 2.7.1
+ #########
+
+
+ # enum db_lockop_t
+ DB_LOCK_INHERIT => '2.7.1',
+
+ #########
+ # 2.7.7
+ #########
+
+ DB_FCNTL_LOCKING => DEFINE,
+
+ #########
+ # 3.0.55
+ #########
+
+ DBC_WRITECURSOR => IGNORE,
+ DB_AM_DISCARD => IGNORE,
+ DB_AM_SUBDB => IGNORE,
+ DB_BT_REVSPLIT => IGNORE,
+ DB_CONSUME => DEFINE,
+ DB_CXX_NO_EXCEPTIONS => DEFINE,
+ DB_DBT_REALLOC => IGNORE,
+ DB_DUPCURSOR => DEFINE,
+ DB_ENV_CREATE => DEFINE,
+ DB_ENV_DBLOCAL => DEFINE,
+ DB_ENV_LOCKDOWN => DEFINE,
+ DB_ENV_LOCKING => DEFINE,
+ DB_ENV_LOGGING => DEFINE,
+ DB_ENV_NOMMAP => DEFINE,
+ DB_ENV_OPEN_CALLED => DEFINE,
+ DB_ENV_PRIVATE => DEFINE,
+ DB_ENV_SYSTEM_MEM => DEFINE,
+ DB_ENV_TXN => DEFINE,
+ DB_ENV_TXN_NOSYNC => DEFINE,
+ DB_ENV_USER_ALLOC => DEFINE,
+ DB_FORCE => DEFINE,
+ DB_LOCKDOWN => DEFINE,
+ DB_LOCK_RECORD => DEFINE,
+ DB_LOGFILEID_INVALID => DEFINE,
+ DB_MPOOL_NEW_GROUP => DEFINE,
+ DB_NEXT_NODUP => DEFINE,
+ DB_OK_BTREE => DEFINE,
+ DB_OK_HASH => DEFINE,
+ DB_OK_QUEUE => DEFINE,
+ DB_OK_RECNO => DEFINE,
+ DB_OLD_VERSION => DEFINE,
+ DB_OPEN_CALLED => DEFINE,
+ DB_PAGE_LOCK => DEFINE,
+ DB_POSITION => DEFINE,
+ DB_POSITIONI => DEFINE,
+ DB_PRIVATE => DEFINE,
+ DB_QAMMAGIC => DEFINE,
+ DB_QAMOLDVER => DEFINE,
+ DB_QAMVERSION => DEFINE,
+ DB_RECORD_LOCK => DEFINE,
+ DB_REVSPLITOFF => DEFINE,
+ DB_SYSTEM_MEM => DEFINE,
+ DB_TEST_POSTLOG => DEFINE,
+ DB_TEST_POSTLOGMETA => DEFINE,
+ DB_TEST_POSTOPEN => DEFINE,
+ DB_TEST_POSTRENAME => DEFINE,
+ DB_TEST_POSTSYNC => DEFINE,
+ DB_TEST_PREOPEN => DEFINE,
+ DB_TEST_PRERENAME => DEFINE,
+ DB_TXN_NOWAIT => DEFINE,
+ DB_TXN_SYNC => DEFINE,
+ DB_UPGRADE => DEFINE,
+ DB_VERB_CHKPOINT => DEFINE,
+ DB_VERB_DEADLOCK => DEFINE,
+ DB_VERB_RECOVERY => DEFINE,
+ DB_VERB_WAITSFOR => DEFINE,
+ DB_WRITECURSOR => DEFINE,
+ DB_XA_CREATE => DEFINE,
+
+ # enum DBTYPE
+ DB_QUEUE => '3.0.55',
+
+ #########
+ # 3.1.12
+ #########
+
+ DBC_ACTIVE => IGNORE,
+ DBC_OPD => IGNORE,
+ DBC_TRANSIENT => IGNORE,
+ DBC_WRITEDUP => IGNORE,
+ DB_AGGRESSIVE => DEFINE,
+ DB_AM_DUPSORT => IGNORE,
+ DB_CACHED_COUNTS => DEFINE,
+ DB_CLIENT => DEFINE,
+ DB_DBT_DUPOK => IGNORE,
+ DB_DBT_ISSET => IGNORE,
+ DB_ENV_RPCCLIENT => DEFINE,
+ DB_GET_BOTHC => DEFINE,
+ DB_JOIN_NOSORT => DEFINE,
+ DB_NODUPDATA => DEFINE,
+ DB_NOORDERCHK => DEFINE,
+ DB_NOSERVER => DEFINE,
+ DB_NOSERVER_HOME => DEFINE,
+ DB_NOSERVER_ID => DEFINE,
+ DB_ODDFILESIZE => DEFINE,
+ DB_ORDERCHKONLY => DEFINE,
+ DB_PREV_NODUP => DEFINE,
+ DB_PR_HEADERS => DEFINE,
+ DB_PR_PAGE => DEFINE,
+ DB_PR_RECOVERYTEST => DEFINE,
+ DB_RDWRMASTER => DEFINE,
+ DB_SALVAGE => DEFINE,
+ DB_VERIFY_BAD => DEFINE,
+ DB_VERIFY_FATAL => DEFINE,
+ DB_VRFY_FLAGMASK => DEFINE,
+
+ # enum db_recops
+ DB_TXN_ABORT => '3.1.12',
+ DB_TXN_BACKWARD_ROLL => '3.1.12',
+ DB_TXN_FORWARD_ROLL => '3.1.12',
+ DB_TXN_OPENFILES => '3.1.12',
+
+ #########
+ # 3.2.3
+ #########
+
+ DBC_COMPENSATE => IGNORE,
+ DB_AM_VERIFYING => IGNORE,
+ DB_CDB_ALLDB => DEFINE,
+ DB_ENV_CDB_ALLDB => DEFINE,
+ DB_EXTENT => DEFINE,
+ DB_JOINENV => DEFINE,
+ DB_LOCK_SWITCH => DEFINE,
+ DB_MPOOL_EXTENT => DEFINE,
+ DB_REGION_MAGIC => DEFINE,
+ DB_UNRESOLVED_CHILD => DEFINE,
+ DB_VERIFY => DEFINE,
+
+ # enum db_notices
+ DB_NOTICE_LOGFILE_CHANGED => IGNORE, # 3.2.3
+
+ #########
+ # 3.2.6
+ #########
+
+ DB_ALREADY_ABORTED => DEFINE,
+ DB_CONSUME_WAIT => DEFINE,
+ DB_JAVA_CALLBACK => DEFINE,
+ DB_TEST_POSTEXTDELETE => DEFINE,
+ DB_TEST_POSTEXTOPEN => DEFINE,
+ DB_TEST_POSTEXTUNLINK => DEFINE,
+ DB_TEST_PREEXTDELETE => DEFINE,
+ DB_TEST_PREEXTOPEN => DEFINE,
+ DB_TEST_PREEXTUNLINK => DEFINE,
+
+ # enum db_lockmode_t
+ DB_LOCK_WAIT => IGNORE, # 3.2.6
+
+ #########
+ # 3.3.4
+ #########
+
+ DBC_DIRTY_READ => IGNORE,
+ DBC_MULTIPLE => IGNORE,
+ DBC_MULTIPLE_KEY => IGNORE,
+ DB_AM_DIRTY => IGNORE,
+ DB_AM_SECONDARY => IGNORE,
+ DB_COMMIT => DEFINE,
+ DB_DBT_APPMALLOC => IGNORE,
+ DB_DIRTY_READ => DEFINE,
+ DB_DONOTINDEX => DEFINE,
+ DB_ENV_PANIC_OK => DEFINE,
+ DB_ENV_RPCCLIENT_GIVEN => DEFINE,
+ DB_FAST_STAT => DEFINE,
+ DB_LOCK_MAXLOCKS => DEFINE,
+ DB_LOCK_MINLOCKS => DEFINE,
+ DB_LOCK_MINWRITE => DEFINE,
+ DB_MULTIPLE => DEFINE,
+ DB_MULTIPLE_KEY => DEFINE,
+ DB_PAGE_NOTFOUND => DEFINE,
+ DB_RPC_SERVERPROG => DEFINE,
+ DB_RPC_SERVERVERS => DEFINE,
+ DB_UPDATE_SECONDARY => DEFINE,
+ DB_XIDDATASIZE => DEFINE,
+
+ # enum db_recops
+ DB_TXN_POPENFILES => '3.3.4',
+
+ # enum db_lockop_t
+ DB_LOCK_UPGRADE_WRITE => '3.3.4',
+
+ # enum db_lockmode_t
+ DB_LOCK_DIRTY => IGNORE, # 3.3.4
+ DB_LOCK_WWRITE => IGNORE, # 3.3.4
+
+ #########
+ # 3.3.11
+ #########
+
+ DB_SECONDARY_BAD => DEFINE,
+ DB_SURPRISE_KID => DEFINE,
+ DB_TEST_POSTDESTROY => DEFINE,
+ DB_TEST_PREDESTROY => DEFINE,
+
+ #########
+ # 4.0.7
+ #########
+
+ DB_APPLY_LOGREG => DEFINE,
+ DB_BROADCAST_EID => DEFINE,
+ DB_CL_WRITER => DEFINE,
+ DB_ENV_NOLOCKING => DEFINE,
+ DB_ENV_NOPANIC => DEFINE,
+ DB_ENV_REGION_INIT => DEFINE,
+ DB_ENV_REP_CLIENT => DEFINE,
+ DB_ENV_REP_LOGSONLY => DEFINE,
+ DB_ENV_REP_MASTER => DEFINE,
+ DB_ENV_YIELDCPU => DEFINE,
+ DB_GET_BOTH_RANGE => DEFINE,
+ DB_INVALID_EID => DEFINE,
+ DB_LOCK_EXPIRE => DEFINE,
+ DB_LOCK_FREE_LOCKER => DEFINE,
+ DB_LOCK_SET_TIMEOUT => DEFINE,
+ DB_LOGC_BUF_SIZE => DEFINE,
+ DB_LOG_DISK => DEFINE,
+ DB_LOG_LOCKED => DEFINE,
+ DB_LOG_SILENT_ERR => DEFINE,
+ DB_NOLOCKING => DEFINE,
+ DB_NOPANIC => DEFINE,
+ DB_PANIC_ENVIRONMENT => DEFINE,
+ DB_REP_CLIENT => DEFINE,
+ DB_REP_DUPMASTER => DEFINE,
+ DB_REP_HOLDELECTION => DEFINE,
+ DB_REP_LOGSONLY => DEFINE,
+ DB_REP_MASTER => DEFINE,
+ DB_REP_NEWMASTER => DEFINE,
+ DB_REP_NEWSITE => DEFINE,
+ DB_REP_OUTDATED => DEFINE,
+ DB_REP_PERMANENT => DEFINE,
+ DB_REP_UNAVAIL => DEFINE,
+ DB_SET_LOCK_TIMEOUT => DEFINE,
+ DB_SET_TXN_NOW => DEFINE,
+ DB_SET_TXN_TIMEOUT => DEFINE,
+ DB_STAT_CLEAR => DEFINE,
+ DB_TIMEOUT => DEFINE,
+ DB_YIELDCPU => DEFINE,
+ MP_FLUSH => IGNORE,
+ MP_OPEN_CALLED => IGNORE,
+ MP_READONLY => IGNORE,
+ MP_UPGRADE => IGNORE,
+ MP_UPGRADE_FAIL => IGNORE,
+ TXN_CHILDCOMMIT => IGNORE,
+ TXN_COMPENSATE => IGNORE,
+ TXN_DIRTY_READ => IGNORE,
+ TXN_LOCKTIMEOUT => IGNORE,
+ TXN_MALLOC => IGNORE,
+ TXN_NOSYNC => IGNORE,
+ TXN_NOWAIT => IGNORE,
+ TXN_SYNC => IGNORE,
+
+ # enum db_recops
+ DB_TXN_APPLY => '4.0.7',
+
+ # enum db_lockop_t
+ DB_LOCK_GET_TIMEOUT => '4.0.7',
+ DB_LOCK_PUT_READ => '4.0.7',
+ DB_LOCK_TIMEOUT => '4.0.7',
+
+ # enum db_status_t
+ DB_LSTAT_EXPIRED => IGNORE, # 4.0.7
+
+ #########
+ # 4.0.14
+ #########
+
+ DB_EID_BROADCAST => DEFINE,
+ DB_EID_INVALID => DEFINE,
+ DB_VERB_REPLICATION => DEFINE,
+
+ #########
+ # 4.1.17
+ #########
+
+ DBC_OWN_LID => IGNORE,
+ DB_AM_CHKSUM => IGNORE,
+ DB_AM_CL_WRITER => IGNORE,
+ DB_AM_COMPENSATE => IGNORE,
+ DB_AM_CREATED => IGNORE,
+ DB_AM_CREATED_MSTR => IGNORE,
+ DB_AM_DBM_ERROR => IGNORE,
+ DB_AM_DELIMITER => IGNORE,
+ DB_AM_ENCRYPT => IGNORE,
+ DB_AM_FIXEDLEN => IGNORE,
+ DB_AM_IN_RENAME => IGNORE,
+ DB_AM_OPEN_CALLED => IGNORE,
+ DB_AM_PAD => IGNORE,
+ DB_AM_RECNUM => IGNORE,
+ DB_AM_RENUMBER => IGNORE,
+ DB_AM_REVSPLITOFF => IGNORE,
+ DB_AM_SNAPSHOT => IGNORE,
+ DB_AUTO_COMMIT => DEFINE,
+ DB_CHKSUM_SHA1 => DEFINE,
+ DB_DIRECT => DEFINE,
+ DB_DIRECT_DB => DEFINE,
+ DB_DIRECT_LOG => DEFINE,
+ DB_ENCRYPT => DEFINE,
+ DB_ENCRYPT_AES => DEFINE,
+ DB_ENV_AUTO_COMMIT => DEFINE,
+ DB_ENV_DIRECT_DB => DEFINE,
+ DB_ENV_DIRECT_LOG => DEFINE,
+ DB_ENV_FATAL => DEFINE,
+ DB_ENV_OVERWRITE => DEFINE,
+ DB_ENV_TXN_WRITE_NOSYNC => DEFINE,
+ DB_HANDLE_LOCK => DEFINE,
+ DB_LOCK_NOTEXIST => DEFINE,
+ DB_LOCK_REMOVE => DEFINE,
+ DB_NOCOPY => DEFINE,
+ DB_OVERWRITE => DEFINE,
+ DB_PERMANENT => DEFINE,
+ DB_PRINTABLE => DEFINE,
+ DB_RENAMEMAGIC => DEFINE,
+ DB_TEST_ELECTINIT => DEFINE,
+ DB_TEST_ELECTSEND => DEFINE,
+ DB_TEST_ELECTVOTE1 => DEFINE,
+ DB_TEST_ELECTVOTE2 => DEFINE,
+ DB_TEST_ELECTWAIT1 => DEFINE,
+ DB_TEST_ELECTWAIT2 => DEFINE,
+ DB_TEST_SUBDB_LOCKS => DEFINE,
+ DB_TXN_LOCK => DEFINE,
+ DB_TXN_WRITE_NOSYNC => DEFINE,
+ DB_WRITEOPEN => DEFINE,
+ DB_WRNOSYNC => DEFINE,
+ _DB_EXT_PROT_IN_ => IGNORE,
+
+ # enum db_lockop_t
+ DB_LOCK_TRADE => '4.1.17',
+
+ # enum db_status_t
+ DB_LSTAT_NOTEXIST => IGNORE, # 4.1.17
+
+ # enum DB_CACHE_PRIORITY
+ DB_PRIORITY_VERY_LOW => '4.1.17',
+ DB_PRIORITY_LOW => '4.1.17',
+ DB_PRIORITY_DEFAULT => '4.1.17',
+ DB_PRIORITY_HIGH => '4.1.17',
+ DB_PRIORITY_VERY_HIGH => '4.1.17',
+
+ # enum db_recops
+ DB_TXN_BACKWARD_ALLOC => '4.1.17',
+ DB_TXN_GETPGNOS => '4.1.17',
+ DB_TXN_PRINT => '4.1.17',
+
+ ) ;
+
+sub enum_Macro
+{
+ my $str = shift ;
+ my ($major, $minor, $patch) = split /\./, $str ;
+
+ my $macro =
+ "#if (DB_VERSION_MAJOR > $major) || \\\n" .
+ " (DB_VERSION_MAJOR == $major && DB_VERSION_MINOR > $minor) || \\\n" .
+ " (DB_VERSION_MAJOR == $major && DB_VERSION_MINOR == $minor && \\\n" .
+ " DB_VERSION_PATCH >= $patch)\n" ;
+
+ return $macro;
+
+}
+
+sub OutputXS
+{
+
+ my @names = () ;
+
+ foreach my $key (sort keys %constants)
+ {
+ my $val = $constants{$key} ;
+ next if $val eq IGNORE;
+
+ if ($val eq STRING)
+ { push @names, { name => $key, type => "PV" } }
+ elsif ($val eq DEFINE)
+ { push @names, $key }
+ else
+ { push @names, { name => $key, macro => [enum_Macro($val), "#endif\n"] } }
+ }
+
+ warn "Updating constants.xs & constants.h...\n";
+ WriteConstants(
+ NAME => BerkeleyDB,
+ NAMES => \@names,
+ C_FILE => 'constants.h',
+ XS_FILE => 'constants.xs',
+ ) ;
+}
+
+sub OutputPM
+{
+ my $filename = 'BerkeleyDB.pm';
+ warn "Updating $filename...\n";
+ open IN, "<$filename" || die "Cannot open $filename: $!\n";
+ open OUT, ">$filename.tmp" || die "Cannot open $filename.tmp: $!\n";
+
+ my $START = '@EXPORT = qw(' ;
+ my $START_re = quotemeta $START ;
+ my $END = ');';
+ my $END_re = quotemeta $END ;
+
+ # skip to the @EXPORT declaration
+ OUTER: while (<IN>)
+ {
+ if ( /^\s*$START_re/ )
+ {
+ # skip to the end marker.
+ while (<IN>)
+ { last OUTER if /^\s*$END_re/ }
+ }
+ print OUT ;
+ }
+
+ print OUT "$START\n";
+ foreach my $key (sort keys %constants)
+ {
+ next if $constants{$key} eq IGNORE;
+ print OUT "\t$key\n";
+ }
+ print OUT "\t$END\n";
+
+ while (<IN>)
+ {
+ print OUT ;
+ }
+
+ close IN;
+ close OUT;
+
+ rename $filename, "$filename.bak" || die "Cannot rename $filename: $!\n" ;
+ rename "$filename.tmp", $filename || die "Cannot rename $filename.tmp: $!\n" ;
+}
+
+OutputXS() ;
+OutputPM() ;
diff --git a/storage/bdb/perl/BerkeleyDB/mkpod b/storage/bdb/perl/BerkeleyDB/mkpod
new file mode 100755
index 00000000000..44bbf3fbf4f
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/mkpod
@@ -0,0 +1,146 @@
+#!/usr/local/bin/perl5
+
+# Filename: mkpod
+#
+# Author: Paul Marquess
+
+# File types
+#
+# Macro files end with .M
+# Tagged source files end with .T
+# Output from the code ends with .O
+# Pre-Pod file ends with .P
+#
+# Tags
+#
+# ## BEGIN tagname
+# ...
+# ## END tagname
+#
+# ## 0
+# ## 1
+#
+
+# Constants
+
+$TOKEN = '##' ;
+$Verbose = 1 if $ARGV[0] =~ /^-v/i ;
+
+# Macros files first
+foreach $file (glob("*.M"))
+{
+ open (F, "<$file") or die "Cannot open '$file':$!\n" ;
+ print " Processing Macro file $file\n" ;
+ while (<F>)
+ {
+ # Skip blank & comment lines
+ next if /^\s*$/ || /^\s*#/ ;
+
+ #
+ ($name, $expand) = split (/\t+/, $_, 2) ;
+
+ $expand =~ s/^\s*// ;
+ $expand =~ s/\s*$// ;
+
+ if ($expand =~ /\[#/ )
+ {
+ }
+
+ $Macros{$name} = $expand ;
+ }
+ close F ;
+}
+
+# Suck up all the code files
+foreach $file (glob("t/*.T"))
+{
+ ($newfile = $file) =~ s/\.T$// ;
+ open (F, "<$file") or die "Cannot open '$file':$!\n" ;
+ open (N, ">$newfile") or die "Cannot open '$newfile':$!\n" ;
+
+ print " Processing $file -> $newfile\n" ;
+
+ while ($line = <F>)
+ {
+ if ($line =~ /^$TOKEN\s*BEGIN\s+(\w+)\s*$/ or
+ $line =~ m[\s*/\*$TOKEN\s*BEGIN\s+(\w+)\s*$] )
+ {
+ print " Section $1 begins\n" if $Verbose ;
+ $InSection{$1} ++ ;
+ $Section{$1} = '' unless $Section{$1} ;
+ }
+ elsif ($line =~ /^$TOKEN\s*END\s+(\w+)\s*$/ or
+ $line =~ m[^\s*/\*$TOKEN\s*END\s+(\w+)\s*$] )
+ {
+ warn "Encountered END without a begin [$line]\n"
+ unless $InSection{$1} ;
+
+ delete $InSection{$1} ;
+ print " Section $1 ends\n" if $Verbose ;
+ }
+ else
+ {
+ print N $line ;
+ chop $line ;
+ $line =~ s/\s*$// ;
+
+ # Save the current line in each of the sections
+ foreach( keys %InSection)
+ {
+ if ($line !~ /^\s*$/ )
+ #{ $Section{$_} .= " $line" }
+ { $Section{$_} .= $line }
+ $Section{$_} .= "\n" ;
+ }
+ }
+
+ }
+
+ if (%InSection)
+ {
+ # Check for unclosed sections
+ print "The following Sections are not terminated\n" ;
+ foreach (sort keys %InSection)
+ { print "\t$_\n" }
+ exit 1 ;
+ }
+
+ close F ;
+ close N ;
+}
+
+print "\n\nCreating pod file(s)\n\n" if $Verbose ;
+
+@ppods = glob('*.P') ;
+#$ppod = $ARGV[0] ;
+#$pod = $ARGV[1] ;
+
+# Now process the pre-pod file
+foreach $ppod (@ppods)
+{
+ ($pod = $ppod) =~ s/\.P$// ;
+ open (PPOD, "<$ppod") or die "Cannot open file '$ppod': $!\n" ;
+ open (POD, ">$pod") or die "Cannot open file '$pod': $!\n" ;
+
+ print " $ppod -> $pod\n" ;
+
+ while ($line = <PPOD>)
+ {
+ if ( $line =~ /^\s*$TOKEN\s*(\w+)\s*$/)
+ {
+ warn "No code insert '$1' available\n"
+ unless $Section{$1} ;
+
+ print "Expanding section $1\n" if $Verbose ;
+ print POD $Section{$1} ;
+ }
+ else
+ {
+# $line =~ s/\[#([^\]])]/$Macros{$1}/ge ;
+ print POD $line ;
+ }
+ }
+
+ close PPOD ;
+ close POD ;
+}
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004 b/storage/bdb/perl/BerkeleyDB/patches/5.004
new file mode 100644
index 00000000000..143ec95afbc
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004
@@ -0,0 +1,44 @@
+diff perl5.004.orig/Configure perl5.004/Configure
+190a191
+> perllibs=''
+9904a9906,9913
+> : Remove libraries needed only for extensions
+> : The appropriate ext/Foo/Makefile.PL will add them back in, if
+> : necessary.
+> set X `echo " $libs " |
+> sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
+> shift
+> perllibs="$*"
+>
+10372a10382
+> perllibs='$perllibs'
+diff perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
+122c122
+< libs = $libs $cryptlib
+---
+> libs = $perllibs $cryptlib
+Common subdirectories: perl5.004.orig/Porting and perl5.004/Porting
+Common subdirectories: perl5.004.orig/cygwin32 and perl5.004/cygwin32
+Common subdirectories: perl5.004.orig/eg and perl5.004/eg
+Common subdirectories: perl5.004.orig/emacs and perl5.004/emacs
+Common subdirectories: perl5.004.orig/ext and perl5.004/ext
+Common subdirectories: perl5.004.orig/h2pl and perl5.004/h2pl
+Common subdirectories: perl5.004.orig/hints and perl5.004/hints
+Common subdirectories: perl5.004.orig/lib and perl5.004/lib
+diff perl5.004.orig/myconfig perl5.004/myconfig
+38c38
+< libs=$libs
+---
+> libs=$perllibs
+Common subdirectories: perl5.004.orig/os2 and perl5.004/os2
+diff perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
+40a41
+> ,"NODB-1.0 - remove -ldb from core perl binary."
+Common subdirectories: perl5.004.orig/plan9 and perl5.004/plan9
+Common subdirectories: perl5.004.orig/pod and perl5.004/pod
+Common subdirectories: perl5.004.orig/qnx and perl5.004/qnx
+Common subdirectories: perl5.004.orig/t and perl5.004/t
+Common subdirectories: perl5.004.orig/utils and perl5.004/utils
+Common subdirectories: perl5.004.orig/vms and perl5.004/vms
+Common subdirectories: perl5.004.orig/win32 and perl5.004/win32
+Common subdirectories: perl5.004.orig/x2p and perl5.004/x2p
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_01 b/storage/bdb/perl/BerkeleyDB/patches/5.004_01
new file mode 100644
index 00000000000..1b05eb4e02b
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_01
@@ -0,0 +1,217 @@
+diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
+*** perl5.004_01.orig/Configure Wed Jun 11 00:28:03 1997
+--- perl5.004_01/Configure Sun Nov 12 22:12:35 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9907,9912 ****
+--- 9908,9921 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10375,10380 ****
+--- 10384,10390 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
+*** perl5.004_01.orig/Makefile.SH Thu Jun 12 23:27:56 1997
+--- perl5.004_01/Makefile.SH Sun Nov 12 22:12:35 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
+*** perl5.004_01.orig/lib/ExtUtils/Embed.pm Wed Apr 2 22:12:04 1997
+--- perl5.004_01/lib/ExtUtils/Embed.pm Sun Nov 12 22:12:35 2000
+***************
+*** 170,176 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 170,176 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
+*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm Sat Jun 7 01:19:44 1997
+--- perl5.004_01/lib/ExtUtils/Liblist.pm Sun Nov 12 22:13:27 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm Thu Jun 12 22:06:18 1997
+--- perl5.004_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:12:35 2000
+***************
+*** 2137,2143 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2137,2143 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
+*** perl5.004_01.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_01/myconfig Sun Nov 12 22:12:35 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
+*** perl5.004_01.orig/patchlevel.h Wed Jun 11 03:06:10 1997
+--- perl5.004_01/patchlevel.h Sun Nov 12 22:12:35 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_02 b/storage/bdb/perl/BerkeleyDB/patches/5.004_02
new file mode 100644
index 00000000000..238f8737941
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_02
@@ -0,0 +1,217 @@
+diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
+*** perl5.004_02.orig/Configure Thu Aug 7 15:08:44 1997
+--- perl5.004_02/Configure Sun Nov 12 22:06:24 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
+*** perl5.004_02.orig/Makefile.SH Thu Aug 7 13:10:53 1997
+--- perl5.004_02/Makefile.SH Sun Nov 12 22:06:24 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
+*** perl5.004_02.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_02/lib/ExtUtils/Embed.pm Sun Nov 12 22:06:24 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
+*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_02/lib/ExtUtils/Liblist.pm Sun Nov 12 22:06:24 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm Tue Aug 5 14:28:08 1997
+--- perl5.004_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:06:25 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
+*** perl5.004_02.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_02/myconfig Sun Nov 12 22:06:25 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
+*** perl5.004_02.orig/patchlevel.h Fri Aug 1 15:07:34 1997
+--- perl5.004_02/patchlevel.h Sun Nov 12 22:06:25 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_03 b/storage/bdb/perl/BerkeleyDB/patches/5.004_03
new file mode 100644
index 00000000000..06331eac922
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_03
@@ -0,0 +1,223 @@
+diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
+*** perl5.004_03.orig/Configure Wed Aug 13 16:09:46 1997
+--- perl5.004_03/Configure Sun Nov 12 21:56:18 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.004_03: Configure.orig
+diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
+*** perl5.004_03.orig/Makefile.SH Mon Aug 18 19:24:29 1997
+--- perl5.004_03/Makefile.SH Sun Nov 12 21:56:18 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.004_03: Makefile.SH.orig
+diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
+*** perl5.004_03.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_03/lib/ExtUtils/Embed.pm Sun Nov 12 21:56:18 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
+*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_03/lib/ExtUtils/Liblist.pm Sun Nov 12 21:57:17 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
+diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm Mon Aug 18 19:16:12 1997
+--- perl5.004_03/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:56:19 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
+*** perl5.004_03.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_03/myconfig Sun Nov 12 21:56:19 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
+*** perl5.004_03.orig/patchlevel.h Wed Aug 13 11:42:01 1997
+--- perl5.004_03/patchlevel.h Sun Nov 12 21:56:19 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
+Only in perl5.004_03: patchlevel.h.orig
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_04 b/storage/bdb/perl/BerkeleyDB/patches/5.004_04
new file mode 100644
index 00000000000..a227dc700d9
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_04
@@ -0,0 +1,209 @@
+diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
+*** perl5.004_04.orig/Configure Fri Oct 3 18:57:39 1997
+--- perl5.004_04/Configure Sun Nov 12 21:50:51 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9910,9915 ****
+--- 9911,9924 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10378,10383 ****
+--- 10387,10393 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
+*** perl5.004_04.orig/Makefile.SH Wed Oct 15 10:33:16 1997
+--- perl5.004_04/Makefile.SH Sun Nov 12 21:50:51 2000
+***************
+*** 129,135 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 129,135 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
+*** perl5.004_04.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_04/lib/ExtUtils/Embed.pm Sun Nov 12 21:50:51 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
+*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm Tue Sep 9 17:41:32 1997
+--- perl5.004_04/lib/ExtUtils/Liblist.pm Sun Nov 12 21:51:33 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 189,195 ****
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 189,195 ----
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 539,545 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 539,545 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm Wed Oct 8 14:13:51 1997
+--- perl5.004_04/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:50:51 2000
+***************
+*** 2229,2235 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2229,2235 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
+*** perl5.004_04.orig/myconfig Mon Oct 6 18:26:49 1997
+--- perl5.004_04/myconfig Sun Nov 12 21:50:51 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
+*** perl5.004_04.orig/patchlevel.h Wed Oct 15 10:55:19 1997
+--- perl5.004_04/patchlevel.h Sun Nov 12 21:50:51 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.004_05 b/storage/bdb/perl/BerkeleyDB/patches/5.004_05
new file mode 100644
index 00000000000..51c8bf35009
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.004_05
@@ -0,0 +1,209 @@
+diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
+*** perl5.004_05.orig/Configure Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Configure Sun Nov 12 21:36:25 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 10164,10169 ****
+--- 10165,10178 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10648,10653 ****
+--- 10657,10663 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
+*** perl5.004_05.orig/Makefile.SH Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Makefile.SH Sun Nov 12 21:36:25 2000
+***************
+*** 151,157 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 151,157 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
+*** perl5.004_05.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_05/lib/ExtUtils/Embed.pm Sun Nov 12 21:36:25 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
+*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/Liblist.pm Sun Nov 12 21:45:31 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 590,596 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 590,596 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:36:25 2000
+***************
+*** 2246,2252 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2246,2252 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
+*** perl5.004_05.orig/myconfig Thu Jan 6 22:05:55 2000
+--- perl5.004_05/myconfig Sun Nov 12 21:43:54 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
+*** perl5.004_05.orig/patchlevel.h Thu Jan 6 22:05:48 2000
+--- perl5.004_05/patchlevel.h Sun Nov 12 21:36:25 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.005 b/storage/bdb/perl/BerkeleyDB/patches/5.005
new file mode 100644
index 00000000000..effee3e8275
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.005
@@ -0,0 +1,209 @@
+diff -rc perl5.005.orig/Configure perl5.005/Configure
+*** perl5.005.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005/Configure Sun Nov 12 21:30:40 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
+*** perl5.005.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005/Makefile.SH Sun Nov 12 21:30:40 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
+*** perl5.005.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005/lib/ExtUtils/Embed.pm Sun Nov 12 21:30:40 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
+*** perl5.005.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005/lib/ExtUtils/Liblist.pm Sun Nov 12 21:30:40 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
+*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:30:41 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005.orig/myconfig perl5.005/myconfig
+*** perl5.005.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005/myconfig Sun Nov 12 21:30:41 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
+*** perl5.005.orig/patchlevel.h Wed Jul 22 19:22:01 1998
+--- perl5.005/patchlevel.h Sun Nov 12 21:30:41 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.005_01 b/storage/bdb/perl/BerkeleyDB/patches/5.005_01
new file mode 100644
index 00000000000..2a05dd545f6
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.005_01
@@ -0,0 +1,209 @@
+diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
+*** perl5.005_01.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005_01/Configure Sun Nov 12 20:55:58 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
+*** perl5.005_01.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_01/Makefile.SH Sun Nov 12 20:55:58 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
+*** perl5.005_01.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_01/lib/ExtUtils/Embed.pm Sun Nov 12 20:55:58 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
+*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005_01/lib/ExtUtils/Liblist.pm Sun Nov 12 20:55:58 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:55:58 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
+*** perl5.005_01.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_01/myconfig Sun Nov 12 20:55:58 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
+*** perl5.005_01.orig/patchlevel.h Mon Jan 3 11:07:45 2000
+--- perl5.005_01/patchlevel.h Sun Nov 12 20:55:58 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.005_02 b/storage/bdb/perl/BerkeleyDB/patches/5.005_02
new file mode 100644
index 00000000000..5dd57ddc03f
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.005_02
@@ -0,0 +1,264 @@
+diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
+*** perl5.005_02.orig/Configure Mon Jan 3 11:12:20 2000
+--- perl5.005_02/Configure Sun Nov 12 20:50:51 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11334,11339 ****
+--- 11335,11348 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11859,11864 ****
+--- 11868,11874 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.005_02: Configure.orig
+diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
+*** perl5.005_02.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_02/Makefile.SH Sun Nov 12 20:50:51 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.005_02: Makefile.SH.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
+*** perl5.005_02.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_02/lib/ExtUtils/Embed.pm Sun Nov 12 20:50:51 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
+*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm Mon Jan 3 11:12:21 2000
+--- perl5.005_02/lib/ExtUtils/Liblist.pm Sun Nov 12 20:50:51 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 333,339 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 333,339 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 623,629 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 623,629 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+***************
+*** 666,672 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 666,672 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 676,682 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 676,682 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:50:51 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
+*** perl5.005_02.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_02/myconfig Sun Nov 12 20:50:51 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
+*** perl5.005_02.orig/patchlevel.h Mon Jan 3 11:12:19 2000
+--- perl5.005_02/patchlevel.h Sun Nov 12 20:50:51 2000
+***************
+*** 40,45 ****
+--- 40,46 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.005_03 b/storage/bdb/perl/BerkeleyDB/patches/5.005_03
new file mode 100644
index 00000000000..115f9f5b909
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.005_03
@@ -0,0 +1,250 @@
+diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
+*** perl5.005_03.orig/Configure Sun Mar 28 17:12:57 1999
+--- perl5.005_03/Configure Sun Sep 17 22:19:16 2000
+***************
+*** 208,213 ****
+--- 208,214 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11642,11647 ****
+--- 11643,11656 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 12183,12188 ****
+--- 12192,12198 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
+*** perl5.005_03.orig/Makefile.SH Thu Mar 4 02:35:25 1999
+--- perl5.005_03/Makefile.SH Sun Sep 17 22:21:01 2000
+***************
+*** 58,67 ****
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+--- 58,67 ----
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+***************
+*** 155,161 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 155,161 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
+*** perl5.005_03.orig/lib/ExtUtils/Embed.pm Wed Jan 6 02:17:50 1999
+--- perl5.005_03/lib/ExtUtils/Embed.pm Sun Sep 17 22:19:16 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
+*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm Wed Jan 6 02:17:47 1999
+--- perl5.005_03/lib/ExtUtils/Liblist.pm Sun Sep 17 22:19:16 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 336,342 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 336,342 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 626,632 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 626,632 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 670,676 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 670,676 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 680,686 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 680,686 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm Fri Mar 5 00:34:20 1999
+--- perl5.005_03/lib/ExtUtils/MM_Unix.pm Sun Sep 17 22:19:16 2000
+***************
+*** 2284,2290 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2284,2290 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
diff --git a/storage/bdb/perl/BerkeleyDB/patches/5.6.0 b/storage/bdb/perl/BerkeleyDB/patches/5.6.0
new file mode 100644
index 00000000000..1f9b3b620de
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/patches/5.6.0
@@ -0,0 +1,294 @@
+diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
+*** perl-5.6.0.orig/Configure Wed Mar 22 20:36:37 2000
+--- perl-5.6.0/Configure Sun Sep 17 23:40:15 2000
+***************
+*** 217,222 ****
+--- 217,223 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 14971,14976 ****
+--- 14972,14985 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 15640,15645 ****
+--- 15649,15655 ----
+ path_sep='$path_sep'
+ perl5='$perl5'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
+*** perl-5.6.0.orig/Makefile.SH Sat Mar 11 16:05:24 2000
+--- perl-5.6.0/Makefile.SH Sun Sep 17 23:40:15 2000
+***************
+*** 70,76 ****
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+--- 70,76 ----
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+***************
+*** 176,182 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 176,182 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+***************
+*** 333,339 ****
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $libs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+--- 333,339 ----
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $perllibs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Embed.pm perl-5.6.0/lib/ExtUtils/Embed.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Embed.pm Sun Jan 23 12:08:32 2000
+--- perl-5.6.0/lib/ExtUtils/Embed.pm Sun Sep 17 23:40:15 2000
+***************
+*** 193,199 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 193,199 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Liblist.pm perl-5.6.0/lib/ExtUtils/Liblist.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Liblist.pm Wed Mar 22 16:16:31 2000
+--- perl-5.6.0/lib/ExtUtils/Liblist.pm Sun Sep 17 23:40:15 2000
+***************
+*** 17,34 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 17,34 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 198,204 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 198,204 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 338,344 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 338,344 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 624,630 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 624,630 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 668,674 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 668,674 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 678,684 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 678,684 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
+*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm Thu Mar 2 17:52:52 2000
+--- perl-5.6.0/lib/ExtUtils/MM_Unix.pm Sun Sep 17 23:40:15 2000
+***************
+*** 2450,2456 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2450,2456 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
+*** perl-5.6.0.orig/myconfig.SH Sat Feb 26 06:34:49 2000
+--- perl-5.6.0/myconfig.SH Sun Sep 17 23:41:17 2000
+***************
+*** 48,54 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 48,54 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
+*** perl-5.6.0.orig/patchlevel.h Wed Mar 22 20:23:11 2000
+--- perl-5.6.0/patchlevel.h Sun Sep 17 23:40:15 2000
+***************
+*** 70,75 ****
+--- 70,76 ----
+ #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/BerkeleyDB/ppport.h b/storage/bdb/perl/BerkeleyDB/ppport.h
new file mode 100644
index 00000000000..0887c2159a9
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/ppport.h
@@ -0,0 +1,329 @@
+/* This file is Based on output from
+ * Perl/Pollution/Portability Version 2.0000 */
+
+#ifndef _P_P_PORTABILITY_H_
+#define _P_P_PORTABILITY_H_
+
+#ifndef PERL_REVISION
+# ifndef __PATCHLEVEL_H_INCLUDED__
+# include "patchlevel.h"
+# endif
+# ifndef PERL_REVISION
+# define PERL_REVISION (5)
+ /* Replace: 1 */
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+ /* Replace PERL_PATCHLEVEL with PERL_VERSION */
+ /* Replace: 0 */
+# endif
+#endif
+
+#define PERL_BCDVERSION ((PERL_REVISION * 0x1000000L) + (PERL_VERSION * 0x1000L) + PERL_SUBVERSION)
+
+#ifndef ERRSV
+# define ERRSV perl_get_sv("@",FALSE)
+#endif
+
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION <= 5))
+/* Replace: 1 */
+# define PL_Sv Sv
+# define PL_compiling compiling
+# define PL_copline copline
+# define PL_curcop curcop
+# define PL_curstash curstash
+# define PL_defgv defgv
+# define PL_dirty dirty
+# define PL_hints hints
+# define PL_na na
+# define PL_perldb perldb
+# define PL_rsfp_filters rsfp_filters
+# define PL_rsfp rsfp
+# define PL_stdingv stdingv
+# define PL_sv_no sv_no
+# define PL_sv_undef sv_undef
+# define PL_sv_yes sv_yes
+/* Replace: 0 */
+#endif
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef PTR2IV
+# define PTR2IV(d) (IV)(d)
+#endif
+
+#ifndef INT2PTR
+# define INT2PTR(any,d) (any)(d)
+#endif
+
+#ifndef dTHR
+# ifdef WIN32
+# define dTHR extern int Perl___notused
+# else
+# define dTHR extern int errno
+# endif
+#endif
+
+#ifndef boolSV
+# define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
+#endif
+
+#ifndef gv_stashpvn
+# define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
+#endif
+
+#ifndef newRV_inc
+/* Replace: 1 */
+# define newRV_inc(sv) newRV(sv)
+/* Replace: 0 */
+#endif
+
+/* DEFSV appears first in 5.004_56 */
+#ifndef DEFSV
+# define DEFSV GvSV(PL_defgv)
+#endif
+
+#ifndef SAVE_DEFSV
+# define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
+#endif
+
+#ifndef newRV_noinc
+# ifdef __GNUC__
+# define newRV_noinc(sv) \
+ ({ \
+ SV *nsv = (SV*)newRV(sv); \
+ SvREFCNT_dec(sv); \
+ nsv; \
+ })
+# else
+# if defined(CRIPPLED_CC) || defined(USE_THREADS)
+static SV * newRV_noinc (SV * sv)
+{
+ SV *nsv = (SV*)newRV(sv);
+ SvREFCNT_dec(sv);
+ return nsv;
+}
+# else
+# define newRV_noinc(sv) \
+ ((PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv)
+# endif
+# endif
+#endif
+
+/* Provide: newCONSTSUB */
+
+/* newCONSTSUB from IO.xs is in the core starting with 5.004_63 */
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION < 63))
+
+#if defined(NEED_newCONSTSUB)
+static
+#else
+extern void newCONSTSUB _((HV * stash, char * name, SV *sv));
+#endif
+
+#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
+void
+newCONSTSUB(stash,name,sv)
+HV *stash;
+char *name;
+SV *sv;
+{
+ U32 oldhints = PL_hints;
+ HV *old_cop_stash = PL_curcop->cop_stash;
+ HV *old_curstash = PL_curstash;
+ line_t oldline = PL_curcop->cop_line;
+ PL_curcop->cop_line = PL_copline;
+
+ PL_hints &= ~HINT_BLOCK_SCOPE;
+ if (stash)
+ PL_curstash = PL_curcop->cop_stash = stash;
+
+ newSUB(
+
+#if (PERL_VERSION < 3) || ((PERL_VERSION == 3) && (PERL_SUBVERSION < 22))
+ /* before 5.003_22 */
+ start_subparse(),
+#else
+# if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
+ /* 5.003_22 */
+ start_subparse(0),
+# else
+ /* 5.003_23 onwards */
+ start_subparse(FALSE, 0),
+# endif
+#endif
+
+ newSVOP(OP_CONST, 0, newSVpv(name,0)),
+ newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == "" -- GMB */
+ newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
+ );
+
+ PL_hints = oldhints;
+ PL_curcop->cop_stash = old_cop_stash;
+ PL_curstash = old_curstash;
+ PL_curcop->cop_line = oldline;
+}
+#endif
+
+#endif /* newCONSTSUB */
+
+
+#ifndef START_MY_CXT
+
+/*
+ * Boilerplate macros for initializing and accessing interpreter-local
+ * data from C. All statics in extensions should be reworked to use
+ * this, if you want to make the extension thread-safe. See ext/re/re.xs
+ * for an example of the use of these macros.
+ *
+ * Code that uses these macros is responsible for the following:
+ * 1. #define MY_CXT_KEY to a unique string, e.g. "DynaLoader_guts"
+ * 2. Declare a typedef named my_cxt_t that is a structure that contains
+ * all the data that needs to be interpreter-local.
+ * 3. Use the START_MY_CXT macro after the declaration of my_cxt_t.
+ * 4. Use the MY_CXT_INIT macro such that it is called exactly once
+ * (typically put in the BOOT: section).
+ * 5. Use the members of the my_cxt_t structure everywhere as
+ * MY_CXT.member.
+ * 6. Use the dMY_CXT macro (a declaration) in all the functions that
+ * access MY_CXT.
+ */
+
+#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \
+ defined(PERL_CAPI) || defined(PERL_IMPLICIT_CONTEXT)
+
+/* This must appear in all extensions that define a my_cxt_t structure,
+ * right after the definition (i.e. at file scope). The non-threads
+ * case below uses it to declare the data as static. */
+#define START_MY_CXT
+
+#if PERL_REVISION == 5 && \
+ (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION < 68 ))
+/* Fetches the SV that keeps the per-interpreter data. */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = perl_get_sv(MY_CXT_KEY, FALSE)
+#else /* >= perl5.004_68 */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY, \
+ sizeof(MY_CXT_KEY)-1, TRUE)
+#endif /* < perl5.004_68 */
+
+/* This declaration should be used within all functions that use the
+ * interpreter-local data. */
+#define dMY_CXT \
+ dMY_CXT_SV; \
+ my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv))
+
+/* Creates and zeroes the per-interpreter data.
+ * (We allocate my_cxtp in a Perl SV so that it will be released when
+ * the interpreter goes away.) */
+#define MY_CXT_INIT \
+ dMY_CXT_SV; \
+ /* newSV() allocates one more than needed */ \
+ my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\
+ Zero(my_cxtp, 1, my_cxt_t); \
+ sv_setuv(my_cxt_sv, PTR2UV(my_cxtp))
+
+/* This macro must be used to access members of the my_cxt_t structure.
+ * e.g. MYCXT.some_data */
+#define MY_CXT (*my_cxtp)
+
+/* Judicious use of these macros can reduce the number of times dMY_CXT
+ * is used. Use is similar to pTHX, aTHX etc. */
+#define pMY_CXT my_cxt_t *my_cxtp
+#define pMY_CXT_ pMY_CXT,
+#define _pMY_CXT ,pMY_CXT
+#define aMY_CXT my_cxtp
+#define aMY_CXT_ aMY_CXT,
+#define _aMY_CXT ,aMY_CXT
+
+#else /* single interpreter */
+
+#ifndef NOOP
+# define NOOP (void)0
+#endif
+
+#ifdef HASATTRIBUTE
+# define PERL_UNUSED_DECL __attribute__((unused))
+#else
+# define PERL_UNUSED_DECL
+#endif
+
+#ifndef dNOOP
+# define dNOOP extern int Perl___notused PERL_UNUSED_DECL
+#endif
+
+#define START_MY_CXT static my_cxt_t my_cxt;
+#define dMY_CXT_SV dNOOP
+#define dMY_CXT dNOOP
+#define MY_CXT_INIT NOOP
+#define MY_CXT my_cxt
+
+#define pMY_CXT void
+#define pMY_CXT_
+#define _pMY_CXT
+#define aMY_CXT
+#define aMY_CXT_
+#define _aMY_CXT
+
+#endif
+
+#endif /* START_MY_CXT */
+
+
+#ifndef DBM_setFilter
+
+/*
+ The DBM_setFilter & DBM_ckFilter macros are only used by
+ the *DB*_File modules
+*/
+
+#define DBM_setFilter(db_type,code) \
+ { \
+ if (db_type) \
+ RETVAL = sv_mortalcopy(db_type) ; \
+ ST(0) = RETVAL ; \
+ if (db_type && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db_type) ; \
+ db_type = NULL ; \
+ } \
+ else if (code) { \
+ if (db_type) \
+ sv_setsv(db_type, code) ; \
+ else \
+ db_type = newSVsv(code) ; \
+ } \
+ }
+
+#define DBM_ckFilter(arg,type,name) \
+ if (db->type) { \
+ if (db->filtering) { \
+ croak("recursion detected in %s", name) ; \
+ } \
+ ENTER ; \
+ SAVETMPS ; \
+ SAVEINT(db->filtering) ; \
+ db->filtering = TRUE ; \
+ SAVESPTR(DEFSV) ; \
+ DEFSV = arg ; \
+ SvTEMP_off(arg) ; \
+ PUSHMARK(SP) ; \
+ PUTBACK ; \
+ (void) perl_call_sv(db->type, G_DISCARD); \
+ SPAGAIN ; \
+ PUTBACK ; \
+ FREETMPS ; \
+ LEAVE ; \
+ }
+
+#endif /* DBM_setFilter */
+
+#endif /* _P_P_PORTABILITY_H_ */
diff --git a/storage/bdb/perl/BerkeleyDB/scan b/storage/bdb/perl/BerkeleyDB/scan
new file mode 100644
index 00000000000..eb064950b2e
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/scan
@@ -0,0 +1,229 @@
+#!/usr/local/bin/perl
+
+my $ignore_re = '^(' . join("|",
+ qw(
+ _
+ [a-z]
+ DBM
+ DBC
+ DB_AM_
+ DB_BT_
+ DB_RE_
+ DB_HS_
+ DB_FUNC_
+ DB_DBT_
+ DB_DBM
+ DB_TSL
+ MP
+ TXN
+ )) . ')' ;
+
+my %ignore_def = map {$_, 1} qw() ;
+
+%ignore_enums = map {$_, 1} qw( ACTION db_status_t db_notices db_lockmode_t ) ;
+
+my $filler = ' ' x 26 ;
+
+chdir "libraries" || die "Cannot chdir into './libraries': $!\n";
+
+foreach my $name (sort tuple glob "[2-9]*")
+{
+ my $inc = "$name/include/db.h" ;
+ next unless -f $inc ;
+
+ my $file = readFile($inc) ;
+ StripCommentsAndStrings($file) ;
+ my $result = scan($name, $file) ;
+ print "\n\t#########\n\t# $name\n\t#########\n\n$result"
+ if $result;
+}
+exit ;
+
+
+sub scan
+{
+ my $version = shift ;
+ my $file = shift ;
+
+ my %seen_define = () ;
+ my $result = "" ;
+
+ if (1) {
+ # Preprocess all tri-graphs
+ # including things stuck in quoted string constants.
+ $file =~ s/\?\?=/#/g; # | ??=| #|
+ $file =~ s/\?\?\!/|/g; # | ??!| ||
+ $file =~ s/\?\?'/^/g; # | ??'| ^|
+ $file =~ s/\?\?\(/[/g; # | ??(| [|
+ $file =~ s/\?\?\)/]/g; # | ??)| ]|
+ $file =~ s/\?\?\-/~/g; # | ??-| ~|
+ $file =~ s/\?\?\//\\/g; # | ??/| \|
+ $file =~ s/\?\?</{/g; # | ??<| {|
+ $file =~ s/\?\?>/}/g; # | ??>| }|
+ }
+
+ while ( $file =~ /^\s*#\s*define\s+([\$\w]+)\b(?!\()\s*(.*)/gm )
+ {
+ my $def = $1;
+ my $rest = $2;
+ my $ignore = 0 ;
+
+ $ignore = 1 if $ignore_def{$def} || $def =~ /$ignore_re/o ;
+
+ # Cannot do: (-1) and ((LHANDLE)3) are OK:
+ #print("Skip non-wordy $def => $rest\n"),
+
+ $rest =~ s/\s*$//;
+ #next if $rest =~ /[^\w\$]/;
+
+ #print "Matched $_ ($def)\n" ;
+
+ next if $before{$def} ++ ;
+
+ if ($ignore)
+ { $seen_define{$def} = 'IGNORE' }
+ elsif ($rest =~ /"/)
+ { $seen_define{$def} = 'STRING' }
+ else
+ { $seen_define{$def} = 'DEFINE' }
+ }
+
+ foreach $define (sort keys %seen_define)
+ {
+ my $out = $filler ;
+ substr($out,0, length $define) = $define;
+ $result .= "\t$out => $seen_define{$define},\n" ;
+ }
+
+ while ($file =~ /\btypedef\s+enum\s*{(.*?)}\s*(\w+)/gs )
+ {
+ my $enum = $1 ;
+ my $name = $2 ;
+ my $ignore = 0 ;
+
+ $ignore = 1 if $ignore_enums{$name} ;
+
+ #$enum =~ s/\s*=\s*\S+\s*(,?)\s*\n/$1/g;
+ $enum =~ s/^\s*//;
+ $enum =~ s/\s*$//;
+
+ my @tokens = map { s/\s*=.*// ; $_} split /\s*,\s*/, $enum ;
+ my @new = grep { ! $Enums{$_}++ } @tokens ;
+ if (@new)
+ {
+ my $value ;
+ if ($ignore)
+ { $value = "IGNORE, # $version" }
+ else
+ { $value = "'$version'," }
+
+ $result .= "\n\t# enum $name\n";
+ my $out = $filler ;
+ foreach $name (@new)
+ {
+ $out = $filler ;
+ substr($out,0, length $name) = $name;
+ $result .= "\t$out => $value\n" ;
+ }
+ }
+ }
+
+ return $result ;
+}
+
+
+sub StripCommentsAndStrings
+{
+
+ # Strip C & C++ coments
+ # From the perlfaq
+ $_[0] =~
+
+ s{
+ /\* ## Start of /* ... */ comment
+ [^*]*\*+ ## Non-* followed by 1-or-more *'s
+ (
+ [^/*][^*]*\*+
+ )* ## 0-or-more things which don't start with /
+ ## but do end with '*'
+ / ## End of /* ... */ comment
+
+ | ## OR C++ Comment
+ // ## Start of C++ comment //
+ [^\n]* ## followed by 0-or-more non end of line characters
+
+ | ## OR various things which aren't comments:
+
+ (
+ " ## Start of " ... " string
+ (
+ \\. ## Escaped char
+ | ## OR
+ [^"\\] ## Non "\
+ )*
+ " ## End of " ... " string
+
+ | ## OR
+
+ ' ## Start of ' ... ' string
+ (
+ \\. ## Escaped char
+ | ## OR
+ [^'\\] ## Non '\
+ )*
+ ' ## End of ' ... ' string
+
+ | ## OR
+
+ . ## Anything other char
+ [^/"'\\]* ## Chars which doesn't start a comment, string or escape
+ )
+ }{$2}gxs;
+
+
+
+ # Remove double-quoted strings.
+ #$_[0] =~ s#"(\\.|[^"\\])*"##g;
+
+ # Remove single-quoted strings.
+ #$_[0] =~ s#'(\\.|[^'\\])*'##g;
+
+ # Remove leading whitespace.
+ $_[0] =~ s/\A\s+//m ;
+
+ # Remove trailing whitespace.
+ $_[0] =~ s/\s+\Z//m ;
+
+ # Replace all multiple whitespace by a single space.
+ #$_[0] =~ s/\s+/ /g ;
+}
+
+
+sub readFile
+{
+ my $filename = shift ;
+ open F, "<$filename" || die "Cannot open $filename: $!\n" ;
+ local $/ ;
+ my $x = <F> ;
+ close F ;
+ return $x ;
+}
+
+sub tuple
+{
+ my (@a) = split(/\./, $a) ;
+ my (@b) = split(/\./, $b) ;
+ if (@a != @b) {
+ my $diff = @a - @b ;
+ push @b, (0 x $diff) if $diff > 0 ;
+ push @a, (0 x -$diff) if $diff < 0 ;
+ }
+ foreach $A (@a) {
+ $B = shift @b ;
+ $A == $B or return $A <=> $B ;
+ }
+ return 0;
+}
+
+__END__
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/btree.t b/storage/bdb/perl/BerkeleyDB/t/btree.t
new file mode 100644
index 00000000000..fd6ed8f1268
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/btree.t
@@ -0,0 +1,931 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..244\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Btree -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Btree -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Btree
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put("some key", "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get("some key", $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+ ok 12, $db->db_get("key", $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del("some key") == 0 ;
+ ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
+ ok 16, $db->status() == DB_NOTFOUND ;
+ ok 17, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 18, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 19, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 20, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 21, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 22, $db->db_get("key", $value) == 0 ;
+ ok 23, $value eq "value" ;
+
+ # test DB_GET_BOTH
+ my ($k, $v) = ("key", "value") ;
+ ok 24, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("key", "fred") ;
+ ok 25, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("another", "value") ;
+ ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+
+}
+
+{
+ # Check simple env works with a hash.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 27, my $lexD = new LexDir($home) ;
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 29, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put("some key", "some value") == 0 ;
+ ok 31, $db->db_get("some key", $value) == 0 ;
+ ok 32, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 33, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = $v = "" ;
+ my %copy = %data ;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'};
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'};
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+
+ ($k, $v) = ("green", "house") ;
+ ok 46, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("green", "door") ;
+ ok 47, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("black", "house") ;
+ ok 48, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+}
+
+{
+ # Tied Hash interface
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 49, tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # check "each" with an empty database
+ my $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ ++ $count ;
+ }
+ ok 50, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 51, $count == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $hash{"some key"} = "some value";
+ ok 52, (tied %hash)->status() == 0 ;
+ ok 53, $hash{"some key"} eq "some value";
+ ok 54, defined $hash{"some key"} ;
+ ok 55, (tied %hash)->status() == 0 ;
+ ok 56, exists $hash{"some key"} ;
+ ok 57, !defined $hash{"jimmy"} ;
+ ok 58, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 59, !exists $hash{"jimmy"} ;
+ ok 60, (tied %hash)->status() == DB_NOTFOUND ;
+
+ delete $hash{"some key"} ;
+ ok 61, (tied %hash)->status() == 0 ;
+ ok 62, ! defined $hash{"some key"} ;
+ ok 63, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 64, ! exists $hash{"some key"} ;
+ ok 65, (tied %hash)->status() == DB_NOTFOUND ;
+
+ $hash{1} = 2 ;
+ $hash{10} = 20 ;
+ $hash{1000} = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 66, $count == 3 ;
+ ok 67, $keys == 1011 ;
+ ok 68, $values == 2022 ;
+
+ # now clear the hash
+ %hash = () ;
+ ok 69, keys %hash == 0 ;
+
+ untie %hash ;
+}
+
+{
+ # override default compare
+ my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
+ my $value ;
+ my (%h, %g, %k) ;
+ my @Keys = qw( 0123 12 -1234 9 987654321 def ) ;
+ ok 70, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -Flags => DB_CREATE ;
+
+ ok 71, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -Flags => DB_CREATE ;
+
+ ok 72, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3,
+ -Compare => sub { length $_[0] <=> length $_[1] },
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+ my @srt_3 = sort { length $a <=> length $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ $h{$_} = 1 ;
+ $g{$_} = 1 ;
+ $k{$_} = 1 ;
+ }
+
+ sub ArrayCompare
+ {
+ my($a, $b) = @_ ;
+
+ return 0 if @$a != @$b ;
+
+ foreach (1 .. length @$a)
+ {
+ return 0 unless $$a[$_] eq $$b[$_] ;
+ }
+
+ 1 ;
+ }
+
+ ok 73, ArrayCompare (\@srt_1, [keys %h]);
+ ok 74, ArrayCompare (\@srt_2, [keys %g]);
+ ok 75, ArrayCompare (\@srt_3, [keys %k]);
+
+}
+
+{
+ # override default compare, with duplicates, don't sort values
+ my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
+ my $value ;
+ my (%h, %g, %k) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 def ) ;
+ my @Values = qw( 1 0 3 dd x abc 0 ) ;
+ ok 76, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 77, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 78, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3,
+ -Compare => sub { length $_[0] <=> length $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+ my @srt_3 = sort { length $a <=> length $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ $k{$_} = $value ;
+ }
+
+ sub getValues
+ {
+ my $hash = shift ;
+ my $db = tied %$hash ;
+ my $cursor = $db->db_cursor() ;
+ my @values = () ;
+ my ($k, $v) = (0,0) ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ push @values, $v ;
+ }
+ return @values ;
+ }
+
+ ok 79, ArrayCompare (\@srt_1, [keys %h]);
+ ok 80, ArrayCompare (\@srt_2, [keys %g]);
+ ok 81, ArrayCompare (\@srt_3, [keys %k]);
+ ok 82, ArrayCompare ([qw(dd 0 0 x 3 1 abc)], [getValues \%h]);
+ ok 83, ArrayCompare ([qw(dd 1 0 3 x abc 0)], [getValues \%g]);
+ ok 84, ArrayCompare ([qw(0 x 3 0 1 dd abc)], [getValues \%k]);
+
+ # test DB_DUP_NEXT
+ ok 85, my $cur = (tied %g)->db_cursor() ;
+ my ($k, $v) = (9, "") ;
+ ok 86, $cur->c_get($k, $v, DB_SET) == 0 ;
+ ok 87, $k == 9 && $v == 0 ;
+ ok 88, $cur->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 89, $k == 9 && $v eq "x" ;
+ ok 90, $cur->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
+}
+
+{
+ # override default compare, with duplicates, sort values
+ my $lex = new LexFile $Dfile, $Dfile2;
+ my $value ;
+ my (%h, %g) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 9 def ) ;
+ my @Values = qw( 1 11 3 dd x abc 2 0 ) ;
+ ok 91, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 92, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -DupCompare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP,
+
+
+
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ }
+
+ ok 93, ArrayCompare (\@srt_1, [keys %h]);
+ ok 94, ArrayCompare (\@srt_2, [keys %g]);
+ ok 95, ArrayCompare ([qw(dd 1 3 x 2 11 abc 0)], [getValues \%g]);
+ ok 96, ArrayCompare ([qw(dd 0 11 2 x 3 1 abc)], [getValues \%h]);
+
+}
+
+{
+ # get_dup etc
+ my $lex = new LexFile $Dfile;
+ my %hh ;
+
+ ok 97, my $YY = tie %hh, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hh{'Wall'} = 'Larry' ;
+ $hh{'Wall'} = 'Stone' ; # Note the duplicate key
+ $hh{'Wall'} = 'Brick' ; # Note the duplicate key
+ $hh{'Smith'} = 'John' ;
+ $hh{'mouse'} = 'mickey' ;
+
+ # first work in scalar context
+ ok 98, scalar $YY->get_dup('Unknown') == 0 ;
+ ok 99, scalar $YY->get_dup('Smith') == 1 ;
+ ok 100, scalar $YY->get_dup('Wall') == 3 ;
+
+ # now in list context
+ my @unknown = $YY->get_dup('Unknown') ;
+ ok 101, "@unknown" eq "" ;
+
+ my @smith = $YY->get_dup('Smith') ;
+ ok 102, "@smith" eq "John" ;
+
+ {
+ my @wall = $YY->get_dup('Wall') ;
+ my %wall ;
+ @wall{@wall} = @wall ;
+ ok 103, (@wall == 3 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'});
+ }
+
+ # hash
+ my %unknown = $YY->get_dup('Unknown', 1) ;
+ ok 104, keys %unknown == 0 ;
+
+ my %smith = $YY->get_dup('Smith', 1) ;
+ ok 105, keys %smith == 1 && $smith{'John'} ;
+
+ my %wall = $YY->get_dup('Wall', 1) ;
+ ok 106, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 1 ;
+
+ undef $YY ;
+ untie %hh ;
+
+}
+
+{
+ # in-memory file
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $fd ;
+ my $value ;
+ ok 107, my $db = tie %hash, 'BerkeleyDB::Btree' ;
+
+ ok 108, $db->db_put("some key", "some value") == 0 ;
+ ok 109, $db->db_get("some key", $value) == 0 ;
+ ok 110, $value eq "some value" ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ ok 111, my $db = new BerkeleyDB::Btree, -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 112, $ret == 0 ;
+
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 113, ! $pon && $off == 0 && $len == 0 ;
+ ok 114, $db->db_get("red", $value) == 0 && $value eq "bo" ;
+ ok 115, $db->db_get("green", $value) == 0 && $value eq "ho" ;
+ ok 116, $db->db_get("blue", $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 117, $pon ;
+ ok 118, $off == 0 ;
+ ok 119, $len == 2 ;
+ ok 120, $db->db_get("red", $value) == 0 && $value eq "t" ;
+ ok 121, $db->db_get("green", $value) == 0 && $value eq "se" ;
+ ok 122, $db->db_get("blue", $value) == 0 && $value eq "" ;
+
+ # switch of partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 123, $pon ;
+ ok 124, $off == 3 ;
+ ok 125, $len == 2 ;
+ ok 126, $db->db_get("red", $value) == 0 && $value eq "boat" ;
+ ok 127, $db->db_get("green", $value) == 0 && $value eq "house" ;
+ ok 128, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 129, $db->db_put("red", "") == 0 ;
+ ok 130, $db->db_put("green", "AB") == 0 ;
+ ok 131, $db->db_put("blue", "XYZ") == 0 ;
+ ok 132, $db->db_put("new", "KLM") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 133, $pon ;
+ ok 134, $off == 0 ;
+ ok 135, $len == 2 ;
+ ok 136, $db->db_get("red", $value) == 0 && $value eq "at" ;
+ ok 137, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
+ ok 138, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
+ ok 139, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 140, ! $pon ;
+ ok 141, $off == 0 ;
+ ok 142, $len == 0 ;
+ ok 143, $db->db_put("red", "PPP") == 0 ;
+ ok 144, $db->db_put("green", "Q") == 0 ;
+ ok 145, $db->db_put("blue", "XYZ") == 0 ;
+ ok 146, $db->db_put("new", "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 147, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
+ ok 148, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
+ ok 149, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
+ ok 150, $db->db_get("new", $value) == 0 && $value eq "KLMTU" ;
+}
+
+{
+ # partial
+ # check works via tied hash
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 151, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ while (my ($k, $v) = each %data) {
+ $hash{$k} = $v ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 152, $hash{"red"} eq "bo" ;
+ ok 153, $hash{"green"} eq "ho" ;
+ ok 154, $hash{"blue"} eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 155, $hash{"red"} eq "t" ;
+ ok 156, $hash{"green"} eq "se" ;
+ ok 157, $hash{"blue"} eq "" ;
+
+ # switch of partial mode
+ $db->partial_clear() ;
+ ok 158, $hash{"red"} eq "boat" ;
+ ok 159, $hash{"green"} eq "house" ;
+ ok 160, $hash{"blue"} eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 161, $hash{"red"} = "" ;
+ ok 162, $hash{"green"} = "AB" ;
+ ok 163, $hash{"blue"} = "XYZ" ;
+ ok 164, $hash{"new"} = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 165, $hash{"red"} eq "at" ;
+ ok 166, $hash{"green"} eq "ABuse" ;
+ ok 167, $hash{"blue"} eq "XYZa" ;
+ ok 168, $hash{"new"} eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 169, $hash{"red"} = "PPP" ;
+ ok 170, $hash{"green"} = "Q" ;
+ ok 171, $hash{"blue"} = "XYZ" ;
+ ok 172, $hash{"new"} = "TU" ;
+
+ $db->partial_clear() ;
+ ok 173, $hash{"red"} eq "at\0PPP" ;
+ ok 174, $hash{"green"} eq "ABuQ" ;
+ ok 175, $hash{"blue"} eq "XYZXYZ" ;
+ ok 176, $hash{"new"} eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 177, my $lexD = new LexDir($home) ;
+ ok 178, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 179, my $txn = $env->txn_begin() ;
+ ok 180, my $db1 = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 181, (my $Z = $txn->txn_commit()) == 0 ;
+ ok 182, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 183, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 184, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 185, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ #ok 151, $txn->txn_abort() == 0 ;
+ ok 186, ($Z = $txn->txn_abort()) == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 187, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 188, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # DB_DUP
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 189, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 190, keys %hash == 6 ;
+
+ # create a cursor
+ ok 191, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 192, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 193, $key eq "Wall" && $value eq "Larry" ;
+ ok 194, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 195, $key eq "Wall" && $value eq "Stone" ;
+ ok 196, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 197, $key eq "Wall" && $value eq "Brick" ;
+ ok 198, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 199, $key eq "Wall" && $value eq "Brick" ;
+
+ #my $ref = $db->db_stat() ;
+ #ok 200, ($ref->{bt_flags} | DB_DUP) == DB_DUP ;
+#print "bt_flags " . $ref->{bt_flags} . " DB_DUP " . DB_DUP ."\n";
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 200, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Minkey =>3 ,
+ -Pagesize => 2 **12
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 201, $ref->{$recs} == 0;
+ ok 202, $ref->{'bt_minkey'} == 3;
+ ok 203, $ref->{'bt_pagesize'} == 2 ** 12;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 204, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 205, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Btree);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 206, $@ eq "" ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB", -Filename => "dbbtree.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 207, $@ eq "" && $X ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok 208, $@ eq "" ;
+ main::ok 209, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
+ main::ok 210, $@ eq "" ;
+ main::ok 211, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 212, $@ eq "" ;
+ main::ok 213, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok 214, $@ eq "" ;
+ main::ok 215, $ret eq "[[10]]" ;
+
+ undef $X;
+ untie %h;
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DB_RECNUM, DB_SET_RECNO & DB_GET_RECNO
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) = ("", "");
+ ok 216, my $db = new BerkeleyDB::Btree
+ -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Property => DB_RECNUM ;
+
+
+ # create some data
+ my @data = (
+ "A zero",
+ "B one",
+ "C two",
+ "D three",
+ "E four"
+ ) ;
+
+ my $ix = 0 ;
+ my $ret = 0 ;
+ foreach (@data) {
+ $ret += $db->db_put($_, $ix) ;
+ ++ $ix ;
+ }
+ ok 217, $ret == 0 ;
+
+ # db_get & DB_SET_RECNO
+ $k = 1 ;
+ ok 218, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 219, $k eq "B one" && $v == 1 ;
+
+ $k = 3 ;
+ ok 220, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 221, $k eq "D three" && $v == 3 ;
+
+ $k = 4 ;
+ ok 222, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 223, $k eq "E four" && $v == 4 ;
+
+ $k = 0 ;
+ ok 224, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 225, $k eq "A zero" && $v == 0 ;
+
+ # cursor & DB_SET_RECNO
+
+ # create the cursor
+ ok 226, my $cursor = $db->db_cursor() ;
+
+ $k = 2 ;
+ ok 227, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 228, $k eq "C two" && $v == 2 ;
+
+ $k = 0 ;
+ ok 229, $cursor->c_get($k, $v, DB_SET_RECNO) == 0;
+ ok 230, $k eq "A zero" && $v == 0 ;
+
+ $k = 3 ;
+ ok 231, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 232, $k eq "D three" && $v == 3 ;
+
+ # cursor & DB_GET_RECNO
+ ok 233, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 234, $k eq "A zero" && $v == 0 ;
+ ok 235, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 236, $v == 0 ;
+
+ ok 237, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 238, $k eq "B one" && $v == 1 ;
+ ok 239, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 240, $v == 1 ;
+
+ ok 241, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 242, $k eq "E four" && $v == 4 ;
+ ok 243, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 244, $v == 4 ;
+
+}
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/destroy.t b/storage/bdb/perl/BerkeleyDB/t/destroy.t
new file mode 100644
index 00000000000..7457d36c583
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/destroy.t
@@ -0,0 +1,105 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..15\n";
+
+my $Dfile = "dbhash.tmp";
+my $home = "./fred" ;
+
+umask(0);
+
+{
+ # let object destruction kill everything
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ ok 1, my $lexD = new LexDir($home) ;
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 3, my $txn = $env->txn_begin() ;
+ ok 4, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 5, $txn->txn_commit() == 0 ;
+ ok 6, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 7, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 8, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 9, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 10, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 11, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 12, $count == 0 ;
+
+ #undef $txn ;
+ #undef $cursor ;
+ #undef $db1 ;
+ #undef $env ;
+ #untie %hash ;
+
+}
+
+{
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $cursor ;
+ my ($k, $v) = ("", "") ;
+ ok 13, my $db1 = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ my $count = 0 ;
+ # sequence forwards
+ ok 14, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 15, $count == 0 ;
+}
+
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/env.t b/storage/bdb/perl/BerkeleyDB/t/env.t
new file mode 100644
index 00000000000..3905abfae43
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/env.t
@@ -0,0 +1,217 @@
+#!./perl -w
+
+# ID: 1.2, 7/17/97
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..47\n";
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # db version stuff
+ my ($major, $minor, $patch) = (0, 0, 0) ;
+
+ ok 1, my $VER = BerkeleyDB::DB_VERSION_STRING ;
+ ok 2, my $ver = BerkeleyDB::db_version($major, $minor, $patch) ;
+ ok 3, $VER eq $ver ;
+ ok 4, $major > 1 ;
+ ok 5, defined $minor ;
+ ok 6, defined $patch ;
+}
+
+{
+ # Check for invalid parameters
+ my $env ;
+ eval ' $env = new BerkeleyDB::Env( -Stupid => 3) ; ' ;
+ ok 7, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $env = new BerkeleyDB::Env( -Bad => 2, -Home => "/tmp", -Stupid => 3) ; ' ;
+ ok 8, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $env = new BerkeleyDB::Env (-Config => {"fred" => " "} ) ; ' ;
+ ok 9, !$env ;
+ ok 10, $BerkeleyDB::Error =~ /^illegal name-value pair/ ;
+}
+
+{
+ # create a very simple environment
+ my $home = "./fred" ;
+ ok 11, my $lexD = new LexDir($home) ;
+ chdir "./fred" ;
+ ok 12, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ;
+ chdir ".." ;
+ undef $env ;
+}
+
+{
+ # create an environment with a Home
+ my $home = "./fred" ;
+ ok 13, my $lexD = new LexDir($home) ;
+ ok 14, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE ;
+
+ undef $env ;
+}
+
+{
+ # make new fail.
+ my $home = "./not_there" ;
+ rmtree $home ;
+ ok 15, ! -d $home ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_INIT_LOCK ;
+ ok 16, ! $env ;
+ ok 17, $! != 0 || $^E != 0 ;
+
+ rmtree $home ;
+}
+
+{
+ # Config
+ use Cwd ;
+ my $cwd = cwd() ;
+ my $home = "$cwd/fred" ;
+ my $data_dir = "$home/data_dir" ;
+ my $log_dir = "$home/log_dir" ;
+ my $data_file = "data.db" ;
+ ok 18, my $lexD = new LexDir($home) ;
+ ok 19, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
+ ok 20, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Config => { DB_DATA_DIR => $data_dir,
+ DB_LOG_DIR => $log_dir
+ },
+ -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 21, $env ;
+
+ ok 22, my $txn = $env->txn_begin() ;
+
+ my %hash ;
+ ok 23, tie %hash, 'BerkeleyDB::Hash', -Filename => $data_file,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ $hash{"abc"} = 123 ;
+ $hash{"def"} = 456 ;
+
+ $txn->txn_commit() ;
+
+ untie %hash ;
+
+ undef $txn ;
+ undef $env ;
+}
+
+{
+ # -ErrFile with a filename
+ my $errfile = "./errfile" ;
+ my $home = "./fred" ;
+ ok 24, my $lexD = new LexDir($home) ;
+ my $lex = new LexFile $errfile ;
+ ok 25, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 26, !$db ;
+
+ ok 27, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/;
+ ok 28, -e $errfile ;
+ my $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 29, $BerkeleyDB::Error eq $contents ;
+
+ undef $env ;
+}
+
+{
+ # -ErrFile with a filehandle/reference -- should fail
+ my $home = "./fred" ;
+ ok 30, my $lexD = new LexDir($home) ;
+ eval { my $env = new BerkeleyDB::Env( -ErrFile => [],
+ -Flags => DB_CREATE,
+ -Home => $home) ; };
+ ok 31, $@ =~ /ErrFile parameter must be a file name/;
+}
+
+{
+ # -ErrPrefix
+ use IO ;
+ my $home = "./fred" ;
+ ok 32, my $lexD = new LexDir($home) ;
+ my $errfile = "./errfile" ;
+ my $lex = new LexFile $errfile ;
+ ok 33, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
+ -ErrPrefix => "PREFIX",
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 34, !$db ;
+
+ ok 35, $BerkeleyDB::Error =~ /^PREFIX: illegal flag specified to (db_open|DB->open)/;
+ ok 36, -e $errfile ;
+ my $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 37, $BerkeleyDB::Error eq $contents ;
+
+ # change the prefix on the fly
+ my $old = $env->errPrefix("NEW ONE") ;
+ ok 38, $old eq "PREFIX" ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 39, !$db ;
+ ok 40, $BerkeleyDB::Error =~ /^NEW ONE: illegal flag specified to (db_open|DB->open)/;
+ $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 41, $contents =~ /$BerkeleyDB::Error$/ ;
+ undef $env ;
+}
+
+{
+ # test db_appexit
+ use Cwd ;
+ my $cwd = cwd() ;
+ my $home = "$cwd/fred" ;
+ my $data_dir = "$home/data_dir" ;
+ my $log_dir = "$home/log_dir" ;
+ my $data_file = "data.db" ;
+ ok 42, my $lexD = new LexDir($home);
+ ok 43, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
+ ok 44, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Config => { DB_DATA_DIR => $data_dir,
+ DB_LOG_DIR => $log_dir
+ },
+ -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 45, $env ;
+
+ ok 46, my $txn_mgr = $env->TxnMgr() ;
+
+ ok 47, $env->db_appexit() == 0 ;
+
+}
+
+# test -Verbose
+# test -Flags
+# db_value_set
diff --git a/storage/bdb/perl/BerkeleyDB/t/examples.t b/storage/bdb/perl/BerkeleyDB/t/examples.t
new file mode 100644
index 00000000000..69b7f8ff8c5
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/examples.t
@@ -0,0 +1,401 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util;
+
+print "1..7\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $x = $BerkeleyDB::Error;
+my $redirect = "xyzt" ;
+ {
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(3, docat_del($redirect) eq <<'EOM') ;
+Smith
+Wall
+mouse
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(4, docat_del($redirect) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+ $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(5, docat_del($redirect) eq <<"EOM") ;
+abc\x00 -> def\x00
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+ $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot Open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ my $val = pack("i", 123) ;
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(6, docat_del($redirect) eq <<"EOM") ;
+$val -> def
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ if ($FA) {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ untie @h ;
+ unlink $filename ;
+ } else {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $db = tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $db->push("green", "black") ;
+
+ my $elements = $db->length() ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $db->pop ;
+ print "popped $last\n" ;
+
+ $db->unshift("white") ;
+ my $first = $db->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ undef $db ;
+ untie @h ;
+ unlink $filename ;
+ }
+
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(7, docat_del($redirect) eq <<"EOM") ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+EOM
+
+}
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/examples.t.T b/storage/bdb/perl/BerkeleyDB/t/examples.t.T
new file mode 100644
index 00000000000..fe9bdf76b06
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/examples.t.T
@@ -0,0 +1,415 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util;
+
+print "1..7\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $x = $BerkeleyDB::Error;
+my $redirect = "xyzt" ;
+ {
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN simpleHash
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+## END simpleHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN simpleHash2
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END simpleHash2
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN btreeSimple
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+## END btreeSimple
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(3, docat_del($redirect) eq <<'EOM') ;
+Smith
+Wall
+mouse
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN btreeSortOrder
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+## END btreeSortOrder
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(4, docat_del($redirect) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN nullFilter
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+## END nullFilter
+ $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(5, docat_del($redirect) eq <<"EOM") ;
+abc\x00 -> def\x00
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN intFilter
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+## END intFilter
+ $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot Open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ my $val = pack("i", 123) ;
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(6, docat_del($redirect) eq <<"EOM") ;
+$val -> def
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ if ($FA) {
+## BEGIN simpleRecno
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ untie @h ;
+## END simpleRecno
+ unlink $filename ;
+ } else {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $db = tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $db->push("green", "black") ;
+
+ my $elements = $db->length() ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $db->pop ;
+ print "popped $last\n" ;
+
+ $db->unshift("white") ;
+ my $first = $db->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ undef $db ;
+ untie @h ;
+ unlink $filename ;
+ }
+
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(7, docat_del($redirect) eq <<"EOM") ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+EOM
+
+}
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/examples3.t b/storage/bdb/perl/BerkeleyDB/t/examples3.t
new file mode 100644
index 00000000000..22e94b770e1
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/examples3.t
@@ -0,0 +1,132 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+
+print "1..2\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> banana
+green -> apple
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> apple
+green -> banana
+EOM
+
+}
+
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/examples3.t.T b/storage/bdb/perl/BerkeleyDB/t/examples3.t.T
new file mode 100644
index 00000000000..5eeaa14d00c
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/examples3.t.T
@@ -0,0 +1,136 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+
+print "1..2\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN dupHash
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END dupHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> banana
+green -> apple
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN dupSortHash
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END dupSortHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> apple
+green -> banana
+EOM
+
+}
+
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/filter.t b/storage/bdb/perl/BerkeleyDB/t/filter.t
new file mode 100644
index 00000000000..47a7c107acf
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/filter.t
@@ -0,0 +1,217 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..46\n";
+
+my $Dfile = "dbhash.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+{
+ # DBM Filter tests
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok 1, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok 2, checkOutput( "", "fred", "", "joe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 3, $h{"fred"} eq "joe";
+ # fk sk fv sv
+ ok 4, checkOutput( "", "fred", "joe", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 5, $db->FIRSTKEY() eq "fred" ;
+ # fk sk fv sv
+ ok 6, checkOutput( "fred", "", "", "") ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok 7, checkOutput( "", "fred", "", "Jxe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 8, $h{"Fred"} eq "[Jxe]";
+ # fk sk fv sv
+ ok 9, checkOutput( "", "fred", "[Jxe]", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 10, $db->FIRSTKEY() eq "FRED" ;
+ # fk sk fv sv
+ ok 11, checkOutput( "FRED", "", "", "") ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok 12, checkOutput( "", "fred", "", "joe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 13, $h{"fred"} eq "joe";
+ ok 14, checkOutput( "", "fred", "joe", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 15, $db->FIRSTKEY() eq "fred" ;
+ ok 16, checkOutput( "fred", "", "", "") ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok 17, checkOutput( "", "", "", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 18, $h{"fred"} eq "joe";
+ ok 19, checkOutput( "", "", "", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 20, $db->FIRSTKEY() eq "fred" ;
+ ok 21, checkOutput( "", "", "", "") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok 22, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok 23, $result{"store key"} eq "store key - 1: [fred]" ;
+ ok 24, $result{"store value"} eq "store value - 1: [joe]" ;
+ ok 25, ! defined $result{"fetch key"} ;
+ ok 26, ! defined $result{"fetch value"} ;
+ ok 27, $_ eq "original" ;
+
+ ok 28, $db->FIRSTKEY() eq "fred" ;
+ ok 29, $result{"store key"} eq "store key - 1: [fred]" ;
+ ok 30, $result{"store value"} eq "store value - 1: [joe]" ;
+ ok 31, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 32, ! defined $result{"fetch value"} ;
+ ok 33, $_ eq "original" ;
+
+ $h{"jim"} = "john" ;
+ ok 34, $result{"store key"} eq "store key - 2: [fred jim]" ;
+ ok 35, $result{"store value"} eq "store value - 2: [joe john]" ;
+ ok 36, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 37, ! defined $result{"fetch value"} ;
+ ok 38, $_ eq "original" ;
+
+ ok 39, $h{"fred"} eq "joe" ;
+ ok 40, $result{"store key"} eq "store key - 3: [fred jim fred]" ;
+ ok 41, $result{"store value"} eq "store value - 2: [joe john]" ;
+ ok 42, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 43, $result{"fetch value"} eq "fetch value - 1: [joe]" ;
+ ok 44, $_ eq "original" ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok 45, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok 46, $@ =~ /^BerkeleyDB Aborting: recursion detected in filter_store_key at/ ;
+ #print "[$@]\n" ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/hash.t b/storage/bdb/perl/BerkeleyDB/t/hash.t
new file mode 100644
index 00000000000..0e683851c3d
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/hash.t
@@ -0,0 +1,728 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..212\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Hash -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Txn => "fred" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Hash -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to HASH
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put("some key", "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get("some key", $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+ ok 12, $db->db_get("key", $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del("some key") == 0 ;
+ ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
+ ok 16, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 17, $db->status() == DB_NOTFOUND ;
+ ok 18, $db->status() eq $DB_errors{'DB_NOTFOUND'};
+
+ ok 19, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 20, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 21, $db->status() eq $DB_errors{'DB_KEYEXIST'};
+ ok 22, $db->status() == DB_KEYEXIST ;
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 23, $db->db_get("key", $value) == 0 ;
+ ok 24, $value eq "value" ;
+
+ # test DB_GET_BOTH
+ my ($k, $v) = ("key", "value") ;
+ ok 25, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("key", "fred") ;
+ ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("another", "value") ;
+ ok 27, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+
+}
+
+{
+ # Check simple env works with a hash.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 28, my $lexD = new LexDir($home);
+
+ ok 29, my $env = new BerkeleyDB::Env -Flags => DB_CREATE| DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 30, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 31, $db->db_put("some key", "some value") == 0 ;
+ ok 32, $db->db_get("some key", $value) == 0 ;
+ ok 33, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+}
+
+{
+ # override default hash
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ $::count = 0 ;
+ ok 34, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Hash => sub { ++$::count ; length $_[0] },
+ -Flags => DB_CREATE ;
+
+ ok 35, $db->db_put("some key", "some value") == 0 ;
+ ok 36, $db->db_get("some key", $value) == 0 ;
+ ok 37, $value eq "some value" ;
+ ok 38, $::count > 0 ;
+
+}
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 39, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 40, $ret == 0 ;
+
+ # create the cursor
+ ok 41, my $cursor = $db->db_cursor() ;
+
+ $k = $v = "" ;
+ my %copy = %data ;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 42, $cursor->status() == DB_NOTFOUND ;
+ ok 43, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 46, $status == DB_NOTFOUND ;
+ ok 47, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 48, $cursor->status() == $status ;
+ ok 49, $cursor->status() eq $status ;
+ ok 50, keys %copy == 0 ;
+ ok 51, $extras == 0 ;
+
+ ($k, $v) = ("green", "house") ;
+ ok 52, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("green", "door") ;
+ ok 53, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("black", "house") ;
+ ok 54, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+}
+
+{
+ # Tied Hash interface
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 55, tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # check "each" with an empty database
+ my $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ ++ $count ;
+ }
+ ok 56, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 57, $count == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $hash{"some key"} = "some value";
+ ok 58, (tied %hash)->status() == 0 ;
+ ok 59, $hash{"some key"} eq "some value";
+ ok 60, defined $hash{"some key"} ;
+ ok 61, (tied %hash)->status() == 0 ;
+ ok 62, exists $hash{"some key"} ;
+ ok 63, !defined $hash{"jimmy"} ;
+ ok 64, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 65, !exists $hash{"jimmy"} ;
+ ok 66, (tied %hash)->status() == DB_NOTFOUND ;
+
+ delete $hash{"some key"} ;
+ ok 67, (tied %hash)->status() == 0 ;
+ ok 68, ! defined $hash{"some key"} ;
+ ok 69, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 70, ! exists $hash{"some key"} ;
+ ok 71, (tied %hash)->status() == DB_NOTFOUND ;
+
+ $hash{1} = 2 ;
+ $hash{10} = 20 ;
+ $hash{1000} = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 72, $count == 3 ;
+ ok 73, $keys == 1011 ;
+ ok 74, $values == 2022 ;
+
+ # now clear the hash
+ %hash = () ;
+ ok 75, keys %hash == 0 ;
+
+ untie %hash ;
+}
+
+{
+ # in-memory file
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $fd ;
+ my $value ;
+ ok 76, my $db = tie %hash, 'BerkeleyDB::Hash' ;
+
+ ok 77, $db->db_put("some key", "some value") == 0 ;
+ ok 78, $db->db_get("some key", $value) == 0 ;
+ ok 79, $value eq "some value" ;
+
+ undef $db ;
+ untie %hash ;
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 80, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 81, $ret == 0 ;
+
+
+ # do a partial get
+ my($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 82, $pon == 0 && $off == 0 && $len == 0 ;
+ ok 83, ( $db->db_get("red", $value) == 0) && $value eq "bo" ;
+ ok 84, ( $db->db_get("green", $value) == 0) && $value eq "ho" ;
+ ok 85, ( $db->db_get("blue", $value) == 0) && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 86, $pon ;
+ ok 87, $off == 0 ;
+ ok 88, $len == 2 ;
+ ok 89, $db->db_get("red", $value) == 0 && $value eq "t" ;
+ ok 90, $db->db_get("green", $value) == 0 && $value eq "se" ;
+ ok 91, $db->db_get("blue", $value) == 0 && $value eq "" ;
+
+ # switch of partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 92, $pon ;
+ ok 93, $off == 3 ;
+ ok 94, $len == 2 ;
+ ok 95, $db->db_get("red", $value) == 0 && $value eq "boat" ;
+ ok 96, $db->db_get("green", $value) == 0 && $value eq "house" ;
+ ok 97, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 98, ! $pon ;
+ ok 99, $off == 0 ;
+ ok 100, $len == 0 ;
+ ok 101, $db->db_put("red", "") == 0 ;
+ ok 102, $db->db_put("green", "AB") == 0 ;
+ ok 103, $db->db_put("blue", "XYZ") == 0 ;
+ ok 104, $db->db_put("new", "KLM") == 0 ;
+
+ $db->partial_clear() ;
+ ok 105, $db->db_get("red", $value) == 0 && $value eq "at" ;
+ ok 106, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
+ ok 107, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
+ ok 108, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 109, $db->db_put("red", "PPP") == 0 ;
+ ok 110, $db->db_put("green", "Q") == 0 ;
+ ok 111, $db->db_put("blue", "XYZ") == 0 ;
+ ok 112, $db->db_put("new", "--") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
+ ok 117, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
+ ok 118, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
+ ok 119, $db->db_get("new", $value) == 0 && $value eq "KLM--" ;
+}
+
+{
+ # partial
+ # check works via tied hash
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 120, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ while (my ($k, $v) = each %data) {
+ $hash{$k} = $v ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 121, $hash{"red"} eq "bo" ;
+ ok 122, $hash{"green"} eq "ho" ;
+ ok 123, $hash{"blue"} eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 124, $hash{"red"} eq "t" ;
+ ok 125, $hash{"green"} eq "se" ;
+ ok 126, $hash{"blue"} eq "" ;
+
+ # switch of partial mode
+ $db->partial_clear() ;
+ ok 127, $hash{"red"} eq "boat" ;
+ ok 128, $hash{"green"} eq "house" ;
+ ok 129, $hash{"blue"} eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 130, $hash{"red"} = "" ;
+ ok 131, $hash{"green"} = "AB" ;
+ ok 132, $hash{"blue"} = "XYZ" ;
+ ok 133, $hash{"new"} = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 134, $hash{"red"} eq "at" ;
+ ok 135, $hash{"green"} eq "ABuse" ;
+ ok 136, $hash{"blue"} eq "XYZa" ;
+ ok 137, $hash{"new"} eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 138, $hash{"red"} = "PPP" ;
+ ok 139, $hash{"green"} = "Q" ;
+ ok 140, $hash{"blue"} = "XYZ" ;
+ ok 141, $hash{"new"} = "TU" ;
+
+ $db->partial_clear() ;
+ ok 142, $hash{"red"} eq "at\0PPP" ;
+ ok 143, $hash{"green"} eq "ABuQ" ;
+ ok 144, $hash{"blue"} eq "XYZXYZ" ;
+ ok 145, $hash{"new"} eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 146, my $lexD = new LexDir($home);
+ ok 147, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 148, my $txn = $env->txn_begin() ;
+ ok 149, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 150, $txn->txn_commit() == 0 ;
+ ok 151, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 152, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 153, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 154, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 155, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 156, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 157, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+
+{
+ # DB_DUP
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 158, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 159, keys %hash == 6 ;
+
+ # create a cursor
+ ok 160, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 161, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 162, $key eq "Wall" && $value eq "Larry" ;
+ ok 163, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 164, $key eq "Wall" && $value eq "Stone" ;
+ ok 165, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 166, $key eq "Wall" && $value eq "Brick" ;
+ ok 167, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 168, $key eq "Wall" && $value eq "Brick" ;
+
+ #my $ref = $db->db_stat() ;
+ #ok 143, $ref->{bt_flags} | DB_DUP ;
+
+ # test DB_DUP_NEXT
+ my ($k, $v) = ("Wall", "") ;
+ ok 169, $cursor->c_get($k, $v, DB_SET) == 0 ;
+ ok 170, $k eq "Wall" && $v eq "Larry" ;
+ ok 171, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 172, $k eq "Wall" && $v eq "Stone" ;
+ ok 173, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 174, $k eq "Wall" && $v eq "Brick" ;
+ ok 175, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 176, $k eq "Wall" && $v eq "Brick" ;
+ ok 177, $cursor->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
+
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # DB_DUP & DupCompare
+ my $lex = new LexFile $Dfile, $Dfile2;
+ my ($key, $value) ;
+ my (%h, %g) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 9 def ) ;
+ my @Values = qw( 1 11 3 dd x abc 2 0 ) ;
+
+ ok 178, tie %h, "BerkeleyDB::Hash", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Flags => DB_CREATE ;
+
+ ok 179, tie %g, 'BerkeleyDB::Hash', -Filename => $Dfile2,
+ -DupCompare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Flags => DB_CREATE ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ }
+
+ ok 180, my $cursor = (tied %h)->db_cursor() ;
+ $key = 9 ; $value = "";
+ ok 181, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 182, $key == 9 && $value eq 11 ;
+ ok 183, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 184, $key == 9 && $value == 2 ;
+ ok 185, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 186, $key == 9 && $value eq "x" ;
+
+ $cursor = (tied %g)->db_cursor() ;
+ $key = 9 ;
+ ok 187, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 188, $key == 9 && $value eq "x" ;
+ ok 189, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 190, $key == 9 && $value == 2 ;
+ ok 191, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 192, $key == 9 && $value == 11 ;
+
+
+}
+
+{
+ # get_dup etc
+ my $lex = new LexFile $Dfile;
+ my %hh ;
+
+ ok 193, my $YY = tie %hh, "BerkeleyDB::Hash", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hh{'Wall'} = 'Larry' ;
+ $hh{'Wall'} = 'Stone' ; # Note the duplicate key
+ $hh{'Wall'} = 'Brick' ; # Note the duplicate key
+ $hh{'Smith'} = 'John' ;
+ $hh{'mouse'} = 'mickey' ;
+
+ # first work in scalar context
+ ok 194, scalar $YY->get_dup('Unknown') == 0 ;
+ ok 195, scalar $YY->get_dup('Smith') == 1 ;
+ ok 196, scalar $YY->get_dup('Wall') == 3 ;
+
+ # now in list context
+ my @unknown = $YY->get_dup('Unknown') ;
+ ok 197, "@unknown" eq "" ;
+
+ my @smith = $YY->get_dup('Smith') ;
+ ok 198, "@smith" eq "John" ;
+
+ {
+ my @wall = $YY->get_dup('Wall') ;
+ my %wall ;
+ @wall{@wall} = @wall ;
+ ok 199, (@wall == 3 && $wall{'Larry'}
+ && $wall{'Stone'} && $wall{'Brick'});
+ }
+
+ # hash
+ my %unknown = $YY->get_dup('Unknown', 1) ;
+ ok 200, keys %unknown == 0 ;
+
+ my %smith = $YY->get_dup('Smith', 1) ;
+ ok 201, keys %smith == 1 && $smith{'John'} ;
+
+ my %wall = $YY->get_dup('Wall', 1) ;
+ ok 202, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 1 ;
+
+ undef $YY ;
+ untie %hh ;
+
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Hash);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 203, $@ eq "" ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB", -Filename => "dbhash.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 204, $@ eq "" ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok 205, $@ eq "" ;
+ main::ok 206, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
+ main::ok 207, $@ eq "" ;
+ main::ok 208, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 209, $@ eq "" ;
+ main::ok 210, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok 211, $@ eq "" ;
+ main::ok 212, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbhash.tmp" ;
+
+}
diff --git a/storage/bdb/perl/BerkeleyDB/t/join.t b/storage/bdb/perl/BerkeleyDB/t/join.t
new file mode 100644
index 00000000000..ed9b6a269cb
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/join.t
@@ -0,0 +1,225 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+if ($BerkeleyDB::db_ver < 2.005002)
+{
+ print "1..0 # Skip: join needs Berkeley DB 2.5.2 or later\n" ;
+ exit 0 ;
+}
+
+
+print "1..37\n";
+
+my $Dfile1 = "dbhash1.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile1, $Dfile2, $Dfile3 ;
+
+umask(0) ;
+
+{
+ # error cases
+ my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
+ my %hash1 ;
+ my $value ;
+ my $status ;
+ my $cursor ;
+
+ ok 1, my $db1 = tie %hash1, 'BerkeleyDB::Hash',
+ -Filename => $Dfile1,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] lt $_[1] },
+ -Property => DB_DUP|DB_DUPSORT ;
+
+ # no cursors supplied
+ eval '$cursor = $db1->db_join() ;' ;
+ ok 2, $@ =~ /Usage: \$db->BerkeleyDB::Common::db_join\Q([cursors], flags=0)/;
+
+ # empty list
+ eval '$cursor = $db1->db_join([]) ;' ;
+ ok 3, $@ =~ /db_join: No cursors in parameter list/;
+
+ # cursor list, isn't a []
+ eval '$cursor = $db1->db_join({}) ;' ;
+ ok 4, $@ =~ /cursors is not an array reference at/ ;
+
+ eval '$cursor = $db1->db_join(\1) ;' ;
+ ok 5, $@ =~ /cursors is not an array reference at/ ;
+
+}
+
+{
+ # test a 2-way & 3-way join
+
+ my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
+ my %hash1 ;
+ my %hash2 ;
+ my %hash3 ;
+ my $value ;
+ my $status ;
+
+ my $home = "./fred" ;
+ ok 6, my $lexD = new LexDir($home);
+ ok 7, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN
+ |DB_INIT_MPOOL;
+ #|DB_INIT_MPOOL| DB_INIT_LOCK;
+ ok 8, my $txn = $env->txn_begin() ;
+ ok 9, my $db1 = tie %hash1, 'BerkeleyDB::Hash',
+ -Filename => $Dfile1,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+ ;
+
+ ok 10, my $db2 = tie %hash2, 'BerkeleyDB::Hash',
+ -Filename => $Dfile2,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 11, my $db3 = tie %hash3, 'BerkeleyDB::Btree',
+ -Filename => $Dfile3,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 12, addData($db1, qw( apple Convenience
+ peach Shopway
+ pear Farmer
+ raspberry Shopway
+ strawberry Shopway
+ gooseberry Farmer
+ blueberry Farmer
+ ));
+
+ ok 13, addData($db2, qw( red apple
+ red raspberry
+ red strawberry
+ yellow peach
+ yellow pear
+ green gooseberry
+ blue blueberry)) ;
+
+ ok 14, addData($db3, qw( expensive apple
+ reasonable raspberry
+ expensive strawberry
+ reasonable peach
+ reasonable pear
+ expensive gooseberry
+ reasonable blueberry)) ;
+
+ ok 15, my $cursor2 = $db2->db_cursor() ;
+ my $k = "red" ;
+ my $v = "" ;
+ ok 16, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ # Two way Join
+ ok 17, my $cursor1 = $db1->db_join([$cursor2]) ;
+
+ my %expected = qw( apple Convenience
+ raspberry Shopway
+ strawberry Shopway
+ ) ;
+
+ # sequence forwards
+ while ($cursor1->c_get($k, $v) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} && $expected{$k} eq $v ;
+ #print "[$k] [$v]\n" ;
+ }
+ ok 18, keys %expected == 0 ;
+ ok 19, $cursor1->status() == DB_NOTFOUND ;
+
+ # Three way Join
+ ok 20, $cursor2 = $db2->db_cursor() ;
+ $k = "red" ;
+ $v = "" ;
+ ok 21, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ ok 22, my $cursor3 = $db3->db_cursor() ;
+ $k = "expensive" ;
+ $v = "" ;
+ ok 23, $cursor3->c_get($k, $v, DB_SET) == 0 ;
+ ok 24, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
+
+ %expected = qw( apple Convenience
+ strawberry Shopway
+ ) ;
+
+ # sequence forwards
+ while ($cursor1->c_get($k, $v) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} && $expected{$k} eq $v ;
+ #print "[$k] [$v]\n" ;
+ }
+ ok 25, keys %expected == 0 ;
+ ok 26, $cursor1->status() == DB_NOTFOUND ;
+
+ # test DB_JOIN_ITEM
+ # #################
+ ok 27, $cursor2 = $db2->db_cursor() ;
+ $k = "red" ;
+ $v = "" ;
+ ok 28, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ ok 29, $cursor3 = $db3->db_cursor() ;
+ $k = "expensive" ;
+ $v = "" ;
+ ok 30, $cursor3->c_get($k, $v, DB_SET) == 0 ;
+ ok 31, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
+
+ %expected = qw( apple 1
+ strawberry 1
+ ) ;
+
+ # sequence forwards
+ $k = "" ;
+ $v = "" ;
+ while ($cursor1->c_get($k, $v, DB_JOIN_ITEM) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} ;
+ #print "[$k]\n" ;
+ }
+ ok 32, keys %expected == 0 ;
+ ok 33, $cursor1->status() == DB_NOTFOUND ;
+
+ ok 34, $cursor1->c_close() == 0 ;
+ ok 35, $cursor2->c_close() == 0 ;
+ ok 36, $cursor3->c_close() == 0 ;
+
+ ok 37, ($status = $txn->txn_commit) == 0;
+
+ undef $txn ;
+ #undef $cursor1;
+ #undef $cursor2;
+ #undef $cursor3;
+ undef $db1 ;
+ undef $db2 ;
+ undef $db3 ;
+ undef $env ;
+ untie %hash1 ;
+ untie %hash2 ;
+ untie %hash3 ;
+}
+print "# at the end\n";
diff --git a/storage/bdb/perl/BerkeleyDB/t/mldbm.t b/storage/bdb/perl/BerkeleyDB/t/mldbm.t
new file mode 100644
index 00000000000..d35f7e15895
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/mldbm.t
@@ -0,0 +1,161 @@
+#!/usr/bin/perl -w
+
+use strict ;
+
+BEGIN
+{
+ if ($] < 5.005) {
+ print "1..0 # This is Perl $], skipping test\n" ;
+ exit 0 ;
+ }
+
+ eval { require Data::Dumper ; };
+ if ($@) {
+ print "1..0 # Data::Dumper is not installed on this system.\n";
+ exit 0 ;
+ }
+ if ($Data::Dumper::VERSION < 2.08) {
+ print "1..0 # Data::Dumper 2.08 or better required (found $Data::Dumper::VERSION).\n";
+ exit 0 ;
+ }
+ eval { require MLDBM ; };
+ if ($@) {
+ print "1..0 # MLDBM is not installed on this system.\n";
+ exit 0 ;
+ }
+}
+
+use t::util ;
+
+print "1..12\n";
+
+{
+ package BTREE ;
+
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = "";
+ my $lex = new LexFile $filename;
+
+ $MLDBM::UseDB = "BerkeleyDB::Btree" ;
+ my %o ;
+ my $db = tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+ ::ok 1, $db ;
+ ::ok 2, $db->type() == DB_BTREE ;
+
+ my $c = [\'c'];
+ my $b = {};
+ my $a = [1, $b, $c];
+ $b->{a} = $a;
+ $b->{b} = $a->[1];
+ $b->{c} = $a->[2];
+ @o{qw(a b c)} = ($a, $b, $c);
+ $o{d} = "{once upon a time}";
+ $o{e} = 1024;
+ $o{f} = 1024.1024;
+ my $first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+ my $second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+ ::ok 3, $first eq $second ;
+ ::ok 4, $o{d} eq "{once upon a time}" ;
+ ::ok 5, $o{e} == 1024 ;
+ ::ok 6, $o{f} eq 1024.1024 ;
+
+}
+
+{
+
+ package HASH ;
+
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Hash) ;
+ use Data::Dumper;
+
+ my $filename = "";
+ my $lex = new LexFile $filename;
+
+ unlink $filename ;
+ $MLDBM::UseDB = "BerkeleyDB::Hash" ;
+ my %o ;
+ my $db = tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+ ::ok 7, $db ;
+ ::ok 8, $db->type() == DB_HASH ;
+
+
+ my $c = [\'c'];
+ my $b = {};
+ my $a = [1, $b, $c];
+ $b->{a} = $a;
+ $b->{b} = $a->[1];
+ $b->{c} = $a->[2];
+ @o{qw(a b c)} = ($a, $b, $c);
+ $o{d} = "{once upon a time}";
+ $o{e} = 1024;
+ $o{f} = 1024.1024;
+ my $first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+ my $second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+ ::ok 9, $first eq $second ;
+ ::ok 10, $o{d} eq "{once upon a time}" ;
+ ::ok 11, $o{e} == 1024 ;
+ ::ok 12, $o{f} eq 1024.1024 ;
+
+}
diff --git a/storage/bdb/perl/BerkeleyDB/t/queue.t b/storage/bdb/perl/BerkeleyDB/t/queue.t
new file mode 100644
index 00000000000..86add129ca4
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/queue.t
@@ -0,0 +1,763 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.3) {
+ print "1..0 # Skipping test, Queue needs Berkeley DB 3.3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..201\n";
+
+sub fillout
+{
+ my $var = shift ;
+ my $length = shift ;
+ my $pad = shift || " " ;
+ my $template = $pad x $length ;
+ substr($template, 0, length($var)) = $var ;
+ return $template ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Queue -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Queue -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) / ;
+
+ eval ' $db = new BerkeleyDB::Queue -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Queue -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Queue -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Queue
+
+{
+ my $lex = new LexFile $Dfile ;
+ my $rec_len = 10 ;
+ my $pad = "x" ;
+
+ ok 6, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Len => $rec_len,
+ -Pad => $pad;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put(1, "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get(1, $value) == 0 ;
+ ok 10, $value eq fillout("some value", $rec_len, $pad) ;
+ ok 11, $db->db_put(2, "value") == 0 ;
+ ok 12, $db->db_get(2, $value) == 0 ;
+ ok 13, $value eq fillout("value", $rec_len, $pad) ;
+ ok 14, $db->db_del(1) == 0 ;
+ ok 15, ($status = $db->db_get(1, $value)) == DB_KEYEMPTY ;
+ ok 16, $db->status() == DB_KEYEMPTY ;
+ ok 17, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
+
+ ok 18, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
+ ok 19, $db->status() == DB_NOTFOUND ;
+ ok 20, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 21, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 22, $db->db_put( 2, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 23, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 24, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 25, $db->db_get(2, $value) == 0 ;
+ ok 26, $value eq fillout("value", $rec_len, $pad) ;
+
+
+}
+
+
+{
+ # Check simple env works with a array.
+ # and pad defaults to space
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ my $rec_len = 11 ;
+ ok 27, my $lexD = new LexDir($home);
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 29, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE,
+ -Len => $rec_len;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put(1, "some value") == 0 ;
+ ok 31, $db->db_get(1, $value) == 0 ;
+ ok 32, $value eq fillout("some value", $rec_len) ;
+ undef $db ;
+ undef $env ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my ($k, $v) ;
+ my $rec_len = 5 ;
+ ok 33, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len;
+
+ # create some data
+ my @data = (
+ "red" ,
+ "green" ,
+ "blue" ,
+ ) ;
+
+ my $i ;
+ my %data ;
+ my $ret = 0 ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ $data{$i} = $data[$i] ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = 0 ; $v = "" ;
+ my %copy = %data;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ {
+ if ( fillout($copy{$k}, $rec_len) eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( fillout($copy{$k}, $rec_len) eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+}
+
+{
+ # Tied Array interface
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ my $rec_len = 10 ;
+ ok 46, $db = tie @array, 'BerkeleyDB::Queue', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len;
+
+ ok 47, my $cursor = (tied @array)->db_cursor() ;
+ # check the database is empty
+ my $count = 0 ;
+ my ($k, $v) = (0,"") ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 48, $cursor->status() == DB_NOTFOUND ;
+ ok 49, $count == 0 ;
+
+ ok 50, @array == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $array[1] = "some value";
+ ok 51, (tied @array)->status() == 0 ;
+ ok 52, $array[1] eq fillout("some value", $rec_len);
+ ok 53, defined $array[1];
+ ok 54, (tied @array)->status() == 0 ;
+ ok 55, !defined $array[3];
+ ok 56, (tied @array)->status() == DB_NOTFOUND ;
+
+ ok 57, (tied @array)->db_del(1) == 0 ;
+ ok 58, (tied @array)->status() == 0 ;
+ ok 59, ! defined $array[1];
+ ok 60, (tied @array)->status() == DB_KEYEMPTY ;
+
+ $array[1] = 2 ;
+ $array[10] = 20 ;
+ $array[1000] = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 61, $count == 3 ;
+ ok 62, $keys == 1011 ;
+ ok 63, $values == 2022 ;
+
+ # unshift isn't allowed
+# eval {
+# $FA ? unshift @array, "red", "green", "blue"
+# : $db->unshift("red", "green", "blue" ) ;
+# } ;
+# ok 64, $@ =~ /^unshift is unsupported with Queue databases/ ;
+ $array[0] = "red" ;
+ $array[1] = "green" ;
+ $array[2] = "blue" ;
+ $array[4] = 2 ;
+ ok 64, $array[0] eq fillout("red", $rec_len) ;
+ ok 65, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 66, $k == 0 ;
+ ok 67, $v eq fillout("red", $rec_len) ;
+ ok 68, $array[1] eq fillout("green", $rec_len) ;
+ ok 69, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 70, $k == 1 ;
+ ok 71, $v eq fillout("green", $rec_len) ;
+ ok 72, $array[2] eq fillout("blue", $rec_len) ;
+ ok 73, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 74, $k == 2 ;
+ ok 75, $v eq fillout("blue", $rec_len) ;
+ ok 76, $array[4] == 2 ;
+ ok 77, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 78, $k == 4 ;
+ ok 79, $v == 2 ;
+
+ # shift
+ ok 80, ($FA ? shift @array : $db->shift()) eq fillout("red", $rec_len) ;
+ ok 81, ($FA ? shift @array : $db->shift()) eq fillout("green", $rec_len) ;
+ ok 82, ($FA ? shift @array : $db->shift()) eq fillout("blue", $rec_len) ;
+ ok 83, ($FA ? shift @array : $db->shift()) == 2 ;
+
+ # push
+ $FA ? push @array, "the", "end"
+ : $db->push("the", "end") ;
+ ok 84, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 85, $k == 1002 ;
+ ok 86, $v eq fillout("end", $rec_len) ;
+ ok 87, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 88, $k == 1001 ;
+ ok 89, $v eq fillout("the", $rec_len) ;
+ ok 90, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 91, $k == 1000 ;
+ ok 92, $v == 2000 ;
+
+ # pop
+ ok 93, ( $FA ? pop @array : $db->pop ) eq fillout("end", $rec_len) ;
+ ok 94, ( $FA ? pop @array : $db->pop ) eq fillout("the", $rec_len) ;
+ ok 95, ( $FA ? pop @array : $db->pop ) == 2000 ;
+
+ # now clear the array
+ $FA ? @array = ()
+ : $db->clear() ;
+ ok 96, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
+
+ undef $cursor ;
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory file
+
+ my @array ;
+ my $fd ;
+ my $value ;
+ my $rec_len = 15 ;
+ ok 97, my $db = tie @array, 'BerkeleyDB::Queue',
+ -Len => $rec_len;
+
+ ok 98, $db->db_put(1, "some value") == 0 ;
+ ok 99, $db->db_get(1, $value) == 0 ;
+ ok 100, $value eq fillout("some value", $rec_len) ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ my $rec_len = 8 ;
+ ok 101, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ my $r = $db->db_put($i, $data[$i]) ;
+ $ret += $r ;
+ }
+ ok 102, $ret == 0 ;
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 103, ! $pon && $off == 0 && $len == 0 ;
+ ok 104, $db->db_get(1, $value) == 0 && $value eq "bo" ;
+ ok 105, $db->db_get(2, $value) == 0 && $value eq "ho" ;
+ ok 106, $db->db_get(3, $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 107, $pon ;
+ ok 108, $off == 0 ;
+ ok 109, $len == 2 ;
+ ok 110, $db->db_get(1, $value) == 0 && $value eq fillout("t", 2) ;
+ ok 111, $db->db_get(2, $value) == 0 && $value eq "se" ;
+ ok 112, $db->db_get(3, $value) == 0 && $value eq " " ;
+
+ # switch of partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
+ ok 117, $db->db_get(2, $value) == 0 && $value eq fillout("house", $rec_len) ;
+ ok 118, $db->db_get(3, $value) == 0 && $value eq fillout("sea", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 119, $db->db_put(1, "") != 0 ;
+ ok 120, $db->db_put(2, "AB") == 0 ;
+ ok 121, $db->db_put(3, "XY") == 0 ;
+ ok 122, $db->db_put(4, "KLM") != 0 ;
+ ok 123, $db->db_put(4, "KL") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 124, $pon ;
+ ok 125, $off == 0 ;
+ ok 126, $len == 2 ;
+ ok 127, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
+ ok 128, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse", $rec_len) ;
+ ok 129, $db->db_get(3, $value) == 0 && $value eq fillout("XYa", $rec_len) ;
+ ok 130, $db->db_get(4, $value) == 0 && $value eq fillout("KL", $rec_len) ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 131, ! $pon ;
+ ok 132, $off == 0 ;
+ ok 133, $len == 0 ;
+ ok 134, $db->db_put(1, "PP") == 0 ;
+ ok 135, $db->db_put(2, "Q") != 0 ;
+ ok 136, $db->db_put(3, "XY") == 0 ;
+ ok 137, $db->db_put(4, "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 138, $db->db_get(1, $value) == 0 && $value eq fillout("boaPP", $rec_len) ;
+ ok 139, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse",$rec_len) ;
+ ok 140, $db->db_get(3, $value) == 0 && $value eq fillout("XYaXY", $rec_len) ;
+ ok 141, $db->db_get(4, $value) == 0 && $value eq fillout("KL TU", $rec_len) ;
+}
+
+{
+ # partial
+ # check works via tied array
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+ my $rec_len = 8 ;
+ ok 142, my $db = tie @array, 'BerkeleyDB::Queue', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $i ;
+ my $status = 0 ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $array[$i] = $data[$i] ;
+ $status += $db->status() ;
+ }
+
+ ok 143, $status == 0 ;
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 144, $array[1] eq fillout("bo", 2) ;
+ ok 145, $array[2] eq fillout("ho", 2) ;
+ ok 146, $array[3] eq fillout("se", 2) ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 147, $array[1] eq fillout("t", 2) ;
+ ok 148, $array[2] eq fillout("se", 2) ;
+ ok 149, $array[3] eq fillout("", 2) ;
+
+ # switch of partial mode
+ $db->partial_clear() ;
+ ok 150, $array[1] eq fillout("boat", $rec_len) ;
+ ok 151, $array[2] eq fillout("house", $rec_len) ;
+ ok 152, $array[3] eq fillout("sea", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ $array[1] = "" ;
+ ok 153, $db->status() != 0 ;
+ $array[2] = "AB" ;
+ ok 154, $db->status() == 0 ;
+ $array[3] = "XY" ;
+ ok 155, $db->status() == 0 ;
+ $array[4] = "KL" ;
+ ok 156, $db->status() == 0 ;
+
+ $db->partial_clear() ;
+ ok 157, $array[1] eq fillout("boat", $rec_len) ;
+ ok 158, $array[2] eq fillout("ABuse", $rec_len) ;
+ ok 159, $array[3] eq fillout("XYa", $rec_len) ;
+ ok 160, $array[4] eq fillout("KL", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ $array[1] = "PP" ;
+ ok 161, $db->status() == 0 ;
+ $array[2] = "Q" ;
+ ok 162, $db->status() != 0 ;
+ $array[3] = "XY" ;
+ ok 163, $db->status() == 0 ;
+ $array[4] = "TU" ;
+ ok 164, $db->status() == 0 ;
+
+ $db->partial_clear() ;
+ ok 165, $array[1] eq fillout("boaPP", $rec_len) ;
+ ok 166, $array[2] eq fillout("ABuse", $rec_len) ;
+ ok 167, $array[3] eq fillout("XYaXY", $rec_len) ;
+ ok 168, $array[4] eq fillout("KL TU", $rec_len) ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 169, my $lexD = new LexDir($home);
+ my $rec_len = 9 ;
+ ok 170, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 171, my $txn = $env->txn_begin() ;
+ ok 172, my $db1 = tie @array, 'BerkeleyDB::Queue',
+ -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+
+ ok 173, $txn->txn_commit() == 0 ;
+ ok 174, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my @data = (
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db1->db_put($i, $data[$i]) ;
+ }
+ ok 175, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 176, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = (0, "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 177, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 178, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 179, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 180, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie @array ;
+}
+
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "qs_ndata" : "qs_nrecs") ;
+ my @array ;
+ my ($k, $v) ;
+ my $rec_len = 7 ;
+ ok 181, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Pagesize => 4 * 1024,
+ -Len => $rec_len,
+ -Pad => " "
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 182, $ref->{$recs} == 0;
+ ok 183, $ref->{'qs_pagesize'} == 4 * 1024;
+
+ # create some data
+ my @data = (
+ 2,
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = $db->ArrayOffset ; @data ; ++$i) {
+ $ret += $db->db_put($i, shift @data) ;
+ }
+ ok 184, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 185, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Queue);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 186, $@ eq "" ;
+ my @h ;
+ my $X ;
+ my $rec_len = 34 ;
+ eval '
+ $X = tie(@h, "SubDB", -Filename => "dbqueue.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 ,
+ -Len => $rec_len,
+ -Pad => " "
+ );
+ ' ;
+
+ main::ok 187, $@ eq "" ;
+
+ my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 192, $@ eq "" ;
+ main::ok 193, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok 194, $@ eq "" ;
+ main::ok 195, $ret eq "[[10]]" ;
+
+ undef $X ;
+ untie @h ;
+ unlink "SubDB.pm", "dbqueue.tmp" ;
+
+}
+
+{
+ # DB_APPEND
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ my $rec_len = 21 ;
+ ok 196, my $db = tie @array, 'BerkeleyDB::Queue',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create a few records
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ my $k = 0 ;
+ ok 197, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 198, $k == 4 ;
+ ok 199, $array[4] eq fillout("fred", $rec_len) ;
+
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # 23 Sept 2001 -- push into an empty array
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ my $rec_len = 21 ;
+ ok 200, $db = tie @array, 'BerkeleyDB::Queue',
+ -Flags => DB_CREATE ,
+ -ArrayBase => 0,
+ -Len => $rec_len,
+ -Pad => " " ,
+ -Filename => $Dfile ;
+ $FA ? push @array, "first"
+ : $db->push("first") ;
+
+ ok 201, ($FA ? pop @array : $db->pop()) eq fillout("first", $rec_len) ;
+
+ undef $db;
+ untie @array ;
+
+}
+
+__END__
+
+
+# TODO
+#
+# DB_DELIMETER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
diff --git a/storage/bdb/perl/BerkeleyDB/t/recno.t b/storage/bdb/perl/BerkeleyDB/t/recno.t
new file mode 100644
index 00000000000..64b1803f736
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/recno.t
@@ -0,0 +1,913 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..226\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Recno -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Recno -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) / ;
+
+ eval ' $db = new BerkeleyDB::Recno -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Recno -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Recno -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Recno
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put(1, "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get(1, $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put(2, "value") == 0 ;
+ ok 12, $db->db_get(2, $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del(1) == 0 ;
+ ok 15, ($status = $db->db_get(1, $value)) == DB_KEYEMPTY ;
+ ok 16, $db->status() == DB_KEYEMPTY ;
+ ok 17, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
+
+ ok 18, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
+ ok 19, $db->status() == DB_NOTFOUND ;
+ ok 20, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 21, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 22, $db->db_put( 2, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 23, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 24, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 25, $db->db_get(2, $value) == 0 ;
+ ok 26, $value eq "value" ;
+
+
+}
+
+
+{
+ # Check simple env works with a array.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 27, my $lexD = new LexDir($home);
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+
+ ok 29, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put(1, "some value") == 0 ;
+ ok 31, $db->db_get(1, $value) == 0 ;
+ ok 32, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my ($k, $v) ;
+ ok 33, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "red" ,
+ "green" ,
+ "blue" ,
+ ) ;
+
+ my $i ;
+ my %data ;
+ my $ret = 0 ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ $data{$i} = $data[$i] ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = 0 ; $v = "" ;
+ my %copy = %data;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+}
+
+{
+ # Tied Array interface
+
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 46, $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Property => DB_RENUMBER,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+
+ ok 47, my $cursor = (tied @array)->db_cursor() ;
+ # check the database is empty
+ my $count = 0 ;
+ my ($k, $v) = (0,"") ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 48, $cursor->status() == DB_NOTFOUND ;
+ ok 49, $count == 0 ;
+
+ ok 50, @array == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $array[1] = "some value";
+ ok 51, (tied @array)->status() == 0 ;
+ ok 52, $array[1] eq "some value";
+ ok 53, defined $array[1];
+ ok 54, (tied @array)->status() == 0 ;
+ ok 55, !defined $array[3];
+ ok 56, (tied @array)->status() == DB_NOTFOUND ;
+
+ ok 57, (tied @array)->db_del(1) == 0 ;
+ ok 58, (tied @array)->status() == 0 ;
+ ok 59, ! defined $array[1];
+ ok 60, (tied @array)->status() == DB_NOTFOUND ;
+
+ $array[1] = 2 ;
+ $array[10] = 20 ;
+ $array[1000] = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 61, $count == 3 ;
+ ok 62, $keys == 1011 ;
+ ok 63, $values == 2022 ;
+
+ # unshift
+ $FA ? unshift @array, "red", "green", "blue"
+ : $db->unshift("red", "green", "blue" ) ;
+ ok 64, $array[1] eq "red" ;
+ ok 65, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 66, $k == 1 ;
+ ok 67, $v eq "red" ;
+ ok 68, $array[2] eq "green" ;
+ ok 69, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 70, $k == 2 ;
+ ok 71, $v eq "green" ;
+ ok 72, $array[3] eq "blue" ;
+ ok 73, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 74, $k == 3 ;
+ ok 75, $v eq "blue" ;
+ ok 76, $array[4] == 2 ;
+ ok 77, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 78, $k == 4 ;
+ ok 79, $v == 2 ;
+
+ # shift
+ ok 80, ($FA ? shift @array : $db->shift()) eq "red" ;
+ ok 81, ($FA ? shift @array : $db->shift()) eq "green" ;
+ ok 82, ($FA ? shift @array : $db->shift()) eq "blue" ;
+ ok 83, ($FA ? shift @array : $db->shift()) == 2 ;
+
+ # push
+ $FA ? push @array, "the", "end"
+ : $db->push("the", "end") ;
+ ok 84, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 85, $k == 1001 ;
+ ok 86, $v eq "end" ;
+ ok 87, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 88, $k == 1000 ;
+ ok 89, $v eq "the" ;
+ ok 90, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 91, $k == 999 ;
+ ok 92, $v == 2000 ;
+
+ # pop
+ ok 93, ( $FA ? pop @array : $db->pop ) eq "end" ;
+ ok 94, ( $FA ? pop @array : $db->pop ) eq "the" ;
+ ok 95, ( $FA ? pop @array : $db->pop ) == 2000 ;
+
+ # now clear the array
+ $FA ? @array = ()
+ : $db->clear() ;
+ ok 96, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
+
+ undef $cursor ;
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory file
+
+ my @array ;
+ my $fd ;
+ my $value ;
+ ok 97, my $db = tie @array, 'BerkeleyDB::Recno' ;
+
+ ok 98, $db->db_put(1, "some value") == 0 ;
+ ok 99, $db->db_get(1, $value) == 0 ;
+ ok 100, $value eq "some value" ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ ok 101, my $db = new BerkeleyDB::Recno, -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ }
+ ok 102, $ret == 0 ;
+
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 103, ! $pon && $off == 0 && $len == 0 ;
+ ok 104, $db->db_get(1, $value) == 0 && $value eq "bo" ;
+ ok 105, $db->db_get(2, $value) == 0 && $value eq "ho" ;
+ ok 106, $db->db_get(3, $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 107, $pon ;
+ ok 108, $off == 0 ;
+ ok 109, $len == 2 ;
+ ok 110, $db->db_get(1, $value) == 0 && $value eq "t" ;
+ ok 111, $db->db_get(2, $value) == 0 && $value eq "se" ;
+ ok 112, $db->db_get(3, $value) == 0 && $value eq "" ;
+
+ # switch of partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get(1, $value) == 0 && $value eq "boat" ;
+ ok 117, $db->db_get(2, $value) == 0 && $value eq "house" ;
+ ok 118, $db->db_get(3, $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 119, $db->db_put(1, "") == 0 ;
+ ok 120, $db->db_put(2, "AB") == 0 ;
+ ok 121, $db->db_put(3, "XYZ") == 0 ;
+ ok 122, $db->db_put(4, "KLM") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 123, $pon ;
+ ok 124, $off == 0 ;
+ ok 125, $len == 2 ;
+ ok 126, $db->db_get(1, $value) == 0 && $value eq "at" ;
+ ok 127, $db->db_get(2, $value) == 0 && $value eq "ABuse" ;
+ ok 128, $db->db_get(3, $value) == 0 && $value eq "XYZa" ;
+ ok 129, $db->db_get(4, $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 130, ! $pon ;
+ ok 131, $off == 0 ;
+ ok 132, $len == 0 ;
+ ok 133, $db->db_put(1, "PPP") == 0 ;
+ ok 134, $db->db_put(2, "Q") == 0 ;
+ ok 135, $db->db_put(3, "XYZ") == 0 ;
+ ok 136, $db->db_put(4, "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 137, $db->db_get(1, $value) == 0 && $value eq "at\0PPP" ;
+ ok 138, $db->db_get(2, $value) == 0 && $value eq "ABuQ" ;
+ ok 139, $db->db_get(3, $value) == 0 && $value eq "XYZXYZ" ;
+ ok 140, $db->db_get(4, $value) == 0 && $value eq "KLMTU" ;
+}
+
+{
+ # partial
+ # check works via tied array
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+ ok 141, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $i ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $array[$i] = $data[$i] ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 142, $array[1] eq "bo" ;
+ ok 143, $array[2] eq "ho" ;
+ ok 144, $array[3] eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 145, $array[1] eq "t" ;
+ ok 146, $array[2] eq "se" ;
+ ok 147, $array[3] eq "" ;
+
+ # switch of partial mode
+ $db->partial_clear() ;
+ ok 148, $array[1] eq "boat" ;
+ ok 149, $array[2] eq "house" ;
+ ok 150, $array[3] eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 151, $array[1] = "" ;
+ ok 152, $array[2] = "AB" ;
+ ok 153, $array[3] = "XYZ" ;
+ ok 154, $array[4] = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 155, $array[1] eq "at" ;
+ ok 156, $array[2] eq "ABuse" ;
+ ok 157, $array[3] eq "XYZa" ;
+ ok 158, $array[4] eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 159, $array[1] = "PPP" ;
+ ok 160, $array[2] = "Q" ;
+ ok 161, $array[3] = "XYZ" ;
+ ok 162, $array[4] = "TU" ;
+
+ $db->partial_clear() ;
+ ok 163, $array[1] eq "at\0PPP" ;
+ ok 164, $array[2] eq "ABuQ" ;
+ ok 165, $array[3] eq "XYZXYZ" ;
+ ok 166, $array[4] eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 167, my $lexD = new LexDir($home);
+ ok 168, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 169, my $txn = $env->txn_begin() ;
+ ok 170, my $db1 = tie @array, 'BerkeleyDB::Recno',
+ -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 171, $txn->txn_commit() == 0 ;
+ ok 172, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my @data = (
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db1->db_put($i, $data[$i]) ;
+ }
+ ok 173, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 174, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = (0, "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 175, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 176, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 177, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 178, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie @array ;
+}
+
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
+ my @array ;
+ my ($k, $v) ;
+ ok 179, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Pagesize => 4 * 1024,
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 180, $ref->{$recs} == 0;
+ ok 181, $ref->{'bt_pagesize'} == 4 * 1024;
+
+ # create some data
+ my @data = (
+ 2,
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = $db->ArrayOffset ; @data ; ++$i) {
+ $ret += $db->db_put($i, shift @data) ;
+ }
+ ok 182, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 183, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Recno);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 184, $@ eq "" ;
+ my @h ;
+ my $X ;
+ eval '
+ $X = tie(@h, "SubDB", -Filename => "dbrecno.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 185, $@ eq "" ;
+
+ my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
+ main::ok 186, $@ eq "" ;
+ main::ok 187, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok 192, $@ eq "" ;
+ main::ok 193, $ret eq "[[10]]" ;
+
+ undef $X;
+ untie @h;
+ unlink "SubDB.pm", "dbrecno.tmp" ;
+
+}
+
+{
+ # variable length records, DB_DELIMETER -- defaults to \n
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 194, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 195, $x eq "abc\ndef\n\nghi\n" ;
+}
+
+{
+ # variable length records, change DB_DELIMETER
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 196, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ,
+ -Delim => "-";
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 197, $x eq "abc-def--ghi-";
+}
+
+{
+ # fixed length records, default DB_PAD
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 198, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => 5,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 199, $x eq "abc def ghi " ;
+}
+
+{
+ # fixed length records, change Pad
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 200, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => 5,
+ -Pad => "-",
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 201, $x eq "abc--def-------ghi--" ;
+}
+
+{
+ # DB_RENUMBER
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ ok 202, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Property => DB_RENUMBER,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+ # create a few records
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ ok 203, my ($length, $joined) = joiner($db, "|") ;
+ ok 204, $length == 3 ;
+ ok 205, $joined eq "abc|def|ghi";
+
+ ok 206, $db->db_del(1) == 0 ;
+ ok 207, ($length, $joined) = joiner($db, "|") ;
+ ok 208, $length == 2 ;
+ ok 209, $joined eq "abc|ghi";
+
+ undef $db ;
+ untie @array ;
+
+}
+
+{
+ # DB_APPEND
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ ok 210, my $db = tie @array, 'BerkeleyDB::Recno',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create a few records
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ my $k = 0 ;
+ ok 211, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 212, $k == 4 ;
+
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory Btree with an associated text file
+
+ my $lex = new LexFile $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 213, tie @array, 'BerkeleyDB::Recno', -Source => $Dfile2 ,
+ -ArrayBase => 0,
+ -Property => DB_RENUMBER,
+ -Flags => DB_CREATE ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 214, $x eq "abc\ndef\n\nghi\n" ;
+}
+
+{
+ # in-memory, variable length records, change DB_DELIMETER
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 215, tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ,
+ -Property => DB_RENUMBER,
+ -Delim => "-";
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 216, $x eq "abc-def--ghi-";
+}
+
+{
+ # in-memory, fixed length records, default DB_PAD
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 217, tie @array, 'BerkeleyDB::Recno', -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Len => 5,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 218, $x eq "abc def ghi " ;
+}
+
+{
+ # in-memory, fixed length records, change Pad
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 219, tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Len => 5,
+ -Pad => "-",
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 220, $x eq "abc--def-------ghi--" ;
+}
+
+{
+ # 23 Sept 2001 -- push into an empty array
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 221, $db = tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Filename => $Dfile ;
+ $FA ? push @array, "first"
+ : $db->push("first") ;
+
+ ok 222, $array[0] eq "first" ;
+ ok 223, $FA ? pop @array : $db->pop() eq "first" ;
+
+ undef $db;
+ untie @array ;
+
+}
+
+{
+ # 23 Sept 2001 -- unshift into an empty array
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 224, $db = tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Filename => $Dfile ;
+ $FA ? unshift @array, "first"
+ : $db->unshift("first") ;
+
+ ok 225, $array[0] eq "first" ;
+ ok 226, ($FA ? shift @array : $db->shift()) eq "first" ;
+
+ undef $db;
+ untie @array ;
+
+}
+__END__
+
+
+# TODO
+#
+# DB_DELIMETER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
diff --git a/storage/bdb/perl/BerkeleyDB/t/strict.t b/storage/bdb/perl/BerkeleyDB/t/strict.t
new file mode 100644
index 00000000000..ab41d44cb41
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/strict.t
@@ -0,0 +1,174 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..44\n";
+
+my $Dfile = "dbhash.tmp";
+my $home = "./fred" ;
+
+umask(0);
+
+{
+ # closing a database & an environment in the correct order.
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ ok 1, my $lexD = new LexDir($home);
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 3, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env;
+
+ ok 4, $db1->db_close() == 0 ;
+
+ eval { $status = $env->db_appexit() ; } ;
+ ok 5, $status == 0 ;
+ ok 6, $@ eq "" ;
+ #print "[$@]\n" ;
+
+}
+
+{
+ # closing an environment with an open database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+
+ ok 7, my $lexD = new LexDir($home);
+ ok 8, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 9, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env;
+
+ eval { $env->db_appexit() ; } ;
+ ok 10, $@ =~ /BerkeleyDB Aborting: attempted to close an environment with 1 open database/ ;
+ #print "[$@]\n" ;
+
+ undef $db1 ;
+ untie %hash ;
+ undef $env ;
+}
+
+{
+ # closing a transaction & a database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ ok 11, my $lexD = new LexDir($home);
+ ok 12, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 13, my $txn = $env->txn_begin() ;
+ ok 14, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 15, $txn->txn_commit() == 0 ;
+ eval { $status = $db->db_close() ; } ;
+ ok 16, $status == 0 ;
+ ok 17, $@ eq "" ;
+ #print "[$@]\n" ;
+ eval { $status = $env->db_appexit() ; } ;
+ ok 18, $status == 0 ;
+ ok 19, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a database with an open transaction
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+
+ ok 20, my $lexD = new LexDir($home);
+ ok 21, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 22, my $txn = $env->txn_begin() ;
+ ok 23, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ eval { $db->db_close() ; } ;
+ ok 24, $@ =~ /BerkeleyDB Aborting: attempted to close a database while a transaction was still open at/ ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a cursor & a database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+ ok 25, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ ok 26, my $cursor = $db->db_cursor() ;
+ ok 27, $cursor->c_close() == 0 ;
+ eval { $status = $db->db_close() ; } ;
+ ok 28, $status == 0 ;
+ ok 29, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a database with an open cursor
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 30, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ ok 31, my $cursor = $db->db_cursor() ;
+ eval { $db->db_close() ; } ;
+ ok 32, $@ =~ /\QBerkeleyDB Aborting: attempted to close a database with 1 open cursor(s) at/;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a transaction & a cursor
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ ok 33, my $lexD = new LexDir($home);
+ ok 34, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 35, my $txn = $env->txn_begin() ;
+ ok 36, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+ ok 37, my $cursor = $db->db_cursor() ;
+ eval { $status = $cursor->c_close() ; } ;
+ ok 38, $status == 0 ;
+ ok 39, ($status = $txn->txn_commit()) == 0 ;
+ ok 40, $@ eq "" ;
+ eval { $status = $db->db_close() ; } ;
+ ok 41, $status == 0 ;
+ ok 42, $@ eq "" ;
+ #print "[$@]\n" ;
+ eval { $status = $env->db_appexit() ; } ;
+ ok 43, $status == 0 ;
+ ok 44, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/subdb.t b/storage/bdb/perl/BerkeleyDB/t/subdb.t
new file mode 100644
index 00000000000..23016d6463f
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/subdb.t
@@ -0,0 +1,243 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..43\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+# Berkeley DB 3.x specific functionality
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' BerkeleyDB::db_remove -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' BerkeleyDB::db_remove -Bad => 2, -Filename => "fred", -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' BerkeleyDB::db_remove -Filename => "a", -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' BerkeleyDB::db_remove -Subname => "a"' ;
+ ok 4, $@ =~ /^Must specify a filename/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' BerkeleyDB::db_remove -Filename => "x", -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+{
+ # subdatabases
+
+ # opening a subdatabse in an exsiting database that doesn't have
+ # subdatabases at all should fail
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 7, addData($db, %data) ;
+
+ undef $db ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ;
+ ok 8, ! $db ;
+
+ ok 9, -e $Dfile ;
+ ok 10, ! BerkeleyDB::db_remove(-Filename => $Dfile) ;
+}
+
+{
+ # subdatabases
+
+ # opening a subdatabse in an exsiting database that does have
+ # subdatabases at all, but not this one
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 11, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 12, addData($db, %data) ;
+
+ undef $db ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "joe" ;
+
+ ok 13, !$db ;
+
+}
+
+{
+ # subdatabases
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 14, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 15, addData($db, %data) ;
+
+ undef $db ;
+
+ ok 16, $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ;
+
+ ok 17, my $cursor = $db->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $status ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ if ($data{$k} eq $v) {
+ delete $data{$k} ;
+ }
+ }
+ ok 18, $status == DB_NOTFOUND ;
+ ok 19, keys %data == 0 ;
+}
+
+{
+ # subdatabases
+
+ # opening a database with multiple subdatabases - handle should be a list
+ # of the subdatabase names
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 20, my $db1 = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ ok 21, my $db2 = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Subname => "joe" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 22, addData($db1, %data) ;
+ ok 23, addData($db2, %data) ;
+
+ undef $db1 ;
+ undef $db2 ;
+
+ ok 24, my $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ #my $type = $db->type() ; print "type $type\n" ;
+ ok 25, my $cursor = $db->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $status ;
+ my @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 26, $status == DB_NOTFOUND ;
+ ok 27, join(",", sort @dbnames) eq "fred,joe" ;
+ undef $db ;
+
+ ok 28, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "harry") != 0;
+ ok 29, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") == 0 ;
+
+ # should only be one subdatabase
+ ok 30, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ ok 31, $cursor = $db->db_cursor() ;
+ @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 32, $status == DB_NOTFOUND ;
+ ok 33, join(",", sort @dbnames) eq "joe" ;
+ undef $db ;
+
+ # can't delete an already deleted subdatabase
+ ok 34, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") != 0;
+
+ ok 35, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "joe") == 0 ;
+
+ # should only be one subdatabase
+ ok 36, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ ok 37, $cursor = $db->db_cursor() ;
+ @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 38, $status == DB_NOTFOUND ;
+ ok 39, @dbnames == 0 ;
+ undef $db ;
+ undef $cursor ;
+
+ ok 40, -e $Dfile ;
+ ok 41, BerkeleyDB::db_remove(-Filename => $Dfile) == 0 ;
+ ok 42, ! -e $Dfile ;
+ ok 43, BerkeleyDB::db_remove(-Filename => $Dfile) != 0 ;
+}
+
+# db_remove with env
diff --git a/storage/bdb/perl/BerkeleyDB/t/txn.t b/storage/bdb/perl/BerkeleyDB/t/txn.t
new file mode 100644
index 00000000000..ba6b636cdc8
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/txn.t
@@ -0,0 +1,320 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..58\n";
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # error cases
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 1, my $lexD = new LexDir($home);
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE| DB_INIT_MPOOL;
+ eval { $env->txn_begin() ; } ;
+ ok 3, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
+
+ eval { my $txn_mgr = $env->TxnMgr() ; } ;
+ ok 4, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
+ undef $env ;
+
+}
+
+{
+ # transaction - abort works
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 5, my $lexD = new LexDir($home);
+ ok 6, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 7, my $txn = $env->txn_begin() ;
+ ok 8, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 9, $txn->txn_commit() == 0 ;
+ ok 10, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 11, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 12, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 13, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 14, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 15, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 16, $count == 0 ;
+
+ my $stat = $env->txn_stat() ;
+ ok 17, $stat->{'st_naborts'} == 1 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # transaction - abort works via txnmgr
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 18, my $lexD = new LexDir($home);
+ ok 19, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 20, my $txn_mgr = $env->TxnMgr() ;
+ ok 21, my $txn = $txn_mgr->txn_begin() ;
+ ok 22, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 23, $txn->txn_commit() == 0 ;
+ ok 24, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 25, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 26, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 27, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 28, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 29, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 30, $count == 0 ;
+
+ my $stat = $txn_mgr->txn_stat() ;
+ ok 31, $stat->{'st_naborts'} == 1 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $txn_mgr ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # transaction - commit works
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 32, my $lexD = new LexDir($home);
+ ok 33, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 34, my $txn = $env->txn_begin() ;
+ ok 35, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 36, $txn->txn_commit() == 0 ;
+ ok 37, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 38, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 39, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 40, $count == 3 ;
+ undef $cursor ;
+
+ # now commit the transaction
+ ok 41, $txn->txn_commit() == 0 ;
+
+ $count = 0 ;
+ # sequence forwards
+ ok 42, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 43, $count == 3 ;
+
+ my $stat = $env->txn_stat() ;
+ ok 44, $stat->{'st_naborts'} == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # transaction - commit works via txnmgr
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 45, my $lexD = new LexDir($home);
+ ok 46, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 47, my $txn_mgr = $env->TxnMgr() ;
+ ok 48, my $txn = $txn_mgr->txn_begin() ;
+ ok 49, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 50, $txn->txn_commit() == 0 ;
+ ok 51, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 52, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 53, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 54, $count == 3 ;
+ undef $cursor ;
+
+ # now commit the transaction
+ ok 55, $txn->txn_commit() == 0 ;
+
+ $count = 0 ;
+ # sequence forwards
+ ok 56, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 57, $count == 3 ;
+
+ my $stat = $txn_mgr->txn_stat() ;
+ ok 58, $stat->{'st_naborts'} == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $txn_mgr ;
+ undef $env ;
+ untie %hash ;
+}
+
diff --git a/storage/bdb/perl/BerkeleyDB/t/unknown.t b/storage/bdb/perl/BerkeleyDB/t/unknown.t
new file mode 100644
index 00000000000..f2630b585c0
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/unknown.t
@@ -0,0 +1,176 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..41\n";
+
+my $Dfile = "dbhash.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Unknown -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Txn => "fred" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Unknown -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# check the interface to a rubbish database
+{
+ # first an empty file
+ my $lex = new LexFile $Dfile ;
+ ok 6, writeFile($Dfile, "") ;
+
+ ok 7, ! (new BerkeleyDB::Unknown -Filename => $Dfile);
+
+ # now a non-database file
+ writeFile($Dfile, "\x2af6") ;
+ ok 8, ! (new BerkeleyDB::Unknown -Filename => $Dfile);
+}
+
+# check the interface to a Hash database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a hash database
+ ok 9, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 10, $db->db_put("some key", "some value") == 0 ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ ok 12, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 13, $db->type() == DB_HASH ;
+ ok 14, $db->db_get("some key", $value) == 0 ;
+ ok 15, $value eq "some value" ;
+ ok 16, $db->db_get("key", $value) == 0 ;
+ ok 17, $value eq "value" ;
+
+ my @array ;
+ eval { $db->Tie(\@array)} ;
+ ok 18, $@ =~ /^Tie needs a reference to a hash/ ;
+
+ my %hash ;
+ $db->Tie(\%hash) ;
+ ok 19, $hash{"some key"} eq "some value" ;
+
+}
+
+# check the interface to a Btree database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a hash database
+ ok 20, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 21, $db->db_put("some key", "some value") == 0 ;
+ ok 22, $db->db_put("key", "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ # create a hash database
+ ok 23, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 24, $db->type() == DB_BTREE ;
+ ok 25, $db->db_get("some key", $value) == 0 ;
+ ok 26, $value eq "some value" ;
+ ok 27, $db->db_get("key", $value) == 0 ;
+ ok 28, $value eq "value" ;
+
+
+ my @array ;
+ eval { $db->Tie(\@array)} ;
+ ok 29, $@ =~ /^Tie needs a reference to a hash/ ;
+
+ my %hash ;
+ $db->Tie(\%hash) ;
+ ok 30, $hash{"some key"} eq "some value" ;
+
+
+}
+
+# check the interface to a Recno database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a recno database
+ ok 31, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 32, $db->db_put(0, "some value") == 0 ;
+ ok 33, $db->db_put(1, "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ # create a hash database
+ ok 34, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 35, $db->type() == DB_RECNO ;
+ ok 36, $db->db_get(0, $value) == 0 ;
+ ok 37, $value eq "some value" ;
+ ok 38, $db->db_get(1, $value) == 0 ;
+ ok 39, $value eq "value" ;
+
+
+ my %hash ;
+ eval { $db->Tie(\%hash)} ;
+ ok 40, $@ =~ /^Tie needs a reference to an array/ ;
+
+ my @array ;
+ $db->Tie(\@array) ;
+ ok 41, $array[1] eq "value" ;
+
+
+}
+
+# check i/f to text
diff --git a/storage/bdb/perl/BerkeleyDB/t/util.pm b/storage/bdb/perl/BerkeleyDB/t/util.pm
new file mode 100644
index 00000000000..1a1449751eb
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/t/util.pm
@@ -0,0 +1,220 @@
+package util ;
+
+package main ;
+
+use strict ;
+use BerkeleyDB ;
+use File::Path qw(rmtree);
+use vars qw(%DB_errors $FA) ;
+
+$| = 1;
+
+%DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+) ;
+
+# full tied array support started in Perl 5.004_57
+# just double check.
+$FA = 0 ;
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+{
+ package LexFile ;
+
+ use vars qw( $basename @files ) ;
+ $basename = "db0000" ;
+
+ sub new
+ {
+ my $self = shift ;
+ #my @files = () ;
+ foreach (@_)
+ {
+ $_ = $basename ;
+ unlink $basename ;
+ push @files, $basename ;
+ ++ $basename ;
+ }
+ bless [ @files ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ #unlink @{ $self } ;
+ }
+
+ END
+ {
+ foreach (@files) { unlink $_ }
+ }
+}
+
+
+{
+ package LexDir ;
+
+ use File::Path qw(rmtree);
+
+ use vars qw( $basename %dirs ) ;
+
+ sub new
+ {
+ my $self = shift ;
+ my $dir = shift ;
+
+ rmtree $dir if -e $dir ;
+
+ mkdir $dir, 0777 or return undef ;
+
+ return bless [ $dir ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ my $dir = $self->[0];
+ #rmtree $dir;
+ $dirs{$dir} ++ ;
+ }
+
+ END
+ {
+ foreach (keys %dirs) {
+ rmtree $_ if -d $_ ;
+ }
+ }
+
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+sub writeFile
+{
+ my $name = shift ;
+ open(FH, ">$name") or return 0 ;
+ print FH @_ ;
+ close FH ;
+ return 1 ;
+}
+
+sub touch
+{
+ my $file = shift ;
+ open(CAT,">$file") || die "Cannot open $file:$!";
+ close(CAT);
+}
+
+sub joiner
+{
+ my $db = shift ;
+ my $sep = shift ;
+ my ($k, $v) = (0, "") ;
+ my @data = () ;
+
+ my $cursor = $db->db_cursor() or return () ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ push @data, $v ;
+ }
+
+ (scalar(@data), join($sep, @data)) ;
+}
+
+sub countRecords
+{
+ my $db = shift ;
+ my ($k, $v) = (0,0) ;
+ my ($count) = 0 ;
+ my ($cursor) = $db->db_cursor() ;
+ #for ($status = $cursor->c_get($k, $v, DB_FIRST) ;
+# $status == 0 ;
+# $status = $cursor->c_get($k, $v, DB_NEXT) )
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { ++ $count }
+
+ return $count ;
+}
+
+sub addData
+{
+ my $db = shift ;
+ my @data = @_ ;
+ die "addData odd data\n" if @data % 2 != 0 ;
+ my ($k, $v) ;
+ my $ret = 0 ;
+ while (@data) {
+ $k = shift @data ;
+ $v = shift @data ;
+ $ret += $db->db_put($k, $v) ;
+ }
+
+ return ($ret == 0) ;
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+
+1;
diff --git a/storage/bdb/perl/BerkeleyDB/typemap b/storage/bdb/perl/BerkeleyDB/typemap
new file mode 100644
index 00000000000..81ead2c36d9
--- /dev/null
+++ b/storage/bdb/perl/BerkeleyDB/typemap
@@ -0,0 +1,275 @@
+# typemap for Perl 5 interface to Berkeley DB version 2 & 3
+#
+# SCCS: %I%, %G%
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+#
+#################################### DB SECTION
+#
+#
+
+void * T_PV
+u_int T_U_INT
+u_int32_t T_U_INT
+const char * T_PV_NULL
+PV_or_NULL T_PV_NULL
+IO_or_NULL T_IO_NULL
+
+AV * T_AV
+
+BerkeleyDB T_PTROBJ
+BerkeleyDB::Common T_PTROBJ_AV
+BerkeleyDB::Hash T_PTROBJ_AV
+BerkeleyDB::Btree T_PTROBJ_AV
+BerkeleyDB::Recno T_PTROBJ_AV
+BerkeleyDB::Queue T_PTROBJ_AV
+BerkeleyDB::Cursor T_PTROBJ_AV
+BerkeleyDB::TxnMgr T_PTROBJ_AV
+BerkeleyDB::Txn T_PTROBJ_AV
+BerkeleyDB::Log T_PTROBJ_AV
+BerkeleyDB::Lock T_PTROBJ_AV
+BerkeleyDB::Env T_PTROBJ_AV
+
+BerkeleyDB::Raw T_RAW
+BerkeleyDB::Common::Raw T_RAW
+BerkeleyDB::Hash::Raw T_RAW
+BerkeleyDB::Btree::Raw T_RAW
+BerkeleyDB::Recno::Raw T_RAW
+BerkeleyDB::Queue::Raw T_RAW
+BerkeleyDB::Cursor::Raw T_RAW
+BerkeleyDB::TxnMgr::Raw T_RAW
+BerkeleyDB::Txn::Raw T_RAW
+BerkeleyDB::Log::Raw T_RAW
+BerkeleyDB::Lock::Raw T_RAW
+BerkeleyDB::Env::Raw T_RAW
+
+BerkeleyDB::Env::Inner T_INNER
+BerkeleyDB::Common::Inner T_INNER
+BerkeleyDB::Txn::Inner T_INNER
+BerkeleyDB::TxnMgr::Inner T_INNER
+# BerkeleyDB__Env T_PTR
+DBT T_dbtdatum
+DBT_OPT T_dbtdatum_opt
+DBT_B T_dbtdatum_btree
+DBTKEY T_dbtkeydatum
+DBTKEY_B T_dbtkeydatum_btree
+DBTYPE T_U_INT
+DualType T_DUAL
+BerkeleyDB_type * T_IV
+BerkeleyDB_ENV_type * T_IV
+BerkeleyDB_TxnMgr_type * T_IV
+BerkeleyDB_Txn_type * T_IV
+BerkeleyDB__Cursor_type * T_IV
+DB * T_IV
+
+INPUT
+
+T_AV
+ if (SvROK($arg) && SvTYPE(SvRV($arg)) == SVt_PVAV)
+ /* if (sv_isa($arg, \"${ntype}\")) */
+ $var = (AV*)SvRV($arg);
+ else
+ croak(\"$var is not an array reference\")
+
+T_RAW
+ $var = INT2PTR($type,SvIV($arg)
+
+T_U_INT
+ $var = SvUV($arg)
+
+T_SV_REF_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV *)GetInternalObject($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_HV_REF_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ HV * hv = (HV *)GetInternalObject($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_HV_REF
+ if (sv_derived_from($arg, \"${ntype}\")) {
+ HV * hv = (HV *)GetInternalObject($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+
+T_P_REF
+ if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+
+T_INNER
+ {
+ HV * hv = (HV *)SvRV($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = INT2PTR($type, tmp);
+ }
+
+T_PV_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else {
+ $var = ($type)SvPV($arg,PL_na) ;
+ if (PL_na == 0)
+ $var = NULL ;
+ }
+
+T_IO_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else
+ $var = IoOFP(sv_2io($arg))
+
+T_PTROBJ_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_PTROBJ_SELF
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_PTROBJ_AV
+ if ($arg == &PL_sv_undef || $arg == NULL)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV(getInnerObject($arg)) ;
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_dbtkeydatum
+ DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->recno_or_queue) {
+ Value = GetRecnoKey(db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(db_recno_t);
+ }
+ else {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+T_dbtkeydatum_btree
+ DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->recno_or_queue ||
+ (db->type == DB_BTREE && flagSet(DB_SET_RECNO))) {
+ Value = GetRecnoKey(db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(db_recno_t);
+ }
+ else {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+T_dbtdatum
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBT_clear($var) ;
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+
+T_dbtdatum_opt
+ DBT_clear($var) ;
+ if (flagSet(DB_GET_BOTH)) {
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+ }
+
+T_dbtdatum_btree
+ DBT_clear($var) ;
+ if (flagSet(DB_GET_BOTH)) {
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+ }
+
+
+OUTPUT
+
+T_RAW
+ sv_setiv($arg, PTR2IV($var));
+
+T_SV_REF_NULL
+ sv_setiv($arg, PTR2IV($var));
+
+T_HV_REF_NULL
+ sv_setiv($arg, PTR2IV($var));
+
+T_HV_REF
+ sv_setiv($arg, PTR2IV($var));
+
+T_P_REF
+ sv_setiv($arg, PTR2IV($var));
+
+T_DUAL
+ setDUALerrno($arg, $var) ;
+
+T_U_INT
+ sv_setuv($arg, (UV)$var);
+
+T_PV_NULL
+ sv_setpv((SV*)$arg, $var);
+
+T_dbtkeydatum_btree
+ OutputKey_B($arg, $var)
+T_dbtkeydatum
+ OutputKey($arg, $var)
+T_dbtdatum
+ OutputValue($arg, $var)
+T_dbtdatum_opt
+ OutputValue($arg, $var)
+T_dbtdatum_btree
+ OutputValue_B($arg, $var)
+
+T_PTROBJ_NULL
+ sv_setref_pv($arg, \"${ntype}\", (void*)$var);
+
+T_PTROBJ_SELF
+ sv_setref_pv($arg, self, (void*)$var);
diff --git a/storage/bdb/perl/DB_File/Changes b/storage/bdb/perl/DB_File/Changes
new file mode 100644
index 00000000000..7883cbdfef0
--- /dev/null
+++ b/storage/bdb/perl/DB_File/Changes
@@ -0,0 +1,434 @@
+
+1.805 1st September 2002
+
+ * Added support to allow DB_File to build with Berkeley DB 4.1.X
+
+ * Tightened up the test harness to test that calls to untie don't generate
+ the "untie attempted while %d inner references still exist" warning.
+
+ * added code to guard against calling the callbacks (compare,hash & prefix)
+ recursively.
+
+ * passing undef for the flags and/or mode when opening a database could cause
+ a "Use of uninitialized value in subroutine entry" warning. Now silenced.
+
+ * DBM filter code beefed up to cope with read-only $_.
+
+1.804 2nd June 2002
+
+ * Perl core patch 14939 added a new warning to "splice". This broke the
+ db-recno test harness. Fixed.
+
+ * merged core patches 16502 & 16540.
+
+1.803 1st March 2002
+
+ * Fixed a problem with db-btree.t where it complained about an "our"
+ variable redeclaration.
+
+ * FETCH, STORE & DELETE don't map the flags parameter into the
+ equivalent Berkeley DB function anymore.
+
+1.802 6th January 2002
+
+ * The message about some test failing in db-recno.t had the wrong test
+ numbers. Fixed.
+
+ * merged core patch 13942.
+
+1.801 26th November 2001
+
+ * Fixed typo in Makefile.PL
+
+ * Added "clean" attribute to Makefile.PL
+
+1.800 23rd November 2001
+
+ * use ppport.h for perl backward compatibility code.
+
+ * use new ExtUtils::Constant module to generate XS constants.
+
+ * upgrade Makefile.PL upgrade/downgrade code to toggle "our" with
+ "use vars"
+
+1.79 22nd October 2001
+
+ * Added a "local $SIG{__DIE__}" inside the eval that checks for
+ the presence of XSLoader as suggested by Andrew Hryckowin.
+
+ * merged core patch 12277.
+
+ * Changed NEXTKEY to not initialise the input key. It isn't used anyway.
+
+1.79 22nd October 2001
+
+ * Fixed test harness for cygwin
+
+1.78 30th July 2001
+
+ * the test in Makefile.PL for AIX used -plthreads. Should have been
+ -lpthreads
+
+ * merged Core patches
+ 10372, 10335, 10372, 10534, 10549, 10643, 11051, 11194, 11432
+
+ * added documentation patch regarding duplicate keys from Andrew Johnson
+
+
+1.77 26th April 2001
+
+ * AIX is reported to need -lpthreads, so Makefile.PL now checks for
+ AIX and adds it to the link options.
+
+ * Minor documentation updates.
+
+ * Merged Core patch 9176
+
+ * Added a patch from Edward Avis that adds support for splice with
+ recno databases.
+
+ * Modified Makefile.PL to only enable the warnings pragma if using perl
+ 5.6.1 or better.
+
+1.76 15th January 2001
+
+ * Added instructions for using LD_PRELOAD to get Berkeley DB 2.x to work
+ with DB_File on Linux. Thanks to Norbert Bollow for sending details of
+ this approach.
+
+
+1.75 17th December 2000
+
+ * Fixed perl core patch 7703
+
+ * Added support to allow DB_File to be built with Berkeley DB 3.2 --
+ btree_compare, btree_prefix and hash_cb needed to be changed.
+
+ * Updated dbinfo to support Berkeley DB 3.2 file format changes.
+
+
+1.74 10th December 2000
+
+ * A "close" call in DB_File.xs needed parenthesised to stop win32 from
+ thinking it was one of its macros.
+
+ * Updated dbinfo to support Berkeley DB 3.1 file format changes.
+
+ * DB_File.pm &amp; the test harness now use the warnings pragma (when
+ available).
+
+ * Included Perl core patch 7703 -- size argument for hash_cb is different
+ for Berkeley DB 3.x
+
+ * Included Perl core patch 7801 -- Give __getBerkeleyDBInfo the ANSI C
+ treatment.
+
+ * @a = () produced the warning 'Argument "" isn't numeric in entersub'
+ This has been fixed. Thanks to Edward Avis for spotting this bug.
+
+ * Added note about building under Linux. Included patches.
+
+ * Included Perl core patch 8068 -- fix for bug 20001013.009
+ When run with warnings enabled "$hash{XX} = undef " produced an
+ "Uninitialized value" warning. This has been fixed.
+
+1.73 31st May 2000
+
+ * Added support in version.c for building with threaded Perl.
+
+ * Berkeley DB 3.1 has reenabled support for null keys. The test
+ harness has been updated to reflect this.
+
+1.72 16th January 2000
+
+ * Added hints/sco.pl
+
+ * The module will now use XSLoader when it is available. When it
+ isn't it will use DynaLoader.
+
+ * The locking section in DB_File.pm has been discredited. Many thanks
+ to David Harris for spotting the underlying problem, contributing
+ the updates to the documentation and writing DB_File::Lock (available
+ on CPAN).
+
+1.71 7th September 1999
+
+ * Fixed a bug that prevented 1.70 from compiling under win32
+
+ * Updated to support Berkeley DB 3.x
+
+ * Updated dbinfo for Berkeley DB 3.x file formats.
+
+1.70 4th August 1999
+
+ * Initialise $DB_File::db_ver and $DB_File::db_version with
+ GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
+
+ * Added a BOOT check to test for equivalent versions of db.h &
+ libdb.a/so.
+
+1.69 3rd August 1999
+
+ * fixed a bug in push -- DB_APPEND wasn't working properly.
+
+ * Fixed the R_SETCURSOR bug introduced in 1.68
+
+ * Added a new Perl variable $DB_File::db_ver
+
+1.68 22nd July 1999
+
+ * Merged changes from 5.005_58
+
+ * Fixed a bug in R_IBEFORE &amp; R_IAFTER processing in Berkeley DB
+ 2 databases.
+
+ * Added some of the examples in the POD into the test harness.
+
+1.67 6th June 1999
+
+ * Added DBM Filter documentation to DB_File.pm
+
+ * Fixed DBM Filter code to work with 5.004
+
+ * A few instances of newSVpvn were used in 1.66. This isn't available in
+ Perl 5.004_04 or earlier. Replaced with newSVpv.
+
+1.66 15th March 1999
+
+ * Added DBM Filter code
+
+1.65 6th March 1999
+
+ * Fixed a bug in the recno PUSH logic.
+ * The BOOT version check now needs 2.3.4 when using Berkeley DB version 2
+
+1.64 21st February 1999
+
+ * Tidied the 1.x to 2.x flag mapping code.
+ * Added a patch from Mark Kettenis <kettenis@wins.uva.nl> to fix a flag
+ mapping problem with O_RDONLY on the Hurd
+ * Updated the message that db-recno.t prints when tests 51, 53 or 55 fail.
+
+1.63 19th December 1998
+
+ * Fix to allow DB 2.6.x to build with DB_File
+ * Documentation updated to use push,pop etc in the RECNO example &
+ to include the find_dup & del_dup methods.
+
+1.62 30th November 1998
+
+ Added hints/dynixptx.pl.
+ Fixed typemap -- 1.61 used PL_na instead of na
+
+1.61 19th November 1998
+
+ Added a note to README about how to build Berkeley DB 2.x when
+ using HP-UX.
+ Minor modifications to get the module to build with DB 2.5.x
+ Fixed a typo in the definition of O_RDONLY, courtesy of Mark Kettenis.
+
+1.60
+ Changed the test to check for full tied array support
+
+1.59
+ Updated the license section.
+
+ Berkeley DB 2.4.10 disallows zero length keys. Tests 32 & 42 in
+ db-btree.t and test 27 in db-hash.t failed because of this change.
+ Those tests have been zapped.
+
+ Added dbinfo to the distribution.
+
+1.58
+ Tied Array support was enhanced in Perl 5.004_57. DB_File now
+ supports PUSH,POP,SHIFT,UNSHIFT & STORESIZE.
+
+ Fixed a problem with the use of sv_setpvn. When the size is
+ specified as 0, it does a strlen on the data. This was ok for DB
+ 1.x, but isn't for DB 2.x.
+
+1.57
+ If Perl has been compiled with Threads support, the symbol op will be
+ defined. This clashes with a field name in db.h, so it needs to be
+ #undef'ed before db.h is included.
+
+1.56
+ Documented the Solaris 2.5 mutex bug
+
+1.55
+ Merged 1.16 changes.
+
+1.54
+
+ Fixed a small bug in the test harness when run under win32
+ The emulation of fd when using DB 2.x was busted.
+
+1.53
+
+ Added DB_RENUMBER to flags for recno.
+
+1.52
+
+ Patch from Nick Ing-Simmons now allows DB_File to build on NT.
+ Merged 1.15 patch.
+
+1.51
+
+ Fixed the test harness so that it doesn't expect DB_File to have
+ been installed by the main Perl build.
+
+
+ Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
+
+1.50
+
+ DB_File can now build with either DB 1.x or 2.x, but not both at
+ the same time.
+
+1.16
+
+ A harmless looking tab was causing Makefile.PL to fail on AIX 3.2.5
+
+ Small fix for the AIX strict C compiler XLC which doesn't like
+ __attribute__ being defined via proto.h and redefined via db.h. Fix
+ courtesy of Jarkko Hietaniemi.
+
+1.15
+
+ Patch from Gisle Aas <gisle@aas.no> to suppress "use of undefined
+ value" warning with db_get and db_seq.
+
+ Patch from Gisle Aas <gisle@aas.no> to make DB_File export only the
+ O_* constants from Fcntl.
+
+ Removed the DESTROY method from the DB_File::HASHINFO module.
+
+ Previously DB_File hard-wired the class name of any object that it
+ created to "DB_File". This makes sub-classing difficult. Now
+ DB_File creates objects in the namespace of the package it has been
+ inherited into.
+
+
+1.14
+
+ Made it illegal to tie an associative array to a RECNO database and
+ an ordinary array to a HASH or BTREE database.
+
+1.13
+
+ Minor changes to DB_File.xs and DB_File.pm
+
+1.12
+
+ Documented the incompatibility with version 2 of Berkeley DB.
+
+1.11
+
+ Documented the untie gotcha.
+
+1.10
+
+ Fixed fd method so that it still returns -1 for in-memory files
+ when db 1.86 is used.
+
+1.09
+
+ Minor bug fix in DB_File::HASHINFO, DB_File::RECNOINFO and
+ DB_File::BTREEINFO.
+
+ Changed default mode to 0666.
+
+1.08
+
+ Documented operation of bval.
+
+1.07
+
+ Fixed bug with RECNO, where bval wasn't defaulting to "\n".
+
+1.06
+
+ Minor namespace cleanup: Localized PrintBtree.
+
+1.05
+
+ Made all scripts in the documentation strict and -w clean.
+
+ Added logic to DB_File.xs to allow the module to be built after
+ Perl is installed.
+
+1.04
+
+ Minor documentation changes.
+
+ Fixed a bug in hash_cb. Patches supplied by Dave Hammen,
+ <hammen@gothamcity.jsc.nasa.govt>.
+
+ Fixed a bug with the constructors for DB_File::HASHINFO,
+ DB_File::BTREEINFO and DB_File::RECNOINFO. Also tidied up the
+ constructors to make them -w clean.
+
+ Reworked part of the test harness to be more locale friendly.
+
+1.03
+
+ Documentation update.
+
+ DB_File now imports the constants (O_RDWR, O_CREAT etc.) from Fcntl
+ automatically.
+
+ The standard hash function exists is now supported.
+
+ Modified the behavior of get_dup. When it returns an associative
+ array, the value is the count of the number of matching BTREE
+ values.
+
+1.02
+
+ Merged OS/2 specific code into DB_File.xs
+
+ Removed some redundant code in DB_File.xs.
+
+ Documentation update.
+
+ Allow negative subscripts with RECNO interface.
+
+ Changed the default flags from O_RDWR to O_CREAT|O_RDWR.
+
+ The example code which showed how to lock a database needed a call
+ to sync added. Without it the resultant database file was empty.
+
+ Added get_dup method.
+
+1.01
+
+ Fixed a core dump problem with SunOS.
+
+ The return value from TIEHASH wasn't set to NULL when dbopen
+ returned an error.
+
+1.0
+
+ DB_File has been in use for over a year. To reflect that, the
+ version number has been incremented to 1.0.
+
+ Added complete support for multiple concurrent callbacks.
+
+ Using the push method on an empty list didn't work properly. This
+ has been fixed.
+
+0.3
+
+ Added prototype support for multiple btree compare callbacks.
+
+0.2
+
+ When DB_File is opening a database file it no longer terminates the
+ process if dbopen returned an error. This allows file protection
+ errors to be caught at run time. Thanks to Judith Grass
+ <grass@cybercash.com> for spotting the bug.
+
+0.1
+
+ First Release.
+
diff --git a/storage/bdb/perl/DB_File/DB_File.pm b/storage/bdb/perl/DB_File/DB_File.pm
new file mode 100644
index 00000000000..49004ffa148
--- /dev/null
+++ b/storage/bdb/perl/DB_File/DB_File.pm
@@ -0,0 +1,2291 @@
+# DB_File.pm -- Perl 5 interface to Berkeley DB
+#
+# written by Paul Marquess (Paul.Marquess@btinternet.com)
+# last modified 1st September 2002
+# version 1.805
+#
+# Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+
+package DB_File::HASHINFO ;
+
+require 5.00404;
+
+use warnings;
+use strict;
+use Carp;
+require Tie::Hash;
+@DB_File::HASHINFO::ISA = qw(Tie::Hash);
+
+sub new
+{
+ my $pkg = shift ;
+ my %x ;
+ tie %x, $pkg ;
+ bless \%x, $pkg ;
+}
+
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => {
+ bsize => 1,
+ ffactor => 1,
+ nelem => 1,
+ cachesize => 1,
+ hash => 2,
+ lorder => 1,
+ },
+ GOT => {}
+ }, $pkg ;
+}
+
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ return $self->{GOT}{$key} if exists $self->{VALID}{$key} ;
+
+ my $pkg = ref $self ;
+ croak "${pkg}::FETCH - Unknown element '$key'" ;
+}
+
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ my $type = $self->{VALID}{$key};
+
+ if ( $type )
+ {
+ croak "Key '$key' not associated with a code reference"
+ if $type == 2 && !ref $value && ref $value ne 'CODE';
+ $self->{GOT}{$key} = $value ;
+ return ;
+ }
+
+ my $pkg = ref $self ;
+ croak "${pkg}::STORE - Unknown element '$key'" ;
+}
+
+sub DELETE
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ if ( exists $self->{VALID}{$key} )
+ {
+ delete $self->{GOT}{$key} ;
+ return ;
+ }
+
+ my $pkg = ref $self ;
+ croak "DB_File::HASHINFO::DELETE - Unknown element '$key'" ;
+}
+
+sub EXISTS
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ exists $self->{VALID}{$key} ;
+}
+
+sub NotHere
+{
+ my $self = shift ;
+ my $method = shift ;
+
+ croak ref($self) . " does not define the method ${method}" ;
+}
+
+sub FIRSTKEY { my $self = shift ; $self->NotHere("FIRSTKEY") }
+sub NEXTKEY { my $self = shift ; $self->NotHere("NEXTKEY") }
+sub CLEAR { my $self = shift ; $self->NotHere("CLEAR") }
+
+package DB_File::RECNOINFO ;
+
+use warnings;
+use strict ;
+
+@DB_File::RECNOINFO::ISA = qw(DB_File::HASHINFO) ;
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => { map {$_, 1}
+ qw( bval cachesize psize flags lorder reclen bfname )
+ },
+ GOT => {},
+ }, $pkg ;
+}
+
+package DB_File::BTREEINFO ;
+
+use warnings;
+use strict ;
+
+@DB_File::BTREEINFO::ISA = qw(DB_File::HASHINFO) ;
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => {
+ flags => 1,
+ cachesize => 1,
+ maxkeypage => 1,
+ minkeypage => 1,
+ psize => 1,
+ compare => 2,
+ prefix => 2,
+ lorder => 1,
+ },
+ GOT => {},
+ }, $pkg ;
+}
+
+
+package DB_File ;
+
+use warnings;
+use strict;
+our ($VERSION, @ISA, @EXPORT, $AUTOLOAD, $DB_BTREE, $DB_HASH, $DB_RECNO);
+our ($db_version, $use_XSLoader, $splice_end_array);
+use Carp;
+
+
+$VERSION = "1.805" ;
+
+{
+ local $SIG{__WARN__} = sub {$splice_end_array = "@_";};
+ my @a =(1); splice(@a, 3);
+ $splice_end_array =
+ ($splice_end_array =~ /^splice\(\) offset past end of array at /);
+}
+
+#typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE;
+$DB_BTREE = new DB_File::BTREEINFO ;
+$DB_HASH = new DB_File::HASHINFO ;
+$DB_RECNO = new DB_File::RECNOINFO ;
+
+require Tie::Hash;
+require Exporter;
+use AutoLoader;
+BEGIN {
+ $use_XSLoader = 1 ;
+ { local $SIG{__DIE__} ; eval { require XSLoader } ; }
+
+ if ($@) {
+ $use_XSLoader = 0 ;
+ require DynaLoader;
+ @ISA = qw(DynaLoader);
+ }
+}
+
+push @ISA, qw(Tie::Hash Exporter);
+@EXPORT = qw(
+ $DB_BTREE $DB_HASH $DB_RECNO
+
+ BTREEMAGIC
+ BTREEVERSION
+ DB_LOCK
+ DB_SHMEM
+ DB_TXN
+ HASHMAGIC
+ HASHVERSION
+ MAX_PAGE_NUMBER
+ MAX_PAGE_OFFSET
+ MAX_REC_NUMBER
+ RET_ERROR
+ RET_SPECIAL
+ RET_SUCCESS
+ R_CURSOR
+ R_DUP
+ R_FIRST
+ R_FIXEDLEN
+ R_IAFTER
+ R_IBEFORE
+ R_LAST
+ R_NEXT
+ R_NOKEY
+ R_NOOVERWRITE
+ R_PREV
+ R_RECNOSYNC
+ R_SETCURSOR
+ R_SNAPSHOT
+ __R_UNUSED
+
+);
+
+sub AUTOLOAD {
+ my($constname);
+ ($constname = $AUTOLOAD) =~ s/.*:://;
+ my ($error, $val) = constant($constname);
+ Carp::croak $error if $error;
+ no strict 'refs';
+ *{$AUTOLOAD} = sub { $val };
+ goto &{$AUTOLOAD};
+}
+
+
+eval {
+ # Make all Fcntl O_XXX constants available for importing
+ require Fcntl;
+ my @O = grep /^O_/, @Fcntl::EXPORT;
+ Fcntl->import(@O); # first we import what we want to export
+ push(@EXPORT, @O);
+};
+
+if ($use_XSLoader)
+ { XSLoader::load("DB_File", $VERSION)}
+else
+ { bootstrap DB_File $VERSION }
+
+# Preloaded methods go here. Autoload methods go after __END__, and are
+# processed by the autosplit program.
+
+sub tie_hash_or_array
+{
+ my (@arg) = @_ ;
+ my $tieHASH = ( (caller(1))[3] =~ /TIEHASH/ ) ;
+
+ $arg[4] = tied %{ $arg[4] }
+ if @arg >= 5 && ref $arg[4] && $arg[4] =~ /=HASH/ && tied %{ $arg[4] } ;
+
+ $arg[2] = O_CREAT()|O_RDWR() if @arg >=3 && ! defined $arg[2];
+ $arg[3] = 0666 if @arg >=4 && ! defined $arg[3];
+
+ # make recno in Berkeley DB version 2 work like recno in version 1.
+ if ($db_version > 1 and defined $arg[4] and $arg[4] =~ /RECNO/ and
+ $arg[1] and ! -e $arg[1]) {
+ open(FH, ">$arg[1]") or return undef ;
+ close FH ;
+ chmod $arg[3] ? $arg[3] : 0666 , $arg[1] ;
+ }
+
+ DoTie_($tieHASH, @arg) ;
+}
+
+sub TIEHASH
+{
+ tie_hash_or_array(@_) ;
+}
+
+sub TIEARRAY
+{
+ tie_hash_or_array(@_) ;
+}
+
+sub CLEAR
+{
+ my $self = shift;
+ my $key = 0 ;
+ my $value = "" ;
+ my $status = $self->seq($key, $value, R_FIRST());
+ my @keys;
+
+ while ($status == 0) {
+ push @keys, $key;
+ $status = $self->seq($key, $value, R_NEXT());
+ }
+ foreach $key (reverse @keys) {
+ my $s = $self->del($key);
+ }
+}
+
+sub EXTEND { }
+
+sub STORESIZE
+{
+ my $self = shift;
+ my $length = shift ;
+ my $current_length = $self->length() ;
+
+ if ($length < $current_length) {
+ my $key ;
+ for ($key = $current_length - 1 ; $key >= $length ; -- $key)
+ { $self->del($key) }
+ }
+ elsif ($length > $current_length) {
+ $self->put($length-1, "") ;
+ }
+}
+
+
+sub SPLICE
+{
+ my $self = shift;
+ my $offset = shift;
+ if (not defined $offset) {
+ warnings::warnif('uninitialized', 'Use of uninitialized value in splice');
+ $offset = 0;
+ }
+
+ my $length = @_ ? shift : 0;
+ # Carping about definedness comes _after_ the OFFSET sanity check.
+ # This is so we get the same error messages as Perl's splice().
+ #
+
+ my @list = @_;
+
+ my $size = $self->FETCHSIZE();
+
+ # 'If OFFSET is negative then it start that far from the end of
+ # the array.'
+ #
+ if ($offset < 0) {
+ my $new_offset = $size + $offset;
+ if ($new_offset < 0) {
+ die "Modification of non-creatable array value attempted, "
+ . "subscript $offset";
+ }
+ $offset = $new_offset;
+ }
+
+ if (not defined $length) {
+ warnings::warnif('uninitialized', 'Use of uninitialized value in splice');
+ $length = 0;
+ }
+
+ if ($offset > $size) {
+ $offset = $size;
+ warnings::warnif('misc', 'splice() offset past end of array')
+ if $splice_end_array;
+ }
+
+ # 'If LENGTH is omitted, removes everything from OFFSET onward.'
+ if (not defined $length) {
+ $length = $size - $offset;
+ }
+
+ # 'If LENGTH is negative, leave that many elements off the end of
+ # the array.'
+ #
+ if ($length < 0) {
+ $length = $size - $offset + $length;
+
+ if ($length < 0) {
+ # The user must have specified a length bigger than the
+ # length of the array passed in. But perl's splice()
+ # doesn't catch this, it just behaves as for length=0.
+ #
+ $length = 0;
+ }
+ }
+
+ if ($length > $size - $offset) {
+ $length = $size - $offset;
+ }
+
+ # $num_elems holds the current number of elements in the database.
+ my $num_elems = $size;
+
+ # 'Removes the elements designated by OFFSET and LENGTH from an
+ # array,'...
+ #
+ my @removed = ();
+ foreach (0 .. $length - 1) {
+ my $old;
+ my $status = $self->get($offset, $old);
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on get($offset, \$old)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ": error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+ push @removed, $old;
+
+ $status = $self->del($offset);
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on del($offset)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ": error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+
+ -- $num_elems;
+ }
+
+ # ...'and replaces them with the elements of LIST, if any.'
+ my $pos = $offset;
+ while (defined (my $elem = shift @list)) {
+ my $old_pos = $pos;
+ my $status;
+ if ($pos >= $num_elems) {
+ $status = $self->put($pos, $elem);
+ }
+ else {
+ $status = $self->put($pos, $elem, $self->R_IBEFORE);
+ }
+
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on put($pos, $elem, ...)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ", error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+
+ die "pos unexpectedly changed from $old_pos to $pos with R_IBEFORE"
+ if $old_pos != $pos;
+
+ ++ $pos;
+ ++ $num_elems;
+ }
+
+ if (wantarray) {
+ # 'In list context, returns the elements removed from the
+ # array.'
+ #
+ return @removed;
+ }
+ elsif (defined wantarray and not wantarray) {
+ # 'In scalar context, returns the last element removed, or
+ # undef if no elements are removed.'
+ #
+ if (@removed) {
+ my $last = pop @removed;
+ return "$last";
+ }
+ else {
+ return undef;
+ }
+ }
+ elsif (not defined wantarray) {
+ # Void context
+ }
+ else { die }
+}
+sub ::DB_File::splice { &SPLICE }
+
+sub find_dup
+{
+ croak "Usage: \$db->find_dup(key,value)\n"
+ unless @_ == 3 ;
+
+ my $db = shift ;
+ my ($origkey, $value_wanted) = @_ ;
+ my ($key, $value) = ($origkey, 0);
+ my ($status) = 0 ;
+
+ for ($status = $db->seq($key, $value, R_CURSOR() ) ;
+ $status == 0 ;
+ $status = $db->seq($key, $value, R_NEXT() ) ) {
+
+ return 0 if $key eq $origkey and $value eq $value_wanted ;
+ }
+
+ return $status ;
+}
+
+sub del_dup
+{
+ croak "Usage: \$db->del_dup(key,value)\n"
+ unless @_ == 3 ;
+
+ my $db = shift ;
+ my ($key, $value) = @_ ;
+ my ($status) = $db->find_dup($key, $value) ;
+ return $status if $status != 0 ;
+
+ $status = $db->del($key, R_CURSOR() ) ;
+ return $status ;
+}
+
+sub get_dup
+{
+ croak "Usage: \$db->get_dup(key [,flag])\n"
+ unless @_ == 2 or @_ == 3 ;
+
+ my $db = shift ;
+ my $key = shift ;
+ my $flag = shift ;
+ my $value = 0 ;
+ my $origkey = $key ;
+ my $wantarray = wantarray ;
+ my %values = () ;
+ my @values = () ;
+ my $counter = 0 ;
+ my $status = 0 ;
+
+ # iterate through the database until either EOF ($status == 0)
+ # or a different key is encountered ($key ne $origkey).
+ for ($status = $db->seq($key, $value, R_CURSOR()) ;
+ $status == 0 and $key eq $origkey ;
+ $status = $db->seq($key, $value, R_NEXT()) ) {
+
+ # save the value or count number of matches
+ if ($wantarray) {
+ if ($flag)
+ { ++ $values{$value} }
+ else
+ { push (@values, $value) }
+ }
+ else
+ { ++ $counter }
+
+ }
+
+ return ($wantarray ? ($flag ? %values : @values) : $counter) ;
+}
+
+
+1;
+__END__
+
+=head1 NAME
+
+DB_File - Perl5 access to Berkeley DB version 1.x
+
+=head1 SYNOPSIS
+
+ use DB_File;
+
+ [$X =] tie %hash, 'DB_File', [$filename, $flags, $mode, $DB_HASH] ;
+ [$X =] tie %hash, 'DB_File', $filename, $flags, $mode, $DB_BTREE ;
+ [$X =] tie @array, 'DB_File', $filename, $flags, $mode, $DB_RECNO ;
+
+ $status = $X->del($key [, $flags]) ;
+ $status = $X->put($key, $value [, $flags]) ;
+ $status = $X->get($key, $value [, $flags]) ;
+ $status = $X->seq($key, $value, $flags) ;
+ $status = $X->sync([$flags]) ;
+ $status = $X->fd ;
+
+ # BTREE only
+ $count = $X->get_dup($key) ;
+ @list = $X->get_dup($key) ;
+ %list = $X->get_dup($key, 1) ;
+ $status = $X->find_dup($key, $value) ;
+ $status = $X->del_dup($key, $value) ;
+
+ # RECNO only
+ $a = $X->length;
+ $a = $X->pop ;
+ $X->push(list);
+ $a = $X->shift;
+ $X->unshift(list);
+ @r = $X->splice(offset, length, elements);
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ untie %hash ;
+ untie @array ;
+
+=head1 DESCRIPTION
+
+B<DB_File> is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 1.x (if you have a newer
+version of DB, see L<Using DB_File with Berkeley DB version 2 or greater>).
+It is assumed that you have a copy of the Berkeley DB manual pages at
+hand when reading this documentation. The interface defined here
+mirrors the Berkeley DB interface closely.
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. B<DB_File> provides an interface to all
+three of the database types currently supported by Berkeley DB.
+
+The file types are:
+
+=over 5
+
+=item B<DB_HASH>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using DB_HASH are not compatible with any of the
+other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most
+applications, is built into Berkeley DB. If you do need to use your own
+hashing algorithm it is possible to write your own in Perl and have
+B<DB_File> use it instead.
+
+=item B<DB_BTREE>
+
+The btree format allows arbitrary key/value pairs to be stored in a
+sorted, balanced binary tree.
+
+As with the DB_HASH format, it is possible to provide a user defined
+Perl routine to perform the comparison of keys. By default, though, the
+keys are stored in lexical order.
+
+=item B<DB_RECNO>
+
+DB_RECNO allows both fixed-length and variable-length flat text files
+to be manipulated using the same key/value pair interface as in DB_HASH
+and DB_BTREE. In this case the key will consist of a record (line)
+number.
+
+=back
+
+=head2 Using DB_File with Berkeley DB version 2 or greater
+
+Although B<DB_File> is intended to be used with Berkeley DB version 1,
+it can also be used with version 2, 3 or 4. In this case the interface is
+limited to the functionality provided by Berkeley DB 1.x. Anywhere the
+version 2 or greater interface differs, B<DB_File> arranges for it to work
+like version 1. This feature allows B<DB_File> scripts that were built
+with version 1 to be migrated to version 2 or greater without any changes.
+
+If you want to make use of the new features available in Berkeley DB
+2.x or greater, use the Perl module B<BerkeleyDB> instead.
+
+B<Note:> The database file format has changed multiple times in Berkeley
+DB version 2, 3 and 4. If you cannot recreate your databases, you
+must dump any existing databases with either the C<db_dump> or the
+C<db_dump185> utility that comes with Berkeley DB.
+Once you have rebuilt DB_File to use Berkeley DB version 2 or greater,
+your databases can be recreated using C<db_load>. Refer to the Berkeley DB
+documentation for further details.
+
+Please read L<"COPYRIGHT"> before using version 2.x or greater of Berkeley
+DB with DB_File.
+
+=head2 Interface to Berkeley DB
+
+B<DB_File> allows access to Berkeley DB files using the tie() mechanism
+in Perl 5 (for full details, see L<perlfunc/tie()>). This facility
+allows B<DB_File> to access Berkeley DB files using either an
+associative array (for DB_HASH & DB_BTREE file types) or an ordinary
+array (for the DB_RECNO file type).
+
+In addition to the tie() interface, it is also possible to access most
+of the functions provided in the Berkeley DB API directly.
+See L<THE API INTERFACE>.
+
+=head2 Opening a Berkeley DB Database File
+
+Berkeley DB uses the function dbopen() to open or create a database.
+Here is the C prototype for dbopen():
+
+ DB*
+ dbopen (const char * file, int flags, int mode,
+ DBTYPE type, const void * openinfo)
+
+The parameter C<type> is an enumeration which specifies which of the 3
+interface methods (DB_HASH, DB_BTREE or DB_RECNO) is to be used.
+Depending on which of these is actually chosen, the final parameter,
+I<openinfo> points to a data structure which allows tailoring of the
+specific interface method.
+
+This interface is handled slightly differently in B<DB_File>. Here is
+an equivalent call using B<DB_File>:
+
+ tie %array, 'DB_File', $filename, $flags, $mode, $DB_HASH ;
+
+The C<filename>, C<flags> and C<mode> parameters are the direct
+equivalent of their dbopen() counterparts. The final parameter $DB_HASH
+performs the function of both the C<type> and C<openinfo> parameters in
+dbopen().
+
+In the example above $DB_HASH is actually a pre-defined reference to a
+hash object. B<DB_File> has three of these pre-defined references.
+Apart from $DB_HASH, there is also $DB_BTREE and $DB_RECNO.
+
+The keys allowed in each of these pre-defined references are limited to
+the names used in the equivalent C structure. So, for example, the
+$DB_HASH reference will only allow keys called C<bsize>, C<cachesize>,
+C<ffactor>, C<hash>, C<lorder> and C<nelem>.
+
+To change one of these elements, just assign to it like this:
+
+ $DB_HASH->{'cachesize'} = 10000 ;
+
+The three predefined variables $DB_HASH, $DB_BTREE and $DB_RECNO are
+usually adequate for most applications. If you do need to create extra
+instances of these objects, constructors are available for each file
+type.
+
+Here are examples of the constructors and the valid options available
+for DB_HASH, DB_BTREE and DB_RECNO respectively.
+
+ $a = new DB_File::HASHINFO ;
+ $a->{'bsize'} ;
+ $a->{'cachesize'} ;
+ $a->{'ffactor'};
+ $a->{'hash'} ;
+ $a->{'lorder'} ;
+ $a->{'nelem'} ;
+
+ $b = new DB_File::BTREEINFO ;
+ $b->{'flags'} ;
+ $b->{'cachesize'} ;
+ $b->{'maxkeypage'} ;
+ $b->{'minkeypage'} ;
+ $b->{'psize'} ;
+ $b->{'compare'} ;
+ $b->{'prefix'} ;
+ $b->{'lorder'} ;
+
+ $c = new DB_File::RECNOINFO ;
+ $c->{'bval'} ;
+ $c->{'cachesize'} ;
+ $c->{'psize'} ;
+ $c->{'flags'} ;
+ $c->{'lorder'} ;
+ $c->{'reclen'} ;
+ $c->{'bfname'} ;
+
+The values stored in the hashes above are mostly the direct equivalent
+of their C counterpart. Like their C counterparts, all are set to
+default values - that means you don't have to set I<all> of the
+values when you only want to change one. Here is an example:
+
+ $a = new DB_File::HASHINFO ;
+ $a->{'cachesize'} = 12345 ;
+ tie %y, 'DB_File', "filename", $flags, 0777, $a ;
+
+A few of the options need extra discussion here. When used, the C
+equivalent of the keys C<hash>, C<compare> and C<prefix> store pointers
+to C functions. In B<DB_File> these keys are used to store references
+to Perl subs. Below are templates for each of the subs:
+
+ sub hash
+ {
+ my ($data) = @_ ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ sub compare
+ {
+        my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ sub prefix
+ {
+        my ($key1, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+See L<Changing the BTREE sort order> for an example of using the
+C<compare> template.
+
+If you are using the DB_RECNO interface and you intend making use of
+C<bval>, you should check out L<The 'bval' Option>.
+
+=head2 Default Parameters
+
+It is possible to omit some or all of the final 4 parameters in the
+call to C<tie> and let them take default values. As DB_HASH is the most
+common file format used, the call:
+
+ tie %A, "DB_File", "filename" ;
+
+is equivalent to:
+
+ tie %A, "DB_File", "filename", O_CREAT|O_RDWR, 0666, $DB_HASH ;
+
+It is also possible to omit the filename parameter as well, so the
+call:
+
+ tie %A, "DB_File" ;
+
+is equivalent to:
+
+ tie %A, "DB_File", undef, O_CREAT|O_RDWR, 0666, $DB_HASH ;
+
+See L<In Memory Databases> for a discussion on the use of C<undef>
+in place of a filename.
+
+=head2 In Memory Databases
+
+Berkeley DB allows the creation of in-memory databases by using NULL
+(that is, a C<(char *)0> in C) in place of the filename. B<DB_File>
+uses C<undef> instead of NULL to provide this functionality.
+
+=head1 DB_HASH
+
+The DB_HASH file format is probably the most commonly used of the three
+file formats that B<DB_File> supports. It is also very straightforward
+to use.
+
+=head2 A Simple Example
+
+This example shows how to create a database, add key/value pairs to the
+database, delete keys/value pairs and finally how to enumerate the
+contents of the database.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ our (%h, $k, $v) ;
+
+ unlink "fruit" ;
+ tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0666, $DB_HASH
+ or die "Cannot open file 'fruit': $!\n";
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the order of the keys
+retrieved is in an apparently random order.
+
+=head1 DB_BTREE
+
+The DB_BTREE format is useful when you want to store data in a given
+order. By default the keys will be stored in lexical order, but as you
+will see from the example shown in the next section, it is very easy to
+define your own sorting function.
+
+=head2 Changing the BTREE sort order
+
+This script shows how to override the default sorting algorithm that
+BTREE uses. Instead of using the normal lexical ordering, a case
+insensitive compare function will be used.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my %h ;
+
+ sub Compare
+ {
+ my ($key1, $key2) = @_ ;
+ "\L$key1" cmp "\L$key2" ;
+ }
+
+ # specify the Perl sub that will do the comparison
+ $DB_BTREE->{'compare'} = \&Compare ;
+
+ unlink "tree" ;
+ tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open file 'tree': $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=item 3.
+
+Duplicate keys are entirely defined by the comparison function.
+In the case-insensitive example above, the keys: 'KEY' and 'key'
+would be considered duplicates, and assigning to the second one
+would overwrite the first. If duplicates are allowed for (with the
+R_DUPS flag discussed below), only a single copy of duplicate keys
+is stored in the database --- so (again with example above) assigning
+three values to the keys: 'KEY', 'Key', and 'key' would leave just
+the first key: 'KEY' in the database with three values. For some
+situations this results in information loss, so care should be taken
+to provide fully qualified comparison functions when necessary.
+For example, the above comparison routine could be modified to
+additionally compare case-sensitively if two keys are equal in the
+case insensitive comparison:
+
+ sub compare {
+ my($key1, $key2) = @_;
+ lc $key1 cmp lc $key2 ||
+ $key1 cmp $key2;
+ }
+
+And now you will only have duplicates when the keys themselves
+are truly the same. (note: in versions of the db library prior to
+about November 1996, such duplicate keys were retained so it was
+possible to recover the original keys in sets of keys that
+compared as equal).
+
+
+=back
+
+=head2 Handling Duplicate Keys
+
+The BTREE file type optionally allows a single key to be associated
+with an arbitrary number of values. This option is enabled by setting
+the flags element of C<$DB_BTREE> to R_DUP when creating the database.
+
+There are some difficulties in using the tied hash interface if you
+want to manipulate a BTREE database with duplicate keys. Consider this
+code:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, %h) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the associative array
+ # and print each key/value pair.
+ foreach (sort keys %h)
+ { print "$_ -> $h{$_}\n" }
+
+ untie %h ;
+
+Here is the output:
+
+ Smith -> John
+ Wall -> Larry
+ Wall -> Larry
+ Wall -> Larry
+ mouse -> mickey
+
+As you can see 3 records have been successfully created with key C<Wall>
+- the only thing is, when they are retrieved from the database they
+I<seem> to have the same value, namely C<Larry>. The problem is caused
+by the way that the associative array interface works. Basically, when
+the associative array interface is used to fetch the value associated
+with a given key, it will only ever retrieve the first value.
+
+Although it may not be immediately obvious from the code above, the
+associative array interface can be used to write values with duplicate
+keys, but it cannot be used to read them back from the database.
+
+The way to get around this problem is to use the Berkeley DB API method
+called C<seq>. This method allows sequential access to key/value
+pairs. See L<THE API INTERFACE> for details of both the C<seq> method
+and the API in general.
+
+Here is the script above rewritten using the C<seq> API method.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $status, $key, $value) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the btree using seq
+ # and print each key/value pair.
+ $key = $value = 0 ;
+ for ($status = $x->seq($key, $value, R_FIRST) ;
+ $status == 0 ;
+ $status = $x->seq($key, $value, R_NEXT) )
+ { print "$key -> $value\n" }
+
+ undef $x ;
+ untie %h ;
+
+that prints:
+
+ Smith -> John
+ Wall -> Brick
+ Wall -> Brick
+ Wall -> Larry
+ mouse -> mickey
+
+This time we have got all the key/value pairs, including the multiple
+values associated with the key C<Wall>.
+
+To make life easier when dealing with duplicate keys, B<DB_File> comes with
+a few utility methods.
+
+=head2 The get_dup() Method
+
+The C<get_dup> method assists in
+reading duplicate values from BTREE databases. The method can take the
+following forms:
+
+ $count = $x->get_dup($key) ;
+ @list = $x->get_dup($key) ;
+ %list = $x->get_dup($key, 1) ;
+
+In a scalar context the method returns the number of values associated
+with the key, C<$key>.
+
+In list context, it returns all the values which match C<$key>. Note
+that the values will be returned in an apparently random order.
+
+In list context, if the second parameter is present and evaluates
+TRUE, the method returns an associative array. The keys of the
+associative array correspond to the values that matched in the BTREE
+and the values of the array are a count of the number of times that
+particular value occurred in the BTREE.
+
+So assuming the database created above, we can use C<get_dup> like
+this:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ my $cnt = $x->get_dup("Wall") ;
+ print "Wall occurred $cnt times\n" ;
+
+ my %hash = $x->get_dup("Wall", 1) ;
+ print "Larry is there\n" if $hash{'Larry'} ;
+ print "There are $hash{'Brick'} Brick Walls\n" ;
+
+ my @list = sort $x->get_dup("Wall") ;
+ print "Wall => [@list]\n" ;
+
+ @list = $x->get_dup("Smith") ;
+ print "Smith => [@list]\n" ;
+
+ @list = $x->get_dup("Dog") ;
+ print "Dog => [@list]\n" ;
+
+
+and it will print:
+
+ Wall occurred 3 times
+ Larry is there
+ There are 2 Brick Walls
+ Wall => [Brick Brick Larry]
+ Smith => [John]
+ Dog => []
+
+=head2 The find_dup() Method
+
+ $status = $X->find_dup($key, $value) ;
+
+This method checks for the existence of a specific key/value pair. If the
+pair exists, the cursor is left pointing to the pair and the method
+returns 0. Otherwise the method returns a non-zero value.
+
+Assuming the database from the previous example:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $found) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ;
+ print "Harry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+prints this
+
+ Larry Wall is there
+ Harry Wall is not there
+
+
+=head2 The del_dup() Method
+
+ $status = $X->del_dup($key, $value) ;
+
+This method deletes a specific key/value pair. It returns
+0 if they exist and have been deleted successfully.
+Otherwise the method returns a non-zero value.
+
+Again assuming the existence of the C<tree> database
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $found) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $x->del_dup("Wall", "Larry") ;
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+prints this
+
+ Larry Wall is not there
+
+=head2 Matching Partial Keys
+
+The BTREE interface has a feature which allows partial keys to be
+matched. This functionality is I<only> available when the C<seq> method
+is used along with the R_CURSOR flag.
+
+ $x->seq($key, $value, R_CURSOR) ;
+
+Here is the relevant quote from the dbopen man page where it defines
+the use of the R_CURSOR flag with seq:
+
+ Note, for the DB_BTREE access method, the returned key is not
+ necessarily an exact match for the specified key. The returned key
+ is the smallest key greater than or equal to the specified key,
+ permitting partial key matches and range searches.
+
+In the example script below, the C<match> sub uses this feature to find
+and print the first matching key/value pair given a partial key.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ my ($filename, $x, %h, $st, $key, $value) ;
+
+ sub match
+ {
+ my $key = shift ;
+ my $value = 0;
+ my $orig_key = $key ;
+ $x->seq($key, $value, R_CURSOR) ;
+ print "$orig_key\t-> $key\t-> $value\n" ;
+ }
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'mouse'} = 'mickey' ;
+ $h{'Wall'} = 'Larry' ;
+ $h{'Walls'} = 'Brick' ;
+ $h{'Smith'} = 'John' ;
+
+
+ $key = $value = 0 ;
+ print "IN ORDER\n" ;
+ for ($st = $x->seq($key, $value, R_FIRST) ;
+ $st == 0 ;
+ $st = $x->seq($key, $value, R_NEXT) )
+
+ { print "$key -> $value\n" }
+
+ print "\nPARTIAL MATCH\n" ;
+
+ match "Wa" ;
+ match "A" ;
+ match "a" ;
+
+ undef $x ;
+ untie %h ;
+
+Here is the output:
+
+ IN ORDER
+ Smith -> John
+ Wall -> Larry
+ Walls -> Brick
+ mouse -> mickey
+
+ PARTIAL MATCH
+ Wa -> Wall -> Larry
+ A -> Smith -> John
+ a -> mouse -> mickey
+
+=head1 DB_RECNO
+
+DB_RECNO provides an interface to flat text files. Both variable and
+fixed length records are supported.
+
+In order to make RECNO more compatible with Perl, the array offset for
+all RECNO arrays begins at 0 rather than 1 as in Berkeley DB.
+
+As with normal Perl arrays, a RECNO array can be accessed using
+negative indexes. The index -1 refers to the last element of the array,
+-2 the second last, and so on. Attempting to access an element before
+the start of the array will raise a fatal run-time error.
+
+=head2 The 'bval' Option
+
+The operation of the bval option warrants some discussion. Here is the
+definition of bval from the Berkeley DB 1.85 recno manual page:
+
+ The delimiting byte to be used to mark the end of a
+ record for variable-length records, and the pad charac-
+ ter for fixed-length records. If no value is speci-
+ fied, newlines (``\n'') are used to mark the end of
+ variable-length records and fixed-length records are
+ padded with spaces.
+
+The second sentence is wrong. In actual fact bval will only default to
+C<"\n"> when the openinfo parameter in dbopen is NULL. If a non-NULL
+openinfo parameter is used at all, the value that happens to be in bval
+will be used. That means you always have to specify bval when making
+use of any of the options in the openinfo parameter. This documentation
+error will be fixed in the next release of Berkeley DB.
+
+That clarifies the situation with regards Berkeley DB itself. What
+about B<DB_File>? Well, the behavior defined in the quote above is
+quite useful, so B<DB_File> conforms to it.
+
+That means that you can specify other options (e.g. cachesize) and
+still have bval default to C<"\n"> for variable length records, and
+space for fixed length records.
+
+Also note that the bval option only allows you to specify a single byte
+as a delimiter.
+
+=head2 A Simple Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_RECNO
+ or die "Cannot open file 'text': $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # use a negative index
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ untie @h ;
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head2 Extra RECNO Methods
+
+If you are using a version of Perl earlier than 5.004_57, the tied
+array interface is quite limited. In the example script above
+C<push>, C<pop>, C<shift>, C<unshift>
+or determining the array length will not work with a tied array.
+
+To make the interface more useful for older versions of Perl, a number
+of methods are supplied with B<DB_File> to simulate the missing array
+operations. All these methods are accessed via the object returned from
+the tie call.
+
+Here are the methods:
+
+=over 5
+
+=item B<$X-E<gt>push(list) ;>
+
+Pushes the elements of C<list> to the end of the array.
+
+=item B<$value = $X-E<gt>pop ;>
+
+Removes and returns the last element of the array.
+
+=item B<$X-E<gt>shift>
+
+Removes and returns the first element of the array.
+
+=item B<$X-E<gt>unshift(list) ;>
+
+Pushes the elements of C<list> to the start of the array.
+
+=item B<$X-E<gt>length>
+
+Returns the number of elements in the array.
+
+=item B<$X-E<gt>splice(offset, length, elements);>
+
+Returns a splice of the array.
+
+=back
+
+=head2 Another Example
+
+Here is a more complete example that makes use of some of the methods
+described above. It also makes use of the API interface directly (see
+L<THE API INTERFACE>).
+
+ use warnings ;
+ use strict ;
+ my (@h, $H, $file, $i) ;
+ use DB_File ;
+ use Fcntl ;
+
+ $file = "text" ;
+
+ unlink $file ;
+
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0666, $DB_RECNO
+ or die "Cannot open file $file: $!\n" ;
+
+ # first create a text file to play with
+ $h[0] = "zero" ;
+ $h[1] = "one" ;
+ $h[2] = "two" ;
+ $h[3] = "three" ;
+ $h[4] = "four" ;
+
+
+ # Print the records in order.
+ #
+ # The length method is needed here because evaluating a tied
+ # array in a scalar context does not return the number of
+ # elements in the array.
+
+ print "\nORIGINAL\n" ;
+ foreach $i (0 .. $H->length - 1) {
+ print "$i: $h[$i]\n" ;
+ }
+
+ # use the push & pop methods
+ $a = $H->pop ;
+ $H->push("last") ;
+ print "\nThe last record was [$a]\n" ;
+
+ # and the shift & unshift methods
+ $a = $H->shift ;
+ $H->unshift("first") ;
+ print "The first record was [$a]\n" ;
+
+ # Use the API to add a new record after record 2.
+ $i = 2 ;
+ $H->put($i, "Newbie", R_IAFTER) ;
+
+ # and a new record before record 1.
+ $i = 1 ;
+ $H->put($i, "New One", R_IBEFORE) ;
+
+ # delete record 3
+ $H->del(3) ;
+
+ # now print the records in reverse order
+ print "\nREVERSE\n" ;
+ for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
+ { print "$i: $h[$i]\n" }
+
+ # same again, but use the API functions instead
+ print "\nREVERSE again\n" ;
+ my ($s, $k, $v) = (0, 0, 0) ;
+ for ($s = $H->seq($k, $v, R_LAST) ;
+ $s == 0 ;
+ $s = $H->seq($k, $v, R_PREV))
+ { print "$k: $v\n" }
+
+ undef $H ;
+ untie @h ;
+
+and this is what it outputs:
+
+ ORIGINAL
+ 0: zero
+ 1: one
+ 2: two
+ 3: three
+ 4: four
+
+ The last record was [four]
+ The first record was [zero]
+
+ REVERSE
+ 5: last
+ 4: three
+ 3: Newbie
+ 2: one
+ 1: New One
+ 0: first
+
+ REVERSE again
+ 5: last
+ 4: three
+ 3: Newbie
+ 2: one
+ 1: New One
+ 0: first
+
+Notes:
+
+=over 5
+
+=item 1.
+
+Rather than iterating through the array, C<@h> like this:
+
+ foreach $i (@h)
+
+it is necessary to use either this:
+
+ foreach $i (0 .. $H->length - 1)
+
+or this:
+
+ for ($a = $H->get($k, $v, R_FIRST) ;
+ $a == 0 ;
+ $a = $H->get($k, $v, R_NEXT) )
+
+=item 2.
+
+Notice that both times the C<put> method was used the record index was
+specified using a variable, C<$i>, rather than the literal value
+itself. This is because C<put> will return the record number of the
+inserted line via that parameter.
+
+=back
+
+=head1 THE API INTERFACE
+
+As well as accessing Berkeley DB using a tied hash or array, it is also
+possible to make direct use of most of the API functions defined in the
+Berkeley DB documentation.
+
+To do this you need to store a copy of the object returned from the tie.
+
+ $db = tie %hash, "DB_File", "filename" ;
+
+Once you have done that, you can access the Berkeley DB API functions
+as B<DB_File> methods directly like this:
+
+ $db->put($key, $value, R_NOOVERWRITE) ;
+
+B<Important:> If you have saved a copy of the object returned from
+C<tie>, the underlying database file will I<not> be closed until both
+the tied variable is untied and all copies of the saved object are
+destroyed.
+
+ use DB_File ;
+ $db = tie %hash, "DB_File", "filename"
+ or die "Cannot tie filename: $!" ;
+ ...
+ undef $db ;
+ untie %hash ;
+
+See L<The untie() Gotcha> for more details.
+
+All the functions defined in L<dbopen> are available except for
+close() and dbopen() itself. The B<DB_File> method interface to the
+supported functions has been implemented to mirror the way Berkeley DB
+works whenever possible. In particular note that:
+
+=over 5
+
+=item *
+
+The methods return a status value. All return 0 on success.
+All return -1 to signify an error and set C<$!> to the exact
+error code. The return code 1 generally (but not always) means that the
+key specified did not exist in the database.
+
+Other return codes are defined. See below and in the Berkeley DB
+documentation for details. The Berkeley DB documentation should be used
+as the definitive source.
+
+=item *
+
+Whenever a Berkeley DB function returns data via one of its parameters,
+the equivalent B<DB_File> method does exactly the same.
+
+=item *
+
+If you are careful, it is possible to mix API calls with the tied
+hash/array interface in the same piece of code. Although only a few of
+the methods used to implement the tied interface currently make use of
+the cursor, you should always assume that the cursor has been changed
+any time the tied hash/array interface is used. As an example, this
+code will probably not do what you expect:
+
+ $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
+ or die "Cannot tie $filename: $!" ;
+
+ # Get the first key/value pair and set the cursor
+ $X->seq($key, $value, R_FIRST) ;
+
+ # this line will modify the cursor
+ $count = scalar keys %x ;
+
+ # Get the second key/value pair.
+ # oops, it didn't, it got the last key/value pair!
+ $X->seq($key, $value, R_NEXT) ;
+
+The code above can be rearranged to get around the problem, like this:
+
+ $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
+ or die "Cannot tie $filename: $!" ;
+
+ # this line will modify the cursor
+ $count = scalar keys %x ;
+
+ # Get the first key/value pair and set the cursor
+ $X->seq($key, $value, R_FIRST) ;
+
+ # Get the second key/value pair.
+ # worked this time.
+ $X->seq($key, $value, R_NEXT) ;
+
+=back
+
+All the constants defined in L<dbopen> for use in the flags parameters
+in the methods defined below are also available. Refer to the Berkeley
+DB documentation for the precise meaning of the flags values.
+
+Below is a list of the methods available.
+
+=over 5
+
+=item B<$status = $X-E<gt>get($key, $value [, $flags]) ;>
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. The value read from the database is returned in the
+C<$value> parameter.
+
+If the key does not exist the method returns 1.
+
+No flags are currently defined for this method.
+
+=item B<$status = $X-E<gt>put($key, $value [, $flags]) ;>
+
+Stores the key/value pair in the database.
+
+If you use either the R_IAFTER or R_IBEFORE flags, the C<$key> parameter
+will have the record number of the inserted key/value pair set.
+
+Valid flags are R_CURSOR, R_IAFTER, R_IBEFORE, R_NOOVERWRITE and
+R_SETCURSOR.
+
+=item B<$status = $X-E<gt>del($key [, $flags]) ;>
+
+Removes all key/value pairs with key C<$key> from the database.
+
+A return code of 1 means that the requested key was not in the
+database.
+
+R_CURSOR is the only valid flag at present.
+
+=item B<$status = $X-E<gt>fd ;>
+
+Returns the file descriptor for the underlying database.
+
+See L<Locking: The Trouble with fd> for an explanation for why you should
+not use C<fd> to lock your database.
+
+=item B<$status = $X-E<gt>seq($key, $value, $flags) ;>
+
+This interface allows sequential retrieval from the database. See
+L<dbopen> for full details.
+
+Both the C<$key> and C<$value> parameters will be set to the key/value
+pair read from the database.
+
+The flags parameter is mandatory. The valid flag values are R_CURSOR,
+R_FIRST, R_LAST, R_NEXT and R_PREV.
+
+=item B<$status = $X-E<gt>sync([$flags]) ;>
+
+Flushes any cached buffers to disk.
+
+R_RECNOSYNC is the only valid flag at present.
+
+=back
+
+=head1 DBM FILTERS
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a
+DBM database.
+
+There are four methods associated with DBM Filters. All work identically,
+and each is used to install (or uninstall) a single DBM Filter. Each
+expects a single parameter, namely a reference to a sub. The only
+difference between them is the place that the filter is installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database
+that you need to share with a third-party C application. The C application
+assumes that I<all> keys and values are NULL terminated. Unfortunately
+when Perl writes to DBM databases it doesn't use NULL termination, so
+your Perl application will have to manage NULL termination itself. When
+you write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL termination issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to all keys and values whenever you write to
+the database and have them removed when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my %hash ;
+ my $filename = "/tmp/filt" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+    $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my %hash ;
+ my $filename = "/tmp/filt" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 HINTS AND TIPS
+
+
+=head2 Locking: The Trouble with fd
+
+Until version 1.72 of this module, the recommended technique for locking
+B<DB_File> databases was to flock the filehandle returned from the "fd"
+function. Unfortunately this technique has been shown to be fundamentally
+flawed (Kudos to David Harris for tracking this down). Use it at your own
+peril!
+
+The locking technique went like this.
+
+ $db = tie(%db, 'DB_File', '/tmp/foo.db', O_CREAT|O_RDWR, 0666)
+ || die "dbcreat /tmp/foo.db $!";
+ $fd = $db->fd;
+ open(DB_FH, "+<&=$fd") || die "dup $!";
+ flock (DB_FH, LOCK_EX) || die "flock: $!";
+ ...
+ $db{"Tom"} = "Jerry" ;
+ ...
+ flock(DB_FH, LOCK_UN);
+ undef $db;
+ untie %db;
+ close(DB_FH);
+
+In simple terms, this is what happens:
+
+=over 5
+
+=item 1.
+
+Use "tie" to open the database.
+
+=item 2.
+
+Lock the database with fd & flock.
+
+=item 3.
+
+Read & Write to the database.
+
+=item 4.
+
+Unlock and close the database.
+
+=back
+
+Here is the crux of the problem. A side-effect of opening the B<DB_File>
+database in step 2 is that an initial block from the database will get
+read from disk and cached in memory.
+
+To see why this is a problem, consider what can happen when two processes,
+say "A" and "B", both want to update the same B<DB_File> database
+using the locking steps outlined above. Assume process "A" has already
+opened the database and has a write lock, but it hasn't actually updated
+the database yet (it has finished step 2, but not started step 3 yet). Now
+process "B" tries to open the same database - step 1 will succeed,
+but it will block on step 2 until process "A" releases the lock. The
+important thing to notice here is that at this point in time both
+processes will have cached identical initial blocks from the database.
+
+Now process "A" updates the database and happens to change some of the
+data held in the initial buffer. Process "A" terminates, flushing
+all cached data to disk and releasing the database lock. At this point
+the database on disk will correctly reflect the changes made by process
+"A".
+
+With the lock released, process "B" can now continue. It also updates the
+database and unfortunately it too modifies the data that was in its
+initial buffer. Once that data gets flushed to disk it will overwrite
+some/all of the changes process "A" made to the database.
+
+The result of this scenario is at best a database that doesn't contain
+what you expect. At worst the database will corrupt.
+
+The above won't happen every time competing processes update the same
+B<DB_File> database, but it does illustrate why the technique should
+not be used.
+
+=head2 Safe ways to lock a database
+
+Starting with version 2.x, Berkeley DB has internal support for locking.
+The companion module to this one, B<BerkeleyDB>, provides an interface
+to this locking functionality. If you are serious about locking
+Berkeley DB databases, I strongly recommend using B<BerkeleyDB>.
+
+If using B<BerkeleyDB> isn't an option, there are a number of modules
+available on CPAN that can be used to implement locking. Each one
+implements locking differently and has different goals in mind. It is
+therefore worth knowing the difference, so that you can pick the right
+one for your application. Here are the three locking wrappers:
+
+=over 5
+
+=item B<Tie::DB_Lock>
+
+A B<DB_File> wrapper which creates copies of the database file for
+read access, so that you have a kind of a multiversioning concurrent read
+system. However, updates are still serial. Use for databases where reads
+may be lengthy and consistency problems may occur.
+
+=item B<Tie::DB_LockFile>
+
+A B<DB_File> wrapper that has the ability to lock and unlock the database
+while it is being used. Avoids the tie-before-flock problem by simply
+re-tie-ing the database when you get or drop a lock. Because of the
+flexibility in dropping and re-acquiring the lock in the middle of a
+session, this can be massaged into a system that will work with long
+updates and/or reads if the application follows the hints in the POD
+documentation.
+
+=item B<DB_File::Lock>
+
+An extremely lightweight B<DB_File> wrapper that simply flocks a lockfile
+before tie-ing the database and drops the lock after the untie. Allows
+one to use the same lockfile for multiple databases to avoid deadlock
+problems, if desired. Use for databases where updates and reads are
+quick and simple flock locking semantics are enough.
+
+=back
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings are
+not. See L<DBM FILTERS> for a generic way to work around this problem.
+
+Here is a real example. Netscape 2.0 keeps a record of the locations you
+visit along with the time you last visited them in a DB_HASH database.
+This is usually stored in the file F<~/.netscape/history.db>. The key
+field in the database is the location string and the value field is the
+time the location was last visited stored as a 4 byte binary value.
+
+If you haven't already guessed, the location string is stored with a
+terminating NULL. This means you need to be careful when accessing the
+database.
+
+Here is a snippet of code that is loosely based on Tom Christiansen's
+I<ggh> script (available from your nearest CPAN archive in
+F<authors/id/TOMC/scripts/nshist.gz>).
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ my ($dotdir, $HISTORY, %hist_db, $href, $binary_time, $date) ;
+ $dotdir = $ENV{HOME} || $ENV{LOGNAME};
+
+ $HISTORY = "$dotdir/.netscape/history.db";
+
+ tie %hist_db, 'DB_File', $HISTORY
+ or die "Cannot open $HISTORY: $!\n" ;;
+
+ # Dump the complete database
+ while ( ($href, $binary_time) = each %hist_db ) {
+
+ # remove the terminating NULL
+ $href =~ s/\x00$// ;
+
+ # convert the binary time into a user friendly string
+ $date = localtime unpack("V", $binary_time);
+ print "$date $href\n" ;
+ }
+
+ # check for the existence of a specific key
+ # remember to add the NULL
+ if ( $binary_time = $hist_db{"http://mox.perl.com/\x00"} ) {
+ $date = localtime unpack("V", $binary_time) ;
+ print "Last visited mox.perl.com on $date\n" ;
+ }
+ else {
+ print "Never visited mox.perl.com\n"
+ }
+
+ untie %hist_db ;
+
+=head2 The untie() Gotcha
+
+If you make use of the Berkeley DB API, it is I<very> strongly
+recommended that you read L<perltie/The untie Gotcha>.
+
+Even if you don't currently make use of the API interface, it is still
+worth reading it.
+
+Here is an example which illustrates the problem from a B<DB_File>
+perspective:
+
+ use DB_File ;
+ use Fcntl ;
+
+ my %x ;
+ my $X ;
+
+ $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_TRUNC
+ or die "Cannot tie first time: $!" ;
+
+ $x{123} = 456 ;
+
+ untie %x ;
+
+ tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
+ or die "Cannot tie second time: $!" ;
+
+ untie %x ;
+
+When run, the script will produce this error message:
+
+ Cannot tie second time: Invalid argument at bad.file line 14.
+
+Although the error message above refers to the second tie() statement
+in the script, the source of the problem is really with the untie()
+statement that precedes it.
+
+Having read L<perltie> you will probably have already guessed that the
+error is caused by the extra copy of the tied object stored in C<$X>.
+If you haven't, then the problem boils down to the fact that the
+B<DB_File> destructor, DESTROY, will not be called until I<all>
+references to the tied object are destroyed. Both the tied variable,
+C<%x>, and C<$X> above hold a reference to the object. The call to
+untie() will destroy the first, but C<$X> still holds a valid
+reference, so the destructor will not get called and the database file
+F<tst.fil> will remain open. The fact that Berkeley DB then reports the
+attempt to open a database that is already open via the catch-all
+"Invalid argument" doesn't help.
+
+If you run the script with the C<-w> flag the error message becomes:
+
+ untie attempted while 1 inner references still exist at bad.file line 12.
+ Cannot tie second time: Invalid argument at bad.file line 14.
+
+which pinpoints the real problem. Finally the script can now be
+modified to fix the original problem by destroying the API object
+before the untie:
+
+ ...
+ $x{123} = 456 ;
+
+ undef $X ;
+ untie %x ;
+
+ $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
+ ...
+
+
+=head1 COMMON QUESTIONS
+
+=head2 Why is there Perl source in my database?
+
+If you look at the contents of a database file created by DB_File,
+there can sometimes be part of a Perl script included in it.
+
+This happens because Berkeley DB uses dynamic memory to allocate
+buffers which will subsequently be written to the database file. Being
+dynamic, the memory could have been used for anything before DB
+malloced it. As Berkeley DB doesn't clear the memory once it has been
+allocated, the unused portions will contain random junk. In the case
+where a Perl script gets written to the database, the random junk will
+correspond to an area of dynamic memory that happened to be used during
+the compilation of the script.
+
+Unless you don't like the possibility of there being part of your Perl
+scripts embedded in a database file, this is nothing to worry about.
+
+=head2 How do I store complex data structures with DB_File?
+
+Although B<DB_File> cannot do this directly, there is a module which
+can layer transparently over B<DB_File> to accomplish this feat.
+
+Check out the MLDBM module, available on CPAN in the directory
+F<modules/by-module/MLDBM>.
+
+=head2 What does "Invalid Argument" mean?
+
+You will get this error message when one of the parameters in the
+C<tie> call is wrong. Unfortunately there are quite a few parameters to
+get wrong, so it can be difficult to figure out which one it is.
+
+Here are a couple of possibilities:
+
+=over 5
+
+=item 1.
+
+Attempting to reopen a database without closing it.
+
+=item 2.
+
+Using the O_WRONLY flag.
+
+=back
+
+=head2 What does "Bareword 'DB_File' not allowed" mean?
+
+You will encounter this particular error message when you have the
+C<strict 'subs'> pragma (or the full strict pragma) in your script.
+Consider this script:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my %x ;
+ tie %x, DB_File, "filename" ;
+
+Running it produces the error in question:
+
+ Bareword "DB_File" not allowed while "strict subs" in use
+
+To get around the error, place the word C<DB_File> in either single or
+double quotes, like this:
+
+ tie %x, "DB_File", "filename" ;
+
+Although it might seem like a real pain, it is really worth the effort
+of having a C<use strict> in all your scripts.
+
+=head1 REFERENCES
+
+Articles that are either about B<DB_File> or make use of it.
+
+=over 5
+
+=item 1.
+
+I<Full-Text Searching in Perl>, Tim Kientzle (tkientzle@ddj.com),
+Dr. Dobb's Journal, Issue 295, January 1999, pp 34-41
+
+=back
+
+=head1 HISTORY
+
+Moved to the Changes file.
+
+=head1 BUGS
+
+Some older versions of Berkeley DB had problems with fixed length
+records using the RECNO file format. This problem has been fixed since
+version 1.85 of Berkeley DB.
+
+I am sure there are bugs in the code. If you do find any, or can
+suggest any enhancements, I would welcome your comments.
+
+=head1 AVAILABILITY
+
+B<DB_File> comes with the standard Perl source distribution. Look in
+the directory F<ext/DB_File>. Given the amount of time between releases
+of Perl the version that ships with Perl is quite likely to be out of
+date, so the most recent version can always be found on CPAN (see
+L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/DB_File>.
+
+This version of B<DB_File> will work with either version 1.x, 2.x or
+3.x of Berkeley DB, but is limited to the functionality provided by
+version 1.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+All versions of Berkeley DB are available there.
+
+Alternatively, Berkeley DB version 1 is available at your nearest CPAN
+archive in F<src/misc/db.1.85.tar.gz>.
+
+If you are running IRIX, then get Berkeley DB version 1 from
+F<http://reality.sgi.com/ariel>. It has the patches necessary to
+compile properly on IRIX 5.3.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1995-2002 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<DB_File> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are are few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of
+ Berkeley DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of DB_File. See L<"AUTHOR"> for details.
+
+
+=head1 SEE ALSO
+
+L<perl(1)>, L<dbopen(3)>, L<hash(3)>, L<recno(3)>, L<btree(3)>,
+L<dbmfilter>
+
+=head1 AUTHOR
+
+The DB_File interface was written by Paul Marquess
+E<lt>Paul.Marquess@btinternet.comE<gt>.
+Questions about the DB system itself may be addressed to
+E<lt>db@sleepycat.comE<gt>.
+
+=cut
diff --git a/storage/bdb/perl/DB_File/DB_File.xs b/storage/bdb/perl/DB_File/DB_File.xs
new file mode 100644
index 00000000000..fba8dede791
--- /dev/null
+++ b/storage/bdb/perl/DB_File/DB_File.xs
@@ -0,0 +1,1951 @@
+/*
+
+ DB_File.xs -- Perl 5 interface to Berkeley DB
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+ last modified 1st September 2002
+ version 1.805
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Changes:
+ 0.1 - Initial Release
+ 0.2 - No longer bombs out if dbopen returns an error.
+ 0.3 - Added some support for multiple btree compares
+ 1.0 - Complete support for multiple callbacks added.
+ Fixed a problem with pushing a value onto an empty list.
+ 1.01 - Fixed a SunOS core dump problem.
+ The return value from TIEHASH wasn't set to NULL when
+ dbopen returned an error.
+ 1.02 - Use ALIAS to define TIEARRAY.
+ Removed some redundant commented code.
+ Merged OS2 code into the main distribution.
+ Allow negative subscripts with RECNO interface.
+ Changed the default flags to O_CREAT|O_RDWR
+ 1.03 - Added EXISTS
+ 1.04 - fixed a couple of bugs in hash_cb. Patches supplied by
+ Dave Hammen, hammen@gothamcity.jsc.nasa.gov
+ 1.05 - Added logic to allow prefix & hash types to be specified via
+ Makefile.PL
+ 1.06 - Minor namespace cleanup: Localized PrintBtree.
+ 1.07 - Fixed bug with RECNO, where bval wasn't defaulting to "\n".
+ 1.08 - No change to DB_File.xs
+ 1.09 - Default mode for dbopen changed to 0666
+ 1.10 - Fixed fd method so that it still returns -1 for
+ in-memory files when db 1.86 is used.
+ 1.11 - No change to DB_File.xs
+ 1.12 - No change to DB_File.xs
+ 1.13 - Tidied up a few casts.
+ 1.14 - Made it illegal to tie an associative array to a RECNO
+ database and an ordinary array to a HASH or BTREE database.
+ 1.50 - Make work with both DB 1.x or DB 2.x
+ 1.51 - Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
+ 1.52 - Patch from Gisle Aas <gisle@aas.no> to suppress "use of
+ undefined value" warning with db_get and db_seq.
+ 1.53 - Added DB_RENUMBER to flags for recno.
+ 1.54 - Fixed bug in the fd method
+ 1.55 - Fix for AIX from Jarkko Hietaniemi
+ 1.56 - No change to DB_File.xs
+ 1.57 - added the #undef op to allow building with Threads support.
+ 1.58 - Fixed a problem with the use of sv_setpvn. When the
+ size is specified as 0, it does a strlen on the data.
+ This was ok for DB 1.x, but isn't for DB 2.x.
+ 1.59 - No change to DB_File.xs
+ 1.60 - Some code tidy up
+ 1.61 - added flagSet macro for DB 2.5.x
+ fixed typo in O_RDONLY test.
+ 1.62 - No change to DB_File.xs
+        1.63 -  Fix to allow DB 2.6.x to build.
+ 1.64 - Tidied up the 1.x to 2.x flags mapping code.
+ Added a patch from Mark Kettenis <kettenis@wins.uva.nl>
+ to fix a flag mapping problem with O_RDONLY on the Hurd
+ 1.65 - Fixed a bug in the PUSH logic.
+ Added BOOT check that using 2.3.4 or greater
+ 1.66 - Added DBM filter code
+ 1.67 - Backed off the use of newSVpvn.
+ Fixed DBM Filter code for Perl 5.004.
+ Fixed a small memory leak in the filter code.
+ 1.68 - fixed backward compatability bug with R_IAFTER & R_IBEFORE
+ merged in the 5.005_58 changes
+ 1.69 - fixed a bug in push -- DB_APPEND wasn't working properly.
+ Fixed the R_SETCURSOR bug introduced in 1.68
+ Added a new Perl variable $DB_File::db_ver
+ 1.70 - Initialise $DB_File::db_ver and $DB_File::db_version with
+ GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
+ Added a BOOT check to test for equivalent versions of db.h &
+ libdb.a/so.
+ 1.71 - Support for Berkeley DB version 3.
+ Support for Berkeley DB 2/3's backward compatability mode.
+ Rewrote push
+ 1.72 - No change to DB_File.xs
+ 1.73 - No change to DB_File.xs
+ 1.74 - A call to open needed parenthesised to stop it clashing
+ with a win32 macro.
+ Added Perl core patches 7703 & 7801.
+ 1.75 - Fixed Perl core patch 7703.
+                Added support to allow DB_File to be built with
+ Berkeley DB 3.2 -- btree_compare, btree_prefix and hash_cb
+ needed to be changed.
+ 1.76 - No change to DB_File.xs
+ 1.77 - Tidied up a few types used in calling newSVpvn.
+ 1.78 - Core patch 10335, 10372, 10534, 10549, 11051 included.
+ 1.79 - NEXTKEY ignores the input key.
+ Added lots of casts
+ 1.800 - Moved backward compatability code into ppport.h.
+ Use the new constants code.
+ 1.801 - No change to DB_File.xs
+ 1.802 - No change to DB_File.xs
+ 1.803 - FETCH, STORE & DELETE don't map the flags parameter
+ into the equivalent Berkeley DB function anymore.
+ 1.804 - no change.
+ 1.805 - recursion detection added to the callbacks
+ Support for 4.1.X added.
+ Filter code can now cope with read-only $_
+
+*/
+
+#define PERL_NO_GET_CONTEXT
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#ifdef _NOT_CORE
+# include "ppport.h"
+#endif
+
+/* Mention DB_VERSION_MAJOR_CFG, DB_VERSION_MINOR_CFG, and
+ DB_VERSION_PATCH_CFG here so that Configure pulls them all in. */
+
+/* Being the Berkeley DB we prefer the <sys/cdefs.h> (which will be
+ * shortly #included by the <db.h>) __attribute__ to the possibly
+ * already defined __attribute__, for example by GNUC or by Perl. */
+
+/* #if DB_VERSION_MAJOR_CFG < 2 */
+#ifndef DB_VERSION_MAJOR
+# undef __attribute__
+#endif
+
+#ifdef COMPAT185
+# include <db_185.h>
+#else
+# include <db.h>
+#endif
+
+/* Wall starts with 5.7.x */
+
+#if PERL_REVISION > 5 || (PERL_REVISION == 5 && PERL_VERSION >= 7)
+
+/* Since we dropped the gccish definition of __attribute__ we will want
+ * to redefine dNOOP, however (so that dTHX continues to work). Yes,
+ * all this means that we can't do attribute checking on the DB_File,
+ * boo, hiss. */
+# ifndef DB_VERSION_MAJOR
+
+# undef dNOOP
+# define dNOOP extern int Perl___notused
+
+ /* Ditto for dXSARGS. */
+# undef dXSARGS
+# define dXSARGS \
+ dSP; dMARK; \
+ I32 ax = mark - PL_stack_base + 1; \
+ I32 items = sp - mark
+
+# endif
+
+/* avoid -Wall; DB_File xsubs never make use of `ix' setup for ALIASes */
+# undef dXSI32
+# define dXSI32 dNOOP
+
+#endif /* Perl >= 5.7 */
+
+#include <fcntl.h>
+
+/* #define TRACE */
+
+#ifdef TRACE
+# define Trace(x) printf x
+#else
+# define Trace(x)
+#endif
+
+
+#define DBT_clear(x) Zero(&x, 1, DBT) ;
+
+#ifdef DB_VERSION_MAJOR
+
+#if DB_VERSION_MAJOR == 2
+# define BERKELEY_DB_1_OR_2
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
+# define AT_LEAST_DB_3_2
+#endif
+
+#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_4_1
+#endif
+
+/* map version 2 features & constants onto their version 1 equivalent */
+
+#ifdef DB_Prefix_t
+# undef DB_Prefix_t
+#endif
+#define DB_Prefix_t size_t
+
+#ifdef DB_Hash_t
+# undef DB_Hash_t
+#endif
+#define DB_Hash_t u_int32_t
+
+/* DBTYPE stays the same */
+/* HASHINFO, RECNOINFO and BTREEINFO map to DB_INFO */
+#if DB_VERSION_MAJOR == 2
+ typedef DB_INFO INFO ;
+#else /* DB_VERSION_MAJOR > 2 */
+# define DB_FIXEDLEN (0x8000)
+#endif /* DB_VERSION_MAJOR == 2 */
+
+/* version 2 has db_recno_t in place of recno_t */
+typedef db_recno_t recno_t;
+
+
+#define R_CURSOR DB_SET_RANGE
+#define R_FIRST DB_FIRST
+#define R_IAFTER DB_AFTER
+#define R_IBEFORE DB_BEFORE
+#define R_LAST DB_LAST
+#define R_NEXT DB_NEXT
+#define R_NOOVERWRITE DB_NOOVERWRITE
+#define R_PREV DB_PREV
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define R_SETCURSOR 0x800000
+#else
+# define R_SETCURSOR (-100)
+#endif
+
+#define R_RECNOSYNC 0
+#define R_FIXEDLEN DB_FIXEDLEN
+#define R_DUP DB_DUP
+
+
+#define db_HA_hash h_hash
+#define db_HA_ffactor h_ffactor
+#define db_HA_nelem h_nelem
+#define db_HA_bsize db_pagesize
+#define db_HA_cachesize db_cachesize
+#define db_HA_lorder db_lorder
+
+#define db_BT_compare bt_compare
+#define db_BT_prefix bt_prefix
+#define db_BT_flags flags
+#define db_BT_psize db_pagesize
+#define db_BT_cachesize db_cachesize
+#define db_BT_lorder db_lorder
+#define db_BT_maxkeypage
+#define db_BT_minkeypage
+
+
+#define db_RE_reclen re_len
+#define db_RE_flags flags
+#define db_RE_bval re_pad
+#define db_RE_bfname re_source
+#define db_RE_psize db_pagesize
+#define db_RE_cachesize db_cachesize
+#define db_RE_lorder db_lorder
+
+#define TXN NULL,
+
+#define do_SEQ(db, key, value, flag) (db->cursor->c_get)(db->cursor, &key, &value, flag)
+
+
+#define DBT_flags(x) x.flags = 0
+#define DB_flags(x, v) x |= v
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define flagSet(flags, bitmask) ((flags) & (bitmask))
+#else
+# define flagSet(flags, bitmask) (((flags) & DB_OPFLAGS_MASK) == (bitmask))
+#endif
+
+#else /* db version 1.x */
+
+#define BERKELEY_DB_1
+#define BERKELEY_DB_1_OR_2
+
+typedef union INFO {
+ HASHINFO hash ;
+ RECNOINFO recno ;
+ BTREEINFO btree ;
+ } INFO ;
+
+
+#ifdef mDB_Prefix_t
+# ifdef DB_Prefix_t
+# undef DB_Prefix_t
+# endif
+# define DB_Prefix_t mDB_Prefix_t
+#endif
+
+#ifdef mDB_Hash_t
+# ifdef DB_Hash_t
+# undef DB_Hash_t
+# endif
+# define DB_Hash_t mDB_Hash_t
+#endif
+
+#define db_HA_hash hash.hash
+#define db_HA_ffactor hash.ffactor
+#define db_HA_nelem hash.nelem
+#define db_HA_bsize hash.bsize
+#define db_HA_cachesize hash.cachesize
+#define db_HA_lorder hash.lorder
+
+#define db_BT_compare btree.compare
+#define db_BT_prefix btree.prefix
+#define db_BT_flags btree.flags
+#define db_BT_psize btree.psize
+#define db_BT_cachesize btree.cachesize
+#define db_BT_lorder btree.lorder
+#define db_BT_maxkeypage btree.maxkeypage
+#define db_BT_minkeypage btree.minkeypage
+
+#define db_RE_reclen recno.reclen
+#define db_RE_flags recno.flags
+#define db_RE_bval recno.bval
+#define db_RE_bfname recno.bfname
+#define db_RE_psize recno.psize
+#define db_RE_cachesize recno.cachesize
+#define db_RE_lorder recno.lorder
+
+#define TXN
+
+#define do_SEQ(db, key, value, flag) (db->dbp->seq)(db->dbp, &key, &value, flag)
+#define DBT_flags(x)
+#define DB_flags(x, v)
+#define flagSet(flags, bitmask) ((flags) & (bitmask))
+
+#endif /* db version 1 */
+
+
+
+#define db_DELETE(db, key, flags) ((db->dbp)->del)(db->dbp, TXN &key, 0)
+#define db_STORE(db, key, value, flags) ((db->dbp)->put)(db->dbp, TXN &key, &value, 0)
+#define db_FETCH(db, key, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, 0)
+
+#define db_sync(db, flags) ((db->dbp)->sync)(db->dbp, flags)
+#define db_get(db, key, value, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, flags)
+
+#ifdef DB_VERSION_MAJOR
+#define db_DESTROY(db) (!db->aborted && ( db->cursor->c_close(db->cursor),\
+ (db->dbp->close)(db->dbp, 0) ))
+#define db_close(db) ((db->dbp)->close)(db->dbp, 0)
+#define db_del(db, key, flags) (flagSet(flags, R_CURSOR) \
+ ? ((db->cursor)->c_del)(db->cursor, 0) \
+ : ((db->dbp)->del)(db->dbp, NULL, &key, flags) )
+
+#else /* ! DB_VERSION_MAJOR */
+
+#define db_DESTROY(db) (!db->aborted && ((db->dbp)->close)(db->dbp))
+#define db_close(db) ((db->dbp)->close)(db->dbp)
+#define db_del(db, key, flags) ((db->dbp)->del)(db->dbp, &key, flags)
+#define db_put(db, key, value, flags) ((db->dbp)->put)(db->dbp, &key, &value, flags)
+
+#endif /* ! DB_VERSION_MAJOR */
+
+
+#define db_seq(db, key, value, flags) do_SEQ(db, key, value, flags)
+
+typedef struct {
+ DBTYPE type ;
+ DB * dbp ;
+ SV * compare ;
+ bool in_compare ;
+ SV * prefix ;
+ bool in_prefix ;
+ SV * hash ;
+ bool in_hash ;
+ bool aborted ;
+ int in_memory ;
+#ifdef BERKELEY_DB_1_OR_2
+ INFO info ;
+#endif
+#ifdef DB_VERSION_MAJOR
+ DBC * cursor ;
+#endif
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+
+ } DB_File_type;
+
+typedef DB_File_type * DB_File ;
+typedef DBT DBTKEY ;
+
+#define my_sv_setpvn(sv, d, s) sv_setpvn(sv, (s ? d : (void*)""), s)
+
+#define OutputValue(arg, name) \
+ { if (RETVAL == 0) { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ TAINT; \
+ SvTAINTED_on(arg); \
+ DBM_ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+ } \
+ }
+
+#define OutputKey(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (db->type != DB_RECNO) { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ else \
+ sv_setiv(arg, (I32)*(I32*)name.data - 1); \
+ TAINT; \
+ SvTAINTED_on(arg); \
+ DBM_ckFilter(arg, filter_fetch_key,"filter_fetch_key") ; \
+ } \
+ }
+
+#define my_SvUV32(sv) ((u_int32_t)SvUV(sv))
+
+#ifdef CAN_PROTOTYPE
+extern void __getBerkeleyDBInfo(void);
+#endif
+
+/* Internal Global Data */
+
+#define MY_CXT_KEY "DB_File::_guts" XS_VERSION
+
+typedef struct {
+ recno_t x_Value;
+ recno_t x_zero;
+ DB_File x_CurrentDB;
+ DBTKEY x_empty;
+} my_cxt_t;
+
+START_MY_CXT
+
+#define Value (MY_CXT.x_Value)
+#define zero (MY_CXT.x_zero)
+#define CurrentDB (MY_CXT.x_CurrentDB)
+#define empty (MY_CXT.x_empty)
+
+#define ERR_BUFF "DB_File::Error"
+
+#ifdef DB_VERSION_MAJOR
+
+static int
+#ifdef CAN_PROTOTYPE
+db_put(DB_File db, DBTKEY key, DBT value, u_int flags)
+#else
+db_put(db, key, value, flags)
+DB_File db ;
+DBTKEY key ;
+DBT value ;
+u_int flags ;
+#endif
+{
+ int status ;
+
+ if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) {
+ DBC * temp_cursor ;
+ DBT l_key, l_value;
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor) != 0)
+#else
+ if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor, 0) != 0)
+#endif
+ return (-1) ;
+
+ memset(&l_key, 0, sizeof(l_key));
+ l_key.data = key.data;
+ l_key.size = key.size;
+ memset(&l_value, 0, sizeof(l_value));
+ l_value.data = value.data;
+ l_value.size = value.size;
+
+ if ( temp_cursor->c_get(temp_cursor, &l_key, &l_value, DB_SET) != 0) {
+ (void)temp_cursor->c_close(temp_cursor);
+ return (-1);
+ }
+
+ status = temp_cursor->c_put(temp_cursor, &key, &value, flags);
+ (void)temp_cursor->c_close(temp_cursor);
+
+ return (status) ;
+ }
+
+
+ if (flagSet(flags, R_CURSOR)) {
+ return ((db->cursor)->c_put)(db->cursor, &key, &value, DB_CURRENT);
+ }
+
+ if (flagSet(flags, R_SETCURSOR)) {
+ if ((db->dbp)->put(db->dbp, NULL, &key, &value, 0) != 0)
+ return -1 ;
+ return ((db->cursor)->c_get)(db->cursor, &key, &value, DB_SET_RANGE);
+
+ }
+
+ return ((db->dbp)->put)(db->dbp, NULL, &key, &value, flags) ;
+
+}
+
+#endif /* DB_VERSION_MAJOR */
+
+static void
+tidyUp(DB_File db)
+{
+ /* db_DESTROY(db); */
+ db->aborted = TRUE ;
+}
+
+
+/* btree_compare -- glue between Berkeley DB's btree key-comparison
+   hook and the user-supplied Perl comparison sub (CurrentDB->compare).
+   Pushes both keys on the Perl stack, calls the sub in scalar context
+   and returns its integer result.  Re-entry (the comparison sub
+   touching the same database) is detected via in_compare and croaks.
+   NOTE(review): relies on the file-global CurrentDB having been set
+   by the caller before the DB operation that triggers this callback. */
+static int
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+btree_compare(DB * db, const DBT *key1, const DBT *key2)
+#else
+btree_compare(db, key1, key2)
+DB * db ;
+const DBT * key1 ;
+const DBT * key2 ;
+#endif /* CAN_PROTOTYPE */
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+btree_compare(const DBT *key1, const DBT *key2)
+#else
+btree_compare(key1, key2)
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#endif
+
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ dMY_CXT ;
+ void * data1, * data2 ;
+ int retval ;
+ int count ;
+ /* save CurrentDB: the Perl sub may re-enter DB_File and change it */
+ DB_File keep_CurrentDB = CurrentDB;
+
+
+ if (CurrentDB->in_compare) {
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_compare: recursion detected\n") ;
+ }
+
+ data1 = (char *) key1->data ;
+ data2 = (char *) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ CurrentDB->in_compare = TRUE;
+
+ count = perl_call_sv(CurrentDB->compare, G_SCALAR);
+
+ /* restore before clearing the flag -- the callback may have moved it */
+ CurrentDB = keep_CurrentDB;
+ CurrentDB->in_compare = FALSE;
+
+ SPAGAIN ;
+
+ if (count != 1){
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_compare: expected 1 return value from compare sub, got %d\n", count) ;
+ }
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+
+}
+
+/* btree_prefix -- glue between Berkeley DB's btree prefix-extraction
+   hook and the user-supplied Perl sub (CurrentDB->prefix).  Pushes
+   both keys on the Perl stack, calls the sub in scalar context and
+   returns its integer result as the prefix length.  Re-entry is
+   detected via in_prefix and croaks, mirroring btree_compare. */
+static DB_Prefix_t
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+btree_prefix(DB * db, const DBT *key1, const DBT *key2)
+#else
+btree_prefix(db, key1, key2)
+DB * db ; /* fixed: was "Db * db" -- typo, the type is DB (cf. btree_compare) */
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+btree_prefix(const DBT *key1, const DBT *key2)
+#else
+btree_prefix(key1, key2)
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ dMY_CXT ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+ /* save CurrentDB: the Perl sub may re-enter DB_File and change it */
+ DB_File keep_CurrentDB = CurrentDB;
+
+ if (CurrentDB->in_prefix){
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_prefix: recursion detected\n") ;
+ }
+
+ data1 = (char *) key1->data ;
+ data2 = (char *) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ CurrentDB->in_prefix = TRUE;
+
+ count = perl_call_sv(CurrentDB->prefix, G_SCALAR);
+
+ CurrentDB = keep_CurrentDB;
+ CurrentDB->in_prefix = FALSE;
+
+ SPAGAIN ;
+
+ if (count != 1){
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_prefix: expected 1 return value from prefix sub, got %d\n", count) ;
+ }
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+
+#ifdef BERKELEY_DB_1
+# define HASH_CB_SIZE_TYPE size_t
+#else
+# define HASH_CB_SIZE_TYPE u_int32_t
+#endif
+
+/* hash_cb -- glue between Berkeley DB's hash-function hook and the
+   user-supplied Perl sub (CurrentDB->hash).  Pushes the raw key bytes
+   on the Perl stack, calls the sub in scalar context and returns its
+   integer result as the hash value.  Re-entry is detected via in_hash
+   and croaks. */
+static DB_Hash_t
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+hash_cb(DB * db, const void *data, u_int32_t size)
+#else
+hash_cb(db, data, size)
+DB * db ;
+const void * data ;
+HASH_CB_SIZE_TYPE size ;
+#endif
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+hash_cb(const void *data, HASH_CB_SIZE_TYPE size)
+#else
+hash_cb(data, size)
+const void * data ;
+HASH_CB_SIZE_TYPE size ;
+#endif
+
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ dMY_CXT;
+ int retval ;
+ int count ;
+ DB_File keep_CurrentDB = CurrentDB;
+
+ if (CurrentDB->in_hash){
+ tidyUp(CurrentDB);
+ croak ("DB_File hash callback: recursion detected\n") ;
+ }
+
+#ifndef newSVpvn
+ if (size == 0)
+ data = "" ;
+#endif
+
+ /* DGH - Next two lines added to fix corrupted stack problem */
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+
+ XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
+ PUTBACK ;
+
+ /* keep_CurrentDB == CurrentDB at this point; the flag is set via the
+    saved copy (unlike the other callbacks, which use CurrentDB) */
+ keep_CurrentDB->in_hash = TRUE;
+
+ count = perl_call_sv(CurrentDB->hash, G_SCALAR);
+
+ CurrentDB = keep_CurrentDB;
+ CurrentDB->in_hash = FALSE;
+
+ SPAGAIN ;
+
+ if (count != 1){
+ tidyUp(CurrentDB);
+ croak ("DB_File hash_cb: expected 1 return value from hash sub, got %d\n", count) ;
+ }
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+/* db_errcall_cb -- error callback registered with Berkeley DB.
+   Copies the library's error text (with optional prefix) into the
+   Perl scalar named by ERR_BUFF so scripts can inspect it; does
+   nothing if that scalar does not exist (FALSE = do not create). */
+static void
+#ifdef CAN_PROTOTYPE
+db_errcall_cb(const char * db_errpfx, char * buffer)
+#else
+db_errcall_cb(db_errpfx, buffer)
+const char * db_errpfx;
+char * buffer;
+#endif
+{
+ SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+ if (sv) {
+ if (db_errpfx)
+ sv_setpvf(sv, "%s: %s", db_errpfx, buffer) ;
+ else
+ sv_setpv(sv, buffer) ;
+ }
+}
+
+#if defined(TRACE) && defined(BERKELEY_DB_1_OR_2)
+
+/* Debug helper (compiled only under TRACE with Berkeley DB 1.x/2.x):
+   dump the fields of a HASH-type INFO structure to stdout. */
+static void
+#ifdef CAN_PROTOTYPE
+PrintHash(INFO *hash)
+#else
+PrintHash(hash)
+INFO * hash ;
+#endif
+{
+ printf ("HASH Info\n") ;
+ printf (" hash = %s\n",
+ (hash->db_HA_hash != NULL ? "redefined" : "default")) ;
+ printf (" bsize = %d\n", hash->db_HA_bsize) ;
+ printf (" ffactor = %d\n", hash->db_HA_ffactor) ;
+ printf (" nelem = %d\n", hash->db_HA_nelem) ;
+ printf (" cachesize = %d\n", hash->db_HA_cachesize) ;
+ printf (" lorder = %d\n", hash->db_HA_lorder) ;
+
+}
+
+/* Debug helper (compiled only under TRACE with Berkeley DB 1.x/2.x):
+   dump the fields of a RECNO-type INFO structure to stdout. */
+static void
+#ifdef CAN_PROTOTYPE
+PrintRecno(INFO *recno)
+#else
+PrintRecno(recno)
+INFO * recno ;
+#endif
+{
+ printf ("RECNO Info\n") ;
+ printf (" flags = %d\n", recno->db_RE_flags) ;
+ printf (" cachesize = %d\n", recno->db_RE_cachesize) ;
+ printf (" psize = %d\n", recno->db_RE_psize) ;
+ printf (" lorder = %d\n", recno->db_RE_lorder) ;
+ /* fixed: was "%ul" (i.e. %u plus a literal 'l'), which mismatches the
+    unsigned long argument -- undefined behavior; %lu is correct */
+ printf (" reclen = %lu\n", (unsigned long)recno->db_RE_reclen) ;
+ printf (" bval = %d 0x%x\n", recno->db_RE_bval, recno->db_RE_bval) ;
+ /* fixed: bfname is a char*, so print the pointer with %p, not %d */
+ printf (" bfname = %p [%s]\n", (void *)recno->db_RE_bfname, recno->db_RE_bfname) ;
+}
+
+/* Debug helper (compiled only under TRACE with Berkeley DB 1.x/2.x):
+   dump the fields of a BTREE-type INFO structure to stdout.  The
+   min/max keypage fields exist only in the 1.x API (no
+   DB_VERSION_MAJOR defined). */
+static void
+#ifdef CAN_PROTOTYPE
+PrintBtree(INFO *btree)
+#else
+PrintBtree(btree)
+INFO * btree ;
+#endif
+{
+ printf ("BTREE Info\n") ;
+ printf (" compare = %s\n",
+ (btree->db_BT_compare ? "redefined" : "default")) ;
+ printf (" prefix = %s\n",
+ (btree->db_BT_prefix ? "redefined" : "default")) ;
+ printf (" flags = %d\n", btree->db_BT_flags) ;
+ printf (" cachesize = %d\n", btree->db_BT_cachesize) ;
+ printf (" psize = %d\n", btree->db_BT_psize) ;
+#ifndef DB_VERSION_MAJOR
+ printf (" maxkeypage = %d\n", btree->db_BT_maxkeypage) ;
+ printf (" minkeypage = %d\n", btree->db_BT_minkeypage) ;
+#endif
+ printf (" lorder = %d\n", btree->db_BT_lorder) ;
+}
+
+#else
+
+#define PrintRecno(recno)
+#define PrintHash(hash)
+#define PrintBtree(btree)
+
+#endif /* TRACE */
+
+
+/* GetArrayLength -- number of records in a RECNO database, found by
+   seeking to the last record and reading its record number out of
+   the key.  Returns 0 when the database is empty.
+   NOTE(review): assumes key.data of the R_LAST record holds a native
+   integer record number, as RECNO cursors return -- TODO confirm the
+   I32/recno_t width match on all platforms. */
+static I32
+#ifdef CAN_PROTOTYPE
+GetArrayLength(pTHX_ DB_File db)
+#else
+GetArrayLength(db)
+DB_File db ;
+#endif
+{
+ DBT key ;
+ DBT value ;
+ int RETVAL ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+ if (RETVAL == 0)
+ RETVAL = *(I32 *)key.data ;
+ else /* No key means empty file */
+ RETVAL = 0 ;
+
+ return ((I32)RETVAL) ;
+}
+
+/* GetRecnoKey -- map a Perl array subscript onto a 1-based RECNO
+   record number.  Negative subscripts count back from the end of the
+   array (requiring a length lookup); an attempt to address before the
+   start of the array marks the handle aborted and croaks. */
+static recno_t
+#ifdef CAN_PROTOTYPE
+GetRecnoKey(pTHX_ DB_File db, I32 value)
+#else
+GetRecnoKey(db, value)
+DB_File db ;
+I32 value ;
+#endif
+{
+ if (value < 0) {
+ /* Get the length of the array */
+ I32 length = GetArrayLength(aTHX_ db) ;
+
+ /* check for attempt to write before start of array */
+ if (length + value + 1 <= 0) {
+ tidyUp(db);
+ croak("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
+ }
+
+ value = length + value + 1 ;
+ }
+ else
+ ++ value ;
+
+ return value ;
+}
+
+
+/* ParseOpenInfo -- build and open a DB_File handle from the tie()
+   arguments.  sv, when supplied, is a DB_File::HASHINFO,
+   DB_File::BTREEINFO or DB_File::RECNOINFO object whose fields are
+   copied into the Berkeley DB open parameters.  Two complete
+   implementations follow: one for Berkeley DB 1.x/2.x (dbopen /
+   db_open style, driven by an INFO struct) and one for 3.x and later
+   (db_create + DB->open + set_* methods).  On open failure the
+   returned handle has dbp == NULL for the caller to detect. */
+static DB_File
+#ifdef CAN_PROTOTYPE
+ParseOpenInfo(pTHX_ int isHASH, char *name, int flags, int mode, SV *sv)
+#else
+ParseOpenInfo(isHASH, name, flags, mode, sv)
+int isHASH ;
+char * name ;
+int flags ;
+int mode ;
+SV * sv ;
+#endif
+{
+
+#ifdef BERKELEY_DB_1_OR_2 /* Berkeley DB Version 1 or 2 */
+
+ SV ** svp;
+ HV * action ;
+ DB_File RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
+ void * openinfo = NULL ;
+ INFO * info = &RETVAL->info ;
+ STRLEN n_a;
+ dMY_CXT;
+
+/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
+ Zero(RETVAL, 1, DB_File_type) ;
+
+ /* Default to HASH */
+ RETVAL->filtering = 0 ;
+ RETVAL->filter_fetch_key = RETVAL->filter_store_key =
+ RETVAL->filter_fetch_value = RETVAL->filter_store_value =
+ RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
+ RETVAL->type = DB_HASH ;
+
+ /* DGH - Next line added to avoid SEGV on existing hash DB */
+ CurrentDB = RETVAL;
+
+ /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
+ RETVAL->in_memory = (name == NULL) ;
+
+ if (sv)
+ {
+ if (! SvROK(sv) )
+ croak ("type parameter is not a reference") ;
+
+ svp = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
+ if (svp && SvOK(*svp))
+ action = (HV*) SvRV(*svp) ;
+ else
+ croak("internal error") ;
+
+ if (sv_isa(sv, "DB_File::HASHINFO"))
+ {
+
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_HASH database") ;
+
+ RETVAL->type = DB_HASH ;
+ openinfo = (void*)info ;
+
+ svp = hv_fetch(action, "hash", 4, FALSE);
+
+ if (svp && SvOK(*svp))
+ {
+ info->db_HA_hash = hash_cb ;
+ RETVAL->hash = newSVsv(*svp) ;
+ }
+ else
+ info->db_HA_hash = NULL ;
+
+ svp = hv_fetch(action, "ffactor", 7, FALSE);
+ info->db_HA_ffactor = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "nelem", 5, FALSE);
+ info->db_HA_nelem = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "bsize", 5, FALSE);
+ info->db_HA_bsize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_HA_cachesize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_HA_lorder = svp ? SvIV(*svp) : 0;
+
+ PrintHash(info) ;
+ }
+ else if (sv_isa(sv, "DB_File::BTREEINFO"))
+ {
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_BTREE database");
+
+ RETVAL->type = DB_BTREE ;
+ openinfo = (void*)info ;
+
+ svp = hv_fetch(action, "compare", 7, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ info->db_BT_compare = btree_compare ;
+ RETVAL->compare = newSVsv(*svp) ;
+ }
+ else
+ info->db_BT_compare = NULL ;
+
+ svp = hv_fetch(action, "prefix", 6, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ info->db_BT_prefix = btree_prefix ;
+ RETVAL->prefix = newSVsv(*svp) ;
+ }
+ else
+ info->db_BT_prefix = NULL ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ info->db_BT_flags = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_BT_cachesize = svp ? SvIV(*svp) : 0;
+
+#ifndef DB_VERSION_MAJOR
+ svp = hv_fetch(action, "minkeypage", 10, FALSE);
+ info->btree.minkeypage = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "maxkeypage", 10, FALSE);
+ info->btree.maxkeypage = svp ? SvIV(*svp) : 0;
+#endif
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ info->db_BT_psize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_BT_lorder = svp ? SvIV(*svp) : 0;
+
+ PrintBtree(info) ;
+
+ }
+ else if (sv_isa(sv, "DB_File::RECNOINFO"))
+ {
+ if (isHASH)
+ croak("DB_File can only tie an array to a DB_RECNO database");
+
+ RETVAL->type = DB_RECNO ;
+ openinfo = (void *)info ;
+
+ info->db_RE_flags = 0 ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ info->db_RE_flags = (u_long) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "reclen", 6, FALSE);
+ info->db_RE_reclen = (size_t) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_RE_cachesize = (u_int) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ info->db_RE_psize = (u_int) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_RE_lorder = (int) (svp ? SvIV(*svp) : 0);
+
+#ifdef DB_VERSION_MAJOR
+ /* DB 2.x: the tie filename becomes the recno backing source; the
+    file actually opened is the (optional) bfname below */
+ info->re_source = name ;
+ name = NULL ;
+#endif
+ svp = hv_fetch(action, "bfname", 6, FALSE);
+ if (svp && SvOK(*svp)) {
+ char * ptr = SvPV(*svp,n_a) ;
+#ifdef DB_VERSION_MAJOR
+ name = (char*) n_a ? ptr : NULL ;
+#else
+ info->db_RE_bfname = (char*) (n_a ? ptr : NULL) ;
+#endif
+ }
+ else
+#ifdef DB_VERSION_MAJOR
+ name = NULL ;
+#else
+ info->db_RE_bfname = NULL ;
+#endif
+
+ svp = hv_fetch(action, "bval", 4, FALSE);
+#ifdef DB_VERSION_MAJOR
+ if (svp && SvOK(*svp))
+ {
+ int value ;
+ if (SvPOK(*svp))
+ value = (int)*SvPV(*svp, n_a) ;
+ else
+ value = SvIV(*svp) ;
+
+ if (info->flags & DB_FIXEDLEN) {
+ info->re_pad = value ;
+ info->flags |= DB_PAD ;
+ }
+ else {
+ info->re_delim = value ;
+ info->flags |= DB_DELIMITER ;
+ }
+
+ }
+#else
+ if (svp && SvOK(*svp))
+ {
+ if (SvPOK(*svp))
+ info->db_RE_bval = (u_char)*SvPV(*svp, n_a) ;
+ else
+ info->db_RE_bval = (u_char)(unsigned long) SvIV(*svp) ;
+ DB_flags(info->flags, DB_DELIMITER) ;
+
+ }
+ else
+ {
+ if (info->db_RE_flags & R_FIXEDLEN)
+ info->db_RE_bval = (u_char) ' ' ;
+ else
+ info->db_RE_bval = (u_char) '\n' ;
+ DB_flags(info->flags, DB_DELIMITER) ;
+ }
+#endif
+
+#ifdef DB_RENUMBER
+ info->flags |= DB_RENUMBER ;
+#endif
+
+ PrintRecno(info) ;
+ }
+ else
+ croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
+ }
+
+
+ /* OS2 Specific Code */
+#ifdef OS2
+#ifdef __EMX__
+ flags |= O_BINARY;
+#endif /* __EMX__ */
+#endif /* OS2 */
+
+#ifdef DB_VERSION_MAJOR
+
+ {
+ int Flags = 0 ;
+ int status ;
+
+ /* Map 1.x flags to 2.x flags */
+ if ((flags & O_CREAT) == O_CREAT)
+ Flags |= DB_CREATE ;
+
+#if O_RDONLY == 0
+ if (flags == O_RDONLY)
+#else
+ if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
+#endif
+ Flags |= DB_RDONLY ;
+
+#ifdef O_TRUNC
+ if ((flags & O_TRUNC) == O_TRUNC)
+ Flags |= DB_TRUNCATE ;
+#endif
+
+ status = db_open(name, RETVAL->type, Flags, mode, NULL, openinfo, &RETVAL->dbp) ;
+ if (status == 0)
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor) ;
+#else
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
+ 0) ;
+#endif
+
+ if (status)
+ RETVAL->dbp = NULL ;
+
+ }
+#else
+
+#if defined(DB_LIBRARY_COMPATIBILITY_API) && DB_VERSION_MAJOR > 2
+ RETVAL->dbp = __db185_open(name, flags, mode, RETVAL->type, openinfo) ;
+#else
+ RETVAL->dbp = dbopen(name, flags, mode, RETVAL->type, openinfo) ;
+#endif /* DB_LIBRARY_COMPATIBILITY_API */
+
+#endif
+
+ return (RETVAL) ;
+
+#else /* Berkeley DB Version > 2 */
+
+ SV ** svp;
+ HV * action ;
+ DB_File RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
+ DB * dbp ;
+ STRLEN n_a;
+ int status ;
+ dMY_CXT;
+
+/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
+ Zero(RETVAL, 1, DB_File_type) ;
+
+ /* Default to HASH */
+ RETVAL->filtering = 0 ;
+ RETVAL->filter_fetch_key = RETVAL->filter_store_key =
+ RETVAL->filter_fetch_value = RETVAL->filter_store_value =
+ RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
+ RETVAL->type = DB_HASH ;
+
+ /* DGH - Next line added to avoid SEGV on existing hash DB */
+ CurrentDB = RETVAL;
+
+ /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
+ RETVAL->in_memory = (name == NULL) ;
+
+ status = db_create(&RETVAL->dbp, NULL,0) ;
+ /* printf("db_create returned %d %s\n", status, db_strerror(status)) ; */
+ if (status) {
+ RETVAL->dbp = NULL ;
+ return (RETVAL) ;
+ }
+ dbp = RETVAL->dbp ;
+
+ if (sv)
+ {
+ if (! SvROK(sv) )
+ croak ("type parameter is not a reference") ;
+
+ svp = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
+ if (svp && SvOK(*svp))
+ action = (HV*) SvRV(*svp) ;
+ else
+ croak("internal error") ;
+
+ if (sv_isa(sv, "DB_File::HASHINFO"))
+ {
+
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_HASH database") ;
+
+ RETVAL->type = DB_HASH ;
+
+ svp = hv_fetch(action, "hash", 4, FALSE);
+
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_h_hash(dbp, hash_cb) ;
+ RETVAL->hash = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "ffactor", 7, FALSE);
+ if (svp)
+ (void)dbp->set_h_ffactor(dbp, my_SvUV32(*svp)) ;
+
+ svp = hv_fetch(action, "nelem", 5, FALSE);
+ if (svp)
+ (void)dbp->set_h_nelem(dbp, my_SvUV32(*svp)) ;
+
+ svp = hv_fetch(action, "bsize", 5, FALSE);
+ if (svp)
+ (void)dbp->set_pagesize(dbp, my_SvUV32(*svp));
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp)
+ (void)dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp)
+ (void)dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
+
+ PrintHash(info) ;
+ }
+ else if (sv_isa(sv, "DB_File::BTREEINFO"))
+ {
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_BTREE database");
+
+ RETVAL->type = DB_BTREE ;
+
+ svp = hv_fetch(action, "compare", 7, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_bt_compare(dbp, btree_compare) ;
+ RETVAL->compare = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "prefix", 6, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_bt_prefix(dbp, btree_prefix) ;
+ RETVAL->prefix = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ if (svp)
+ (void)dbp->set_flags(dbp, my_SvUV32(*svp)) ;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp)
+ (void)dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ if (svp)
+ (void)dbp->set_pagesize(dbp, my_SvUV32(*svp)) ;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp)
+ (void)dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
+
+ PrintBtree(info) ;
+
+ }
+ else if (sv_isa(sv, "DB_File::RECNOINFO"))
+ {
+ int fixed = FALSE ;
+
+ if (isHASH)
+ croak("DB_File can only tie an array to a DB_RECNO database");
+
+ RETVAL->type = DB_RECNO ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ if (svp) {
+ int flags = SvIV(*svp) ;
+ /* remove FIXDLEN, if present */
+ if (flags & DB_FIXEDLEN) {
+ fixed = TRUE ;
+ flags &= ~DB_FIXEDLEN ;
+ }
+ }
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp) {
+ status = dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
+ }
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ if (svp) {
+ status = dbp->set_pagesize(dbp, my_SvUV32(*svp)) ;
+ }
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp) {
+ status = dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
+ }
+
+ svp = hv_fetch(action, "bval", 4, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ int value ;
+ if (SvPOK(*svp))
+ value = (int)*SvPV(*svp, n_a) ;
+ else
+ value = (int)SvIV(*svp) ;
+
+ if (fixed) {
+ status = dbp->set_re_pad(dbp, value) ;
+ }
+ else {
+ status = dbp->set_re_delim(dbp, value) ;
+ }
+
+ }
+
+ if (fixed) {
+ svp = hv_fetch(action, "reclen", 6, FALSE);
+ if (svp) {
+ u_int32_t len = my_SvUV32(*svp) ;
+ status = dbp->set_re_len(dbp, len) ;
+ }
+ }
+
+ /* the tie filename becomes the recno backing source; the file
+    actually opened is the (optional) bfname below */
+ if (name != NULL) {
+ status = dbp->set_re_source(dbp, name) ;
+ name = NULL ;
+ }
+
+ svp = hv_fetch(action, "bfname", 6, FALSE);
+ if (svp && SvOK(*svp)) {
+ char * ptr = SvPV(*svp,n_a) ;
+ name = (char*) n_a ? ptr : NULL ;
+ }
+ else
+ name = NULL ;
+
+
+ status = dbp->set_flags(dbp, (u_int32_t)DB_RENUMBER) ;
+
+ if (flags){
+ (void)dbp->set_flags(dbp, (u_int32_t)flags) ;
+ }
+ PrintRecno(info) ;
+ }
+ else
+ croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
+ }
+
+ {
+ u_int32_t Flags = 0 ;
+ int status ;
+
+ /* Map 1.x flags to 3.x flags */
+ if ((flags & O_CREAT) == O_CREAT)
+ Flags |= DB_CREATE ;
+
+#if O_RDONLY == 0
+ if (flags == O_RDONLY)
+#else
+ if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
+#endif
+ Flags |= DB_RDONLY ;
+
+#ifdef O_TRUNC
+ if ((flags & O_TRUNC) == O_TRUNC)
+ Flags |= DB_TRUNCATE ;
+#endif
+
+#ifdef AT_LEAST_DB_4_1
+ status = (RETVAL->dbp->open)(RETVAL->dbp, NULL, name, NULL, RETVAL->type,
+ Flags, mode) ;
+#else
+ status = (RETVAL->dbp->open)(RETVAL->dbp, name, NULL, RETVAL->type,
+ Flags, mode) ;
+#endif
+ /* printf("open returned %d %s\n", status, db_strerror(status)) ; */
+
+ if (status == 0) {
+ RETVAL->dbp->set_errcall(RETVAL->dbp, db_errcall_cb) ;
+
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
+ 0) ;
+ /* printf("cursor returned %d %s\n", status, db_strerror(status)) ; */
+ }
+
+ if (status)
+ RETVAL->dbp = NULL ;
+
+ }
+
+ return (RETVAL) ;
+
+#endif /* Berkeley DB Version > 2 */
+
+} /* ParseOpenInfo */
+
+
+#include "constants.h"
+
+MODULE = DB_File PACKAGE = DB_File PREFIX = db_
+
+INCLUDE: constants.xs
+
+BOOT:
+ {
+ SV * sv_err = perl_get_sv(ERR_BUFF, GV_ADD|GV_ADDMULTI) ;
+ MY_CXT_INIT;
+ __getBerkeleyDBInfo() ;
+
+ DBT_clear(empty) ;
+ empty.data = &zero ;
+ empty.size = sizeof(recno_t) ;
+ }
+
+
+
+DB_File
+db_DoTie_(isHASH, dbtype, name=undef, flags=O_CREAT|O_RDWR, mode=0666, type=DB_HASH)
+ int isHASH
+ char * dbtype
+ int flags
+ int mode
+ CODE:
+ {
+ char * name = (char *) NULL ;
+ SV * sv = (SV *) NULL ;
+ STRLEN n_a;
+
+ if (items >= 3 && SvOK(ST(2)))
+ name = (char*) SvPV(ST(2), n_a) ;
+
+ if (items == 6)
+ sv = ST(5) ;
+
+ RETVAL = ParseOpenInfo(aTHX_ isHASH, name, flags, mode, sv) ;
+ if (RETVAL->dbp == NULL)
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+int
+db_DESTROY(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ INIT:
+ CurrentDB = db ;
+ Trace(("DESTROY %p\n", db));
+ CLEANUP:
+ Trace(("DESTROY %p done\n", db));
+ if (db->hash)
+ SvREFCNT_dec(db->hash) ;
+ if (db->compare)
+ SvREFCNT_dec(db->compare) ;
+ if (db->prefix)
+ SvREFCNT_dec(db->prefix) ;
+ if (db->filter_fetch_key)
+ SvREFCNT_dec(db->filter_fetch_key) ;
+ if (db->filter_store_key)
+ SvREFCNT_dec(db->filter_store_key) ;
+ if (db->filter_fetch_value)
+ SvREFCNT_dec(db->filter_fetch_value) ;
+ if (db->filter_store_value)
+ SvREFCNT_dec(db->filter_store_value) ;
+ safefree(db) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+#endif
+
+
+int
+db_DELETE(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ INIT:
+ CurrentDB = db ;
+
+
+int
+db_EXISTS(db, key)
+ DB_File db
+ DBTKEY key
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = (((db->dbp)->get)(db->dbp, TXN &key, &value, 0) == 0) ;
+ }
+ OUTPUT:
+ RETVAL
+
+void
+db_FETCH(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ PREINIT:
+ dMY_CXT ;
+ int RETVAL ;
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = db_get(db, key, value, flags) ;
+ ST(0) = sv_newmortal();
+ OutputValue(ST(0), value)
+ }
+
+int
+db_STORE(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ INIT:
+ CurrentDB = db ;
+
+
+void
+db_FIRSTKEY(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT ;
+ int RETVAL ;
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = do_SEQ(db, key, value, R_FIRST) ;
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key) ;
+ }
+
+void
+db_NEXTKEY(db, key)
+ DB_File db
+ DBTKEY key = NO_INIT
+ PREINIT:
+ dMY_CXT ;
+ int RETVAL ;
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = do_SEQ(db, key, value, R_NEXT) ;
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key) ;
+ }
+
+#
+# These would be nice for RECNO
+#
+
+int
+unshift(db, ...)
+ DB_File db
+ ALIAS: UNSHIFT = 1
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ int i ;
+ int One ;
+ STRLEN n_a;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+#ifdef DB_VERSION_MAJOR
+ /* get the first value */
+ RETVAL = do_SEQ(db, key, value, DB_FIRST) ;
+ RETVAL = 0 ;
+#else
+ RETVAL = -1 ;
+#endif
+ for (i = items-1 ; i > 0 ; --i)
+ {
+ value.data = SvPV(ST(i), n_a) ;
+ value.size = n_a ;
+ One = 1 ;
+ key.data = &One ;
+ key.size = sizeof(int) ;
+#ifdef DB_VERSION_MAJOR
+ RETVAL = (db->cursor->c_put)(db->cursor, &key, &value, DB_BEFORE) ;
+#else
+ RETVAL = (db->dbp->put)(db->dbp, &key, &value, R_IBEFORE) ;
+#endif
+ if (RETVAL != 0)
+ break;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+void
+pop(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ ALIAS: POP = 1
+ PREINIT:
+ I32 RETVAL;
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+
+ /* First get the final value */
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+ ST(0) = sv_newmortal();
+ /* Now delete it */
+ if (RETVAL == 0)
+ {
+ /* the call to del will trash value, so take a copy now */
+ OutputValue(ST(0), value) ;
+ RETVAL = db_del(db, key, R_CURSOR) ;
+ if (RETVAL != 0)
+ sv_setsv(ST(0), &PL_sv_undef);
+ }
+ }
+
+void
+shift(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ ALIAS: SHIFT = 1
+ PREINIT:
+ I32 RETVAL;
+ CODE:
+ {
+ DBT value ;
+ DBTKEY key ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ /* get the first value */
+ RETVAL = do_SEQ(db, key, value, R_FIRST) ;
+ ST(0) = sv_newmortal();
+ /* Now delete it */
+ if (RETVAL == 0)
+ {
+ /* the call to del will trash value, so take a copy now */
+ OutputValue(ST(0), value) ;
+ RETVAL = db_del(db, key, R_CURSOR) ;
+ if (RETVAL != 0)
+ sv_setsv (ST(0), &PL_sv_undef) ;
+ }
+ }
+
+
+I32
+push(db, ...)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ ALIAS: PUSH = 1
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ DB * Db = db->dbp ;
+ int i ;
+ STRLEN n_a;
+ int keyval ;
+
+ DBT_flags(key) ;
+ DBT_flags(value) ;
+ CurrentDB = db ;
+ /* Set the Cursor to the Last element */
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+#ifndef DB_VERSION_MAJOR
+ if (RETVAL >= 0)
+#endif
+ {
+ if (RETVAL == 0)
+ keyval = *(int*)key.data ;
+ else
+ keyval = 0 ;
+ for (i = 1 ; i < items ; ++i)
+ {
+ value.data = SvPV(ST(i), n_a) ;
+ value.size = n_a ;
+ ++ keyval ;
+ key.data = &keyval ;
+ key.size = sizeof(int) ;
+ RETVAL = (Db->put)(Db, TXN &key, &value, 0) ;
+ if (RETVAL != 0)
+ break;
+ }
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+I32
+length(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT;
+ ALIAS: FETCHSIZE = 1
+ CODE:
+ CurrentDB = db ;
+ RETVAL = GetArrayLength(aTHX_ db) ;
+ OUTPUT:
+ RETVAL
+
+
+#
+# Now provide an interface to the rest of the DB functionality
+#
+
+int
+db_del(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_del(db, key, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+
+
+int
+db_get(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value = NO_INIT
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ RETVAL = db_get(db, key, value, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ value
+
+int
+db_put(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_put(db, key, value, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_KEYEXIST)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ key if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) OutputKey(ST(1), key);
+
+int
+db_fd(db)
+ DB_File db
+ PREINIT:
+ dMY_CXT ;
+ CODE:
+ CurrentDB = db ;
+#ifdef DB_VERSION_MAJOR
+ RETVAL = -1 ;
+ {
+ int status = 0 ;
+ status = (db->in_memory
+ ? -1
+ : ((db->dbp)->fd)(db->dbp, &RETVAL) ) ;
+ if (status != 0)
+ RETVAL = -1 ;
+ }
+#else
+ RETVAL = (db->in_memory
+ ? -1
+ : ((db->dbp)->fd)(db->dbp) ) ;
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+db_sync(db, flags=0)
+ DB_File db
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_sync(db, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+#endif
+ OUTPUT:
+ RETVAL
+
+
+int
+db_seq(db, key, value, flags)
+ DB_File db
+ DBTKEY key
+ DBT value = NO_INIT
+ u_int flags
+ PREINIT:
+ dMY_CXT;
+ CODE:
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ RETVAL = db_seq(db, key, value, flags);
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ key
+ value
+
+SV *
+filter_fetch_key(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_fetch_key, code) ;
+
+SV *
+filter_store_key(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_store_key, code) ;
+
+SV *
+filter_fetch_value(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_fetch_value, code) ;
+
+SV *
+filter_store_value(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ DBM_setFilter(db->filter_store_value, code) ;
+
diff --git a/storage/bdb/perl/DB_File/DB_File_BS b/storage/bdb/perl/DB_File/DB_File_BS
new file mode 100644
index 00000000000..9282c498811
--- /dev/null
+++ b/storage/bdb/perl/DB_File/DB_File_BS
@@ -0,0 +1,6 @@
+# NeXT needs /usr/lib/libposix.a to load along with DB_File.so
+if ( $dlsrc eq "dl_next.xs" ) {
+ @DynaLoader::dl_resolve_using = ( '/usr/lib/libposix.a' );
+}
+
+1;
diff --git a/storage/bdb/perl/DB_File/MANIFEST b/storage/bdb/perl/DB_File/MANIFEST
new file mode 100644
index 00000000000..b3e1a7bd85b
--- /dev/null
+++ b/storage/bdb/perl/DB_File/MANIFEST
@@ -0,0 +1,30 @@
+Changes
+DB_File.pm
+DB_File.xs
+DB_File_BS
+MANIFEST
+Makefile.PL
+README
+config.in
+dbinfo
+fallback.h
+fallback.xs
+hints/dynixptx.pl
+hints/sco.pl
+patches/5.004
+patches/5.004_01
+patches/5.004_02
+patches/5.004_03
+patches/5.004_04
+patches/5.004_05
+patches/5.005
+patches/5.005_01
+patches/5.005_02
+patches/5.005_03
+patches/5.6.0
+ppport.h
+t/db-btree.t
+t/db-hash.t
+t/db-recno.t
+typemap
+version.c
diff --git a/storage/bdb/perl/DB_File/Makefile.PL b/storage/bdb/perl/DB_File/Makefile.PL
new file mode 100644
index 00000000000..4c1565d8d01
--- /dev/null
+++ b/storage/bdb/perl/DB_File/Makefile.PL
@@ -0,0 +1,330 @@
+#! perl -w
+
+use strict ;
+use ExtUtils::MakeMaker 5.16 ;
+use Config ;
+
+die "DB_File needs Perl 5.004_05 or better. This is $]\n"
+ if $] <= 5.00404;
+
+my $VER_INFO ;
+my $LIB_DIR ;
+my $INC_DIR ;
+my $DB_NAME ;
+my $LIBS ;
+my $COMPAT185 = "" ;
+
+ParseCONFIG() ;
+
+my @files = ('DB_File.pm', glob "t/*.t") ;
+UpDowngrade(@files);
+
+if (defined $DB_NAME)
+ { $LIBS = $DB_NAME }
+else {
+ if ($^O eq 'MSWin32')
+ { $LIBS = '-llibdb' }
+ else
+ { $LIBS = '-ldb' }
+}
+
+# Solaris is special.
+#$LIBS .= " -lthread" if $^O eq 'solaris' ;
+
+# AIX is special.
+$LIBS .= " -lpthread" if $^O eq 'aix' ;
+
+# OS2 is a special case, so check for it now.
+my $OS2 = "" ;
+$OS2 = "-DOS2" if $Config{'osname'} eq 'os2' ;
+
+WriteMakefile(
+ NAME => 'DB_File',
+ LIBS => ["-L${LIB_DIR} $LIBS"],
+ #MAN3PODS => {}, # Pods will be built by installman.
+ INC => "-I$INC_DIR",
+ VERSION_FROM => 'DB_File.pm',
+ XSPROTOARG => '-noprototypes',
+ DEFINE => "-D_NOT_CORE $OS2 $VER_INFO $COMPAT185",
+ OBJECT => 'version$(OBJ_EXT) DB_File$(OBJ_EXT)',
+ #OPTIMIZE => '-g',
+ 'depend' => { 'Makefile' => 'config.in',
+ 'version$(OBJ_EXT)' => 'version.c'},
+ 'clean' => { FILES => 'constants.h constants.xs' },
+ 'macro' => { INSTALLDIRS => 'perl', my_files => "@files" },
+ 'dist' => { COMPRESS => 'gzip', SUFFIX => 'gz',
+ DIST_DEFAULT => 'MyDoubleCheck tardist'},
+ );
+
+
+my @names = qw(
+ BTREEMAGIC
+ BTREEVERSION
+ DB_LOCK
+ DB_SHMEM
+ DB_TXN
+ HASHMAGIC
+ HASHVERSION
+ MAX_PAGE_NUMBER
+ MAX_PAGE_OFFSET
+ MAX_REC_NUMBER
+ RET_ERROR
+ RET_SPECIAL
+ RET_SUCCESS
+ R_CURSOR
+ R_DUP
+ R_FIRST
+ R_FIXEDLEN
+ R_IAFTER
+ R_IBEFORE
+ R_LAST
+ R_NEXT
+ R_NOKEY
+ R_NOOVERWRITE
+ R_PREV
+ R_RECNOSYNC
+ R_SETCURSOR
+ R_SNAPSHOT
+ __R_UNUSED
+ );
+
+if (eval {require ExtUtils::Constant; 1}) {
+ # Check the constants above all appear in @EXPORT in DB_File.pm
+ my %names = map { $_, 1} @names;
+ open F, "<DB_File.pm" or die "Cannot open DB_File.pm: $!\n";
+ while (<F>)
+ {
+ last if /^\s*\@EXPORT\s+=\s+qw\(/ ;
+ }
+
+ while (<F>)
+ {
+ last if /^\s*\)/ ;
+ /(\S+)/ ;
+ delete $names{$1} if defined $1 ;
+ }
+ close F ;
+
+ if ( keys %names )
+ {
+ my $missing = join ("\n\t", sort keys %names) ;
+ die "The following names are missing from \@EXPORT in DB_File.pm\n" .
+ "\t$missing\n" ;
+ }
+
+
+ ExtUtils::Constant::WriteConstants(
+ NAME => 'DB_File',
+ NAMES => \@names,
+ C_FILE => 'constants.h',
+ XS_FILE => 'constants.xs',
+
+ );
+}
+else {
+ use File::Copy;
+ copy ('fallback.h', 'constants.h')
+ or die "Can't copy fallback.h to constants.h: $!";
+ copy ('fallback.xs', 'constants.xs')
+ or die "Can't copy fallback.xs to constants.xs: $!";
+}
+
+exit;
+
+
+sub MY::postamble { <<'EOM' } ;
+
+MyDoubleCheck:
+ @echo Checking config.in is setup for a release
+ @(grep "^LIB.*/usr/local/BerkeleyDB" config.in && \
+ grep "^INCLUDE.*/usr/local/BerkeleyDB" config.in && \
+ grep "^#DBNAME.*" config.in) >/dev/null || \
+ (echo config.in needs fixing ; exit 1)
+ @echo config.in is ok
+ @echo
+ @echo Checking DB_File.xs is ok for a release.
+ @(perl -ne ' exit 1 if /^\s*#\s*define\s+TRACE/ ; ' DB_File.xs || \
+ (echo DB_File.xs needs fixing ; exit 1))
+ @echo DB_File.xs is ok
+ @echo
+ @echo Checking for $$^W in files: $(my_files)
+ @perl -ne ' \
+ exit 1 if /^\s*local\s*\(\s*\$$\^W\s*\)/;' $(my_files) || \
+ (echo found unexpected $$^W ; exit 1)
+ @echo No $$^W found.
+ @echo
+ @echo Checking for 'use vars' in files: $(my_files)
+ @perl -ne ' \
+ exit 0 if /^__(DATA|END)__/; \
+ exit 1 if /^\s*use\s+vars/;' $(my_files) || \
+ (echo found unexpected "use vars"; exit 1)
+ @echo No 'use vars' found.
+ @echo
+ @echo All files are OK for a release.
+ @echo
+
+EOM
+
+
+
+sub ParseCONFIG
+{
+ my ($k, $v) ;
+ my @badkey = () ;
+ my %Info = () ;
+ my @Options = qw( INCLUDE LIB PREFIX HASH DBNAME COMPAT185 ) ;
+ my %ValidOption = map {$_, 1} @Options ;
+ my %Parsed = %ValidOption ;
+ my $CONFIG = 'config.in' ;
+
+ print "Parsing $CONFIG...\n" ;
+
+ # DBNAME & COMPAT185 are optional, so pretend they have
+ # been parsed.
+ delete $Parsed{'DBNAME'} ;
+ delete $Parsed{'COMPAT185'} ;
+ $Info{COMPAT185} = "No" ;
+
+
+ open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
+ while (<F>) {
+ s/^\s*|\s*$//g ;
+ next if /^\s*$/ or /^\s*#/ ;
+ s/\s*#\s*$// ;
+
+ ($k, $v) = split(/\s+=\s+/, $_, 2) ;
+ $k = uc $k ;
+ if ($ValidOption{$k}) {
+ delete $Parsed{$k} ;
+ $Info{$k} = $v ;
+ }
+ else {
+ push(@badkey, $k) ;
+ }
+ }
+ close F ;
+
+ print "Unknown keys in $CONFIG ignored [@badkey]\n"
+ if @badkey ;
+
+ # check parsed values
+ my @missing = () ;
+ die "The following keys are missing from $CONFIG file: [@missing]\n"
+ if @missing = keys %Parsed ;
+
+ $INC_DIR = $ENV{'DB_FILE_INCLUDE'} || $Info{'INCLUDE'} ;
+ $LIB_DIR = $ENV{'DB_FILE_LIB'} || $Info{'LIB'} ;
+ $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
+ $COMPAT185 = "-DCOMPAT185 -DDB_LIBRARY_COMPATIBILITY_API"
+ if (defined $ENV{'DB_FILE_COMPAT185'} &&
+ $ENV{'DB_FILE_COMPAT185'} =~ /^\s*(on|true|1)\s*$/i) ||
+ $Info{'COMPAT185'} =~ /^\s*(on|true|1)\s*$/i ;
+ my $PREFIX = $Info{'PREFIX'} ;
+ my $HASH = $Info{'HASH'} ;
+
+ $VER_INFO = "-DmDB_Prefix_t=${PREFIX} -DmDB_Hash_t=${HASH}" ;
+
+ print <<EOM if 0 ;
+ INCLUDE [$INC_DIR]
+ LIB [$LIB_DIR]
+ HASH [$HASH]
+ PREFIX [$PREFIX]
+ DBNAME [$DB_NAME]
+
+EOM
+
+ print "Looks Good.\n" ;
+
+}
+
+sub UpDowngrade
+{
+ my @files = @_ ;
+
+ # our is stable from 5.6.0 onward
+ # warnings is stable from 5.6.1 onward
+
+ # Note: this code assumes that each statement it modifies is not
+ # split across multiple lines.
+
+
+ my $warn_sub ;
+ my $our_sub ;
+
+ if ($] < 5.006001) {
+ # From: use|no warnings "blah"
+ # To: local ($^W) = 1; # use|no warnings "blah"
+ #
+ # and
+ #
+ # From: warnings::warnif(x,y);
+ # To: $^W && carp(y); # warnif -- x
+ $warn_sub = sub {
+ s/^(\s*)(no\s+warnings)/${1}local (\$^W) = 0; #$2/ ;
+ s/^(\s*)(use\s+warnings)/${1}local (\$^W) = 1; #$2/ ;
+
+ s/^(\s*)warnings::warnif\s*\((.*?)\s*,\s*(.*?)\)\s*;/${1}\$^W && carp($3); # warnif - $2/ ;
+ };
+ }
+ else {
+ # From: local ($^W) = 1; # use|no warnings "blah"
+ # To: use|no warnings "blah"
+ #
+ # and
+ #
+ # From: $^W && carp(y); # warnif -- x
+ # To: warnings::warnif(x,y);
+ $warn_sub = sub {
+ s/^(\s*)local\s*\(\$\^W\)\s*=\s*\d+\s*;\s*#\s*((no|use)\s+warnings.*)/$1$2/ ;
+ s/^(\s*)\$\^W\s+\&\&\s*carp\s*\((.*?)\)\s*;\s*#\s*warnif\s*-\s*(.*)/${1}warnings::warnif($3, $2);/ ;
+ };
+ }
+
+ if ($] < 5.006000) {
+ $our_sub = sub {
+ if ( /^(\s*)our\s+\(\s*([^)]+\s*)\)/ ) {
+ my $indent = $1;
+ my $vars = join ' ', split /\s*,\s*/, $2;
+ $_ = "${indent}use vars qw($vars);\n";
+ }
+ };
+ }
+ else {
+ $our_sub = sub {
+ if ( /^(\s*)use\s+vars\s+qw\((.*?)\)/ ) {
+ my $indent = $1;
+ my $vars = join ', ', split ' ', $2;
+ $_ = "${indent}our ($vars);\n";
+ }
+ };
+ }
+
+ foreach (@files)
+ { doUpDown($our_sub, $warn_sub, $_) }
+}
+
+
+sub doUpDown
+{
+ my $our_sub = shift;
+ my $warn_sub = shift;
+
+ local ($^I) = ".bak" ;
+ local (@ARGV) = shift;
+
+ while (<>)
+ {
+ print, last if /^__(END|DATA)__/ ;
+
+ &{ $our_sub }();
+ &{ $warn_sub }();
+ print ;
+ }
+
+ return if eof ;
+
+ while (<>)
+ { print }
+}
+
+# end of file Makefile.PL
diff --git a/storage/bdb/perl/DB_File/README b/storage/bdb/perl/DB_File/README
new file mode 100644
index 00000000000..b09aa9d8aee
--- /dev/null
+++ b/storage/bdb/perl/DB_File/README
@@ -0,0 +1,458 @@
+ DB_File
+
+ Version 1.805
+
+ 1st Sep 2002
+
+ Copyright (c) 1995-2002 Paul Marquess. All rights reserved. This
+ program is free software; you can redistribute it and/or modify
+ it under the same terms as Perl itself.
+
+
+IMPORTANT NOTICE
+================
+
+If you are using the locking technique described in older versions of
+DB_File, please read the section called "Locking: The Trouble with fd"
+in DB_File.pm immediately. The locking method has been found to be
+unsafe. You risk corrupting your data if you continue to use it.
+
+DESCRIPTION
+-----------
+
+DB_File is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 1. (DB_File can be built
+with version 2, 3 or 4 of Berkeley DB, but it will only support the 1.x
+features).
+
+If you want to make use of the new features available in Berkeley DB
+2.x, 3.x or 4.x, use the Perl module BerkeleyDB instead.
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. DB_File provides an interface to all three
+of the database types (hash, btree and recno) currently supported by
+Berkeley DB.
+
+For further details see the documentation included at the end of the
+file DB_File.pm.
+
+PREREQUISITES
+-------------
+
+Before you can build DB_File you must have the following installed on
+your system:
+
+ * Perl 5.004_05 or greater.
+
+ * Berkeley DB.
+
+ The official web site for Berkeley DB is http://www.sleepycat.com.
+ The latest version of Berkeley DB is always available there. It
+ is recommended that you use the most recent version available at
+ the Sleepycat site.
+
+ The one exception to this advice is where you want to use DB_File
+ to access database files created by a third-party application, like
+ Sendmail or Netscape. In these cases you must build DB_File with a
+ compatible version of Berkeley DB.
+
+ If you want to use Berkeley DB 2.x, you must have version 2.3.4
+ or greater. If you want to use Berkeley DB 3.x or 4.x, any version
+ will do. For Berkeley DB 1.x, use either version 1.85 or 1.86.
+
+
+BUILDING THE MODULE
+-------------------
+
+Assuming you have met all the prerequisites, building the module should
+be relatively straightforward.
+
+Step 1 : If you are running either Solaris 2.5 or HP-UX 10 and want
+ to use Berkeley DB version 2, 3 or 4, read either the Solaris Notes
+ or HP-UX Notes sections below. If you are running Linux please
+ read the Linux Notes section before proceeding.
+
+Step 2 : Edit the file config.in to suit your local installation.
+ Instructions are given in the file.
+
+Step 3 : Build and test the module using this sequence of commands:
+
+ perl Makefile.PL
+ make
+ make test
+
+
+ NOTE:
+ If you have a very old version of Berkeley DB (i.e. pre 1.85),
+ three of the tests in the recno test harness may fail (tests 51,
+ 53 and 55). You can safely ignore the errors if you're never
+ going to use the broken functionality (recno databases with a
+ modified bval). Otherwise you'll have to upgrade your DB
+ library.
+
+
+INSTALLATION
+------------
+
+ make install
+
+
+TROUBLESHOOTING
+===============
+
+Here are some of the common problems people encounter when building
+DB_File.
+
+Missing db.h or libdb.a
+-----------------------
+
+If you get an error like this:
+
+ cc -c -I/usr/local/include -Dbool=char -DHAS_BOOL
+ -O2 -DVERSION=\"1.64\" -DXS_VERSION=\"1.64\" -fpic
+ -I/usr/local/lib/perl5/i586-linux/5.00404/CORE -DmDB_Prefix_t=size_t
+ -DmDB_Hash_t=u_int32_t DB_File.c
+ DB_File.xs:101: db.h: No such file or directory
+
+or this:
+
+ LD_RUN_PATH="/lib" cc -o blib/arch/auto/DB_File/DB_File.so -shared
+ -L/usr/local/lib DB_File.o -L/usr/local/lib -ldb
+ ld: cannot open -ldb: No such file or directory
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB installed, but it isn't in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables to point
+ to the directories where libdb.a and db.h are installed.
+
+
+Undefined symbol db_version
+---------------------------
+
+DB_File seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /usr/bin/perl5.00404 -I./blib/arch -I./blib/lib
+ -I/usr/local/lib/perl5/i586-linux/5.00404 -I/usr/local/lib/perl5 -e 'use
+ Test::Harness qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/db-btree..........Can't load './blib/arch/auto/DB_File/DB_File.so' for
+ module DB_File: ./blib/arch/auto/DB_File/DB_File.so: undefined symbol:
+ db_version at /usr/local/lib/perl5/i586-linux/5.00404/DynaLoader.pm
+ line 166.
+
+ at t/db-btree.t line 21
+ BEGIN failed--compilation aborted at t/db-btree.t line 21.
+ dubious Test returned status 2 (wstat 512, 0x200)
+
+This error usually happens when you have both version 1 and version
+2 of Berkeley DB installed on your system and DB_File attempts to
+build using the db.h for Berkeley DB version 2 and the version 1
+library. Unfortunately the two versions aren't compatible with each
+other. The undefined symbol error is actually caused because Berkeley
+DB version 1 doesn't have the symbol db_version.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want DB_File to use.
+
+
+Undefined symbol dbopen
+-----------------------
+
+DB_File seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ ...
+ t/db-btree..........Can't load 'blib/arch/auto/DB_File/DB_File.so' for
+ module DB_File: blib/arch/auto/DB_File/DB_File.so: undefined symbol:
+ dbopen at /usr/local/lib/perl5/5.6.1/i586-linux/DynaLoader.pm line 206.
+ at t/db-btree.t line 23
+ Compilation failed in require at t/db-btree.t line 23.
+ ...
+
+This error usually happens when you have both version 1 and a more recent
+version of Berkeley DB installed on your system and DB_File attempts
+to build using the db.h for Berkeley DB version 1 and the newer version
+library. Unfortunately the two versions aren't compatible with each
+other. The undefined symbol error is actually caused because versions
+of Berkeley DB newer than version 1 don't have the symbol dbopen.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want DB_File to use.
+
+
+Incompatible versions of db.h and libdb
+---------------------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00560 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_60/lib/5.00560/i586-linux
+ -I/home/paul/perl/install/5.005_60/lib/5.00560 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/db-btree..........
+ DB_File needs compatible versions of libdb & db.h
+ you have db.h version 2.3.7 and libdb version 2.7.5
+ BEGIN failed--compilation aborted at t/db-btree.t line 21.
+ ...
+
+Another variation on the theme of having two versions of Berkeley DB on
+your system.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+ If you are running Linux, please read the Linux Notes section
+ below.
+
+
+Linux Notes
+-----------
+
+Newer versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
+that has version 2.x of Berkeley DB linked into it. This makes it
+difficult to build this module with anything other than the version of
+Berkeley DB that shipped with your Linux release. If you do try to use
+a different version of Berkeley DB you will most likely get the error
+described in the "Incompatible versions of db.h and libdb" section of
+this file.
+
+To make matters worse, prior to Perl 5.6.1, the perl binary itself
+*always* included the Berkeley DB library.
+
+If you want to use a newer version of Berkeley DB with this module, the
+easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
+(or better).
+
+There are two approaches you can use to get older versions of Perl to
+work with specific versions of Berkeley DB. Both have their advantages
+and disadvantages.
+
+The first approach will only work when you want to build a version of
+Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
+Berkeley DB 2.x, you must use the next approach. This approach involves
+rebuilding your existing version of Perl after applying an unofficial
+patch. The "patches" directory in this module's source distribution
+contains a number of patch files. There is one patch file for every
+stable version of Perl since 5.004. Apply the appropriate patch to your
+Perl source tree before re-building and installing Perl from scratch.
+For example, assuming you are in the top-level source directory for
+Perl 5.6.0, the command below will apply the necessary patch. Remember
+to replace the path shown below with one that points to this module's
+patches directory.
+
+ patch -p1 -N </path/to/DB_File/patches/5.6.0
+
+Now rebuild & install perl. You should now have a perl binary that can
+be used to build this module. Follow the instructions in "BUILDING THE
+MODULE", remembering to set the INCLUDE and LIB variables in config.in.
+
+
+The second approach will work with both Berkeley DB 2.x and 3.x.
+Start by building Berkeley DB as a shared library. This is from
+the Berkeley DB build instructions:
+
+ Building Shared Libraries for the GNU GCC compiler
+
+ If you're using gcc and there's no better shared library example for
+ your architecture, the following shared library build procedure will
+ probably work.
+
+ Add the -fpic option to the CFLAGS value in the Makefile.
+
+ Rebuild all of your .o files. This will create a Berkeley DB library
+ that contains .o files with PIC code. To build the shared library,
+ then take the following steps in the library build directory:
+
+ % mkdir tmp
+ % cd tmp
+ % ar xv ../libdb.a
+ % gcc -shared -o libdb.so *.o
+ % mv libdb.so ..
+ % cd ..
+ % rm -rf tmp
+
+ Note, you may have to change the gcc line depending on the
+ requirements of your system.
+
+ The file libdb.so is your shared library
+
+Once you have built libdb.so, you will need to store it somewhere safe.
+
+ cp libdb.so /usr/local/BerkeleyDB/lib
+
+If you now set the LD_PRELOAD environment variable to point to this
+shared library, Perl will use it instead of the version of Berkeley DB
+that shipped with your Linux distribution.
+
+ export LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so
+
+Finally follow the instructions in "BUILDING THE MODULE" to build,
+test and install this module. Don't forget to set the INCLUDE and LIB
+variables in config.in.
+
+Remember, you will need to have the LD_PRELOAD variable set anytime you
+want to use Perl with Berkeley DB. Also note that if you have LD_PRELOAD
+permanently set it will affect ALL commands you execute. This may be a
+problem if you run any commands that access a database created by the
+version of Berkeley DB that shipped with your Linux distribution.
+
+
+Solaris Notes
+-------------
+
+If you are running Solaris 2.5, and you get this error when you run the
+DB_File test harness:
+
+ libc internal error: _rmutex_unlock: rmutex not held.
+
+you probably need to install a Sun patch. It has been reported that
+Sun patch 103187-25 (or later revisions) fixes this problem.
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
+
+
+HP-UX 10 Notes
+--------------
+
+Some people running HP-UX 10 have reported getting an error like this
+when building DB_File with the native HP-UX compiler.
+
+ ld: (Warning) At least one PA 2.0 object file (DB_File.o) was detected.
+ The linked output may not run on a PA 1.x system.
+ ld: Invalid loader fixup for symbol "$000000A5".
+
+If this is the case for you, Berkeley DB needs to be recompiled with
+the +z or +Z option and the resulting library placed in a .sl file. The
+following steps should do the trick:
+
+ 1: Configure the Berkeley DB distribution with the +z or +Z C compiler
+ flag:
+
+ env "CFLAGS=+z" ../dist/configure ...
+
+ 2: Edit the Berkeley DB Makefile and change:
+
+ "libdb= libdb.a" to "libdb= libdb.sl".
+
+
+ 3: Build and install the Berkeley DB distribution as usual.
+
+HP-UX 11 Notes
+--------------
+
+Some people running the combination of HP-UX 11 and Berkeley DB 2.7.7 have
+reported getting this error when they run the test harness for DB_File
+
+ ...
+ lib/db-btree.........Can't call method "DELETE" on an undefined value at lib/db-btree.t line 216.
+ FAILED at test 26
+ lib/db-hash..........Can't call method "DELETE" on an undefined value at lib/db-hash.t line 183.
+ FAILED at test 22
+ ...
+
+The fix for this is to rebuild and install Berkeley DB with the bigfile
+option disabled.
+
+
+IRIX NOTES
+----------
+
+If you are running IRIX, and want to use Berkeley DB version 1, you can
+get it from http://reality.sgi.com/ariel. It has the patches necessary
+to compile properly on IRIX 5.3.
+
+
+FEEDBACK
+========
+
+How to report a problem with DB_File.
+
+When reporting any problem, I need the information requested below.
+
+ 1. The *complete* output from running this
+
+ perl -V
+
+ Do not edit the output in any way.
+ Note, I want you to run "perl -V" and NOT "perl -v".
+
+ If your perl does not understand the "-V" option it is too
+ old. DB_File needs Perl version 5.00405 or better.
+
+ 2. The version of DB_File you have.
+ If you have successfully installed DB_File, this one-liner will
+ tell you:
+
+ perl -e 'use DB_File; print qq{DB_File ver $DB_File::VERSION\n}'
+
+ If you haven't installed DB_File then search DB_File.pm for a line
+ like this:
+
+ $VERSION = "1.20" ;
+
+ 3. The version of Berkeley DB you are using.
+ If you are using a version older than 1.85, think about upgrading. One
+ point to note if you are considering upgrading Berkeley DB - the
+ file formats for 1.85, 1.86, 2.0, 3.0 & 3.1 are all different.
+
+ If you have successfully installed DB_File, this command will display
+ the version of Berkeley DB it was built with:
+
+ perl -e 'use DB_File; print qq{Berkeley DB ver $DB_File::db_ver\n}'
+
+ 4. A copy of the file config.in from the DB_File main source directory.
+
+ 5. A listing of directories where Berkeley DB is installed.
+ For example, if Berkeley DB is installed in /usr/BerkeleyDB/lib and
+ /usr/BerkeleyDB/include, I need the output from running this
+
+ ls -l /usr/BerkeleyDB/lib
+ ls -l /usr/BerkeleyDB/include
+
+ 6. If you are having problems building DB_File, send me a complete log
+ of what happened. Start by unpacking the DB_File module into a fresh
+ directory and keep a log of all the steps
+
+ [edit config.in, if necessary]
+ perl Makefile.PL
+ make
+ make test TEST_VERBOSE=1
+
+ 7. Now the difficult one. If you think you have found a bug in DB_File
+ and you want me to fix it, you will *greatly* enhance the chances
+ of me being able to track it down by sending me a small
+ self-contained Perl script that illustrates the problem you are
+ encountering. Include a summary of what you think the problem is
+ and a log of what happens when you run the script, in case I can't
+ reproduce your problem on my system. If possible, don't have the
+ script dependent on an existing 20Meg database. If the script you
+ send me can create the database itself then that is preferred.
+
+ I realise that in some cases this is easier said than done, so if
+ you can only reproduce the problem in your existing script, then
+ you can post me that if you want. Just don't expect me to find your
+ problem in a hurry, or at all. :-)
+
+
+CHANGES
+-------
+
+See the Changes file.
+
+Paul Marquess <Paul.Marquess@btinternet.com>
diff --git a/storage/bdb/perl/DB_File/config.in b/storage/bdb/perl/DB_File/config.in
new file mode 100644
index 00000000000..292b09a5fb3
--- /dev/null
+++ b/storage/bdb/perl/DB_File/config.in
@@ -0,0 +1,97 @@
+# Filename: config.in
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+# last modified 9th Sept 1997
+# version 1.55
+
+# 1. Where is the file db.h?
+#
+# Change the path below to point to the directory where db.h is
+# installed on your system.
+
+INCLUDE = /usr/local/BerkeleyDB/include
+#INCLUDE = /usr/local/include
+#INCLUDE = /usr/include
+
+# 2. Where is libdb?
+#
+# Change the path below to point to the directory where libdb is
+# installed on your system.
+
+LIB = /usr/local/BerkeleyDB/lib
+#LIB = /usr/local/lib
+#LIB = /usr/lib
+
+# 3. What version of Berkeley DB have you got?
+#
+# If you have version 2.0 or greater, you can skip this question.
+#
+# If you have Berkeley DB 1.78 or greater you shouldn't have to
+# change the definitions for PREFIX and HASH below.
+#
+# For older versions of Berkeley DB change both PREFIX and HASH to int.
+# Version 1.71, 1.72 and 1.73 are known to need this change.
+#
+# If you don't know what version you have, have a look in the file db.h.
+#
+# Search for the string "DB_VERSION_MAJOR". If it is present, you
+# have Berkeley DB version 2 (or greater).
+#
+# If that didn't work, find the definition of the BTREEINFO typedef.
+# Check the return type from the prefix element. It should look like
+# this in an older copy of db.h:
+#
+# int (*prefix) __P((const DBT *, const DBT *));
+#
+# and like this in a more recent copy:
+#
+# size_t (*prefix) /* prefix function */
+# __P((const DBT *, const DBT *));
+#
+# Change the definition of PREFIX, below, to reflect the return type
+# of the prefix function in your db.h.
+#
+# Now find the definition of the HASHINFO typedef. Check the return
+# type of the hash element. Older versions look like this:
+#
+# int (*hash) __P((const void *, size_t));
+#
+# newer like this:
+#
+# u_int32_t /* hash function */
+# (*hash) __P((const void *, size_t));
+#
+# Change the definition of HASH, below, to reflect the return type of
+# the hash function in your db.h.
+#
+
+PREFIX = size_t
+HASH = u_int32_t
+
+# 4. Is the library called libdb?
+#
+# If you have copies of both 1.x and 2.x Berkeley DB installed on
+# your system it can sometimes be tricky to make sure you are using
+# the correct one. Renaming one (or creating a symbolic link) to
+# include the version number of the library can help.
+#
+# For example, if you have both Berkeley DB 2.3.12 and 1.85 on your
+# system and you want to use the Berkeley DB version 2 library you
+# could rename the version 2 library from libdb.a to libdb-2.3.12.a and
+# change the DBNAME line below to look like this:
+#
+# DBNAME = -ldb-2.3.12
+#
+# That will ensure you are linking the correct version of the DB
+# library.
+#
+# Note: If you are building this module with Win32, -llibdb will be
+# used by default.
+#
+# If you have changed the name of the library, uncomment the line
+# below (by removing the leading #) and edit the line to use the name
+# you have picked.
+
+#DBNAME = -ldb-2.4.10
+
+# end of file config.in
diff --git a/storage/bdb/perl/DB_File/dbinfo b/storage/bdb/perl/DB_File/dbinfo
new file mode 100644
index 00000000000..af2c45facf5
--- /dev/null
+++ b/storage/bdb/perl/DB_File/dbinfo
@@ -0,0 +1,112 @@
+#!/usr/local/bin/perl
+
+# Name: dbinfo -- identify berkeley DB version used to create
+# a database file
+#
+# Author: Paul Marquess <Paul.Marquess@btinternet.com>
+# Version: 1.03
+# Date: 17th September 2000
+#
+# Copyright (c) 1998-2002 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+# Todo: Print more stats on a db file, e.g. no of records
+# add log/txn/lock files
+
+use strict ;
+
+my %Data =
+ (
+ 0x053162 => {
+ Type => "Btree",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "Unknown (older than 1.71)",
+ 3 => "1.71 -> 1.85, 1.86",
+ 4 => "Unknown",
+ 5 => "2.0.0 -> 2.3.0",
+ 6 => "2.3.1 -> 2.7.7",
+ 7 => "3.0.x",
+ 8 => "3.1.x -> 4.0.x",
+ 9 => "4.1.x or greater",
+ }
+ },
+ 0x061561 => {
+ Type => "Hash",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "1.71 -> 1.85",
+ 3 => "1.86",
+ 4 => "2.0.0 -> 2.1.0",
+ 5 => "2.2.6 -> 2.7.7",
+ 6 => "3.0.x",
+ 7 => "3.1.x -> 4.0.x",
+ 8 => "4.1.x or greater",
+ }
+ },
+ 0x042253 => {
+ Type => "Queue",
+ Versions =>
+ {
+ 1 => "3.0.x",
+ 2 => "3.1.x",
+ 3 => "3.2.x -> 4.0.x",
+ 4 => "4.1.x or greater",
+ }
+ },
+ ) ;
+
+die "Usage: dbinfo file\n" unless @ARGV == 1 ;
+
+print "testing file $ARGV[0]...\n\n" ;
+open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
+
+my $buff ;
+read F, $buff, 20 ;
+
+my (@info) = unpack("NNNNN", $buff) ;
+my (@info1) = unpack("VVVVV", $buff) ;
+my ($magic, $version, $endian) ;
+
+if ($Data{$info[0]}) # first try DB 1.x format
+{
+ $magic = $info[0] ;
+ $version = $info[1] ;
+ $endian = "Unknown" ;
+}
+elsif ($Data{$info[3]}) # next DB 2.x big endian
+{
+ $magic = $info[3] ;
+ $version = $info[4] ;
+ $endian = "Big Endian" ;
+}
+elsif ($Data{$info1[3]}) # next DB 2.x little endian
+{
+ $magic = $info1[3] ;
+ $version = $info1[4] ;
+ $endian = "Little Endian" ;
+}
+else
+ { die "not a Berkeley DB database file.\n" }
+
+my $type = $Data{$magic} ;
+$magic = sprintf "%06X", $magic ;
+
+my $ver_string = "Unknown" ;
+$ver_string = $type->{Versions}{$version}
+ if defined $type->{Versions}{$version} ;
+
+print <<EOM ;
+File Type: Berkeley DB $type->{Type} file.
+File Version ID: $version
+Built with Berkeley DB: $ver_string
+Byte Order: $endian
+Magic: $magic
+EOM
+
+close F ;
+
+exit ;
diff --git a/storage/bdb/perl/DB_File/fallback.h b/storage/bdb/perl/DB_File/fallback.h
new file mode 100644
index 00000000000..0213308a0ee
--- /dev/null
+++ b/storage/bdb/perl/DB_File/fallback.h
@@ -0,0 +1,455 @@
+#define PERL_constant_NOTFOUND 1
+#define PERL_constant_NOTDEF 2
+#define PERL_constant_ISIV 3
+#define PERL_constant_ISNO 4
+#define PERL_constant_ISNV 5
+#define PERL_constant_ISPV 6
+#define PERL_constant_ISPVN 7
+#define PERL_constant_ISSV 8
+#define PERL_constant_ISUNDEF 9
+#define PERL_constant_ISUV 10
+#define PERL_constant_ISYES 11
+
+#ifndef NVTYPE
+typedef double NV; /* 5.6 and later define NVTYPE, and typedef NV to it. */
+#endif
+#ifndef aTHX_
+#define aTHX_ /* 5.6 or later define this for threading support. */
+#endif
+#ifndef pTHX_
+#define pTHX_ /* 5.6 or later define this for threading support. */
+#endif
+
+static int
+constant_6 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_TXN R_LAST R_NEXT R_PREV */
+ /* Offset 2 gives the best switch position. */
+ switch (name[2]) {
+ case 'L':
+ if (memEQ(name, "R_LAST", 6)) {
+ /* ^ */
+#ifdef R_LAST
+ *iv_return = R_LAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "R_NEXT", 6)) {
+ /* ^ */
+#ifdef R_NEXT
+ *iv_return = R_NEXT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "R_PREV", 6)) {
+ /* ^ */
+#ifdef R_PREV
+ *iv_return = R_PREV;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_TXN", 6)) {
+ /* ^ */
+#ifdef DB_TXN
+ *iv_return = DB_TXN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_7 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_LOCK R_FIRST R_NOKEY */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'I':
+ if (memEQ(name, "R_FIRST", 7)) {
+ /* ^ */
+#ifdef R_FIRST
+ *iv_return = R_FIRST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_LOCK", 7)) {
+ /* ^ */
+#ifdef DB_LOCK
+ *iv_return = DB_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "R_NOKEY", 7)) {
+ /* ^ */
+#ifdef R_NOKEY
+ *iv_return = R_NOKEY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_8 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_SHMEM R_CURSOR R_IAFTER */
+ /* Offset 5 gives the best switch position. */
+ switch (name[5]) {
+ case 'M':
+ if (memEQ(name, "DB_SHMEM", 8)) {
+ /* ^ */
+#ifdef DB_SHMEM
+ *iv_return = DB_SHMEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "R_CURSOR", 8)) {
+ /* ^ */
+#ifdef R_CURSOR
+ *iv_return = R_CURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "R_IAFTER", 8)) {
+ /* ^ */
+#ifdef R_IAFTER
+ *iv_return = R_IAFTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_9 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ HASHMAGIC RET_ERROR R_IBEFORE */
+ /* Offset 7 gives the best switch position. */
+ switch (name[7]) {
+ case 'I':
+ if (memEQ(name, "HASHMAGIC", 9)) {
+ /* ^ */
+#ifdef HASHMAGIC
+ *iv_return = HASHMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "RET_ERROR", 9)) {
+ /* ^ */
+#ifdef RET_ERROR
+ *iv_return = RET_ERROR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "R_IBEFORE", 9)) {
+ /* ^ */
+#ifdef R_IBEFORE
+ *iv_return = R_IBEFORE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_10 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ BTREEMAGIC R_FIXEDLEN R_SNAPSHOT __R_UNUSED */
+ /* Offset 5 gives the best switch position. */
+ switch (name[5]) {
+ case 'E':
+ if (memEQ(name, "R_FIXEDLEN", 10)) {
+ /* ^ */
+#ifdef R_FIXEDLEN
+ *iv_return = R_FIXEDLEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "BTREEMAGIC", 10)) {
+ /* ^ */
+#ifdef BTREEMAGIC
+ *iv_return = BTREEMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "__R_UNUSED", 10)) {
+ /* ^ */
+#ifdef __R_UNUSED
+ *iv_return = __R_UNUSED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "R_SNAPSHOT", 10)) {
+ /* ^ */
+#ifdef R_SNAPSHOT
+ *iv_return = R_SNAPSHOT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_11 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ HASHVERSION RET_SPECIAL RET_SUCCESS R_RECNOSYNC R_SETCURSOR */
+ /* Offset 10 gives the best switch position. */
+ switch (name[10]) {
+ case 'C':
+ if (memEQ(name, "R_RECNOSYNC", 11)) {
+ /* ^ */
+#ifdef R_RECNOSYNC
+ *iv_return = R_RECNOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "RET_SPECIAL", 11)) {
+ /* ^ */
+#ifdef RET_SPECIAL
+ *iv_return = RET_SPECIAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "HASHVERSION", 11)) {
+ /* ^ */
+#ifdef HASHVERSION
+ *iv_return = HASHVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "R_SETCURSOR", 11)) {
+ /* ^ */
+#ifdef R_SETCURSOR
+ *iv_return = R_SETCURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "RET_SUCCESS", 11)) {
+ /* ^ */
+#ifdef RET_SUCCESS
+ *iv_return = RET_SUCCESS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant (pTHX_ const char *name, STRLEN len, IV *iv_return) {
+ /* Initially switch on the length of the name. */
+ /* When generated this function returned values for the list of names given
+ in this section of perl code. Rather than manually editing these functions
+ to add or remove constants, which would result in this comment and section
+ of code becoming inaccurate, we recommend that you edit this section of
+ code, and use it to regenerate a new set of constant functions which you
+ then use to replace the originals.
+
+ Regenerate these constant functions by feeding this entire source file to
+ perl -x
+
+#!bleedperl -w
+use ExtUtils::Constant qw (constant_types C_constant XS_constant);
+
+my $types = {map {($_, 1)} qw(IV)};
+my @names = (qw(BTREEMAGIC BTREEVERSION DB_LOCK DB_SHMEM DB_TXN HASHMAGIC
+ HASHVERSION MAX_PAGE_NUMBER MAX_PAGE_OFFSET MAX_REC_NUMBER
+ RET_ERROR RET_SPECIAL RET_SUCCESS R_CURSOR R_DUP R_FIRST
+ R_FIXEDLEN R_IAFTER R_IBEFORE R_LAST R_NEXT R_NOKEY
+ R_NOOVERWRITE R_PREV R_RECNOSYNC R_SETCURSOR R_SNAPSHOT
+ __R_UNUSED));
+
+print constant_types(); # macro defs
+foreach (C_constant ("DB_File", 'constant', 'IV', $types, undef, 3, @names) ) {
+ print $_, "\n"; # C constant subs
+}
+print "#### XS Section:\n";
+print XS_constant ("DB_File", $types);
+__END__
+ */
+
+ switch (len) {
+ case 5:
+ if (memEQ(name, "R_DUP", 5)) {
+#ifdef R_DUP
+ *iv_return = R_DUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 6:
+ return constant_6 (aTHX_ name, iv_return);
+ break;
+ case 7:
+ return constant_7 (aTHX_ name, iv_return);
+ break;
+ case 8:
+ return constant_8 (aTHX_ name, iv_return);
+ break;
+ case 9:
+ return constant_9 (aTHX_ name, iv_return);
+ break;
+ case 10:
+ return constant_10 (aTHX_ name, iv_return);
+ break;
+ case 11:
+ return constant_11 (aTHX_ name, iv_return);
+ break;
+ case 12:
+ if (memEQ(name, "BTREEVERSION", 12)) {
+#ifdef BTREEVERSION
+ *iv_return = BTREEVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 13:
+ if (memEQ(name, "R_NOOVERWRITE", 13)) {
+#ifdef R_NOOVERWRITE
+ *iv_return = R_NOOVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 14:
+ if (memEQ(name, "MAX_REC_NUMBER", 14)) {
+#ifdef MAX_REC_NUMBER
+ *iv_return = MAX_REC_NUMBER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 15:
+ /* Names all of length 15. */
+ /* MAX_PAGE_NUMBER MAX_PAGE_OFFSET */
+ /* Offset 9 gives the best switch position. */
+ switch (name[9]) {
+ case 'N':
+ if (memEQ(name, "MAX_PAGE_NUMBER", 15)) {
+ /* ^ */
+#ifdef MAX_PAGE_NUMBER
+ *iv_return = MAX_PAGE_NUMBER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "MAX_PAGE_OFFSET", 15)) {
+ /* ^ */
+#ifdef MAX_PAGE_OFFSET
+ *iv_return = MAX_PAGE_OFFSET;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
diff --git a/storage/bdb/perl/DB_File/fallback.xs b/storage/bdb/perl/DB_File/fallback.xs
new file mode 100644
index 00000000000..8650cdf7646
--- /dev/null
+++ b/storage/bdb/perl/DB_File/fallback.xs
@@ -0,0 +1,88 @@
+void
+constant(sv)
+ PREINIT:
+#ifdef dXSTARG
+ dXSTARG; /* Faster if we have it. */
+#else
+ dTARGET;
+#endif
+ STRLEN len;
+ int type;
+ IV iv;
+ /* NV nv; Uncomment this if you need to return NVs */
+ /* const char *pv; Uncomment this if you need to return PVs */
+ INPUT:
+ SV * sv;
+ const char * s = SvPV(sv, len);
+ PPCODE:
+ /* Change this to constant(aTHX_ s, len, &iv, &nv);
+ if you need to return both NVs and IVs */
+ type = constant(aTHX_ s, len, &iv);
+ /* Return 1 or 2 items. First is error message, or undef if no error.
+ Second, if present, is found value */
+ switch (type) {
+ case PERL_constant_NOTFOUND:
+ sv = sv_2mortal(newSVpvf("%s is not a valid DB_File macro", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_NOTDEF:
+ sv = sv_2mortal(newSVpvf(
+ "Your vendor has not defined DB_File macro %s, used", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_ISIV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHi(iv);
+ break;
+ /* Uncomment this if you need to return NOs
+ case PERL_constant_ISNO:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_no);
+ break; */
+ /* Uncomment this if you need to return NVs
+ case PERL_constant_ISNV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHn(nv);
+ break; */
+ /* Uncomment this if you need to return PVs
+ case PERL_constant_ISPV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, strlen(pv));
+ break; */
+ /* Uncomment this if you need to return PVNs
+ case PERL_constant_ISPVN:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, iv);
+ break; */
+ /* Uncomment this if you need to return SVs
+ case PERL_constant_ISSV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(sv);
+ break; */
+ /* Uncomment this if you need to return UNDEFs
+ case PERL_constant_ISUNDEF:
+ break; */
+ /* Uncomment this if you need to return UVs
+ case PERL_constant_ISUV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHu((UV)iv);
+ break; */
+ /* Uncomment this if you need to return YESs
+ case PERL_constant_ISYES:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_yes);
+ break; */
+ default:
+ sv = sv_2mortal(newSVpvf(
+ "Unexpected return type %d while processing DB_File macro %s, used",
+ type, s));
+ PUSHs(sv);
+ }
diff --git a/storage/bdb/perl/DB_File/hints/dynixptx.pl b/storage/bdb/perl/DB_File/hints/dynixptx.pl
new file mode 100644
index 00000000000..bb5ffa56e6b
--- /dev/null
+++ b/storage/bdb/perl/DB_File/hints/dynixptx.pl
@@ -0,0 +1,3 @@
+# Need to add an extra '-lc' to the end to work around a DYNIX/ptx bug
+
+$self->{LIBS} = ['-lm -lc'];
diff --git a/storage/bdb/perl/DB_File/hints/sco.pl b/storage/bdb/perl/DB_File/hints/sco.pl
new file mode 100644
index 00000000000..ff604409496
--- /dev/null
+++ b/storage/bdb/perl/DB_File/hints/sco.pl
@@ -0,0 +1,2 @@
+# osr5 needs to explicitly link against libc to pull in some static symbols
+$self->{LIBS} = ['-ldb -lc'] if $Config{'osvers'} =~ '3\.2v5\.0\..' ;
diff --git a/storage/bdb/perl/DB_File/patches/5.004 b/storage/bdb/perl/DB_File/patches/5.004
new file mode 100644
index 00000000000..143ec95afbc
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.004
@@ -0,0 +1,44 @@
+diff perl5.004.orig/Configure perl5.004/Configure
+190a191
+> perllibs=''
+9904a9906,9913
+> : Remove libraries needed only for extensions
+> : The appropriate ext/Foo/Makefile.PL will add them back in, if
+> : necessary.
+> set X `echo " $libs " |
+> sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
+> shift
+> perllibs="$*"
+>
+10372a10382
+> perllibs='$perllibs'
+diff perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
+122c122
+< libs = $libs $cryptlib
+---
+> libs = $perllibs $cryptlib
+Common subdirectories: perl5.004.orig/Porting and perl5.004/Porting
+Common subdirectories: perl5.004.orig/cygwin32 and perl5.004/cygwin32
+Common subdirectories: perl5.004.orig/eg and perl5.004/eg
+Common subdirectories: perl5.004.orig/emacs and perl5.004/emacs
+Common subdirectories: perl5.004.orig/ext and perl5.004/ext
+Common subdirectories: perl5.004.orig/h2pl and perl5.004/h2pl
+Common subdirectories: perl5.004.orig/hints and perl5.004/hints
+Common subdirectories: perl5.004.orig/lib and perl5.004/lib
+diff perl5.004.orig/myconfig perl5.004/myconfig
+38c38
+< libs=$libs
+---
+> libs=$perllibs
+Common subdirectories: perl5.004.orig/os2 and perl5.004/os2
+diff perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
+40a41
+> ,"NODB-1.0 - remove -ldb from core perl binary."
+Common subdirectories: perl5.004.orig/plan9 and perl5.004/plan9
+Common subdirectories: perl5.004.orig/pod and perl5.004/pod
+Common subdirectories: perl5.004.orig/qnx and perl5.004/qnx
+Common subdirectories: perl5.004.orig/t and perl5.004/t
+Common subdirectories: perl5.004.orig/utils and perl5.004/utils
+Common subdirectories: perl5.004.orig/vms and perl5.004/vms
+Common subdirectories: perl5.004.orig/win32 and perl5.004/win32
+Common subdirectories: perl5.004.orig/x2p and perl5.004/x2p
diff --git a/storage/bdb/perl/DB_File/patches/5.004_01 b/storage/bdb/perl/DB_File/patches/5.004_01
new file mode 100644
index 00000000000..1b05eb4e02b
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.004_01
@@ -0,0 +1,217 @@
+diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
+*** perl5.004_01.orig/Configure Wed Jun 11 00:28:03 1997
+--- perl5.004_01/Configure Sun Nov 12 22:12:35 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9907,9912 ****
+--- 9908,9921 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10375,10380 ****
+--- 10384,10390 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
+*** perl5.004_01.orig/Makefile.SH Thu Jun 12 23:27:56 1997
+--- perl5.004_01/Makefile.SH Sun Nov 12 22:12:35 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
+*** perl5.004_01.orig/lib/ExtUtils/Embed.pm Wed Apr 2 22:12:04 1997
+--- perl5.004_01/lib/ExtUtils/Embed.pm Sun Nov 12 22:12:35 2000
+***************
+*** 170,176 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 170,176 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
+*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm Sat Jun 7 01:19:44 1997
+--- perl5.004_01/lib/ExtUtils/Liblist.pm Sun Nov 12 22:13:27 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm Thu Jun 12 22:06:18 1997
+--- perl5.004_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:12:35 2000
+***************
+*** 2137,2143 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2137,2143 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
+*** perl5.004_01.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_01/myconfig Sun Nov 12 22:12:35 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
+*** perl5.004_01.orig/patchlevel.h Wed Jun 11 03:06:10 1997
+--- perl5.004_01/patchlevel.h Sun Nov 12 22:12:35 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/DB_File/patches/5.004_02 b/storage/bdb/perl/DB_File/patches/5.004_02
new file mode 100644
index 00000000000..238f8737941
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.004_02
@@ -0,0 +1,217 @@
+diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
+*** perl5.004_02.orig/Configure Thu Aug 7 15:08:44 1997
+--- perl5.004_02/Configure Sun Nov 12 22:06:24 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
+*** perl5.004_02.orig/Makefile.SH Thu Aug 7 13:10:53 1997
+--- perl5.004_02/Makefile.SH Sun Nov 12 22:06:24 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
+*** perl5.004_02.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_02/lib/ExtUtils/Embed.pm Sun Nov 12 22:06:24 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
+*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_02/lib/ExtUtils/Liblist.pm Sun Nov 12 22:06:24 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm Tue Aug 5 14:28:08 1997
+--- perl5.004_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:06:25 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
+*** perl5.004_02.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_02/myconfig Sun Nov 12 22:06:25 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
+*** perl5.004_02.orig/patchlevel.h Fri Aug 1 15:07:34 1997
+--- perl5.004_02/patchlevel.h Sun Nov 12 22:06:25 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/DB_File/patches/5.004_03 b/storage/bdb/perl/DB_File/patches/5.004_03
new file mode 100644
index 00000000000..06331eac922
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.004_03
@@ -0,0 +1,223 @@
+diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
+*** perl5.004_03.orig/Configure Wed Aug 13 16:09:46 1997
+--- perl5.004_03/Configure Sun Nov 12 21:56:18 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.004_03: Configure.orig
+diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
+*** perl5.004_03.orig/Makefile.SH Mon Aug 18 19:24:29 1997
+--- perl5.004_03/Makefile.SH Sun Nov 12 21:56:18 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.004_03: Makefile.SH.orig
+diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
+*** perl5.004_03.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_03/lib/ExtUtils/Embed.pm Sun Nov 12 21:56:18 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
+*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_03/lib/ExtUtils/Liblist.pm Sun Nov 12 21:57:17 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
+diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm Mon Aug 18 19:16:12 1997
+--- perl5.004_03/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:56:19 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
+*** perl5.004_03.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_03/myconfig Sun Nov 12 21:56:19 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
+*** perl5.004_03.orig/patchlevel.h Wed Aug 13 11:42:01 1997
+--- perl5.004_03/patchlevel.h Sun Nov 12 21:56:19 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
+Only in perl5.004_03: patchlevel.h.orig
diff --git a/storage/bdb/perl/DB_File/patches/5.004_04 b/storage/bdb/perl/DB_File/patches/5.004_04
new file mode 100644
index 00000000000..a227dc700d9
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.004_04
@@ -0,0 +1,209 @@
+diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
+*** perl5.004_04.orig/Configure Fri Oct 3 18:57:39 1997
+--- perl5.004_04/Configure Sun Nov 12 21:50:51 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9910,9915 ****
+--- 9911,9924 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10378,10383 ****
+--- 10387,10393 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
+*** perl5.004_04.orig/Makefile.SH Wed Oct 15 10:33:16 1997
+--- perl5.004_04/Makefile.SH Sun Nov 12 21:50:51 2000
+***************
+*** 129,135 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 129,135 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
+*** perl5.004_04.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_04/lib/ExtUtils/Embed.pm Sun Nov 12 21:50:51 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
+*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm Tue Sep 9 17:41:32 1997
+--- perl5.004_04/lib/ExtUtils/Liblist.pm Sun Nov 12 21:51:33 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 189,195 ****
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 189,195 ----
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 539,545 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 539,545 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm Wed Oct 8 14:13:51 1997
+--- perl5.004_04/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:50:51 2000
+***************
+*** 2229,2235 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2229,2235 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
+*** perl5.004_04.orig/myconfig Mon Oct 6 18:26:49 1997
+--- perl5.004_04/myconfig Sun Nov 12 21:50:51 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
+*** perl5.004_04.orig/patchlevel.h Wed Oct 15 10:55:19 1997
+--- perl5.004_04/patchlevel.h Sun Nov 12 21:50:51 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/DB_File/patches/5.004_05 b/storage/bdb/perl/DB_File/patches/5.004_05
new file mode 100644
index 00000000000..51c8bf35009
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.004_05
@@ -0,0 +1,209 @@
+diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
+*** perl5.004_05.orig/Configure Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Configure Sun Nov 12 21:36:25 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 10164,10169 ****
+--- 10165,10178 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10648,10653 ****
+--- 10657,10663 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
+*** perl5.004_05.orig/Makefile.SH Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Makefile.SH Sun Nov 12 21:36:25 2000
+***************
+*** 151,157 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 151,157 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
+*** perl5.004_05.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_05/lib/ExtUtils/Embed.pm Sun Nov 12 21:36:25 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
+*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/Liblist.pm Sun Nov 12 21:45:31 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 590,596 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 590,596 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:36:25 2000
+***************
+*** 2246,2252 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2246,2252 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
+*** perl5.004_05.orig/myconfig Thu Jan 6 22:05:55 2000
+--- perl5.004_05/myconfig Sun Nov 12 21:43:54 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
+*** perl5.004_05.orig/patchlevel.h Thu Jan 6 22:05:48 2000
+--- perl5.004_05/patchlevel.h Sun Nov 12 21:36:25 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/DB_File/patches/5.005 b/storage/bdb/perl/DB_File/patches/5.005
new file mode 100644
index 00000000000..effee3e8275
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.005
@@ -0,0 +1,209 @@
+diff -rc perl5.005.orig/Configure perl5.005/Configure
+*** perl5.005.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005/Configure Sun Nov 12 21:30:40 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
+*** perl5.005.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005/Makefile.SH Sun Nov 12 21:30:40 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
+*** perl5.005.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005/lib/ExtUtils/Embed.pm Sun Nov 12 21:30:40 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
+*** perl5.005.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005/lib/ExtUtils/Liblist.pm Sun Nov 12 21:30:40 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
+*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:30:41 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005.orig/myconfig perl5.005/myconfig
+*** perl5.005.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005/myconfig Sun Nov 12 21:30:41 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
+*** perl5.005.orig/patchlevel.h Wed Jul 22 19:22:01 1998
+--- perl5.005/patchlevel.h Sun Nov 12 21:30:41 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/DB_File/patches/5.005_01 b/storage/bdb/perl/DB_File/patches/5.005_01
new file mode 100644
index 00000000000..2a05dd545f6
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.005_01
@@ -0,0 +1,209 @@
+diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
+*** perl5.005_01.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005_01/Configure Sun Nov 12 20:55:58 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
+*** perl5.005_01.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_01/Makefile.SH Sun Nov 12 20:55:58 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
+*** perl5.005_01.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_01/lib/ExtUtils/Embed.pm Sun Nov 12 20:55:58 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
+*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005_01/lib/ExtUtils/Liblist.pm Sun Nov 12 20:55:58 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:55:58 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
+*** perl5.005_01.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_01/myconfig Sun Nov 12 20:55:58 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
+*** perl5.005_01.orig/patchlevel.h Mon Jan 3 11:07:45 2000
+--- perl5.005_01/patchlevel.h Sun Nov 12 20:55:58 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/DB_File/patches/5.005_02 b/storage/bdb/perl/DB_File/patches/5.005_02
new file mode 100644
index 00000000000..5dd57ddc03f
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.005_02
@@ -0,0 +1,264 @@
+diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
+*** perl5.005_02.orig/Configure Mon Jan 3 11:12:20 2000
+--- perl5.005_02/Configure Sun Nov 12 20:50:51 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11334,11339 ****
+--- 11335,11348 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11859,11864 ****
+--- 11868,11874 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.005_02: Configure.orig
+diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
+*** perl5.005_02.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_02/Makefile.SH Sun Nov 12 20:50:51 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.005_02: Makefile.SH.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
+*** perl5.005_02.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_02/lib/ExtUtils/Embed.pm Sun Nov 12 20:50:51 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
+*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm Mon Jan 3 11:12:21 2000
+--- perl5.005_02/lib/ExtUtils/Liblist.pm Sun Nov 12 20:50:51 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 333,339 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 333,339 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 623,629 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 623,629 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+***************
+*** 666,672 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 666,672 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 676,682 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 676,682 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:50:51 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
+*** perl5.005_02.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_02/myconfig Sun Nov 12 20:50:51 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
+*** perl5.005_02.orig/patchlevel.h Mon Jan 3 11:12:19 2000
+--- perl5.005_02/patchlevel.h Sun Nov 12 20:50:51 2000
+***************
+*** 40,45 ****
+--- 40,46 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/DB_File/patches/5.005_03 b/storage/bdb/perl/DB_File/patches/5.005_03
new file mode 100644
index 00000000000..115f9f5b909
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.005_03
@@ -0,0 +1,250 @@
+diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
+*** perl5.005_03.orig/Configure Sun Mar 28 17:12:57 1999
+--- perl5.005_03/Configure Sun Sep 17 22:19:16 2000
+***************
+*** 208,213 ****
+--- 208,214 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11642,11647 ****
+--- 11643,11656 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 12183,12188 ****
+--- 12192,12198 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
+*** perl5.005_03.orig/Makefile.SH Thu Mar 4 02:35:25 1999
+--- perl5.005_03/Makefile.SH Sun Sep 17 22:21:01 2000
+***************
+*** 58,67 ****
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+--- 58,67 ----
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+***************
+*** 155,161 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 155,161 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
+*** perl5.005_03.orig/lib/ExtUtils/Embed.pm Wed Jan 6 02:17:50 1999
+--- perl5.005_03/lib/ExtUtils/Embed.pm Sun Sep 17 22:19:16 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
+*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm Wed Jan 6 02:17:47 1999
+--- perl5.005_03/lib/ExtUtils/Liblist.pm Sun Sep 17 22:19:16 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 336,342 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 336,342 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 626,632 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 626,632 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 670,676 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 670,676 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 680,686 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 680,686 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm Fri Mar 5 00:34:20 1999
+--- perl5.005_03/lib/ExtUtils/MM_Unix.pm Sun Sep 17 22:19:16 2000
+***************
+*** 2284,2290 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2284,2290 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
diff --git a/storage/bdb/perl/DB_File/patches/5.6.0 b/storage/bdb/perl/DB_File/patches/5.6.0
new file mode 100644
index 00000000000..1f9b3b620de
--- /dev/null
+++ b/storage/bdb/perl/DB_File/patches/5.6.0
@@ -0,0 +1,294 @@
+diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
+*** perl-5.6.0.orig/Configure Wed Mar 22 20:36:37 2000
+--- perl-5.6.0/Configure Sun Sep 17 23:40:15 2000
+***************
+*** 217,222 ****
+--- 217,223 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 14971,14976 ****
+--- 14972,14985 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 15640,15645 ****
+--- 15649,15655 ----
+ path_sep='$path_sep'
+ perl5='$perl5'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
+*** perl-5.6.0.orig/Makefile.SH Sat Mar 11 16:05:24 2000
+--- perl-5.6.0/Makefile.SH Sun Sep 17 23:40:15 2000
+***************
+*** 70,76 ****
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+--- 70,76 ----
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+***************
+*** 176,182 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 176,182 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+***************
+*** 333,339 ****
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $libs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+--- 333,339 ----
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $perllibs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Embed.pm perl-5.6.0/lib/ExtUtils/Embed.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Embed.pm Sun Jan 23 12:08:32 2000
+--- perl-5.6.0/lib/ExtUtils/Embed.pm Sun Sep 17 23:40:15 2000
+***************
+*** 193,199 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 193,199 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Liblist.pm perl-5.6.0/lib/ExtUtils/Liblist.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Liblist.pm Wed Mar 22 16:16:31 2000
+--- perl-5.6.0/lib/ExtUtils/Liblist.pm Sun Sep 17 23:40:15 2000
+***************
+*** 17,34 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 17,34 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 198,204 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 198,204 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 338,344 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 338,344 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 624,630 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 624,630 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 668,674 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 668,674 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 678,684 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 678,684 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
+*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm Thu Mar 2 17:52:52 2000
+--- perl-5.6.0/lib/ExtUtils/MM_Unix.pm Sun Sep 17 23:40:15 2000
+***************
+*** 2450,2456 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2450,2456 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
+*** perl-5.6.0.orig/myconfig.SH Sat Feb 26 06:34:49 2000
+--- perl-5.6.0/myconfig.SH Sun Sep 17 23:41:17 2000
+***************
+*** 48,54 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 48,54 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
+*** perl-5.6.0.orig/patchlevel.h Wed Mar 22 20:23:11 2000
+--- perl-5.6.0/patchlevel.h Sun Sep 17 23:40:15 2000
+***************
+*** 70,75 ****
+--- 70,76 ----
+ #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/storage/bdb/perl/DB_File/ppport.h b/storage/bdb/perl/DB_File/ppport.h
new file mode 100644
index 00000000000..0887c2159a9
--- /dev/null
+++ b/storage/bdb/perl/DB_File/ppport.h
@@ -0,0 +1,329 @@
+/* This file is Based on output from
+ * Perl/Pollution/Portability Version 2.0000 */
+
+#ifndef _P_P_PORTABILITY_H_
+#define _P_P_PORTABILITY_H_
+
+#ifndef PERL_REVISION
+# ifndef __PATCHLEVEL_H_INCLUDED__
+# include "patchlevel.h"
+# endif
+# ifndef PERL_REVISION
+# define PERL_REVISION (5)
+ /* Replace: 1 */
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+ /* Replace PERL_PATCHLEVEL with PERL_VERSION */
+ /* Replace: 0 */
+# endif
+#endif
+
+#define PERL_BCDVERSION ((PERL_REVISION * 0x1000000L) + (PERL_VERSION * 0x1000L) + PERL_SUBVERSION)
+
+#ifndef ERRSV
+# define ERRSV perl_get_sv("@",FALSE)
+#endif
+
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION <= 5))
+/* Replace: 1 */
+# define PL_Sv Sv
+# define PL_compiling compiling
+# define PL_copline copline
+# define PL_curcop curcop
+# define PL_curstash curstash
+# define PL_defgv defgv
+# define PL_dirty dirty
+# define PL_hints hints
+# define PL_na na
+# define PL_perldb perldb
+# define PL_rsfp_filters rsfp_filters
+# define PL_rsfp rsfp
+# define PL_stdingv stdingv
+# define PL_sv_no sv_no
+# define PL_sv_undef sv_undef
+# define PL_sv_yes sv_yes
+/* Replace: 0 */
+#endif
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef PTR2IV
+# define PTR2IV(d) (IV)(d)
+#endif
+
+#ifndef INT2PTR
+# define INT2PTR(any,d) (any)(d)
+#endif
+
+#ifndef dTHR
+# ifdef WIN32
+# define dTHR extern int Perl___notused
+# else
+# define dTHR extern int errno
+# endif
+#endif
+
+#ifndef boolSV
+# define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
+#endif
+
+#ifndef gv_stashpvn
+# define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
+#endif
+
+#ifndef newRV_inc
+/* Replace: 1 */
+# define newRV_inc(sv) newRV(sv)
+/* Replace: 0 */
+#endif
+
+/* DEFSV appears first in 5.004_56 */
+#ifndef DEFSV
+# define DEFSV GvSV(PL_defgv)
+#endif
+
+#ifndef SAVE_DEFSV
+# define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
+#endif
+
+#ifndef newRV_noinc
+# ifdef __GNUC__
+# define newRV_noinc(sv) \
+ ({ \
+ SV *nsv = (SV*)newRV(sv); \
+ SvREFCNT_dec(sv); \
+ nsv; \
+ })
+# else
+# if defined(CRIPPLED_CC) || defined(USE_THREADS)
+static SV * newRV_noinc (SV * sv)
+{
+ SV *nsv = (SV*)newRV(sv);
+ SvREFCNT_dec(sv);
+ return nsv;
+}
+# else
+# define newRV_noinc(sv) \
+ ((PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv)
+# endif
+# endif
+#endif
+
+/* Provide: newCONSTSUB */
+
+/* newCONSTSUB from IO.xs is in the core starting with 5.004_63 */
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION < 63))
+
+#if defined(NEED_newCONSTSUB)
+static
+#else
+extern void newCONSTSUB _((HV * stash, char * name, SV *sv));
+#endif
+
+#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
+void
+newCONSTSUB(stash,name,sv)
+HV *stash;
+char *name;
+SV *sv;
+{
+ U32 oldhints = PL_hints;
+ HV *old_cop_stash = PL_curcop->cop_stash;
+ HV *old_curstash = PL_curstash;
+ line_t oldline = PL_curcop->cop_line;
+ PL_curcop->cop_line = PL_copline;
+
+ PL_hints &= ~HINT_BLOCK_SCOPE;
+ if (stash)
+ PL_curstash = PL_curcop->cop_stash = stash;
+
+ newSUB(
+
+#if (PERL_VERSION < 3) || ((PERL_VERSION == 3) && (PERL_SUBVERSION < 22))
+ /* before 5.003_22 */
+ start_subparse(),
+#else
+# if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
+ /* 5.003_22 */
+ start_subparse(0),
+# else
+ /* 5.003_23 onwards */
+ start_subparse(FALSE, 0),
+# endif
+#endif
+
+ newSVOP(OP_CONST, 0, newSVpv(name,0)),
+ newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == "" -- GMB */
+ newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
+ );
+
+ PL_hints = oldhints;
+ PL_curcop->cop_stash = old_cop_stash;
+ PL_curstash = old_curstash;
+ PL_curcop->cop_line = oldline;
+}
+#endif
+
+#endif /* newCONSTSUB */
+
+
+#ifndef START_MY_CXT
+
+/*
+ * Boilerplate macros for initializing and accessing interpreter-local
+ * data from C. All statics in extensions should be reworked to use
+ * this, if you want to make the extension thread-safe. See ext/re/re.xs
+ * for an example of the use of these macros.
+ *
+ * Code that uses these macros is responsible for the following:
+ * 1. #define MY_CXT_KEY to a unique string, e.g. "DynaLoader_guts"
+ * 2. Declare a typedef named my_cxt_t that is a structure that contains
+ * all the data that needs to be interpreter-local.
+ * 3. Use the START_MY_CXT macro after the declaration of my_cxt_t.
+ * 4. Use the MY_CXT_INIT macro such that it is called exactly once
+ * (typically put in the BOOT: section).
+ * 5. Use the members of the my_cxt_t structure everywhere as
+ * MY_CXT.member.
+ * 6. Use the dMY_CXT macro (a declaration) in all the functions that
+ * access MY_CXT.
+ */
+
+#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \
+ defined(PERL_CAPI) || defined(PERL_IMPLICIT_CONTEXT)
+
+/* This must appear in all extensions that define a my_cxt_t structure,
+ * right after the definition (i.e. at file scope). The non-threads
+ * case below uses it to declare the data as static. */
+#define START_MY_CXT
+
+#if PERL_REVISION == 5 && \
+ (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION < 68 ))
+/* Fetches the SV that keeps the per-interpreter data. */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = perl_get_sv(MY_CXT_KEY, FALSE)
+#else /* >= perl5.004_68 */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY, \
+ sizeof(MY_CXT_KEY)-1, TRUE)
+#endif /* < perl5.004_68 */
+
+/* This declaration should be used within all functions that use the
+ * interpreter-local data. */
+#define dMY_CXT \
+ dMY_CXT_SV; \
+ my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv))
+
+/* Creates and zeroes the per-interpreter data.
+ * (We allocate my_cxtp in a Perl SV so that it will be released when
+ * the interpreter goes away.) */
+#define MY_CXT_INIT \
+ dMY_CXT_SV; \
+ /* newSV() allocates one more than needed */ \
+ my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\
+ Zero(my_cxtp, 1, my_cxt_t); \
+ sv_setuv(my_cxt_sv, PTR2UV(my_cxtp))
+
+/* This macro must be used to access members of the my_cxt_t structure.
+ * e.g. MYCXT.some_data */
+#define MY_CXT (*my_cxtp)
+
+/* Judicious use of these macros can reduce the number of times dMY_CXT
+ * is used. Use is similar to pTHX, aTHX etc. */
+#define pMY_CXT my_cxt_t *my_cxtp
+#define pMY_CXT_ pMY_CXT,
+#define _pMY_CXT ,pMY_CXT
+#define aMY_CXT my_cxtp
+#define aMY_CXT_ aMY_CXT,
+#define _aMY_CXT ,aMY_CXT
+
+#else /* single interpreter */
+
+#ifndef NOOP
+# define NOOP (void)0
+#endif
+
+#ifdef HASATTRIBUTE
+# define PERL_UNUSED_DECL __attribute__((unused))
+#else
+# define PERL_UNUSED_DECL
+#endif
+
+#ifndef dNOOP
+# define dNOOP extern int Perl___notused PERL_UNUSED_DECL
+#endif
+
+#define START_MY_CXT static my_cxt_t my_cxt;
+#define dMY_CXT_SV dNOOP
+#define dMY_CXT dNOOP
+#define MY_CXT_INIT NOOP
+#define MY_CXT my_cxt
+
+#define pMY_CXT void
+#define pMY_CXT_
+#define _pMY_CXT
+#define aMY_CXT
+#define aMY_CXT_
+#define _aMY_CXT
+
+#endif
+
+#endif /* START_MY_CXT */
+
+
+#ifndef DBM_setFilter
+
+/*
+ The DBM_setFilter & DBM_ckFilter macros are only used by
+ the *DB*_File modules
+*/
+
+#define DBM_setFilter(db_type,code) \
+ { \
+ if (db_type) \
+ RETVAL = sv_mortalcopy(db_type) ; \
+ ST(0) = RETVAL ; \
+ if (db_type && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db_type) ; \
+ db_type = NULL ; \
+ } \
+ else if (code) { \
+ if (db_type) \
+ sv_setsv(db_type, code) ; \
+ else \
+ db_type = newSVsv(code) ; \
+ } \
+ }
+
+#define DBM_ckFilter(arg,type,name) \
+ if (db->type) { \
+ if (db->filtering) { \
+ croak("recursion detected in %s", name) ; \
+ } \
+ ENTER ; \
+ SAVETMPS ; \
+ SAVEINT(db->filtering) ; \
+ db->filtering = TRUE ; \
+ SAVESPTR(DEFSV) ; \
+ DEFSV = arg ; \
+ SvTEMP_off(arg) ; \
+ PUSHMARK(SP) ; \
+ PUTBACK ; \
+ (void) perl_call_sv(db->type, G_DISCARD); \
+ SPAGAIN ; \
+ PUTBACK ; \
+ FREETMPS ; \
+ LEAVE ; \
+ }
+
+#endif /* DBM_setFilter */
+
+#endif /* _P_P_PORTABILITY_H_ */
diff --git a/storage/bdb/perl/DB_File/t/db-btree.t b/storage/bdb/perl/DB_File/t/db-btree.t
new file mode 100644
index 00000000000..a990a5c4ba5
--- /dev/null
+++ b/storage/bdb/perl/DB_File/t/db-btree.t
@@ -0,0 +1,1489 @@
+#!./perl -w
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use warnings;
+use strict;
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..0 # Skip: DB_File was not built\n";
+ exit 0;
+ }
+ }
+ if ($^O eq 'darwin'
+ && $Config{db_version_major} == 1
+ && $Config{db_version_minor} == 0
+ && $Config{db_version_patch} == 0) {
+ warn <<EOM;
+#
+# This test is known to crash in Mac OS X versions 10.1.4 (or earlier)
+# because of the buggy Berkeley DB version included with the OS.
+#
+EOM
+ }
+}
+
+use DB_File;
+use Fcntl;
+
+print "1..177\n";
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub lexical
+{
+ my(@a) = unpack ("C*", $a) ;
+ my(@b) = unpack ("C*", $b) ;
+
+ my $len = (@a > @b ? @b : @a) ;
+ my $i = 0 ;
+
+ foreach $i ( 0 .. $len -1) {
+ return $a[$i] - $b[$i] if $a[$i] != $b[$i] ;
+ }
+
+ return @a - @b ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef ;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT>;
+ close(CAT);
+ $result = normalise($result) ;
+ return $result ;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ my $result = docat($file);
+ unlink $file ;
+ return $result ;
+}
+
+sub normalise
+{
+ my $data = shift ;
+ $data =~ s#\r\n#\n#g
+ if $^O eq 'cygwin' ;
+
+ return $data ;
+}
+
+sub safeUntie
+{
+ my $hashref = shift ;
+ my $no_inner = 1;
+ local $SIG{__WARN__} = sub {-- $no_inner } ;
+ untie %$hashref;
+ return $no_inner;
+}
+
+
+
+my $db185mode = ($DB_File::db_version == 1 && ! $DB_File::db_185_compat) ;
+my $null_keys_allowed = ($DB_File::db_ver < 2.004010
+ || $DB_File::db_ver >= 3.1 );
+
+my $Dfile = "dbbtree.tmp";
+unlink $Dfile;
+
+umask(0);
+
+# Check the interface to BTREEINFO
+
+my $dbh = new DB_File::BTREEINFO ;
+ok(1, ! defined $dbh->{flags}) ;
+ok(2, ! defined $dbh->{cachesize}) ;
+ok(3, ! defined $dbh->{psize}) ;
+ok(4, ! defined $dbh->{lorder}) ;
+ok(5, ! defined $dbh->{minkeypage}) ;
+ok(6, ! defined $dbh->{maxkeypage}) ;
+ok(7, ! defined $dbh->{compare}) ;
+ok(8, ! defined $dbh->{prefix}) ;
+
+$dbh->{flags} = 3000 ;
+ok(9, $dbh->{flags} == 3000) ;
+
+$dbh->{cachesize} = 9000 ;
+ok(10, $dbh->{cachesize} == 9000);
+
+$dbh->{psize} = 400 ;
+ok(11, $dbh->{psize} == 400) ;
+
+$dbh->{lorder} = 65 ;
+ok(12, $dbh->{lorder} == 65) ;
+
+$dbh->{minkeypage} = 123 ;
+ok(13, $dbh->{minkeypage} == 123) ;
+
+$dbh->{maxkeypage} = 1234 ;
+ok(14, $dbh->{maxkeypage} == 1234 );
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(15, $@ =~ /^DB_File::BTREEINFO::STORE - Unknown element 'fred' at/ ) ;
+eval 'my $q = $dbh->{fred}' ;
+ok(16, $@ =~ /^DB_File::BTREEINFO::FETCH - Unknown element 'fred' at/ ) ;
+
+# Now check the interface to BTREE
+
+my ($X, %h) ;
+ok(17, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE )) ;
+die "Could not tie: $!" unless $X;
+
+my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+
+my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
+
+ok(18, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
+ || $noMode{$^O} );
+
+my ($key, $value, $i);
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(19, !$i ) ;
+
+$h{'goner1'} = 'snork';
+
+$h{'abc'} = 'ABC';
+ok(20, $h{'abc'} eq 'ABC' );
+ok(21, ! defined $h{'jimmy'} ) ;
+ok(22, ! exists $h{'jimmy'} ) ;
+ok(23, defined $h{'abc'} ) ;
+
+$h{'def'} = 'DEF';
+$h{'jkl','mno'} = "JKL\034MNO";
+$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
+$h{'a'} = 'A';
+
+#$h{'b'} = 'B';
+$X->STORE('b', 'B') ;
+
+$h{'c'} = 'C';
+
+#$h{'d'} = 'D';
+$X->put('d', 'D') ;
+
+$h{'e'} = 'E';
+$h{'f'} = 'F';
+$h{'g'} = 'X';
+$h{'h'} = 'H';
+$h{'i'} = 'I';
+
+$h{'goner2'} = 'snork';
+delete $h{'goner2'};
+
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(%h);
+
+# tie to the same file again
+ok(24, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE)) ;
+
+# Modify an entry from the previous tie
+$h{'g'} = 'G';
+
+$h{'j'} = 'J';
+$h{'k'} = 'K';
+$h{'l'} = 'L';
+$h{'m'} = 'M';
+$h{'n'} = 'N';
+$h{'o'} = 'O';
+$h{'p'} = 'P';
+$h{'q'} = 'Q';
+$h{'r'} = 'R';
+$h{'s'} = 'S';
+$h{'t'} = 'T';
+$h{'u'} = 'U';
+$h{'v'} = 'V';
+$h{'w'} = 'W';
+$h{'x'} = 'X';
+$h{'y'} = 'Y';
+$h{'z'} = 'Z';
+
+$h{'goner3'} = 'snork';
+
+delete $h{'goner1'};
+$X->DELETE('goner3');
+
+my @keys = keys(%h);
+my @values = values(%h);
+
+ok(25, $#keys == 29 && $#values == 29) ;
+
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
+ $key =~ y/a-z/A-Z/;
+ $i++ if $key eq $value;
+ }
+}
+
+ok(26, $i == 30) ;
+
+@keys = ('blurfl', keys(%h), 'dyick');
+ok(27, $#keys == 31) ;
+
+#Check that the keys can be retrieved in order
+my @b = keys %h ;
+my @c = sort lexical @b ;
+ok(28, ArrayCompare(\@b, \@c)) ;
+
+$h{'foo'} = '';
+ok(29, $h{'foo'} eq '' ) ;
+
+# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
+# This feature was reenabled in version 3.1 of Berkeley DB.
+my $result = 0 ;
+if ($null_keys_allowed) {
+ $h{''} = 'bar';
+ $result = ( $h{''} eq 'bar' );
+}
+else
+ { $result = 1 }
+ok(30, $result) ;
+
+# check cache overflow and numeric keys and contents
+my $ok = 1;
+for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
+for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
+ok(31, $ok);
+
+($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(32, $size > 0 );
+
+@h{0..200} = 200..400;
+my @foo = @h{0..200};
+ok(33, join(':',200..400) eq join(':',@foo) );
+
+# Now check all the non-tie specific stuff
+
+
+# Check R_NOOVERWRITE flag will make put fail when attempting to overwrite
+# an existing record.
+
+my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
+ok(34, $status == 1 );
+
+# check that the value of the key 'x' has not been changed by the
+# previous test
+ok(35, $h{'x'} eq 'X' );
+
+# standard put
+$status = $X->put('key', 'value') ;
+ok(36, $status == 0 );
+
+#check that previous put can be retrieved
+$value = 0 ;
+$status = $X->get('key', $value) ;
+ok(37, $status == 0 );
+ok(38, $value eq 'value' );
+
+# Attempting to delete an existing key should work
+
+$status = $X->del('q') ;
+ok(39, $status == 0 );
+if ($null_keys_allowed) {
+ $status = $X->del('') ;
+} else {
+ $status = 0 ;
+}
+ok(40, $status == 0 );
+
+# Make sure that the key deleted, cannot be retrieved
+ok(41, ! defined $h{'q'}) ;
+ok(42, ! defined $h{''}) ;
+
+undef $X ;
+untie %h ;
+
+ok(43, $X = tie(%h, 'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE ));
+
+# Attempting to delete a non-existant key should fail
+
+$status = $X->del('joe') ;
+ok(44, $status == 1 );
+
+# Check the get interface
+
+# First a non-existing key
+$status = $X->get('aaaa', $value) ;
+ok(45, $status == 1 );
+
+# Next an existing key
+$status = $X->get('a', $value) ;
+ok(46, $status == 0 );
+ok(47, $value eq 'A' );
+
+# seq
+# ###
+
+# use seq to find an approximate match
+$key = 'ke' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(48, $status == 0 );
+ok(49, $key eq 'key' );
+ok(50, $value eq 'value' );
+
+# seq when the key does not match
+$key = 'zzz' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(51, $status == 1 );
+
+
+# use seq to set the cursor, then delete the record @ the cursor.
+
+$key = 'x' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(52, $status == 0 );
+ok(53, $key eq 'x' );
+ok(54, $value eq 'X' );
+$status = $X->del(0, R_CURSOR) ;
+ok(55, $status == 0 );
+$status = $X->get('x', $value) ;
+ok(56, $status == 1 );
+
+# ditto, but use put to replace the key/value pair.
+$key = 'y' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(57, $status == 0 );
+ok(58, $key eq 'y' );
+ok(59, $value eq 'Y' );
+
+$key = "replace key" ;
+$value = "replace value" ;
+$status = $X->put($key, $value, R_CURSOR) ;
+ok(60, $status == 0 );
+ok(61, $key eq 'replace key' );
+ok(62, $value eq 'replace value' );
+$status = $X->get('y', $value) ;
+ok(63, 1) ; # hard-wire to always pass. the previous test ($status == 1)
+ # only worked because of a bug in 1.85/6
+
+# use seq to walk forwards through a file
+
+$status = $X->seq($key, $value, R_FIRST) ;
+ok(64, $status == 0 );
+my $previous = $key ;
+
+$ok = 1 ;
+while (($status = $X->seq($key, $value, R_NEXT)) == 0)
+{
+ ($ok = 0), last if ($previous cmp $key) == 1 ;
+}
+
+ok(65, $status == 1 );
+ok(66, $ok == 1 );
+
+# use seq to walk backwards through a file
+$status = $X->seq($key, $value, R_LAST) ;
+ok(67, $status == 0 );
+$previous = $key ;
+
+$ok = 1 ;
+while (($status = $X->seq($key, $value, R_PREV)) == 0)
+{
+ ($ok = 0), last if ($previous cmp $key) == -1 ;
+ #print "key = [$key] value = [$value]\n" ;
+}
+
+ok(68, $status == 1 );
+ok(69, $ok == 1 );
+
+
+# check seq FIRST/LAST
+
+# sync
+# ####
+
+$status = $X->sync ;
+ok(70, $status == 0 );
+
+
+# fd
+# ##
+
+$status = $X->fd ;
+ok(71, $status != 0 );
+
+
+undef $X ;
+untie %h ;
+
+unlink $Dfile;
+
+# Now try an in memory file
+my $Y;
+ok(72, $Y = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_BTREE ));
+
+# fd with an in memory file should return failure
+$status = $Y->fd ;
+ok(73, $status == -1 );
+
+
+undef $Y ;
+untie %h ;
+
+# Duplicate keys
+my $bt = new DB_File::BTREEINFO ;
+$bt->{flags} = R_DUP ;
+my ($YY, %hh);
+ok(74, $YY = tie(%hh, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $bt )) ;
+
+$hh{'Wall'} = 'Larry' ;
+$hh{'Wall'} = 'Stone' ; # Note the duplicate key
+$hh{'Wall'} = 'Brick' ; # Note the duplicate key
+$hh{'Wall'} = 'Brick' ; # Note the duplicate key and value
+$hh{'Smith'} = 'John' ;
+$hh{'mouse'} = 'mickey' ;
+
+# first work in scalar context
+ok(75, scalar $YY->get_dup('Unknown') == 0 );
+ok(76, scalar $YY->get_dup('Smith') == 1 );
+ok(77, scalar $YY->get_dup('Wall') == 4 );
+
+# now in list context
+my @unknown = $YY->get_dup('Unknown') ;
+ok(78, "@unknown" eq "" );
+
+my @smith = $YY->get_dup('Smith') ;
+ok(79, "@smith" eq "John" );
+
+{
+my @wall = $YY->get_dup('Wall') ;
+my %wall ;
+@wall{@wall} = @wall ;
+ok(80, (@wall == 4 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'}) );
+}
+
+# hash
+my %unknown = $YY->get_dup('Unknown', 1) ;
+ok(81, keys %unknown == 0 );
+
+my %smith = $YY->get_dup('Smith', 1) ;
+ok(82, keys %smith == 1 && $smith{'John'}) ;
+
+my %wall = $YY->get_dup('Wall', 1) ;
+ok(83, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 2);
+
+undef $YY ;
+untie %hh ;
+unlink $Dfile;
+
+
+# test multiple callbacks
+my $Dfile1 = "btree1" ;
+my $Dfile2 = "btree2" ;
+my $Dfile3 = "btree3" ;
+
+my $dbh1 = new DB_File::BTREEINFO ;
+$dbh1->{compare} = sub {
+ no warnings 'numeric' ;
+ $_[0] <=> $_[1] } ;
+
+my $dbh2 = new DB_File::BTREEINFO ;
+$dbh2->{compare} = sub { $_[0] cmp $_[1] } ;
+
+my $dbh3 = new DB_File::BTREEINFO ;
+$dbh3->{compare} = sub { length $_[0] <=> length $_[1] } ;
+
+
+my (%g, %k);
+tie(%h, 'DB_File',$Dfile1, O_RDWR|O_CREAT, 0640, $dbh1 ) or die $!;
+tie(%g, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) or die $!;
+tie(%k, 'DB_File',$Dfile3, O_RDWR|O_CREAT, 0640, $dbh3 ) or die $!;
+
+my @Keys = qw( 0123 12 -1234 9 987654321 def ) ;
+my (@srt_1, @srt_2, @srt_3);
+{
+ no warnings 'numeric' ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+}
+@srt_2 = sort { $a cmp $b } @Keys ;
+@srt_3 = sort { length $a <=> length $b } @Keys ;
+
+foreach (@Keys) {
+ $h{$_} = 1 ;
+ $g{$_} = 1 ;
+ $k{$_} = 1 ;
+}
+
+sub ArrayCompare
+{
+ my($a, $b) = @_ ;
+
+ return 0 if @$a != @$b ;
+
+ foreach (1 .. length @$a)
+ {
+ return 0 unless $$a[$_] eq $$b[$_] ;
+ }
+
+ 1 ;
+}
+
+ok(84, ArrayCompare (\@srt_1, [keys %h]) );
+ok(85, ArrayCompare (\@srt_2, [keys %g]) );
+ok(86, ArrayCompare (\@srt_3, [keys %k]) );
+
+untie %h ;
+untie %g ;
+untie %k ;
+unlink $Dfile1, $Dfile2, $Dfile3 ;
+
+# clear
+# #####
+
+ok(87, tie(%h, 'DB_File', $Dfile1, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+foreach (1 .. 10)
+ { $h{$_} = $_ * 100 }
+
+# check that there are 10 elements in the hash
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(88, $i == 10);
+
+# now clear the hash
+%h = () ;
+
+# check it is empty
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(89, $i == 0);
+
+untie %h ;
+unlink $Dfile1 ;
+
+{
+ # check that attempting to tie an array to a DB_BTREE will fail
+
+ my $filename = "xyz" ;
+ my @x ;
+ eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE ; } ;
+ ok(90, $@ =~ /^DB_File can only tie an associative array to a DB_BTREE database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ our (@ISA, @EXPORT);
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(91, $@ eq "") ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB","dbbtree.tmp", O_RDWR|O_CREAT, 0640, $DB_BTREE );
+ ' ;
+
+ main::ok(92, $@ eq "") ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok(93, $@ eq "") ;
+ main::ok(94, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
+ main::ok(95, $@ eq "") ;
+ main::ok(96, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(97, $@ eq "" ) ;
+ main::ok(98, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok(99, $@ eq "") ;
+ main::ok(100, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(%h);
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(101, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok(102, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(103, $h{"fred"} eq "joe");
+ # fk sk fv sv
+ ok(104, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(105, $db->FIRSTKEY() eq "fred") ;
+ # fk sk fv sv
+ ok(106, checkOutput( "fred", "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok(107, checkOutput( "", "fred", "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(108, $h{"Fred"} eq "[Jxe]");
+ # fk sk fv sv
+ ok(109, checkOutput( "", "fred", "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(110, $db->FIRSTKEY() eq "FRED") ;
+ # fk sk fv sv
+ ok(111, checkOutput( "FRED", "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(112, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(113, $h{"fred"} eq "joe");
+ ok(114, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(115, $db->FIRSTKEY() eq "fred") ;
+ ok(116, checkOutput( "fred", "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(117, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(118, $h{"fred"} eq "joe");
+ ok(119, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(120, $db->FIRSTKEY() eq "fred") ;
+ ok(121, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok(122, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(123, $result{"store key"} eq "store key - 1: [fred]");
+ ok(124, $result{"store value"} eq "store value - 1: [joe]");
+ ok(125, ! defined $result{"fetch key"} );
+ ok(126, ! defined $result{"fetch value"} );
+ ok(127, $_ eq "original") ;
+
+ ok(128, $db->FIRSTKEY() eq "fred") ;
+ ok(129, $result{"store key"} eq "store key - 1: [fred]");
+ ok(130, $result{"store value"} eq "store value - 1: [joe]");
+ ok(131, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(132, ! defined $result{"fetch value"} );
+ ok(133, $_ eq "original") ;
+
+ $h{"jim"} = "john" ;
+ ok(134, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(135, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(136, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(137, ! defined $result{"fetch value"} );
+ ok(138, $_ eq "original") ;
+
+ ok(139, $h{"fred"} eq "joe");
+ ok(140, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(141, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(142, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(143, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(144, $_ eq "original") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(145, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok(146, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 1
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my %h ;
+
+ sub Compare
+ {
+ my ($key1, $key2) = @_ ;
+ "\L$key1" cmp "\L$key2" ;
+ }
+
+ # specify the Perl sub that will do the comparison
+ $DB_BTREE->{'compare'} = \&Compare ;
+
+ unlink "tree" ;
+ tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open file 'tree': $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+ unlink "tree" ;
+ }
+
+ delete $DB_BTREE->{'compare'} ;
+
+ ok(147, docat_del($file) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 2
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, %h);
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the associative array
+ # and print each key/value pair.
+ foreach (keys %h)
+ { print "$_ -> $h{$_}\n" }
+
+ untie %h ;
+
+ unlink $filename ;
+ }
+
+ ok(148, docat_del($file) eq ($db185mode ? <<'EOM' : <<'EOM') ) ;
+Smith -> John
+Wall -> Brick
+Wall -> Brick
+Wall -> Brick
+mouse -> mickey
+EOM
+Smith -> John
+Wall -> Larry
+Wall -> Larry
+Wall -> Larry
+mouse -> mickey
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 3
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $status, $key, $value);
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the btree using seq
+ # and print each key/value pair.
+ $key = $value = 0 ;
+ for ($status = $x->seq($key, $value, R_FIRST) ;
+ $status == 0 ;
+ $status = $x->seq($key, $value, R_NEXT) )
+ { print "$key -> $value\n" }
+
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(149, docat_del($file) eq ($db185mode == 1 ? <<'EOM' : <<'EOM') ) ;
+Smith -> John
+Wall -> Brick
+Wall -> Brick
+Wall -> Larry
+mouse -> mickey
+EOM
+Smith -> John
+Wall -> Larry
+Wall -> Brick
+Wall -> Brick
+mouse -> mickey
+EOM
+
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 4
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h);
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ my $cnt = $x->get_dup("Wall") ;
+ print "Wall occurred $cnt times\n" ;
+
+ my %hash = $x->get_dup("Wall", 1) ;
+ print "Larry is there\n" if $hash{'Larry'} ;
+ print "There are $hash{'Brick'} Brick Walls\n" ;
+
+ my @list = sort $x->get_dup("Wall") ;
+ print "Wall => [@list]\n" ;
+
+ @list = $x->get_dup("Smith") ;
+ print "Smith => [@list]\n" ;
+
+ @list = $x->get_dup("Dog") ;
+ print "Dog => [@list]\n" ;
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(150, docat_del($file) eq <<'EOM') ;
+Wall occurred 3 times
+Larry is there
+There are 2 Brick Walls
+Wall => [Brick Brick Larry]
+Smith => [John]
+Dog => []
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 5
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $found);
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ;
+ print "Harry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(151, docat_del($file) eq <<'EOM') ;
+Larry Wall is there
+Harry Wall is not there
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 6
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my ($filename, $x, %h, $found);
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $x->del_dup("Wall", "Larry") ;
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+ unlink $filename ;
+ }
+
+ ok(152, docat_del($file) eq <<'EOM') ;
+Larry Wall is not there
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 7
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ my ($filename, $x, %h, $st, $key, $value);
+
+ sub match
+ {
+ my $key = shift ;
+ my $value = 0;
+ my $orig_key = $key ;
+ $x->seq($key, $value, R_CURSOR) ;
+ print "$orig_key\t-> $key\t-> $value\n" ;
+ }
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'mouse'} = 'mickey' ;
+ $h{'Wall'} = 'Larry' ;
+ $h{'Walls'} = 'Brick' ;
+ $h{'Smith'} = 'John' ;
+
+
+ $key = $value = 0 ;
+ print "IN ORDER\n" ;
+ for ($st = $x->seq($key, $value, R_FIRST) ;
+ $st == 0 ;
+ $st = $x->seq($key, $value, R_NEXT) )
+
+ { print "$key -> $value\n" }
+
+ print "\nPARTIAL MATCH\n" ;
+
+ match "Wa" ;
+ match "A" ;
+ match "a" ;
+
+ undef $x ;
+ untie %h ;
+
+ unlink $filename ;
+
+ }
+
+ ok(153, docat_del($file) eq <<'EOM') ;
+IN ORDER
+Smith -> John
+Wall -> Larry
+Walls -> Brick
+mouse -> mickey
+
+PARTIAL MATCH
+Wa -> Wall -> Larry
+A -> Smith -> John
+a -> mouse -> mickey
+EOM
+
+}
+
+#{
+# # R_SETCURSOR
+# use strict ;
+# my (%h, $db) ;
+# unlink $Dfile;
+#
+# ok(156, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+#
+# $h{abc} = 33 ;
+# my $k = "newest" ;
+# my $v = 44 ;
+# my $status = $db->put($k, $v, R_SETCURSOR) ;
+# print "status = [$status]\n" ;
+# ok(157, $status == 0) ;
+# $status = $db->del($k, R_CURSOR) ;
+# print "status = [$status]\n" ;
+# ok(158, $status == 0) ;
+# $k = "newest" ;
+# ok(159, $db->get($k, $v, R_CURSOR)) ;
+#
+# ok(160, keys %h == 1) ;
+#
+# undef $db ;
+# untie %h;
+# unlink $Dfile;
+#}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
+ or die "Can't open file: $!\n" ;
+ $h{ABC} = undef;
+ ok(154, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
+ or die "Can't open file: $!\n" ;
+ %h = (); ;
+ ok(155, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # When iterating over a tied hash using "each", the key passed to FETCH
+ # will be recycled and passed to NEXTKEY. If a Source Filter modifies the
+ # key in FETCH via a filter_fetch_key method we need to check that the
+ # modified key doesn't get passed to NEXTKEY.
+ # Also Test "keys" & "values" while we are at it.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my $bad_key = 0 ;
+ my %h = () ;
+ my $db ;
+ ok(156, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+ $db->filter_fetch_key (sub { $_ =~ s/^Beta_/Alpha_/ if defined $_}) ;
+ $db->filter_store_key (sub { $bad_key = 1 if /^Beta_/ ; $_ =~ s/^Alpha_/Beta_/}) ;
+
+ $h{'Alpha_ABC'} = 2 ;
+ $h{'Alpha_DEF'} = 5 ;
+
+ ok(157, $h{'Alpha_ABC'} == 2);
+ ok(158, $h{'Alpha_DEF'} == 5);
+
+ my ($k, $v) = ("","");
+ while (($k, $v) = each %h) {}
+ ok(159, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $k (keys %h) {}
+ ok(160, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $v (values %h) {}
+ ok(161, $bad_key == 0);
+
+ undef $db ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # now an error to pass 'compare' a non-code reference
+ my $dbh = new DB_File::BTREEINFO ;
+
+ eval { $dbh->{compare} = 2 };
+ ok(162, $@ =~ /^Key 'compare' not associated with a code reference at/);
+
+ eval { $dbh->{prefix} = 2 };
+ ok(163, $@ =~ /^Key 'prefix' not associated with a code reference at/);
+
+}
+
+
+{
+ # recursion detection in btree
+ my %hash ;
+ unlink $Dfile;
+ my $dbh = new DB_File::BTREEINFO ;
+ $dbh->{compare} = sub { $hash{3} = 4 ; length $_[0] } ;
+
+
+ my (%h);
+ ok(164, tie(%hash, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh ) );
+
+ eval { $hash{1} = 2;
+ $hash{4} = 5;
+ };
+
+ ok(165, $@ =~ /^DB_File btree_compare: recursion detected/);
+ {
+ no warnings;
+ untie %hash;
+ }
+ unlink $Dfile;
+}
+
+{
+ # Check that two callbacks don't interact
+ my %hash1 ;
+ my %hash2 ;
+ my $h1_count = 0;
+ my $h2_count = 0;
+ unlink $Dfile, $Dfile2;
+ my $dbh1 = new DB_File::BTREEINFO ;
+ $dbh1->{compare} = sub { ++ $h1_count ; $_[0] cmp $_[1] } ;
+
+ my $dbh2 = new DB_File::BTREEINFO ;
+ $dbh2->{compare} = sub { ;++ $h2_count ; $_[0] cmp $_[1] } ;
+
+
+
+ my (%h);
+ ok(166, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) );
+ ok(167, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) );
+
+ $hash1{DEFG} = 5;
+ $hash1{XYZ} = 2;
+ $hash1{ABCDE} = 5;
+
+ $hash2{defg} = 5;
+ $hash2{xyz} = 2;
+ $hash2{abcde} = 5;
+
+ ok(168, $h1_count > 0);
+ ok(169, $h1_count == $h2_count);
+
+ ok(170, safeUntie \%hash1);
+ ok(171, safeUntie \%hash2);
+ unlink $Dfile, $Dfile2;
+}
+
+{
+ # Check that DBM Filter can cope with read-only $_
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(172, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_fetch_key (sub { }) ;
+ $db->filter_store_key (sub { }) ;
+ $db->filter_fetch_value (sub { }) ;
+ $db->filter_store_value (sub { }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(173, $h{"fred"} eq "joe");
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (174, ! $@);
+
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ $h{"fred"} = "joe" ;
+
+ ok(175, $h{"fred"} eq "joe");
+
+ ok(176, $db->FIRSTKEY() eq "fred") ;
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (177, ! $@);
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+exit ;
diff --git a/storage/bdb/perl/DB_File/t/db-hash.t b/storage/bdb/perl/DB_File/t/db-hash.t
new file mode 100644
index 00000000000..10623cc82a7
--- /dev/null
+++ b/storage/bdb/perl/DB_File/t/db-hash.t
@@ -0,0 +1,981 @@
+#!./perl
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use warnings;
+use strict;
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..0 # Skip: DB_File was not built\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+
+print "1..143\n";
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT>;
+ close(CAT);
+ $result = normalise($result) ;
+ unlink $file ;
+ return $result;
+}
+
+sub normalise
+{
+ my $data = shift ;
+ $data =~ s#\r\n#\n#g
+ if $^O eq 'cygwin' ;
+ return $data ;
+}
+
+sub safeUntie
+{
+ my $hashref = shift ;
+ my $no_inner = 1;
+ local $SIG{__WARN__} = sub {-- $no_inner } ;
+ untie %$hashref;
+ return $no_inner;
+}
+
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $null_keys_allowed = ($DB_File::db_ver < 2.004010
+ || $DB_File::db_ver >= 3.1 );
+
+unlink $Dfile;
+
+umask(0);
+
+# Check the interface to HASHINFO
+
+my $dbh = new DB_File::HASHINFO ;
+
+ok(1, ! defined $dbh->{bsize}) ;
+ok(2, ! defined $dbh->{ffactor}) ;
+ok(3, ! defined $dbh->{nelem}) ;
+ok(4, ! defined $dbh->{cachesize}) ;
+ok(5, ! defined $dbh->{hash}) ;
+ok(6, ! defined $dbh->{lorder}) ;
+
+$dbh->{bsize} = 3000 ;
+ok(7, $dbh->{bsize} == 3000 );
+
+$dbh->{ffactor} = 9000 ;
+ok(8, $dbh->{ffactor} == 9000 );
+
+$dbh->{nelem} = 400 ;
+ok(9, $dbh->{nelem} == 400 );
+
+$dbh->{cachesize} = 65 ;
+ok(10, $dbh->{cachesize} == 65 );
+
+my $some_sub = sub {} ;
+$dbh->{hash} = $some_sub;
+ok(11, $dbh->{hash} eq $some_sub );
+
+$dbh->{lorder} = 1234 ;
+ok(12, $dbh->{lorder} == 1234 );
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(13, $@ =~ /^DB_File::HASHINFO::STORE - Unknown element 'fred' at/ );
+eval 'my $q = $dbh->{fred}' ;
+ok(14, $@ =~ /^DB_File::HASHINFO::FETCH - Unknown element 'fred' at/ );
+
+
+# Now check the interface to HASH
+my ($X, %h);
+ok(15, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+die "Could not tie: $!" unless $X;
+
+my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+
+my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
+
+ok(16, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640) ||
+ $noMode{$^O} );
+
+my ($key, $value, $i);
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(17, !$i );
+
+$h{'goner1'} = 'snork';
+
+$h{'abc'} = 'ABC';
+ok(18, $h{'abc'} eq 'ABC' );
+ok(19, !defined $h{'jimmy'} );
+ok(20, !exists $h{'jimmy'} );
+ok(21, exists $h{'abc'} );
+
+$h{'def'} = 'DEF';
+$h{'jkl','mno'} = "JKL\034MNO";
+$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
+$h{'a'} = 'A';
+
+#$h{'b'} = 'B';
+$X->STORE('b', 'B') ;
+
+$h{'c'} = 'C';
+
+#$h{'d'} = 'D';
+$X->put('d', 'D') ;
+
+$h{'e'} = 'E';
+$h{'f'} = 'F';
+$h{'g'} = 'X';
+$h{'h'} = 'H';
+$h{'i'} = 'I';
+
+$h{'goner2'} = 'snork';
+delete $h{'goner2'};
+
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(%h);
+
+
+# tie to the same file again, do not supply a type - should default to HASH
+ok(22, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640) );
+
+# Modify an entry from the previous tie
+$h{'g'} = 'G';
+
+$h{'j'} = 'J';
+$h{'k'} = 'K';
+$h{'l'} = 'L';
+$h{'m'} = 'M';
+$h{'n'} = 'N';
+$h{'o'} = 'O';
+$h{'p'} = 'P';
+$h{'q'} = 'Q';
+$h{'r'} = 'R';
+$h{'s'} = 'S';
+$h{'t'} = 'T';
+$h{'u'} = 'U';
+$h{'v'} = 'V';
+$h{'w'} = 'W';
+$h{'x'} = 'X';
+$h{'y'} = 'Y';
+$h{'z'} = 'Z';
+
+$h{'goner3'} = 'snork';
+
+delete $h{'goner1'};
+$X->DELETE('goner3');
+
+my @keys = keys(%h);
+my @values = values(%h);
+
+ok(23, $#keys == 29 && $#values == 29) ;
+
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
+ $key =~ y/a-z/A-Z/;
+ $i++ if $key eq $value;
+ }
+}
+
+ok(24, $i == 30) ;
+
+@keys = ('blurfl', keys(%h), 'dyick');
+ok(25, $#keys == 31) ;
+
+$h{'foo'} = '';
+ok(26, $h{'foo'} eq '' );
+
+# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
+# This feature was reenabled in version 3.1 of Berkeley DB.
+my $result = 0 ;
+if ($null_keys_allowed) {
+ $h{''} = 'bar';
+ $result = ( $h{''} eq 'bar' );
+}
+else
+ { $result = 1 }
+ok(27, $result) ;
+
+# check cache overflow and numeric keys and contents
+my $ok = 1;
+for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
+for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
+ok(28, $ok );
+
+($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(29, $size > 0 );
+
+@h{0..200} = 200..400;
+my @foo = @h{0..200};
+ok(30, join(':',200..400) eq join(':',@foo) );
+
+
+# Now check all the non-tie specific stuff
+
+# Check NOOVERWRITE will make put fail when attempting to overwrite
+# an existing record.
+
+my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
+ok(31, $status == 1 );
+
+# check that the value of the key 'x' has not been changed by the
+# previous test
+ok(32, $h{'x'} eq 'X' );
+
+# standard put
+$status = $X->put('key', 'value') ;
+ok(33, $status == 0 );
+
+#check that previous put can be retrieved
+$value = 0 ;
+$status = $X->get('key', $value) ;
+ok(34, $status == 0 );
+ok(35, $value eq 'value' );
+
+# Attempting to delete an existing key should work
+
+$status = $X->del('q') ;
+ok(36, $status == 0 );
+
+# Make sure that the key deleted, cannot be retrieved
+{
+ no warnings 'uninitialized' ;
+ ok(37, $h{'q'} eq undef );
+}
+
+# Attempting to delete a non-existent key should fail
+
+$status = $X->del('joe') ;
+ok(38, $status == 1 );
+
+# Check the get interface
+
+# First a non-existing key
+$status = $X->get('aaaa', $value) ;
+ok(39, $status == 1 );
+
+# Next an existing key
+$status = $X->get('a', $value) ;
+ok(40, $status == 0 );
+ok(41, $value eq 'A' );
+
+# seq
+# ###
+
+# ditto, but use put to replace the key/value pair.
+
+# use seq to walk backwards through a file - check that the order is reversed
+
+# check seq FIRST/LAST
+
+# sync
+# ####
+
+$status = $X->sync ;
+ok(42, $status == 0 );
+
+
+# fd
+# ##
+
+$status = $X->fd ;
+ok(43, $status != 0 );
+
+undef $X ;
+untie %h ;
+
+unlink $Dfile;
+
+# clear
+# #####
+
+ok(44, tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+foreach (1 .. 10)
+ { $h{$_} = $_ * 100 }
+
+# check that there are 10 elements in the hash
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(45, $i == 10);
+
+# now clear the hash
+%h = () ;
+
+# check it is empty
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(46, $i == 0);
+
+untie %h ;
+unlink $Dfile ;
+
+
+# Now try an in memory file
+ok(47, $X = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+# fd with an in memory file should return fail
+$status = $X->fd ;
+ok(48, $status == -1 );
+
+undef $X ;
+untie %h ;
+
+{
+ # check ability to override the default hashing
+ my %x ;
+ my $filename = "xyz" ;
+ my $hi = new DB_File::HASHINFO ;
+ $::count = 0 ;
+ $hi->{hash} = sub { ++$::count ; length $_[0] } ;
+ ok(49, tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $hi ) ;
+ $h{"abc"} = 123 ;
+ ok(50, $h{"abc"} == 123) ;
+ untie %x ;
+ unlink $filename ;
+ ok(51, $::count >0) ;
+}
+
+{
+ # check that attempting to tie an array to a DB_HASH will fail
+
+ my $filename = "xyz" ;
+ my @x ;
+ eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_HASH ; } ;
+ ok(52, $@ =~ /^DB_File can only tie an associative array to a DB_HASH database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ our (@ISA, @EXPORT);
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(53, $@ eq "") ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB","dbhash.tmp", O_RDWR|O_CREAT, 0640, $DB_HASH );
+ ' ;
+
+ main::ok(54, $@ eq "") ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok(55, $@ eq "") ;
+ main::ok(56, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
+ main::ok(57, $@ eq "") ;
+ main::ok(58, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(59, $@ eq "" ) ;
+ main::ok(60, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok(61, $@ eq "") ;
+ main::ok(62, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(%h);
+ unlink "SubDB.pm", "dbhash.tmp" ;
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ no warnings 'uninitialized';
+ my($fk, $sk, $fv, $sv) = @_ ;
+
+ print "# Fetch Key : expected '$fk' got '$fetch_key'\n"
+ if $fetch_key ne $fk ;
+ print "# Fetch Value : expected '$fv' got '$fetch_value'\n"
+ if $fetch_value ne $fv ;
+ print "# Store Key : expected '$sk' got '$store_key'\n"
+ if $store_key ne $sk ;
+ print "# Store Value : expected '$sv' got '$store_value'\n"
+ if $store_value ne $sv ;
+ print "# \$_ : expected 'original' got '$_'\n"
+ if $_ ne 'original' ;
+
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(63, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok(64, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(65, $h{"fred"} eq "joe");
+ # fk sk fv sv
+ ok(66, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ my ($k, $v) ;
+ $k = 'fred';
+ ok(67, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(68, $k eq "fred") ;
+ ok(69, $v eq "joe") ;
+ # fk sk fv sv
+ ok(70, checkOutput( "fred", "fred", "joe", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok(71, checkOutput( "", "fred", "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(72, $h{"Fred"} eq "[Jxe]");
+ # fk sk fv sv
+ ok(73, checkOutput( "", "fred", "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $k = 'Fred'; $v ='';
+ ok(74, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(75, $k eq "FRED") ;
+ ok(76, $v eq "[Jxe]") ;
+ # fk sk fv sv
+ ok(77, checkOutput( "FRED", "fred", "[Jxe]", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(78, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(79, $h{"fred"} eq "joe");
+ ok(80, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ #ok(77, $db->FIRSTKEY() eq "fred") ;
+ $k = 'fred';
+ ok(81, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(82, $k eq "fred") ;
+ ok(83, $v eq "joe") ;
+ # fk sk fv sv
+ ok(84, checkOutput( "fred", "fred", "joe", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(85, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(86, $h{"fred"} eq "joe");
+ ok(87, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $k = 'fred';
+ ok(88, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(89, $k eq "fred") ;
+ ok(90, $v eq "joe") ;
+ ok(91, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok(92, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(93, $result{"store key"} eq "store key - 1: [fred]");
+ ok(94, $result{"store value"} eq "store value - 1: [joe]");
+ ok(95, ! defined $result{"fetch key"} );
+ ok(96, ! defined $result{"fetch value"} );
+ ok(97, $_ eq "original") ;
+
+ ok(98, $db->FIRSTKEY() eq "fred") ;
+ ok(99, $result{"store key"} eq "store key - 1: [fred]");
+ ok(100, $result{"store value"} eq "store value - 1: [joe]");
+ ok(101, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(102, ! defined $result{"fetch value"} );
+ ok(103, $_ eq "original") ;
+
+ $h{"jim"} = "john" ;
+ ok(104, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(105, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(106, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(107, ! defined $result{"fetch value"} );
+ ok(108, $_ eq "original") ;
+
+ ok(109, $h{"fred"} eq "joe");
+ ok(110, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(111, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(112, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(113, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(114, $_ eq "original") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(115, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok(116, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use DB_File ;
+ our (%h, $k, $v);
+
+ unlink "fruit" ;
+ tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0640, $DB_HASH
+ or die "Cannot open file 'fruit': $!\n";
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+ unlink "fruit" ;
+ }
+
+ ok(117, docat_del($file) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
+ $h{ABC} = undef;
+ ok(118, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
+ %h = (); ;
+ ok(119, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # When iterating over a tied hash using "each", the key passed to FETCH
+ # will be recycled and passed to NEXTKEY. If a Source Filter modifies the
+ # key in FETCH via a filter_fetch_key method we need to check that the
+ # modified key doesn't get passed to NEXTKEY.
+ # Also Test "keys" & "values" while we are at it.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my $bad_key = 0 ;
+ my %h = () ;
+ my $db ;
+ ok(120, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+ $db->filter_fetch_key (sub { $_ =~ s/^Beta_/Alpha_/ if defined $_}) ;
+ $db->filter_store_key (sub { $bad_key = 1 if /^Beta_/ ; $_ =~ s/^Alpha_/Beta_/}) ;
+
+ $h{'Alpha_ABC'} = 2 ;
+ $h{'Alpha_DEF'} = 5 ;
+
+ ok(121, $h{'Alpha_ABC'} == 2);
+ ok(122, $h{'Alpha_DEF'} == 5);
+
+ my ($k, $v) = ("","");
+ while (($k, $v) = each %h) {}
+ ok(123, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $k (keys %h) {}
+ ok(124, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $v (values %h) {}
+ ok(125, $bad_key == 0);
+
+ undef $db ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # now an error to pass 'hash' a non-code reference
+ my $dbh = new DB_File::HASHINFO ;
+
+ eval { $dbh->{hash} = 2 };
+ ok(126, $@ =~ /^Key 'hash' not associated with a code reference at/);
+
+}
+
+{
+ # recursion detection in hash
+ my %hash ;
+ unlink $Dfile;
+ my $dbh = new DB_File::HASHINFO ;
+ $dbh->{hash} = sub { $hash{3} = 4 ; length $_[0] } ;
+
+
+ my (%h);
+ ok(127, tie(%hash, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh ) );
+
+ eval { $hash{1} = 2;
+ $hash{4} = 5;
+ };
+
+ ok(128, $@ =~ /^DB_File hash callback: recursion detected/);
+ {
+ no warnings;
+ untie %hash;
+ }
+ unlink $Dfile;
+}
+
+{
+ # Check that two hash's don't interact
+ my %hash1 ;
+ my %hash2 ;
+ my $h1_count = 0;
+ my $h2_count = 0;
+ unlink $Dfile, $Dfile2;
+ my $dbh1 = new DB_File::HASHINFO ;
+ $dbh1->{hash} = sub { ++ $h1_count ; length $_[0] } ;
+
+ my $dbh2 = new DB_File::HASHINFO ;
+ $dbh2->{hash} = sub { ++ $h2_count ; length $_[0] } ;
+
+
+
+ my (%h);
+ ok(129, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) );
+ ok(130, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) );
+
+ $hash1{DEFG} = 5;
+ $hash1{XYZ} = 2;
+ $hash1{ABCDE} = 5;
+
+ $hash2{defg} = 5;
+ $hash2{xyz} = 2;
+ $hash2{abcde} = 5;
+
+ ok(131, $h1_count > 0);
+ ok(132, $h1_count == $h2_count);
+
+ ok(133, safeUntie \%hash1);
+ ok(134, safeUntie \%hash2);
+ unlink $Dfile, $Dfile2;
+}
+
+{
+ # Passing undef for flags and/or mode when calling tie could cause
+ # Use of uninitialized value in subroutine entry
+
+
+ my $warn_count = 0 ;
+ #local $SIG{__WARN__} = sub { ++ $warn_count };
+ my %hash1;
+ unlink $Dfile;
+
+ tie %hash1, 'DB_File',$Dfile, undef;
+ ok(135, $warn_count == 0);
+ $warn_count = 0;
+ tie %hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, undef;
+ ok(136, $warn_count == 0);
+ tie %hash1, 'DB_File',$Dfile, undef, undef;
+ ok(137, $warn_count == 0);
+ $warn_count = 0;
+
+ unlink $Dfile;
+}
+
+{
+ # Check that DBM Filter can cope with read-only $_
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(138, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_fetch_key (sub { }) ;
+ $db->filter_store_key (sub { }) ;
+ $db->filter_fetch_value (sub { }) ;
+ $db->filter_store_value (sub { }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(139, $h{"fred"} eq "joe");
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (140, ! $@);
+
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ $h{"fred"} = "joe" ;
+
+ ok(141, $h{"fred"} eq "joe");
+
+ ok(142, $db->FIRSTKEY() eq "fred") ;
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (143, ! $@);
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+exit ;
diff --git a/storage/bdb/perl/DB_File/t/db-recno.t b/storage/bdb/perl/DB_File/t/db-recno.t
new file mode 100644
index 00000000000..5390b549376
--- /dev/null
+++ b/storage/bdb/perl/DB_File/t/db-recno.t
@@ -0,0 +1,1428 @@
+#!./perl -w
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use warnings;
+use strict;
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..0 # Skip: DB_File was not built\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+our ($dbh, $Dfile, $bad_ones, $FA);
+
+# full tied array support started in Perl 5.004_57
+# Double check to see if it is available.
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+
+ return $result ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ normalise($result) ;
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ my $result = docat($file);
+ unlink $file ;
+ return $result;
+}
+
+sub safeUntie
+{
+ my $hashref = shift ;
+ my $no_inner = 1;
+ local $SIG{__WARN__} = sub {-- $no_inner } ;
+ untie @$hashref;
+ return $no_inner;
+}
+
+sub bad_one
+{
+ unless ($bad_ones++) {
+ print STDERR <<EOM ;
+#
+# Some older versions of Berkeley DB version 1 will fail db-recno
+# tests 61, 63 and 65.
+EOM
+ if ($^O eq 'darwin'
+ && $Config{db_version_major} == 1
+ && $Config{db_version_minor} == 0
+ && $Config{db_version_patch} == 0) {
+ print STDERR <<EOM ;
+#
+# For example Mac OS X 10.1.4 (or earlier) has such an old
+# version of Berkeley DB.
+EOM
+ }
+
+ print STDERR <<EOM ;
+#
+# You can safely ignore the errors if you're never going to use the
+# broken functionality (recno databases with a modified bval).
+# Otherwise you'll have to upgrade your DB library.
+#
+# If you want to use Berkeley DB version 1, then 1.85 and 1.86 are the
+# last versions that were released. Berkeley DB version 2 is continually
+# being updated -- Check out http://www.sleepycat.com/ for more details.
+#
+EOM
+ }
+}
+
+sub normalise
+{
+ return unless $^O eq 'cygwin' ;
+ foreach (@_)
+ { s#\r\n#\n#g }
+}
+
+BEGIN
+{
+ {
+ local $SIG{__DIE__} ;
+ eval { require Data::Dumper ; import Data::Dumper } ;
+ }
+
+ if ($@) {
+ *Dumper = sub { my $a = shift; return "[ @{ $a } ]" } ;
+ }
+}
+
+my $splice_tests = 10 + 12 + 1; # ten regressions, plus the randoms
+my $total_tests = 158 ;
+$total_tests += $splice_tests if $FA ;
+print "1..$total_tests\n";
+
+$Dfile = "recno.tmp";
+unlink $Dfile ;
+
+umask(0);
+
+# Check the interface to RECNOINFO
+
+$dbh = new DB_File::RECNOINFO ;
+ok(1, ! defined $dbh->{bval}) ;
+ok(2, ! defined $dbh->{cachesize}) ;
+ok(3, ! defined $dbh->{psize}) ;
+ok(4, ! defined $dbh->{flags}) ;
+ok(5, ! defined $dbh->{lorder}) ;
+ok(6, ! defined $dbh->{reclen}) ;
+ok(7, ! defined $dbh->{bfname}) ;
+
+$dbh->{bval} = 3000 ;
+ok(8, $dbh->{bval} == 3000 );
+
+$dbh->{cachesize} = 9000 ;
+ok(9, $dbh->{cachesize} == 9000 );
+
+$dbh->{psize} = 400 ;
+ok(10, $dbh->{psize} == 400 );
+
+$dbh->{flags} = 65 ;
+ok(11, $dbh->{flags} == 65 );
+
+$dbh->{lorder} = 123 ;
+ok(12, $dbh->{lorder} == 123 );
+
+$dbh->{reclen} = 1234 ;
+ok(13, $dbh->{reclen} == 1234 );
+
+$dbh->{bfname} = 1234 ;
+ok(14, $dbh->{bfname} == 1234 );
+
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(15, $@ =~ /^DB_File::RECNOINFO::STORE - Unknown element 'fred' at/ );
+eval 'my $q = $dbh->{fred}' ;
+ok(16, $@ =~ /^DB_File::RECNOINFO::FETCH - Unknown element 'fred' at/ );
+
+# Now check the interface to RECNOINFO
+
+my $X ;
+my @h ;
+ok(17, $X = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+
+my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
+
+ok(18, ((stat($Dfile))[2] & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
+ || $noMode{$^O} );
+
+#my $l = @h ;
+my $l = $X->length ;
+ok(19, ($FA ? @h == 0 : !$l) );
+
+my @data = qw( a b c d ever f g h i j k longername m n o p) ;
+
+$h[0] = shift @data ;
+ok(20, $h[0] eq 'a' );
+
+my $ i;
+foreach (@data)
+ { $h[++$i] = $_ }
+
+unshift (@data, 'a') ;
+
+ok(21, defined $h[1] );
+ok(22, ! defined $h[16] );
+ok(23, $FA ? @h == @data : $X->length == @data );
+
+
+# Overwrite an entry & check fetch it
+$h[3] = 'replaced' ;
+$data[3] = 'replaced' ;
+ok(24, $h[3] eq 'replaced' );
+
+#PUSH
+my @push_data = qw(added to the end) ;
+($FA ? push(@h, @push_data) : $X->push(@push_data)) ;
+push (@data, @push_data) ;
+ok(25, $h[++$i] eq 'added' );
+ok(26, $h[++$i] eq 'to' );
+ok(27, $h[++$i] eq 'the' );
+ok(28, $h[++$i] eq 'end' );
+
+# POP
+my $popped = pop (@data) ;
+my $value = ($FA ? pop @h : $X->pop) ;
+ok(29, $value eq $popped) ;
+
+# SHIFT
+$value = ($FA ? shift @h : $X->shift) ;
+my $shifted = shift @data ;
+ok(30, $value eq $shifted );
+
+# UNSHIFT
+
+# empty list
+($FA ? unshift @h,() : $X->unshift) ;
+ok(31, ($FA ? @h == @data : $X->length == @data ));
+
+my @new_data = qw(add this to the start of the array) ;
+$FA ? unshift (@h, @new_data) : $X->unshift (@new_data) ;
+unshift (@data, @new_data) ;
+ok(32, $FA ? @h == @data : $X->length == @data );
+ok(33, $h[0] eq "add") ;
+ok(34, $h[1] eq "this") ;
+ok(35, $h[2] eq "to") ;
+ok(36, $h[3] eq "the") ;
+ok(37, $h[4] eq "start") ;
+ok(38, $h[5] eq "of") ;
+ok(39, $h[6] eq "the") ;
+ok(40, $h[7] eq "array") ;
+ok(41, $h[8] eq $data[8]) ;
+
+# Brief test for SPLICE - more thorough 'soak test' is later.
+my @old;
+if ($FA) {
+ @old = splice(@h, 1, 2, qw(bananas just before));
+}
+else {
+ @old = $X->splice(1, 2, qw(bananas just before));
+}
+ok(42, $h[0] eq "add") ;
+ok(43, $h[1] eq "bananas") ;
+ok(44, $h[2] eq "just") ;
+ok(45, $h[3] eq "before") ;
+ok(46, $h[4] eq "the") ;
+ok(47, $h[5] eq "start") ;
+ok(48, $h[6] eq "of") ;
+ok(49, $h[7] eq "the") ;
+ok(50, $h[8] eq "array") ;
+ok(51, $h[9] eq $data[8]) ;
+$FA ? splice(@h, 1, 3, @old) : $X->splice(1, 3, @old);
+
+# Now both arrays should be identical
+
+my $ok = 1 ;
+my $j = 0 ;
+foreach (@data)
+{
+ $ok = 0, last if $_ ne $h[$j ++] ;
+}
+ok(52, $ok );
+
+# Negative subscripts
+
+# get the last element of the array
+ok(53, $h[-1] eq $data[-1] );
+ok(54, $h[-1] eq $h[ ($FA ? @h : $X->length) -1] );
+
+# get the first element using a negative subscript
+eval '$h[ - ( $FA ? @h : $X->length)] = "abcd"' ;
+ok(55, $@ eq "" );
+ok(56, $h[0] eq "abcd" );
+
+# now try to read before the start of the array
+eval '$h[ - (1 + ($FA ? @h : $X->length))] = 1234' ;
+ok(57, $@ =~ '^Modification of non-creatable array value attempted' );
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+ok(58, safeUntie \@h);
+
+unlink $Dfile;
+
+
+{
+ # Check bval defaults to \n
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ ok(59, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(60, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ ok(61, $x eq "abc\ndef\n\nghi\n") ;
+}
+
+{
+ # Change bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{bval} = "-" ;
+ ok(62, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(63, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc-def--ghi-") ;
+ bad_one() unless $ok ;
+ ok(64, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with default bval (space)
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{reclen} = 5 ;
+ ok(65, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(66, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc def ghi ") ;
+ bad_one() unless $ok ;
+ ok(67, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with user-defined bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{bval} = "-" ;
+ $dbh->{reclen} = 5 ;
+ ok(68, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(69, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc--def-------ghi--") ;
+ bad_one() unless $ok ;
+ ok(70, $ok) ;
+}
+
+{
+ # check that attempting to tie an associative array to a DB_RECNO will fail
+
+ my $filename = "xyz" ;
+ my %x ;
+ eval { tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO ; } ;
+ ok(71, $@ =~ /^DB_File can only tie an array to a DB_RECNO database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ our (@ISA, @EXPORT);
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE or die "Could not close: $!";
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(72, $@ eq "") ;
+ my @h ;
+ my $X ;
+ eval '
+ $X = tie(@h, "SubDB","recno.tmp", O_RDWR|O_CREAT, 0640, $DB_RECNO );
+ ' ;
+ die "Could not tie: $!" unless $X;
+
+ main::ok(73, $@ eq "") ;
+
+ my $ret = eval '$h[3] = 3 ; return $h[3] ' ;
+ main::ok(74, $@ eq "") ;
+ main::ok(75, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put(1, 4) ; $X->get(1, $value) ; return $value' ;
+ main::ok(76, $@ eq "") ;
+ main::ok(77, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(78, $@ eq "" ) ;
+ main::ok(79, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok(80, $@ eq "") ;
+ main::ok(81, $ret eq "[[11]]") ;
+
+ undef $X;
+ main::ok(82, main::safeUntie \@h);
+ unlink "SubDB.pm", "recno.tmp" ;
+
+}
+
+{
+
+ # test $#
+ my $self ;
+ unlink $Dfile;
+ ok(83, $self = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[2] = "ghi" ;
+ $h[3] = "jkl" ;
+ ok(84, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ ok(85, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ ok(86, $x eq "abc\ndef\nghi\njkl\n") ;
+
+ # $# sets array to same length
+ ok(87, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 3 }
+ else
+ { $self->STORESIZE(4) }
+ ok(88, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ ok(89, safeUntie \@h);
+ $x = docat($Dfile) ;
+ ok(90, $x eq "abc\ndef\nghi\njkl\n") ;
+
+ # $# sets array to bigger
+ ok(91, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 6 }
+ else
+ { $self->STORESIZE(7) }
+ ok(92, $FA ? $#h == 6 : $self->length() == 7) ;
+ undef $self ;
+ ok(93, safeUntie \@h);
+ $x = docat($Dfile) ;
+ ok(94, $x eq "abc\ndef\nghi\njkl\n\n\n\n") ;
+
+ # $# sets array smaller
+ ok(95, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 2 }
+ else
+ { $self->STORESIZE(3) }
+ ok(96, $FA ? $#h == 2 : $self->length() == 3) ;
+ undef $self ;
+ ok(97, safeUntie \@h);
+ $x = docat($Dfile) ;
+ ok(98, $x eq "abc\ndef\nghi\n") ;
+
+ unlink $Dfile;
+
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+
+ print "# Fetch Key : expected '$fk' got '$fetch_key'\n"
+ if $fetch_key ne $fk ;
+ print "# Fetch Value : expected '$fv' got '$fetch_value'\n"
+ if $fetch_value ne $fv ;
+ print "# Store Key : expected '$sk' got '$store_key'\n"
+ if $store_key ne $sk ;
+ print "# Store Value : expected '$sv' got '$store_value'\n"
+ if $store_value ne $sv ;
+ print "# \$_ : expected 'original' got '$_'\n"
+ if $_ ne 'original' ;
+
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(99, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ # fk sk fv sv
+ ok(100, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(101, $h[0] eq "joe");
+ # fk sk fv sv
+ ok(102, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(103, $db->FIRSTKEY() == 0) ;
+ # fk sk fv sv
+ ok(104, checkOutput( 0, "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { ++ $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ *= 2 ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[1] = "Joe" ;
+ # fk sk fv sv
+ ok(105, checkOutput( "", 2, "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(106, $h[1] eq "[Jxe]");
+ # fk sk fv sv
+ ok(107, checkOutput( "", 2, "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(108, $db->FIRSTKEY() == 1) ;
+ # fk sk fv sv
+ ok(109, checkOutput( 1, "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(110, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(111, $h[0] eq "joe");
+ ok(112, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(113, $db->FIRSTKEY() == 0) ;
+ ok(114, checkOutput( 0, "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(115, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(116, $h[0] eq "joe");
+ ok(117, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(118, $db->FIRSTKEY() == 0) ;
+ ok(119, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ ok(120, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+
+ unlink $Dfile;
+ ok(121, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ my %result = () ;
+
+ # Closure() -- factory used by the "DBM Filter with a closure" test.
+ # Returns an anonymous sub that, each time the filter fires, bumps a
+ # per-filter counter, remembers the $_ it was handed, and records
+ # "$name - $count: [@kept]" in the shared %result hash so the test
+ # can verify how often (and with what) each filter was invoked.
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ ok(122, $result{"store key"} eq "store key - 1: [0]");
+ ok(123, $result{"store value"} eq "store value - 1: [joe]");
+ ok(124, ! defined $result{"fetch key"} );
+ ok(125, ! defined $result{"fetch value"} );
+ ok(126, $_ eq "original") ;
+
+ ok(127, $db->FIRSTKEY() == 0 ) ;
+ ok(128, $result{"store key"} eq "store key - 1: [0]");
+ ok(129, $result{"store value"} eq "store value - 1: [joe]");
+ ok(130, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(131, ! defined $result{"fetch value"} );
+ ok(132, $_ eq "original") ;
+
+ $h[7] = "john" ;
+ ok(133, $result{"store key"} eq "store key - 2: [0 7]");
+ ok(134, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(135, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(136, ! defined $result{"fetch value"} );
+ ok(137, $_ eq "original") ;
+
+ ok(138, $h[0] eq "joe");
+ ok(139, $result{"store key"} eq "store key - 3: [0 7 0]");
+ ok(140, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(141, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(142, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(143, $_ eq "original") ;
+
+ undef $db ;
+ ok(144, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ unlink $Dfile;
+
+ ok(145, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_store_key (sub { $_ = $h[0] }) ;
+
+ eval '$h[1] = 1234' ;
+ ok(146, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ ok(147, safeUntie \@h);
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use DB_File ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $x = tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file 'text': $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $FA ? push @h, "green", "black"
+ : $x->push("green", "black") ;
+
+ my $elements = $FA ? scalar @h : $x->length ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $FA ? pop @h : $x->pop ;
+ print "popped $last\n" ;
+
+ $FA ? unshift @h, "white"
+ : $x->unshift("white") ;
+ my $first = $FA ? shift @h : $x->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # use a negative index
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ undef $x ;
+ untie @h ;
+
+ unlink $filename ;
+ }
+
+ ok(148, docat_del($file) eq <<'EOM') ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+The last element is green
+The 2nd last element is yellow
+EOM
+
+ my $save_output = "xyzt" ;
+ {
+ my $redirect = new Redirect $save_output ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ our (@h, $H, $file, $i);
+ use DB_File ;
+ use Fcntl ;
+
+ $file = "text" ;
+
+ unlink $file ;
+
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file $file: $!\n" ;
+
+ # first create a text file to play with
+ $h[0] = "zero" ;
+ $h[1] = "one" ;
+ $h[2] = "two" ;
+ $h[3] = "three" ;
+ $h[4] = "four" ;
+
+
+ # Print the records in order.
+ #
+ # The length method is needed here because evaluating a tied
+ # array in a scalar context does not return the number of
+ # elements in the array.
+
+ print "\nORIGINAL\n" ;
+ foreach $i (0 .. $H->length - 1) {
+ print "$i: $h[$i]\n" ;
+ }
+
+ # use the push & pop methods
+ $a = $H->pop ;
+ $H->push("last") ;
+ print "\nThe last record was [$a]\n" ;
+
+ # and the shift & unshift methods
+ $a = $H->shift ;
+ $H->unshift("first") ;
+ print "The first record was [$a]\n" ;
+
+ # Use the API to add a new record after record 2.
+ $i = 2 ;
+ $H->put($i, "Newbie", R_IAFTER) ;
+
+ # and a new record before record 1.
+ $i = 1 ;
+ $H->put($i, "New One", R_IBEFORE) ;
+
+ # delete record 3
+ $H->del(3) ;
+
+ # now print the records in reverse order
+ print "\nREVERSE\n" ;
+ for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
+ { print "$i: $h[$i]\n" }
+
+ # same again, but use the API functions instead
+ print "\nREVERSE again\n" ;
+ my ($s, $k, $v) = (0, 0, 0) ;
+ for ($s = $H->seq($k, $v, R_LAST) ;
+ $s == 0 ;
+ $s = $H->seq($k, $v, R_PREV))
+ { print "$k: $v\n" }
+
+ undef $H ;
+ untie @h ;
+
+ unlink $file ;
+ }
+
+ ok(149, docat_del($save_output) eq <<'EOM') ;
+
+ORIGINAL
+0: zero
+1: one
+2: two
+3: three
+4: four
+
+The last record was [four]
+The first record was [zero]
+
+REVERSE
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+
+REVERSE again
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+EOM
+
+}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my @h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+ $h[0] = undef;
+ ok(150, $a eq "") ;
+ ok(151, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ unlink $Dfile;
+ my @h ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+ @h = (); ;
+ ok(152, $a eq "") ;
+ ok(153, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # Check that DBM Filter can cope with read-only $_
+
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ unlink $Dfile;
+
+ ok(154, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_fetch_key (sub { }) ;
+ $db->filter_store_key (sub { }) ;
+ $db->filter_fetch_value (sub { }) ;
+ $db->filter_store_value (sub { }) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ ok(155, $h[0] eq "joe");
+
+ eval { grep { $h[$_] } (1, 2, 3) };
+ ok (156, ! $@);
+
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ $h[1] = "joe" ;
+
+ ok(157, $h[1] eq "joe");
+
+ eval { grep { $h[$_] } (1, 2, 3) };
+ ok (158, ! $@);
+
+ undef $db ;
+ untie @h;
+ unlink $Dfile;
+}
+
+# Only test splice if this is a newish version of Perl
+exit unless $FA ;
+
+# Test SPLICE
+
+{
+ # check that the splice warnings are under the same lexical control
+ # as their non-tied counterparts.
+
+ use warnings;
+ use strict;
+
+ my $a = '';
+ my @a = (1);
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ unlink $Dfile;
+ my @tied ;
+
+ tie @tied, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+
+ # uninitialized offset
+ use warnings;
+ my $offset ;
+ $a = '';
+ splice(@a, $offset);
+ ok(159, $a =~ /^Use of uninitialized value /);
+ $a = '';
+ splice(@tied, $offset);
+ ok(160, $a =~ /^Use of uninitialized value in splice/);
+
+ no warnings 'uninitialized';
+ $a = '';
+ splice(@a, $offset);
+ ok(161, $a eq '');
+ $a = '';
+ splice(@tied, $offset);
+ ok(162, $a eq '');
+
+ # uninitialized length
+ use warnings;
+ my $length ;
+ $a = '';
+ splice(@a, 0, $length);
+ ok(163, $a =~ /^Use of uninitialized value /);
+ $a = '';
+ splice(@tied, 0, $length);
+ ok(164, $a =~ /^Use of uninitialized value in splice/);
+
+ no warnings 'uninitialized';
+ $a = '';
+ splice(@a, 0, $length);
+ ok(165, $a eq '');
+ $a = '';
+ splice(@tied, 0, $length);
+ ok(166, $a eq '');
+
+ # offset past end of array
+ use warnings;
+ $a = '';
+ splice(@a, 3);
+ my $splice_end_array = ($a =~ /^splice\(\) offset past end of array/);
+ $a = '';
+ splice(@tied, 3);
+ ok(167, !$splice_end_array || $a =~ /^splice\(\) offset past end of array/);
+
+ no warnings 'misc';
+ $a = '';
+ splice(@a, 3);
+ ok(168, $a eq '');
+ $a = '';
+ splice(@tied, 3);
+ ok(169, $a eq '');
+
+ ok(170, safeUntie \@tied);
+ unlink $Dfile;
+}
+
+#
+# These are a few regression tests: bundles of five arguments to pass
+# to test_splice(). The first four arguments correspond to those
+# given to splice(), and the last says which context to call it in
+# (scalar, list or void).
+#
+# The expected result is not needed because we get that by running
+# Perl's built-in splice().
+#
+my @tests = ([ [ 'falsely', 'dinosaur', 'remedy', 'commotion',
+ 'rarely', 'paleness' ],
+ -4, -2,
+ [ 'redoubled', 'Taylorize', 'Zoe', 'halogen' ],
+ 'void' ],
+
+ [ [ 'a' ], -2, 1, [ 'B' ], 'void' ],
+
+ [ [ 'Hartley', 'Islandia', 'assents', 'wishful' ],
+ 0, -4,
+ [ 'maids' ],
+ 'void' ],
+
+ [ [ 'visibility', 'pocketful', 'rectangles' ],
+ -10, 0,
+ [ 'garbages' ],
+ 'void' ],
+
+ [ [ 'sleeplessly' ],
+ 8, -4,
+ [ 'Margery', 'clearing', 'repercussion', 'clubs',
+ 'arise' ],
+ 'void' ],
+
+ [ [ 'chastises', 'recalculates' ],
+ 0, 0,
+ [ 'momentariness', 'mediates', 'accents', 'toils',
+ 'regaled' ],
+ 'void' ],
+
+ [ [ 'b', '' ],
+ 9, 8,
+ [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
+ 'scalar' ],
+
+ [ [ 'b', '' ],
+ undef, undef,
+ [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
+ 'scalar' ],
+
+ [ [ 'riheb' ], -8, undef, [], 'void' ],
+
+ [ [ 'uft', 'qnxs', '' ],
+ 6, -2,
+ [ 'znp', 'mhnkh', 'bn' ],
+ 'void' ],
+ );
+
+my $testnum = 171;
+my $failed = 0;
+require POSIX; my $tmp = POSIX::tmpnam();
+foreach my $test (@tests) {
+ my $err = test_splice(@$test);
+ if (defined $err) {
+ print STDERR "# failed: ", Dumper($test);
+ print STDERR "# error: $err\n";
+ $failed = 1;
+ ok($testnum++, 0);
+ }
+ else { ok($testnum++, 1) }
+}
+
+if ($failed) {
+ # Not worth running the random ones
+ print STDERR '# skipping ', $testnum++, "\n";
+}
+else {
+ # A thousand randomly-generated tests
+ $failed = 0;
+ srand(0);
+ foreach (0 .. 1000 - 1) {
+ my $test = rand_test();
+ my $err = test_splice(@$test);
+ if (defined $err) {
+ print STDERR "# failed: ", Dumper($test);
+ print STDERR "# error: $err\n";
+ $failed = 1;
+ print STDERR "# skipping any remaining random tests\n";
+ last;
+ }
+ }
+
+ ok($testnum++, not $failed);
+}
+
+die "testnum ($testnum) != total_tests ($total_tests) + 1"
+ if $testnum != $total_tests + 1;
+
+exit ;
+
+# Subroutines for SPLICE testing
+
+# test_splice()
+#
+# Test the new splice() against Perl's built-in one. The first four
+# parameters are those passed to splice(), except that the lists must
+# be (explicitly) passed by reference, and are not actually modified.
+# (It's just a test!) The last argument specifies the context in
+# which to call the functions: 'list', 'scalar', or 'void'.
+#
+# Returns:
+# undef, if the two splices give the same results for the given
+# arguments and context;
+#
+# an error message showing the difference, otherwise.
+#
+# Reads global variable $tmp.
+#
+sub test_splice {
+ die 'usage: test_splice(array, offset, length, list, context)' if @_ != 5;
+ my ($array, $offset, $length, $list, $context) = @_;
+ my @array = @$array;
+ my @list = @$list;
+
+ unlink $tmp;
+
+ my @h;
+ my $H = tie @h, 'DB_File', $tmp, O_CREAT|O_RDWR, 0644, $DB_RECNO
+ or die "cannot open $tmp: $!";
+
+ # Mirror the plain Perl array into the tied DB_File array.
+ my $i = 0;
+ foreach ( @array ) { $h[$i++] = $_ }
+
+ return "basic DB_File sanity check failed"
+ if list_diff(\@array, \@h);
+
+ # Output from splice():
+ # Returned value (munged a bit), error msg, warnings
+ #
+ my ($s_r, $s_error, @s_warnings);
+
+ my $gather_warning = sub { push @s_warnings, $_[0] };
+ if ($context eq 'list') {
+ my @r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ @r = splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = \@r;
+ }
+ elsif ($context eq 'scalar') {
+ my $r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ $r = splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = [ $r ];
+ }
+ elsif ($context eq 'void') {
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = [];
+ }
+ else {
+ die "bad context $context";
+ }
+
+ # Strip the " at FILE line N." location suffix so messages from the
+ # two implementations can be compared verbatim.
+ foreach ($s_error, @s_warnings) {
+ chomp;
+ s/ at \S+ line \d+\.$//;
+ }
+
+ # Now do the same for DB_File's version of splice
+ my ($ms_r, $ms_error, @ms_warnings);
+ $gather_warning = sub { push @ms_warnings, $_[0] };
+ if ($context eq 'list') {
+ my @r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ @r = splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = \@r;
+ }
+ elsif ($context eq 'scalar') {
+ my $r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ $r = splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = [ $r ];
+ }
+ elsif ($context eq 'void') {
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = [];
+ }
+ else {
+ die "bad context $context";
+ }
+
+ foreach ($ms_error, @ms_warnings) {
+ chomp;
+ s/ at \S+ line \d+\.?.*//s;
+ }
+
+ return "different errors: '$s_error' vs '$ms_error'"
+ if $s_error ne $ms_error;
+ return('different return values: ' . Dumper($s_r) . ' vs ' . Dumper($ms_r))
+ if list_diff($s_r, $ms_r);
+ return('different changed list: ' . Dumper(\@array) . ' vs ' . Dumper(\@h))
+ if list_diff(\@array, \@h);
+
+ if ((scalar @s_warnings) != (scalar @ms_warnings)) {
+ return 'different number of warnings';
+ }
+
+ while (@s_warnings) {
+ my $sw = shift @s_warnings;
+ my $msw = shift @ms_warnings;
+
+ if (defined $sw and defined $msw) {
+ # DB_File's warnings may carry a trailing " (...)" qualifier
+ # and, on pre-5.6 perls, an extra " in splice" suffix;
+ # normalise before comparing.
+ $msw =~ s/ \(.+\)$//;
+ $msw =~ s/ in splice$// if $] < 5.006;
+ if ($sw ne $msw) {
+ return "different warning: '$sw' vs '$msw'";
+ }
+ }
+ elsif (not defined $sw and not defined $msw) {
+ # Okay.
+ }
+ else {
+ return "one warning defined, another undef";
+ }
+ }
+
+ undef $H;
+ untie @h;
+
+ # Finally, re-read the tied file from disk and check its contents
+ # still match the in-memory reference array.
+ open(TEXT, $tmp) or die "cannot open $tmp: $!";
+ @h = <TEXT>; normalise @h; chomp @h;
+ close TEXT or die "cannot close $tmp: $!";
+ return('list is different when re-read from disk: '
+ . Dumper(\@array) . ' vs ' . Dumper(\@h))
+ if list_diff(\@array, \@h);
+
+ return undef; # success
+}
+
+
+# list_diff()
+#
+# Do two lists differ?
+#
+# Parameters:
+# reference to first list
+# reference to second list
+#
+# Returns true iff they differ. Only works for lists of (string or
+# undef).
+#
+# Surely there is a better way to do this?
+#
+sub list_diff {
+ die 'usage: list_diff(ref to first list, ref to second list)'
+ if @_ != 2;
+ my ($a, $b) = @_;
+ my @a = @$a; my @b = @$b;
+ # Different lengths differ trivially.
+ return 1 if (scalar @a) != (scalar @b);
+ for (my $i = 0; $i < @a; $i++) {
+ my ($ae, $be) = ($a[$i], $b[$i]);
+ # String comparison ("ne"): elements are expected to be
+ # strings or undef, per the header comment above.
+ if (defined $ae and defined $be) {
+ return 1 if $ae ne $be;
+ }
+ elsif (not defined $ae and not defined $be) {
+ # Two undefined values are 'equal'
+ }
+ else {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+# rand_test()
+#
+# Think up a random ARRAY, OFFSET, LENGTH, LIST, and context.
+# ARRAY or LIST might be empty, and OFFSET or LENGTH might be
+# undefined. Return a 'test' - a listref of these five things.
+#
+sub rand_test {
+ die 'usage: rand_test()' if @_;
+ my @contexts = qw<list scalar void>;
+ my $context = $contexts[int(rand @contexts)];
+ # OFFSET and LENGTH: 50% chance of undef, otherwise a uniform
+ # integer in -10 .. 9 (deliberately allowing negative and
+ # out-of-range values to exercise splice edge cases).
+ return [ rand_list(),
+ (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
+ (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
+ rand_list(),
+ $context ];
+}
+
+
+sub rand_list {
+ die 'usage: rand_list()' if @_;
+ my @r;
+
+ # The probability of adding another element shrinks as the list
+ # grows, so short (possibly empty) lists are most likely.
+ while (rand() > 0.1 * (scalar @r + 1)) {
+ push @r, rand_word();
+ }
+ return \@r;
+}
+
+
+sub rand_word {
+ die 'usage: rand_word()' if @_;
+ my $r = '';
+ my @chars = qw<a b c d e f g h i j k l m n o p q r s t u v w x y z>;
+ # Same shrinking-probability scheme as rand_list: short words are
+ # much more likely than long ones; the empty word is possible.
+ while (rand() > 0.1 * (length($r) + 1)) {
+ $r .= $chars[int(rand(scalar @chars))];
+ }
+ return $r;
+}
+
+
diff --git a/storage/bdb/perl/DB_File/typemap b/storage/bdb/perl/DB_File/typemap
new file mode 100644
index 00000000000..8ad7b1282dc
--- /dev/null
+++ b/storage/bdb/perl/DB_File/typemap
@@ -0,0 +1,46 @@
+# typemap for Perl 5 interface to Berkeley
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+# last modified 10th December 2000
+# version 1.74
+#
+#################################### DB SECTION
+#
+#
+
+u_int T_U_INT
+DB_File T_PTROBJ
+DBT T_dbtdatum
+DBTKEY T_dbtkeydatum
+
+INPUT
+T_dbtkeydatum
+ DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (SvOK($arg)){
+ if (db->type != DB_RECNO) {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+ else {
+ Value = GetRecnoKey(aTHX_ db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(recno_t);
+ }
+ }
+T_dbtdatum
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBT_clear($var) ;
+ if (SvOK($arg)) {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+OUTPUT
+
+T_dbtkeydatum
+ OutputKey($arg, $var)
+T_dbtdatum
+ OutputValue($arg, $var)
+T_PTROBJ
+ sv_setref_pv($arg, dbtype, (void*)$var);
diff --git a/storage/bdb/perl/DB_File/version.c b/storage/bdb/perl/DB_File/version.c
new file mode 100644
index 00000000000..03b17c18e60
--- /dev/null
+++ b/storage/bdb/perl/DB_File/version.c
@@ -0,0 +1,82 @@
+/*
+
+ version.c -- Perl 5 interface to Berkeley DB
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+ last modified 2nd Jan 2002
+ version 1.802
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Changes:
+ 1.71 - Support for Berkeley DB version 3.
+ Support for Berkeley DB 2/3's backward compatibility mode.
+ 1.72 - No change.
+ 1.73 - Added support for threading
+ 1.74 - Added Perl core patch 7801.
+
+
+*/
+
+#define PERL_NO_GET_CONTEXT
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#include <db.h>
+
+/*
+ * __getBerkeleyDBInfo --
+ *   Populate the Perl-visible globals $DB_File::db_version,
+ *   $DB_File::db_ver and $DB_File::db_185_compat from the Berkeley DB
+ *   library actually linked in, croaking if db.h and libdb disagree or
+ *   if libdb is older than 2.3.4.
+ */
+void
+#ifdef CAN_PROTOTYPE
+__getBerkeleyDBInfo(void)
+#else
+__getBerkeleyDBInfo()
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ SV * version_sv = perl_get_sv("DB_File::db_version", GV_ADD|GV_ADDMULTI) ;
+ SV * ver_sv = perl_get_sv("DB_File::db_ver", GV_ADD|GV_ADDMULTI) ;
+ SV * compat_sv = perl_get_sv("DB_File::db_185_compat", GV_ADD|GV_ADDMULTI) ;
+
+#ifdef DB_VERSION_MAJOR
+ int Major, Minor, Patch ;
+
+ (void)db_version(&Major, &Minor, &Patch) ;
+
+ /* Check that the versions of db.h and libdb.a are the same */
+ if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR
+ || Patch != DB_VERSION_PATCH)
+ croak("\nDB_File needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ Major, Minor, Patch) ;
+
+ /* check that libdb is recent enough -- we need 2.3.4 or greater */
+ if (Major == 2 && (Minor < 3 || (Minor == 3 && Patch < 4)))
+ croak("DB_File needs Berkeley DB 2.3.4 or greater, you have %d.%d.%d\n",
+ Major, Minor, Patch) ;
+
+ {
+ /* db_version is "Major.Minor"; db_ver is a sortable numeric
+ * form, "Major.MMMPPP" with zero-padded minor and patch. */
+ char buffer[40] ;
+ sprintf(buffer, "%d.%d", Major, Minor) ;
+ sv_setpv(version_sv, buffer) ;
+ sprintf(buffer, "%d.%03d%03d", Major, Minor, Patch) ;
+ sv_setpv(ver_sv, buffer) ;
+ }
+
+#else /* ! DB_VERSION_MAJOR */
+ /* Pre-2.x Berkeley DB has no db_version(); report version 1. */
+ sv_setiv(version_sv, 1) ;
+ sv_setiv(ver_sv, 1) ;
+#endif /* ! DB_VERSION_MAJOR */
+
+#ifdef COMPAT185
+ sv_setiv(compat_sv, 1) ;
+#else /* ! COMPAT185 */
+ sv_setiv(compat_sv, 0) ;
+#endif /* ! COMPAT185 */
+
+}
diff --git a/storage/bdb/qam/qam.c b/storage/bdb/qam/qam.c
new file mode 100644
index 00000000000..b10f8743439
--- /dev/null
+++ b/storage/bdb/qam/qam.c
@@ -0,0 +1,1615 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam.c,v 11.134 2002/08/13 20:46:08 ubell Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+static int __qam_bulk __P((DBC *, DBT *, u_int32_t));
+static int __qam_c_close __P((DBC *, db_pgno_t, int *));
+static int __qam_c_del __P((DBC *));
+static int __qam_c_destroy __P((DBC *));
+static int __qam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __qam_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __qam_consume __P((DBC *, QMETA *, db_recno_t));
+static int __qam_getno __P((DB *, const DBT *, db_recno_t *));
+
+/*
+ * __qam_position --
+ * Position a queued access method cursor at a record. This returns
+ * the page locked. *exactp will be set if the record is valid.
+ * PUBLIC: int __qam_position
+ * PUBLIC: __P((DBC *, db_recno_t *, qam_position_mode, int *));
+ */
+int
+__qam_position(dbc, recnop, mode, exactp)
+ DBC *dbc; /* open cursor */
+ db_recno_t *recnop; /* pointer to recno to find */
+ qam_position_mode mode;/* locking: read or write */
+ int *exactp; /* indicate if it was found */
+{
+ QUEUE_CURSOR *cp;
+ DB *dbp;
+ QAMDATA *qp;
+ db_pgno_t pg;
+ int ret;
+
+ dbp = dbc->dbp;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ /* Fetch the page for this recno. */
+ pg = QAM_RECNO_PAGE(dbp, *recnop);
+
+ if ((ret = __db_lget(dbc, 0, pg, mode == QAM_READ ?
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &cp->lock)) != 0)
+ return (ret);
+ cp->page = NULL;
+ *exactp = 0;
+ /* In QAM_WRITE mode, ask the mpool to create the page if absent;
+ * for reads a missing page simply means "record not found". */
+ if ((ret = __qam_fget(dbp, &pg,
+ mode == QAM_WRITE ? DB_MPOOL_CREATE : 0, &cp->page)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, cp->lock);
+ if (mode != QAM_WRITE &&
+ (ret == DB_PAGE_NOTFOUND || ret == ENOENT))
+ return (0);
+ return (ret);
+ }
+ cp->pgno = pg;
+ cp->indx = QAM_RECNO_INDEX(dbp, pg, *recnop);
+
+ /* A zero page number means the page header was never written --
+ * presumably an all-zero page just created by DB_MPOOL_CREATE.
+ * Initialize it unless the database is read-only. */
+ if (PGNO(cp->page) == 0) {
+ if (F_ISSET(dbp, DB_AM_RDONLY)) {
+ *exactp = 0;
+ return (0);
+ }
+ PGNO(cp->page) = pg;
+ TYPE(cp->page) = P_QAMDATA;
+ }
+
+ qp = QAM_GET_RECORD(dbp, cp->page, cp->indx);
+ *exactp = F_ISSET(qp, QAM_VALID) ? 1 : 0;
+
+ return (ret);
+}
+
+/*
+ * __qam_pitem --
+ * Put an item on a queue page. Copy the data to the page and set the
+ * VALID and SET bits. If logging and the record was previously set,
+ * log that data, otherwise just log the new data.
+ *
+ * pagep must be write locked
+ *
+ * PUBLIC: int __qam_pitem
+ * PUBLIC: __P((DBC *, QPAGE *, u_int32_t, db_recno_t, DBT *));
+ */
+int
+__qam_pitem(dbc, pagep, indx, recno, data)
+ DBC *dbc;
+ QPAGE *pagep;
+ u_int32_t indx;
+ db_recno_t recno;
+ DBT *data;
+{
+ DB *dbp;
+ DBT olddata, pdata, *datap;
+ QAMDATA *qp;
+ QUEUE *t;
+ u_int32_t alloced;
+ u_int8_t *dest, *p;
+ int ret;
+
+ alloced = ret = 0;
+
+ dbp = dbc->dbp;
+ t = (QUEUE *)dbp->q_internal;
+
+ /* Queue records are fixed length (t->re_len). */
+ if (data->size > t->re_len)
+ goto len_err;
+
+ qp = QAM_GET_RECORD(dbp, pagep, indx);
+
+ p = qp->data;
+ datap = data;
+ /* NOTE: "alloced" does double duty below: first it carries the
+ * offending dlen for the len_err message; later (set to 1) it
+ * flags that datap->data was malloc'd and must be freed at err. */
+ if (F_ISSET(data, DB_DBT_PARTIAL)) {
+ if (data->doff + data->dlen > t->re_len) {
+ alloced = data->dlen;
+ goto len_err;
+ }
+ if (data->size != data->dlen) {
+len_err: __db_err(dbp->dbenv,
+ "Length improper for fixed length record %lu",
+ (u_long)(alloced ? alloced : data->size));
+ return (EINVAL);
+ }
+ /* A partial put covering the whole record is a plain put. */
+ if (data->size == t->re_len)
+ goto no_partial;
+
+ /*
+ * If we are logging, then we have to build the record
+ * first, otherwise, we can simply drop the change
+ * directly on the page. After this clause, make
+ * sure that datap and p are set up correctly so that
+ * copying datap into p does the right thing.
+ *
+ * Note, I am changing this so that if the existing
+ * record is not valid, we create a complete record
+ * to log so that both this and the recovery code is simpler.
+ */
+
+ if (DBC_LOGGING(dbc) || !F_ISSET(qp, QAM_VALID)) {
+ datap = &pdata;
+ memset(datap, 0, sizeof(*datap));
+
+ if ((ret = __os_malloc(dbp->dbenv,
+ t->re_len, &datap->data)) != 0)
+ return (ret);
+ alloced = 1;
+ datap->size = t->re_len;
+
+ /*
+ * Construct the record if it's valid, otherwise set it
+ * all to the pad character.
+ */
+ dest = datap->data;
+ if (F_ISSET(qp, QAM_VALID))
+ memcpy(dest, p, t->re_len);
+ else
+ memset(dest, t->re_pad, t->re_len);
+
+ dest += data->doff;
+ memcpy(dest, data->data, data->size);
+ } else {
+ datap = data;
+ p += data->doff;
+ }
+ }
+
+no_partial:
+ if (DBC_LOGGING(dbc)) {
+ /* Log the old record image only if one was previously set. */
+ olddata.size = 0;
+ if (F_ISSET(qp, QAM_SET)) {
+ olddata.data = qp->data;
+ olddata.size = t->re_len;
+ }
+ if ((ret = __qam_add_log(dbp, dbc->txn, &LSN(pagep),
+ 0, &LSN(pagep), pagep->pgno,
+ indx, recno, datap, qp->flags,
+ olddata.size == 0 ? NULL : &olddata)) != 0)
+ goto err;
+ }
+
+ F_SET(qp, QAM_VALID | QAM_SET);
+ memcpy(p, datap->data, datap->size);
+ /* Pad the remainder of a short (non-partial) record. */
+ if (!F_ISSET(data, DB_DBT_PARTIAL))
+ memset(p + datap->size, t->re_pad, t->re_len - datap->size);
+
+err: if (alloced)
+ __os_free(dbp->dbenv, datap->data);
+
+ return (ret);
+}
+/*
+ * __qam_c_put
+ * Cursor put for queued access method.
+ * BEFORE and AFTER cannot be specified.
+ */
+static int
+__qam_c_put(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ QUEUE_CURSOR *cp;
+ db_pgno_t pg;
+ db_recno_t new_cur, new_first;
+ u_int32_t opcode;
+ int exact, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ if (pgnop != NULL)
+ *pgnop = PGNO_INVALID;
+
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ switch (flags) {
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ /* Keyed put: extract the target recno from the key DBT. */
+ if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
+ return (ret);
+ /* FALLTHROUGH */
+ case DB_CURRENT:
+ break;
+ default:
+ /* The interface shouldn't let anything else through. */
+ DB_ASSERT(0);
+ return (__db_ferr(dbp->dbenv, "__qam_c_put", flags));
+ }
+
+ /* Write lock the record. */
+ if ((ret = __db_lget(dbc,
+ 0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+ return (ret);
+
+ if ((ret = __qam_position(dbc,
+ &cp->recno, QAM_WRITE, &exact)) != 0) {
+ /* We could not get the page, we can release the record lock. */
+ __LPUT(dbc, lock);
+ return (ret);
+ }
+
+ /* Put the item on the page. */
+ ret = __qam_pitem(dbc, (QPAGE *)cp->page, cp->indx, cp->recno, data);
+
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __qam_fput(
+ dbp, cp->pgno, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ /* The cursor keeps the record lock, not the page lock. */
+ cp->lock = lock;
+ cp->lock_mode = DB_LOCK_WRITE;
+ if (ret != 0)
+ return (ret);
+
+ /* We may need to reset the head or tail of the queue. */
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it.
+ */
+ if ((ret = mpf->get(mpf, &pg, 0, &meta)) != 0)
+ return (ret);
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0) {
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+ }
+
+ opcode = 0;
+ new_cur = new_first = 0;
+
+ /*
+ * If the put address is outside the queue, adjust the head and
+ * tail of the queue. If the order is inverted we move
+ * the one which is closer. The first case is when the
+ * queue is empty, move first and current to where the new
+ * insert is.
+ */
+
+ if (meta->first_recno == meta->cur_recno) {
+ /* Empty queue: both head and tail move to the new record. */
+ new_first = cp->recno;
+ new_cur = cp->recno + 1;
+ /* Skip the reserved out-of-band record number on wrap. */
+ if (new_cur == RECNO_OOB)
+ new_cur++;
+ opcode |= QAM_SETFIRST;
+ opcode |= QAM_SETCUR;
+ } else {
+ if (QAM_BEFORE_FIRST(meta, cp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ meta->first_recno - cp->recno <
+ cp->recno - meta->cur_recno)) {
+ new_first = cp->recno;
+ opcode |= QAM_SETFIRST;
+ }
+
+ if (meta->cur_recno == cp->recno ||
+ (QAM_AFTER_CURRENT(meta, cp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ cp->recno - meta->cur_recno <=
+ meta->first_recno - cp->recno))) {
+ new_cur = cp->recno + 1;
+ if (new_cur == RECNO_OOB)
+ new_cur++;
+ opcode |= QAM_SETCUR;
+ }
+ }
+
+ /* Log the pointer move; if logging fails, clear opcode so the
+ * meta page is left unmodified below. */
+ if (opcode != 0 && DBC_LOGGING(dbc)) {
+ ret = __qam_mvptr_log(dbp, dbc->txn, &meta->dbmeta.lsn,
+ 0, opcode, meta->first_recno, new_first,
+ meta->cur_recno, new_cur, &meta->dbmeta.lsn, PGNO_BASE_MD);
+ if (ret != 0)
+ opcode = 0;
+ }
+
+ if (opcode & QAM_SETCUR)
+ meta->cur_recno = new_cur;
+ if (opcode & QAM_SETFIRST)
+ meta->first_recno = new_first;
+
+ if ((t_ret = mpf->put(
+ mpf, meta, opcode != 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Don't hold the meta page long term. */
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __qam_append --
+ * Perform a put(DB_APPEND) in queue.
+ *
+ * PUBLIC: int __qam_append __P((DBC *, DBT *, DBT *));
+ */
+int
+__qam_append(dbc, key, data)
+ DBC *dbc;
+ DBT *key, *data;
+{
+ DB *dbp;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ QPAGE *page;
+ QUEUE *qp;
+ QUEUE_CURSOR *cp;
+ db_pgno_t pg;
+ db_recno_t recno;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it.
+ */
+ if ((ret = mpf->get(mpf, &pg, 0, &meta)) != 0)
+ return (ret);
+ /* Write lock the meta page. */
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0) {
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+ }
+
+ /* Get the next record number. Record numbers wrap around and the
+ * out-of-band value RECNO_OOB is reserved, so it is skipped. */
+ recno = meta->cur_recno;
+ meta->cur_recno++;
+ if (meta->cur_recno == RECNO_OOB)
+ meta->cur_recno++;
+ /* cur_recno catching up with first_recno means the queue has
+ * wrapped and is full: undo the allocation and fail with EFBIG. */
+ if (meta->cur_recno == meta->first_recno) {
+ meta->cur_recno--;
+ if (meta->cur_recno == RECNO_OOB)
+ meta->cur_recno--;
+ (void)__LPUT(dbc, lock);
+ ret = EFBIG;
+ goto err;
+ }
+
+ if (QAM_BEFORE_FIRST(meta, recno))
+ meta->first_recno = recno;
+
+ /* Lock the record and release meta page lock. */
+ if ((ret = __db_lget(dbc, LCK_COUPLE_ALWAYS,
+ recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ /*
+ * The application may modify the data based on the selected record
+ * number.
+ */
+ if (dbc->dbp->db_append_recno != NULL &&
+ (ret = dbc->dbp->db_append_recno(dbc->dbp, data, recno)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ /* The cursor retains the record lock. */
+ cp->lock = lock;
+ cp->lock_mode = DB_LOCK_WRITE;
+
+ pg = QAM_RECNO_PAGE(dbp, recno);
+
+ /* Fetch and write lock the data page. */
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = __qam_fget(dbp, &pg, DB_MPOOL_CREATE, &page)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ /* See if this is a new page. */
+ if (page->pgno == 0) {
+ page->pgno = pg;
+ page->type = P_QAMDATA;
+ }
+
+ /* Put the item on the page and log it. */
+ ret = __qam_pitem(dbc, page,
+ QAM_RECNO_INDEX(dbp, pg, recno), recno, data);
+
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret
+ = __qam_fput(dbp, pg, page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Return the record number to the user. */
+ if (ret == 0)
+ ret = __db_retcopy(dbp->dbenv, key,
+ &recno, sizeof(recno), &dbc->rkey->data, &dbc->rkey->ulen);
+
+ /* Position the cursor on this record. */
+ cp->recno = recno;
+
+ /* See if we are leaving the extent. */
+ qp = (QUEUE *) dbp->q_internal;
+ if (qp->page_ext != 0 &&
+ (recno % (qp->page_ext * qp->rec_page) == 0 ||
+ recno == UINT32_T_MAX)) {
+ if ((ret = __db_lget(dbc,
+ 0, ((QUEUE *)dbp->q_internal)->q_meta,
+ DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if (!QAM_AFTER_CURRENT(meta, recno))
+ ret = __qam_fclose(dbp, pg);
+ (void)__LPUT(dbc, lock);
+ }
+
+err:
+ /* Release the meta page. */
+ if ((t_ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __qam_c_del --
+ * Qam cursor->am_del function
+ */
+static int
+__qam_c_del(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ DBT data;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ QAMDATA *qp;
+ QMETA *meta;
+ QUEUE_CURSOR *cp;
+ db_pgno_t pg;
+ db_recno_t first;
+ int exact, ret, t_ret;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it.
+ */
+ if ((ret = mpf->get(mpf, &pg, 0, &meta)) != 0)
+ return (ret);
+ /*
+ * Lock the meta page.  NOTE(review): the original comment said
+ * "write lock" but DB_LOCK_READ is requested -- confirm which
+ * is intended against upstream Berkeley DB.
+ */
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_READ, 0, &lock)) != 0) {
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+ }
+
+ /* A slot that was never written or already deleted is "not found". */
+ if (QAM_NOT_VALID(meta, cp->recno))
+ ret = DB_NOTFOUND;
+
+ /* Remember the queue head so we can tell if we delete it below. */
+ first = meta->first_recno;
+
+ /* Don't hold the meta page long term. */
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ goto err1;
+
+ /* Take a per-record write lock on the record being deleted. */
+ if ((ret = __db_lget(dbc,
+ 0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+ goto err1;
+
+ cp->lock_mode = DB_LOCK_WRITE;
+ /* Find the record ; delete only deletes exact matches. */
+ if ((ret = __qam_position(dbc,
+ &cp->recno, QAM_WRITE, &exact)) != 0) {
+ cp->lock = lock;
+ goto err1;
+ }
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err1;
+ }
+
+ pagep = cp->page;
+ qp = QAM_GET_RECORD(dbp, pagep, cp->indx);
+
+ /* Log the delete; extent-based queues also log the old data. */
+ if (DBC_LOGGING(dbc)) {
+ if (((QUEUE *)dbp->q_internal)->page_ext == 0 ||
+ ((QUEUE *)dbp->q_internal)->re_len == 0) {
+ if ((ret = __qam_del_log(dbp,
+ dbc->txn, &LSN(pagep), 0, &LSN(pagep),
+ pagep->pgno, cp->indx, cp->recno)) != 0)
+ goto err1;
+ } else {
+ data.size = ((QUEUE *)dbp->q_internal)->re_len;
+ data.data = qp->data;
+ if ((ret = __qam_delext_log(dbp,
+ dbc->txn, &LSN(pagep), 0, &LSN(pagep),
+ pagep->pgno, cp->indx, cp->recno, &data)) != 0)
+ goto err1;
+ }
+ }
+
+ /* Deleting just clears the valid flag; the slot may be reused. */
+ F_CLR(qp, QAM_VALID);
+
+ /*
+ * If we deleted the head of the queue, advance first_recno past
+ * any deleted records while holding the meta page write locked.
+ */
+ if (cp->recno == first) {
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ if ((ret =
+ __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err1;
+ ret = __qam_consume(dbc, meta, first);
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+err1:
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (cp->page != NULL && (t_ret = __qam_fput(dbp, cp->pgno,
+ cp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * NOTE(review): error paths that jump to err1 before the record
+ * lock is acquired reach here with "lock" uninitialized before it
+ * is stored into cp->lock -- confirm against upstream.
+ */
+ cp->lock = lock;
+
+ return (ret);
+}
+
+#ifdef DEBUG_WOP
+#define QDEBUG
+#endif
+
+/*
+ * __qam_c_get --
+ * Queue cursor->c_get function.
+ */
+static int
+__qam_c_get(dbc, key, data, flags, pgnop)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ db_pgno_t *pgnop;
+{
+ DB *dbp;
+ DBC *dbcdup;
+ DBT tmp;
+ DB_ENV *dbenv;
+ DB_LOCK lock, pglock, metalock;
+ DB_MPOOLFILE *mpf;
+ PAGE *pg;
+ QAMDATA *qp;
+ QMETA *meta;
+ QUEUE *t;
+ QUEUE_CURSOR *cp;
+ db_lockmode_t lock_mode;
+ db_pgno_t metapno;
+ db_recno_t first;
+ qam_position_mode mode;
+ int exact, is_first, locked, ret, t_ret, wait, with_delete;
+ int put_mode, meta_dirty, retrying;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ PANIC_CHECK(dbenv);
+
+ wait = 0;
+ with_delete = 0;
+ retrying = 0;
+ lock_mode = DB_LOCK_READ;
+ put_mode = 0;
+ t_ret = 0;
+ *pgnop = 0;
+ pg = NULL;
+
+ mode = QAM_READ;
+ if (F_ISSET(dbc, DBC_RMW)) {
+ lock_mode = DB_LOCK_WRITE;
+ mode = QAM_WRITE;
+ }
+
+ /* DB_CONSUME_WAIT is DB_CONSUME plus blocking for a record. */
+ if (flags == DB_CONSUME_WAIT) {
+ wait = 1;
+ flags = DB_CONSUME;
+ }
+ /* DB_CONSUME behaves like DB_FIRST but also deletes the record. */
+ if (flags == DB_CONSUME) {
+ if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0)
+ return (ret);
+
+ with_delete = 1;
+ flags = DB_FIRST;
+ lock_mode = DB_LOCK_WRITE;
+ mode = QAM_CONSUME;
+ }
+
+ DEBUG_LREAD(dbc, dbc->txn, "qam_c_get",
+ flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags);
+
+ /* Make lint and friends happy. */
+ meta_dirty = 0;
+ locked = 0;
+
+ is_first = 0;
+
+ t = (QUEUE *)dbp->q_internal;
+ metapno = t->q_meta;
+
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it. This is because someone may have it pinned
+ * but not locked.
+ */
+ if ((ret = mpf->get(mpf, &metapno, 0, &meta)) != 0)
+ return (ret);
+ if ((ret = __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
+
+ first = 0;
+
+ /* Release any previous lock if not in a transaction. */
+ (void)__TLPUT(dbc, cp->lock);
+
+retry: /* Update the record number. */
+ switch (flags) {
+ case DB_CURRENT:
+ break;
+ case DB_NEXT_DUP:
+ /* Queue databases have no duplicates. */
+ ret = DB_NOTFOUND;
+ goto err;
+ /* NOTREACHED */
+ case DB_NEXT:
+ case DB_NEXT_NODUP:
+ if (cp->recno != RECNO_OOB) {
+ ++cp->recno;
+ /* Wrap around, skipping zero. */
+ if (cp->recno == RECNO_OOB)
+ cp->recno++;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_FIRST:
+ flags = DB_NEXT;
+ is_first = 1;
+
+ /* get the first record number */
+ cp->recno = first = meta->first_recno;
+
+ break;
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ if (cp->recno != RECNO_OOB) {
+ if (QAM_BEFORE_FIRST(meta, cp->recno) ||
+ cp->recno == meta->first_recno) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ --cp->recno;
+ /* Wrap around, skipping zero. */
+ if (cp->recno == RECNO_OOB)
+ --cp->recno;
+ break;
+ }
+ /* FALLTHROUGH */
+ case DB_LAST:
+ /* first == cur means the queue is empty. */
+ if (meta->first_recno == meta->cur_recno) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ cp->recno = meta->cur_recno - 1;
+ if (cp->recno == RECNO_OOB)
+ cp->recno--;
+ break;
+ case DB_SET:
+ case DB_SET_RANGE:
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
+ if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_flag(dbenv, "__qam_c_get", flags);
+ goto err;
+ }
+
+ /*
+ * Check to see if we are out of data. Current points to
+ * the first free slot.
+ */
+ if (cp->recno == meta->cur_recno ||
+ QAM_AFTER_CURRENT(meta, cp->recno)) {
+ ret = DB_NOTFOUND;
+ pg = NULL;
+ if (wait) {
+ flags = DB_FIRST;
+ /*
+ * If first is not set, then we skipped a
+ * locked record, go back and find it.
+ * If we find a locked record again
+ * wait for it.
+ */
+ if (first == 0) {
+ retrying = 1;
+ goto retry;
+ }
+ /*
+ * NOTE(review): under CDB the wait is done by
+ * switching the cursor lock and re-upgrading --
+ * confirm against the lock subsystem docs.
+ */
+ if (CDB_LOCKING(dbenv)) {
+ if ((ret = dbenv->lock_get(
+ dbenv, dbc->locker,
+ DB_LOCK_SWITCH, &dbc->lock_dbt,
+ DB_LOCK_WAIT, &dbc->mylock)) != 0)
+ goto err;
+ if ((ret = dbenv->lock_get(
+ dbenv, dbc->locker,
+ DB_LOCK_UPGRADE, &dbc->lock_dbt,
+ DB_LOCK_WRITE, &dbc->mylock)) != 0)
+ goto err;
+ goto retry;
+ }
+ /*
+ * Wait for someone to update the meta page.
+ * This will probably mean there is something
+ * in the queue. We then go back up and
+ * try again.
+ */
+ if (locked == 0) {
+ if ((ret = __db_lget( dbc,
+ 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
+ if (cp->recno != RECNO_OOB &&
+ !QAM_AFTER_CURRENT(meta, cp->recno))
+ goto retry;
+ }
+ if ((ret = __db_lget(dbc, 0, metapno,
+ DB_LOCK_WAIT, DB_LOCK_SWITCH, &metalock)) != 0)
+ goto err;
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker,
+ DB_LOCK_UPGRADE, &dbc->lock_dbt, DB_LOCK_WRITE,
+ &metalock)) != 0)
+ goto err;
+ locked = 1;
+ goto retry;
+ }
+
+ goto err;
+ }
+
+ /* Don't hold the meta page long term. */
+ if (locked) {
+ if ((ret = __LPUT(dbc, metalock)) != 0)
+ goto err;
+ locked = 0;
+ }
+
+ /*
+ * Lock the record. For DB_CONSUME, first try without waiting so
+ * we can skip over records other consumers hold locked.
+ */
+ if ((ret = __db_lget(dbc, 0, cp->recno, lock_mode,
+ (with_delete && !retrying) ?
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD : DB_LOCK_RECORD,
+ &lock)) == DB_LOCK_NOTGRANTED && with_delete) {
+#ifdef QDEBUG
+ __db_logmsg(dbenv,
+ dbc->txn, "Queue S", 0, "%x %d %d %d",
+ dbc->locker, cp->recno, first, meta->first_recno);
+#endif
+ /* Record is busy: clear first and look for another one. */
+ first = 0;
+ if ((ret =
+ __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
+ goto retry;
+ }
+
+ if (ret != 0)
+ goto err;
+
+ /*
+ * In the DB_FIRST or DB_LAST cases we must wait and then start over
+ * since the first/last may have moved while we slept.
+ * We release our locks and try again.
+ */
+ if ((!with_delete && is_first) || flags == DB_LAST) {
+ if ((ret =
+ __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ if (cp->recno !=
+ (is_first ? meta->first_recno : (meta->cur_recno - 1))) {
+ __LPUT(dbc, lock);
+ if (is_first)
+ flags = DB_FIRST;
+ locked = 1;
+ goto retry;
+ }
+ /* Don't hold the meta page long term. */
+ if ((ret = __LPUT(dbc, metalock)) != 0)
+ goto err;
+ }
+
+ /* Position the cursor on the record. */
+ if ((ret = __qam_position(dbc, &cp->recno, mode, &exact)) != 0) {
+ /* We cannot get the page, release the record lock. */
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+
+ pg = cp->page;
+ pglock = cp->lock;
+ cp->lock = lock;
+ cp->lock_mode = lock_mode;
+
+ if (!exact) {
+ if (flags == DB_NEXT || flags == DB_NEXT_NODUP ||
+ flags == DB_PREV || flags == DB_PREV_NODUP ||
+ flags == DB_LAST) {
+ /* Release locks and try again. */
+ if (pg != NULL)
+ (void)__qam_fput(dbp, cp->pgno, pg, 0);
+ cp->page = pg = NULL;
+ (void)__LPUT(dbc, pglock);
+ (void)__LPUT(dbc, cp->lock);
+ if (flags == DB_LAST)
+ flags = DB_PREV;
+ if (!with_delete)
+ is_first = 0;
+ retrying = 0;
+ goto retry;
+ }
+ /* this is for the SET and SET_RANGE cases */
+ ret = DB_KEYEMPTY;
+ goto err1;
+ }
+
+ /* Return the key if the user didn't give us one. */
+ if (key != NULL) {
+ if (flags != DB_GET_BOTH && flags != DB_GET_BOTH_RANGE &&
+ flags != DB_SET && flags != DB_SET_RANGE &&
+ (ret = __db_retcopy(dbp->dbenv,
+ key, &cp->recno, sizeof(cp->recno),
+ &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+ goto err1;
+ F_SET(key, DB_DBT_ISSET);
+ }
+
+ qp = QAM_GET_RECORD(dbp, pg, cp->indx);
+
+ /* Return the data item. */
+ if (flags == DB_GET_BOTH || flags == DB_GET_BOTH_RANGE) {
+ /*
+ * Need to compare
+ */
+ tmp.data = qp->data;
+ tmp.size = t->re_len;
+ if ((ret = __bam_defcmp(dbp, data, &tmp)) != 0) {
+ ret = DB_NOTFOUND;
+ goto err1;
+ }
+ }
+ if (data != NULL &&
+ !F_ISSET(dbc, DBC_MULTIPLE|DBC_MULTIPLE_KEY) &&
+ (ret = __db_retcopy(dbp->dbenv, data,
+ qp->data, t->re_len, &dbc->rdata->data, &dbc->rdata->ulen)) != 0)
+ goto err1;
+
+ if (data != NULL)
+ F_SET(data, DB_DBT_ISSET);
+
+ /* Finally, if we are doing DB_CONSUME mark the record. */
+ if (with_delete) {
+ /*
+ * Assert that we're not a secondary index. Doing a DB_CONSUME
+ * on a secondary makes very little sense, since one can't
+ * DB_APPEND there; attempting one should be forbidden by
+ * the interface.
+ */
+ DB_ASSERT(!F_ISSET(dbp, DB_AM_SECONDARY));
+
+ /*
+ * Check and see if we *have* any secondary indices.
+ * If we do, we're a primary, so call __db_c_del_primary
+ * to delete the references to the item we're about to
+ * delete.
+ *
+ * Note that we work on a duplicated cursor, since the
+ * __db_ret work has already been done, so it's not safe
+ * to perform any additional ops on this cursor.
+ */
+ if (LIST_FIRST(&dbp->s_secondaries) != NULL) {
+ if ((ret = __db_c_idup(dbc,
+ &dbcdup, DB_POSITIONI)) != 0)
+ goto err1;
+
+ if ((ret = __db_c_del_primary(dbcdup)) != 0) {
+ /*
+ * The __db_c_del_primary return is more
+ * interesting.
+ */
+ (void)dbcdup->c_close(dbcdup);
+ goto err1;
+ }
+
+ if ((ret = dbcdup->c_close(dbcdup)) != 0)
+ goto err1;
+ }
+
+ /* Log the consume; extent-based queues log the data too. */
+ if (DBC_LOGGING(dbc)) {
+ if (t->page_ext == 0 || t->re_len == 0) {
+ if ((ret = __qam_del_log(dbp, dbc->txn,
+ &LSN(pg), 0, &LSN(pg),
+ pg->pgno, cp->indx, cp->recno)) != 0)
+ goto err1;
+ } else {
+ tmp.data = qp->data;
+ tmp.size = t->re_len;
+ if ((ret = __qam_delext_log(dbp,
+ dbc->txn, &LSN(pg), 0, &LSN(pg),
+ pg->pgno, cp->indx, cp->recno, &tmp)) != 0)
+ goto err1;
+ }
+ }
+
+ F_CLR(qp, QAM_VALID);
+ put_mode = DB_MPOOL_DIRTY;
+
+ if ((ret = __LPUT(dbc, pglock)) != 0)
+ goto err1;
+
+ /*
+ * Now we need to update the metapage
+ * first pointer. If we have deleted
+ * the record that is pointed to by
+ * first_recno then we move it as far
+ * forward as we can without blocking.
+ * The metapage lock must be held for
+ * the whole scan otherwise someone could
+ * do a random insert behind where we are
+ * looking.
+ */
+
+ if (locked == 0 && (ret = __db_lget(
+ dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err1;
+ locked = 1;
+
+#ifdef QDEBUG
+ __db_logmsg(dbenv,
+ dbc->txn, "Queue D", 0, "%x %d %d %d",
+ dbc->locker, cp->recno, first, meta->first_recno);
+#endif
+ /*
+ * See if we deleted the "first" record. If
+ * first is zero then we skipped something;
+ * see if first_recno has been moved past
+ * the record that we deleted.
+ */
+ if (first == 0)
+ first = cp->recno;
+ if (first != meta->first_recno)
+ goto done;
+
+ if ((ret = __qam_consume(dbc, meta, first)) != 0)
+ goto err1;
+ }
+
+done:
+err1: if (cp->page != NULL) {
+ t_ret = __qam_fput(dbp, cp->pgno, cp->page, put_mode);
+
+ if (!ret)
+ ret = t_ret;
+ /* Doing record locking, release the page lock */
+ t_ret = __LPUT(dbc, pglock);
+ cp->page = NULL;
+ }
+
+err: if (!ret)
+ ret = t_ret;
+ if (meta) {
+
+ /* release the meta page */
+ t_ret = mpf->put(mpf, meta, meta_dirty ? DB_MPOOL_DIRTY : 0);
+
+ if (!ret)
+ ret = t_ret;
+
+ /* Don't hold the meta page long term. */
+ if (locked)
+ t_ret = __LPUT(dbc, metalock);
+ }
+ DB_ASSERT(!LOCK_ISSET(metalock));
+
+ /*
+ * There is no need to keep the record locked if we are
+ * not in a transaction.
+ */
+ if (t_ret == 0)
+ t_ret = __TLPUT(dbc, cp->lock);
+
+ return (ret ? ret : t_ret);
+}
+
+/*
+ * __qam_consume -- try to reset the head of the queue.
+ *
+ */
+
+static int
+__qam_consume(dbc, meta, first)
+ DBC *dbc;
+ QMETA *meta;
+ db_recno_t first;
+{
+ DB *dbp;
+ DB_LOCK lock, save_lock;
+ DB_MPOOLFILE *mpf;
+ QUEUE_CURSOR *cp;
+ db_indx_t save_indx;
+ db_pgno_t save_page;
+ db_recno_t current, save_recno;
+ u_int32_t rec_extent;
+ int exact, put_mode, ret, t_ret, wrapped;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ put_mode = DB_MPOOL_DIRTY;
+ ret = t_ret = 0;
+
+ /* Save the cursor position; it is restored before returning. */
+ save_page = cp->pgno;
+ save_indx = cp->indx;
+ save_recno = cp->recno;
+ save_lock = cp->lock;
+
+ /*
+ * If we skipped some deleted records, we need to
+ * reposition on the first one. Get a lock
+ * in case someone is trying to put it back.
+ */
+ if (first != cp->recno) {
+ ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
+ if (ret == DB_LOCK_NOTGRANTED) {
+ /* Someone holds the record; give up without error. */
+ ret = 0;
+ goto done;
+ }
+ if (ret != 0)
+ goto done;
+ if ((ret =
+ __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
+ goto done;
+ cp->page = NULL;
+ put_mode = 0;
+ /*
+ * exact != 0 means a valid record exists at "first", so
+ * the head of the queue cannot advance past it.
+ */
+ if ((ret = __qam_position(dbc,
+ &first, QAM_READ, &exact)) != 0 || exact != 0) {
+ (void)__LPUT(dbc, lock);
+ goto done;
+ }
+ if ((ret =__LPUT(dbc, lock)) != 0)
+ goto done;
+ if ((ret = __LPUT(dbc, cp->lock)) != 0)
+ goto done;
+ }
+
+ current = meta->cur_recno;
+ wrapped = 0;
+ if (first > current)
+ wrapped = 1;
+ rec_extent = meta->page_ext * meta->rec_page;
+
+ /* Loop until we find a record or hit current */
+ for (;;) {
+ /*
+ * Check to see if we are moving off the extent
+ * and remove the extent.
+ * If we are moving off a page we need to
+ * get rid of the buffer.
+ * Wait for the lagging readers to move off the
+ * page.
+ */
+ if (cp->page != NULL && rec_extent != 0 &&
+ ((exact = (first % rec_extent == 0)) ||
+ first % meta->rec_page == 0 ||
+ first == UINT32_T_MAX)) {
+ if (exact == 1 && (ret = __db_lget(dbc,
+ 0, cp->pgno, DB_LOCK_WRITE, 0, &cp->lock)) != 0)
+ break;
+
+#ifdef QDEBUG
+ __db_logmsg(dbp->dbenv,
+ dbc->txn, "Queue R", 0, "%x %d %d %d",
+ dbc->locker, cp->pgno, first, meta->first_recno);
+#endif
+ put_mode |= DB_MPOOL_DISCARD;
+ if ((ret = __qam_fput(dbp,
+ cp->pgno, cp->page, put_mode)) != 0)
+ break;
+ cp->page = NULL;
+
+ /* Crossed an extent boundary: remove the extent file. */
+ if (exact == 1) {
+ ret = __qam_fremove(dbp, cp->pgno);
+ t_ret = __LPUT(dbc, cp->lock);
+ }
+ if (ret != 0)
+ break;
+ if (t_ret != 0) {
+ ret = t_ret;
+ break;
+ }
+ } else if (cp->page != NULL && (ret =
+ __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
+ break;
+ cp->page = NULL;
+ first++;
+ /* Record numbers wrap, skipping the out-of-band value. */
+ if (first == RECNO_OOB) {
+ wrapped = 0;
+ first++;
+ }
+
+ /*
+ * LOOP EXIT: stop when we advance up to the current
+ * pointer.
+ */
+ if (!wrapped && first >= current)
+ break;
+
+ ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
+ if (ret == DB_LOCK_NOTGRANTED) {
+ ret = 0;
+ break;
+ }
+ if (ret != 0)
+ break;
+
+ if ((ret = __qam_position(dbc,
+ &first, QAM_READ, &exact)) != 0) {
+ (void)__LPUT(dbc, lock);
+ break;
+ }
+ put_mode = 0;
+ if ((ret =__LPUT(dbc, lock)) != 0 ||
+ (ret = __LPUT(dbc, cp->lock)) != 0 || exact) {
+ if ((t_ret = __qam_fput(dbp, cp->pgno,
+ cp->page, put_mode)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ break;
+ }
+ }
+
+ cp->pgno = save_page;
+ cp->indx = save_indx;
+ cp->recno = save_recno;
+ cp->lock = save_lock;
+
+ /*
+ * We have advanced as far as we can.
+ * Advance first_recno to this point.
+ */
+ if (ret == 0 && meta->first_recno != first) {
+#ifdef QDEBUG
+ __db_logmsg(dbp->dbenv, dbc->txn, "Queue M",
+ 0, "%x %d %d %d", dbc->locker, cp->recno,
+ first, meta->first_recno);
+#endif
+ if (DBC_LOGGING(dbc))
+ if ((ret = __qam_incfirst_log(dbp,
+ dbc->txn, &meta->dbmeta.lsn, 0,
+ cp->recno, PGNO_BASE_MD)) != 0)
+ goto done;
+ meta->first_recno = first;
+ (void)mpf->set(mpf, meta, DB_MPOOL_DIRTY);
+ }
+
+done:
+ return (ret);
+}
+
+/*
+ * __qam_bulk --
+ *	Cursor bulk-get (DB_MULTIPLE / DB_MULTIPLE_KEY): copy page bodies
+ *	into the user's buffer and build an offset/size table downward
+ *	from the buffer's end.
+ */
+static int
+__qam_bulk(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ PAGE *pg;
+ QMETA *meta;
+ QAMDATA *qp;
+ QUEUE_CURSOR *cp;
+ db_indx_t indx;
+ db_pgno_t metapno;
+ qam_position_mode mode;
+ int32_t *endp, *offp;
+ u_int8_t *dbuf, *dp, *np;
+ int exact, recs, re_len, ret, t_ret, valid;
+ int is_key, need_pg, pagesize, size, space;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ mode = QAM_READ;
+ if (F_ISSET(dbc, DBC_RMW))
+ mode = QAM_WRITE;
+
+ pagesize = dbp->pgsize;
+ re_len = ((QUEUE *)dbp->q_internal)->re_len;
+ recs = ((QUEUE *)dbp->q_internal)->rec_page;
+ metapno = ((QUEUE *)dbp->q_internal)->q_meta;
+
+ is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1 : 0;
+ size = 0;
+
+ if ((ret = __db_lget(dbc, 0, metapno, DB_LOCK_READ, 0, &metalock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &metapno, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, metalock);
+ return (ret);
+ }
+
+ dbuf = data->data;
+ np = dp = dbuf;
+
+ /* Keep track of space that is left. There is a termination entry */
+ space = data->ulen;
+ space -= sizeof(*offp);
+
+ /* Build the offset/size table from the end up. */
+ endp = (int32_t *) ((u_int8_t *)dbuf + data->ulen);
+ endp--;
+ offp = endp;
+
+next_pg:
+ /* Position on the cursor's current record and pin its page. */
+ if ((ret = __qam_position(dbc, &cp->recno, mode, &exact)) != 0)
+ goto done;
+
+ pg = cp->page;
+ indx = cp->indx;
+ need_pg = 1;
+
+ do {
+ /*
+ * If this page is a nonexistent page at the end of an
+ * extent, pg may be NULL. A NULL page has no valid records,
+ * so just keep looping as though qp exists and isn't QAM_VALID;
+ * calling QAM_GET_RECORD is unsafe.
+ */
+ valid = 0;
+
+ /* Wrap around, skipping zero. */
+ if (cp->recno == RECNO_OOB)
+ cp->recno++;
+ if (pg != NULL) {
+ qp = QAM_GET_RECORD(dbp, pg, indx);
+ if (F_ISSET(qp, QAM_VALID)) {
+ valid = 1;
+ /* Each entry takes 2 (3 with keys) offsets. */
+ space -= (is_key ? 3 : 2) * sizeof(*offp);
+ if (space < 0)
+ goto get_space;
+ if (need_pg) {
+ dp = np;
+ size = pagesize - QPAGE_SZ(dbp);
+ if (space < size) {
+get_space:
+ /*
+ * Buffer too small: if nothing
+ * fit, report the needed size
+ * via ENOMEM; else back up one
+ * record and stop.
+ */
+ if (offp == endp) {
+ data->size =
+ ALIGN(size +
+ pagesize,
+ sizeof(u_int32_t));
+ ret = ENOMEM;
+ break;
+ }
+ if (indx != 0)
+ indx--;
+ cp->recno--;
+ break;
+ }
+ /* Copy the page body just once. */
+ memcpy(dp,
+ (char *)pg + QPAGE_SZ(dbp), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ if (is_key)
+ *offp-- = cp->recno;
+ *offp-- = (int32_t)((u_int8_t*)qp -
+ (u_int8_t*)pg - QPAGE_SZ(dbp) +
+ dp - dbuf + SSZA(QAMDATA, data));
+ *offp-- = re_len;
+ }
+ }
+ /* Invalid slots become zero-length entries (no-key mode). */
+ if (!valid && is_key == 0) {
+ *offp-- = 0;
+ *offp-- = 0;
+ }
+ cp->recno++;
+ } while (++indx < recs && indx != RECNO_OOB
+ && cp->recno != meta->cur_recno
+ && !QAM_AFTER_CURRENT(meta, cp->recno));
+
+ if ((t_ret = __TLPUT(dbc, cp->lock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (cp->page != NULL) {
+ if ((t_ret =
+ __qam_fput(dbp, cp->pgno, cp->page, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ }
+
+ /* Page exhausted but records remain: continue on the next page. */
+ if (ret == 0
+ && (indx >= recs || indx == RECNO_OOB)
+ && cp->recno != meta->cur_recno
+ && !QAM_AFTER_CURRENT(meta, cp->recno))
+ goto next_pg;
+
+ /* Terminate the offset table. */
+ if (is_key == 1)
+ *offp = RECNO_OOB;
+ else
+ *offp = -1;
+
+done:
+ /* release the meta page */
+ t_ret = mpf->put(mpf, meta, 0);
+
+ if (!ret)
+ ret = t_ret;
+
+ /*
+ * NOTE(review): the result of this __LPUT is stored in t_ret but
+ * never merged into ret -- confirm whether that is intentional.
+ */
+ t_ret = __LPUT(dbc, metalock);
+
+ return (ret);
+}
+
+/*
+ * __qam_c_close --
+ * Close down the cursor from a single use.
+ */
+static int
+__qam_c_close(dbc, root_pgno, rmroot)
+ DBC *dbc;
+ db_pgno_t root_pgno;
+ int *rmroot;
+{
+ QUEUE_CURSOR *qcp;
+
+ /* Queue cursors have no btree root, so these are unused. */
+ COMPQUIET(root_pgno, 0);
+ COMPQUIET(rmroot, NULL);
+
+ qcp = (QUEUE_CURSOR *)dbc->internal;
+
+ /* Discard any locks not acquired inside of a transaction. */
+ (void)__TLPUT(dbc, qcp->lock);
+ LOCK_INIT(qcp->lock);
+
+ /* Return the cursor to its unpositioned state. */
+ qcp->page = NULL;
+ qcp->pgno = PGNO_INVALID;
+ qcp->indx = 0;
+ qcp->recno = RECNO_OOB;
+ qcp->lock_mode = DB_LOCK_NG;
+ qcp->flags = 0;
+
+ return (0);
+}
+
+/*
+ * __qam_c_dup --
+ * Duplicate a queue cursor, such that the new one holds appropriate
+ * locks for the position of the original.
+ *
+ * PUBLIC: int __qam_c_dup __P((DBC *, DBC *));
+ */
+int
+__qam_c_dup(orig_dbc, new_dbc)
+ DBC *orig_dbc, *new_dbc;
+{
+ QUEUE_CURSOR *ocp, *ncp;
+
+ ocp = (QUEUE_CURSOR *)orig_dbc->internal;
+ ncp = (QUEUE_CURSOR *)new_dbc->internal;
+
+ /* The duplicate starts out positioned on the same record. */
+ ncp->recno = ocp->recno;
+
+ /*
+ * Outside of a transaction, with standard locking, the original
+ * holds a long-term record lock; take a matching one for the
+ * duplicate. Otherwise there is nothing more to do.
+ */
+ if (orig_dbc->txn != NULL)
+ return (0);
+ if (!STD_LOCKING(orig_dbc) || !LOCK_ISSET(ocp->lock))
+ return (0);
+
+ return (__db_lget(new_dbc,
+ 0, ncp->recno, ncp->lock_mode, DB_LOCK_RECORD, &ncp->lock));
+}
+
+/*
+ * __qam_c_init
+ *
+ * PUBLIC: int __qam_c_init __P((DBC *));
+ */
+int
+__qam_c_init(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+ QUEUE_CURSOR *cp;
+ int ret;
+
+ dbp = dbc->dbp;
+
+ /* Allocate the queue-private cursor state on first use. */
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ if (cp == NULL) {
+ ret = __os_calloc(dbp->dbenv, 1, sizeof(QUEUE_CURSOR), &cp);
+ if (ret != 0)
+ return (ret);
+ dbc->internal = (DBC_INTERNAL *)cp;
+ }
+
+ /* Generic cursor entry points. */
+ dbc->c_close = __db_c_close;
+ dbc->c_count = __db_c_count;
+ dbc->c_del = __db_c_del;
+ dbc->c_dup = __db_c_dup;
+ dbc->c_get = dbc->c_real_get = __db_c_get;
+ dbc->c_pget = __db_c_pget;
+ dbc->c_put = __db_c_put;
+
+ /* Queue-specific access-method implementations. */
+ dbc->c_am_bulk = __qam_bulk;
+ dbc->c_am_close = __qam_c_close;
+ dbc->c_am_del = __qam_c_del;
+ dbc->c_am_destroy = __qam_c_destroy;
+ dbc->c_am_get = __qam_c_get;
+ dbc->c_am_put = __qam_c_put;
+ dbc->c_am_writelock = NULL;
+
+ return (0);
+}
+
+/*
+ * __qam_c_destroy --
+ * Close a single cursor -- internal version.
+ */
+static int
+__qam_c_destroy(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+
+ /* Release the queue-private cursor memory. */
+ dbp = dbc->dbp;
+ __os_free(dbp->dbenv, dbc->internal);
+
+ return (0);
+}
+
+/*
+ * __qam_getno --
+ * Check the user's record number.
+ */
+static int
+__qam_getno(dbp, key, rep)
+ DB *dbp;
+ const DBT *key;
+ db_recno_t *rep;
+{
+ db_recno_t recno;
+
+ /* Queue records are addressed by a non-zero record number. */
+ recno = *(db_recno_t *)key->data;
+ *rep = recno;
+ if (recno != 0)
+ return (0);
+
+ __db_err(dbp->dbenv, "illegal record number of 0");
+ return (EINVAL);
+}
+
+/*
+ * __qam_truncate --
+ * Truncate a queue database
+ *
+ * PUBLIC: int __qam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+ */
+int
+__qam_truncate(dbp, txn, countp)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp;
+{
+ DBC *dbc;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ db_pgno_t metapno;
+ int count, ret, t_ret;
+
+ mpf = dbp->mpf;
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ /*
+ * Walk the queue, counting rows.  DB_CONSUME deletes each record
+ * as it is read, so this pass also empties the queue.
+ */
+ count = 0;
+ while ((ret = __qam_c_get(dbc, NULL, NULL, DB_CONSUME, &metapno)) == 0)
+ count++;
+
+ /* DB_NOTFOUND just means we reached the end of the queue. */
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ return (ret);
+
+ /* update the meta page */
+ /*
+ * get the meta page.  NOTE(review): "dbc" was closed above but is
+ * still passed to __db_lget/DBC_LOGGING/__LPUT below -- confirm
+ * this use-after-close against upstream Berkeley DB.
+ */
+ metapno = ((QUEUE *)dbp->q_internal)->q_meta;
+ if ((ret =
+ __db_lget(dbc, 0, metapno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ return (ret);
+
+ if ((ret = mpf->get(mpf, &metapno, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, metalock);
+ return (ret);
+ }
+ /* Log the pointer move before rewinding first/cur to 1. */
+ if (DBC_LOGGING(dbc)) {
+ ret = __qam_mvptr_log(dbp, dbc->txn, &meta->dbmeta.lsn, 0,
+ QAM_SETCUR | QAM_SETFIRST | QAM_TRUNCATE, meta->first_recno,
+ 1, meta->cur_recno, 1, &meta->dbmeta.lsn, PGNO_BASE_MD);
+ }
+ if (ret == 0)
+ meta->first_recno = meta->cur_recno = 1;
+
+ if ((t_ret =
+ mpf->put(mpf, meta, ret == 0 ? DB_MPOOL_DIRTY: 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ *countp = count;
+
+ return (ret);
+}
diff --git a/storage/bdb/qam/qam.src b/storage/bdb/qam/qam.src
new file mode 100644
index 00000000000..f8bf4da4dd0
--- /dev/null
+++ b/storage/bdb/qam/qam.src
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: qam.src,v 11.28 2002/04/17 19:03:13 krinsky Exp $
+ */
+
+PREFIX __qam
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/qam.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * Log-record descriptors for the queue access method.  The number on
+ * each BEGIN line is the log record type id (presumably must remain
+ * unique and stable across releases -- confirm against the generated
+ * qam_auto sources).
+ */
+
+/*
+ * incfirst
+ * Used when we increment first_recno.
+ */
+BEGIN incfirst 84
+DB fileid int32_t ld
+ARG recno db_recno_t lu
+WRLOCK meta_pgno db_pgno_t lu
+END
+
+/*
+ * mvptr
+ * Used when we change one or both of cur_recno and first_recno.
+ * opcode is a bit mask of QAM_SETCUR, QAM_SETFIRST and QAM_TRUNCATE
+ * (see the caller in __qam_truncate).
+ */
+BEGIN mvptr 85
+ARG opcode u_int32_t lu
+DB fileid int32_t ld
+ARG old_first db_recno_t lu
+ARG new_first db_recno_t lu
+ARG old_cur db_recno_t lu
+ARG new_cur db_recno_t lu
+POINTER metalsn DB_LSN * lu
+WRLOCK meta_pgno db_pgno_t lu
+END
+
+
+/*
+ * del
+ * Used when we delete a record.
+ * recno is the record that is being deleted.
+ */
+BEGIN del 79
+DB fileid int32_t ld
+POINTER lsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG recno db_recno_t lu
+END
+
+/*
+ * add
+ * Used when we put a record on a page.
+ * recno is the record being added.
+ * data is the record itself.
+ */
+BEGIN add 80
+DB fileid int32_t ld
+POINTER lsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG recno db_recno_t lu
+DBT data DBT s
+ARG vflag u_int32_t lu
+DBT olddata DBT s
+END
+
+/*
+ * delext
+ * Used when we delete a record in extent based queue.
+ * recno is the record that is being deleted.
+ */
+BEGIN delext 83
+DB fileid int32_t ld
+POINTER lsn DB_LSN * lu
+ARG pgno db_pgno_t lu
+ARG indx u_int32_t lu
+ARG recno db_recno_t lu
+DBT data DBT s
+END
diff --git a/storage/bdb/qam/qam_conv.c b/storage/bdb/qam/qam_conv.c
new file mode 100644
index 00000000000..d89fe06b0cf
--- /dev/null
+++ b/storage/bdb/qam/qam_conv.c
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_conv.c,v 11.14 2002/08/06 06:17:02 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __qam_mswap --
+ * Swap the bytes on the queue metadata page.
+ *
+ * PUBLIC: int __qam_mswap __P((PAGE *));
+ */
+int
+__qam_mswap(pg)
+ PAGE *pg;
+{
+ u_int8_t *fieldp;
+
+ /* Swap the generic DBMETA header first. */
+ __db_metaswap(pg);
+
+ /* Then walk the queue-specific fields that follow the header. */
+ fieldp = (u_int8_t *)pg + sizeof(DBMETA);
+
+ SWAP32(fieldp); /* first_recno */
+ SWAP32(fieldp); /* cur_recno */
+ SWAP32(fieldp); /* re_len */
+ SWAP32(fieldp); /* re_pad */
+ SWAP32(fieldp); /* rec_page */
+ SWAP32(fieldp); /* page_ext */
+ fieldp += 91 * sizeof(u_int32_t); /* skip the unused area */
+ SWAP32(fieldp); /* crypto_magic */
+
+ return (0);
+}
+
+/*
+ * __qam_pgin_out --
+ * Convert host-specific page layout to/from the host-independent format
+ * stored on disk.
+ * We only need to fix up a few fields in the header
+ *
+ * PUBLIC: int __qam_pgin_out __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ */
+int
+__qam_pgin_out(dbenv, pg, pp, cookie)
+ DB_ENV *dbenv;
+ db_pgno_t pg;
+ void *pp;
+ DBT *cookie;
+{
+ DB_PGINFO *pgi;
+ QPAGE *qh;
+
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(pg, 0);
+
+ /* Nothing to do unless this file needs byte-swapping. */
+ pgi = (DB_PGINFO *)cookie->data;
+ if (!F_ISSET(pgi, DB_AM_SWAP))
+ return (0);
+
+ /* Meta pages have their own layout; hand them off. */
+ qh = pp;
+ if (qh->type == P_QAMMETA)
+ return (__qam_mswap(pp));
+
+ /* Data page headers only need the LSN and page number fixed. */
+ M_32_SWAP(qh->lsn.file);
+ M_32_SWAP(qh->lsn.offset);
+ M_32_SWAP(qh->pgno);
+
+ return (0);
+}
diff --git a/storage/bdb/qam/qam_files.c b/storage/bdb/qam/qam_files.c
new file mode 100644
index 00000000000..f15a88d546d
--- /dev/null
+++ b/storage/bdb/qam/qam_files.c
@@ -0,0 +1,642 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_files.c,v 1.52 2002/08/26 17:52:18 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <stdlib.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/qam.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __qam_fprobe -- calculate and open extent
+ *
+ * Calculate which extent the page is in, open and create if necessary.
+ *
+ * PUBLIC: int __qam_fprobe
+ * PUBLIC: __P((DB *, db_pgno_t, void *, qam_probe_mode, u_int32_t));
+ */
int
__qam_fprobe(dbp, pgno, addrp, mode, flags)
	DB *dbp;
	db_pgno_t pgno;
	void *addrp;
	qam_probe_mode mode;
	u_int32_t flags;
{
	DB_ENV *dbenv;
	DB_MPOOLFILE *mpf;
	MPFARRAY *array;
	QUEUE *qp;
	u_int8_t fid[DB_FILE_ID_LEN];
	u_int32_t extid, maxext, openflags;
	char buf[MAXPATHLEN];
	int numext, offset, oldext, ret;

	dbenv = dbp->dbenv;
	qp = (QUEUE *)dbp->q_internal;
	ret = 0;

	/* No extents configured: the main mpool file holds every page. */
	if (qp->page_ext == 0) {
		mpf = dbp->mpf;
		return (mode == QAM_PROBE_GET ?
		    mpf->get(mpf, &pgno, flags, addrp) :
		    mpf->put(mpf, addrp, flags));
	}

	mpf = NULL;

	/*
	 * Need to lock long enough to find the mpf or create the file.
	 * The file cannot go away because we must have a record locked
	 * in that file.
	 */
	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
	extid = (pgno - 1) / qp->page_ext;

	/* Array1 will always be in use if array2 is in use. */
	array = &qp->array1;
	if (array->n_extent == 0) {
		/* Start with 4 extents */
		oldext = 0;
		array->n_extent = 4;
		array->low_extent = extid;
		offset = 0;
		numext = 0;
		goto alloc;
	}

	/* Pick whichever array's window is closer to the wanted extent. */
	offset = extid - qp->array1.low_extent;
	if (qp->array2.n_extent != 0 &&
	    abs(offset) > abs(extid - qp->array2.low_extent)) {
		array = &qp->array2;
		offset = extid - array->low_extent;
	}

	/*
	 * Check to see if the requested extent is outside the range of
	 * extents in the array.  This is true by default if there are
	 * no extents here yet.
	 */
	if (offset < 0 || (unsigned) offset >= array->n_extent) {
		oldext = array->n_extent;
		numext = array->hi_extent - array->low_extent + 1;
		if (offset < 0 &&
		    (unsigned) -offset + numext <= array->n_extent) {
			/*
			 * If we can fit this one into the existing array by
			 * shifting the existing entries then we do not have
			 * to allocate.
			 */
			memmove(&array->mpfarray[-offset],
			    array->mpfarray, numext
			    * sizeof(array->mpfarray[0]));
			memset(array->mpfarray, 0, -offset
			    * sizeof(array->mpfarray[0]));
			offset = 0;
		} else if ((u_int32_t)offset == array->n_extent &&
		    mode != QAM_PROBE_MPF && array->mpfarray[0].pinref == 0) {
			/*
			 * If this is at the end of the array and the file at
			 * the begining has a zero pin count we can close
			 * the bottom extent and put this one at the end.
			 */
			mpf = array->mpfarray[0].mpf;
			if (mpf != NULL && (ret = mpf->close(mpf, 0)) != 0)
				goto err;
			memmove(&array->mpfarray[0], &array->mpfarray[1],
			    (array->n_extent - 1) * sizeof(array->mpfarray[0]));
			array->low_extent++;
			array->hi_extent++;
			offset--;
			array->mpfarray[offset].mpf = NULL;
			array->mpfarray[offset].pinref = 0;
		} else {
			/*
			 * See if we have wrapped around the queue.
			 * If it has then allocate the second array.
			 * Otherwise just expand the one we are using.
			 */
			maxext = (u_int32_t) UINT32_T_MAX
			    / (qp->page_ext * qp->rec_page);
			if ((u_int32_t) abs(offset) >= maxext/2) {
				array = &qp->array2;
				DB_ASSERT(array->n_extent == 0);
				oldext = 0;
				array->n_extent = 4;
				array->low_extent = extid;
				offset = 0;
				numext = 0;
			} else {
				/*
				 * Increase the size to at least include
				 * the new one and double it.
				 */
				array->n_extent += abs(offset);
				array->n_extent <<= 2;
			}
alloc:
			if ((ret = __os_realloc(dbenv,
			    array->n_extent * sizeof(struct __qmpf),
			    &array->mpfarray)) != 0)
				goto err;

			if (offset < 0) {
				/*
				 * Move the array up and put the new one
				 * in the first slot.
				 */
				offset = -offset;
				memmove(&array->mpfarray[offset],
				    array->mpfarray,
				    numext * sizeof(array->mpfarray[0]));
				memset(array->mpfarray, 0,
				    offset * sizeof(array->mpfarray[0]));
				memset(&array->mpfarray[numext + offset], 0,
				    (array->n_extent - (numext + offset))
				    * sizeof(array->mpfarray[0]));
				offset = 0;
			}
			else
				/* Clear the new part of the array. */
				memset(&array->mpfarray[oldext], 0,
				    (array->n_extent - oldext) *
				    sizeof(array->mpfarray[0]));
		}
	}

	/* Update the low and hi range of saved extents. */
	if (extid < array->low_extent)
		array->low_extent = extid;
	if (extid > array->hi_extent)
		array->hi_extent = extid;

	/* If the extent file is not yet open, open it. */
	if (array->mpfarray[offset].mpf == NULL) {
		snprintf(buf, sizeof(buf),
		    QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, extid);
		if ((ret = dbenv->memp_fcreate(
		    dbenv, &array->mpfarray[offset].mpf, 0)) != 0)
			goto err;
		mpf = array->mpfarray[offset].mpf;
		(void)mpf->set_lsn_offset(mpf, 0);
		(void)mpf->set_pgcookie(mpf, &qp->pgcookie);

		/* Set up the fileid for this extent. */
		__qam_exid(dbp, fid, extid);
		(void)mpf->set_fileid(mpf, fid);
		openflags = DB_EXTENT;
		if (LF_ISSET(DB_MPOOL_CREATE))
			openflags |= DB_CREATE;
		if (F_ISSET(dbp, DB_AM_RDONLY))
			openflags |= DB_RDONLY;
		if (F_ISSET(dbenv, DB_ENV_DIRECT_DB))
			openflags |= DB_DIRECT;
		if ((ret = mpf->open(
		    mpf, buf, openflags, qp->mode, dbp->pgsize)) != 0) {
			/* Discard the handle; leave the slot empty. */
			array->mpfarray[offset].mpf = NULL;
			(void)mpf->close(mpf, 0);
			goto err;
		}
	}

	/* A GET probe pins the extent until the matching fput. */
	mpf = array->mpfarray[offset].mpf;
	if (mode == QAM_PROBE_GET)
		array->mpfarray[offset].pinref++;
	if (LF_ISSET(DB_MPOOL_CREATE))
		mpf->set_unlink(mpf, 0);

err:
	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);

	if (ret == 0) {
		if (mode == QAM_PROBE_MPF) {
			*(DB_MPOOLFILE **)addrp = mpf;
			return (0);
		}
		/* Translate the global page number to an extent-relative one. */
		pgno--;
		pgno %= qp->page_ext;
		if (mode == QAM_PROBE_GET)
			return (mpf->get(mpf, &pgno, flags, addrp));
		ret = mpf->put(mpf, addrp, flags);
		/* Drop the pin taken by the matching GET probe. */
		MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
		array->mpfarray[offset].pinref--;
		MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
	}
	return (ret);
}
+
+/*
+ * __qam_fclose -- close an extent.
+ *
+ * Calculate which extent the page is in and close it.
+ * We assume the mpf entry is present.
+ *
+ * PUBLIC: int __qam_fclose __P((DB *, db_pgno_t));
+ */
+int
+__qam_fclose(dbp, pgnoaddr)
+ DB *dbp;
+ db_pgno_t pgnoaddr;
+{
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ MPFARRAY *array;
+ QUEUE *qp;
+ u_int32_t extid;
+ int offset, ret;
+
+ ret = 0;
+ dbenv = dbp->dbenv;
+ qp = (QUEUE *)dbp->q_internal;
+
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ extid = (pgnoaddr - 1) / qp->page_ext;
+ array = &qp->array1;
+ if (array->low_extent > extid || array->hi_extent < extid)
+ array = &qp->array2;
+ offset = extid - array->low_extent;
+
+ DB_ASSERT(offset >= 0 && (unsigned) offset < array->n_extent);
+
+ /* If other threads are still using this file, leave it. */
+ if (array->mpfarray[offset].pinref != 0)
+ goto done;
+
+ mpf = array->mpfarray[offset].mpf;
+ array->mpfarray[offset].mpf = NULL;
+ ret = mpf->close(mpf, 0);
+
+done:
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ return (ret);
+}
+
+/*
+ * __qam_fremove -- remove an extent.
+ *
+ * Calculate which extent the page is in and remove it. There is no way
+ * to remove an extent without probing it first and seeing that is is empty
+ * so we assume the mpf entry is present.
+ *
+ * PUBLIC: int __qam_fremove __P((DB *, db_pgno_t));
+ */
int
__qam_fremove(dbp, pgnoaddr)
	DB *dbp;
	db_pgno_t pgnoaddr;
{
	DB_ENV *dbenv;
	DB_MPOOLFILE *mpf;
	MPFARRAY *array;
	QUEUE *qp;
	u_int32_t extid;
#if CONFIG_TEST
	char buf[MAXPATHLEN], *real_name;
#endif
	int offset, ret;

	qp = (QUEUE *)dbp->q_internal;
	dbenv = dbp->dbenv;
	ret = 0;

	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);

	/* Map the page to its extent and locate the array tracking it. */
	extid = (pgnoaddr - 1) / qp->page_ext;
	array = &qp->array1;
	if (array->low_extent > extid || array->hi_extent < extid)
		array = &qp->array2;
	offset = extid - array->low_extent;

	DB_ASSERT(offset >= 0 && (unsigned) offset < array->n_extent);

#if CONFIG_TEST
	real_name = NULL;
	/* Find the real name of the file. */
	snprintf(buf, sizeof(buf),
	    QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, extid);
	if ((ret = __db_appname(dbenv,
	    DB_APP_DATA, buf, 0, NULL, &real_name)) != 0)
		goto err;
#endif
	/*
	 * The log must be flushed before the file is deleted.  We depend on
	 * the log record of the last delete to recreate the file if we crash.
	 */
	if (LOGGING_ON(dbenv) && (ret = dbenv->log_flush(dbenv, NULL)) != 0)
		goto err;

	/* Closing with unlink set removes the backing file. */
	mpf = array->mpfarray[offset].mpf;
	array->mpfarray[offset].mpf = NULL;
	mpf->set_unlink(mpf, 1);
	if ((ret = mpf->close(mpf, 0)) != 0)
		goto err;

	/*
	 * If the file is at the bottom of the array
	 * shift things down and adjust the end points.
	 */
	if (offset == 0) {
		memmove(array->mpfarray, &array->mpfarray[1],
		    (array->hi_extent - array->low_extent)
		    * sizeof(array->mpfarray[0]));
		array->mpfarray[
		    array->hi_extent - array->low_extent].mpf = NULL;
		if (array->low_extent != array->hi_extent)
			array->low_extent++;
	} else {
		/* Removing the top extent just shrinks the window. */
		if (extid == array->hi_extent)
			array->hi_extent--;
	}

err:
	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
#if CONFIG_TEST
	if (real_name != NULL)
		__os_free(dbenv, real_name);
#endif
	return (ret);
}
+
+/*
+ * __qam_sync --
+ * Flush the database cache.
+ *
+ * PUBLIC: int __qam_sync __P((DB *, u_int32_t));
+ */
int
__qam_sync(dbp, flags)
	DB *dbp;
	u_int32_t flags;
{
	DB_ENV *dbenv;
	DB_MPOOLFILE *mpf;
	MPFARRAY *array;
	QUEUE *qp;
	QUEUE_FILELIST *filelist;
	struct __qmpf *mpfp;
	u_int32_t i;
	int done, ret;

	dbenv = dbp->dbenv;
	mpf = dbp->mpf;

	PANIC_CHECK(dbenv);
	DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->sync");

	if ((ret = __db_syncchk(dbp, flags)) != 0)
		return (ret);

	/* Read-only trees never need to be sync'd. */
	if (F_ISSET(dbp, DB_AM_RDONLY))
		return (0);

	/* If the tree was never backed by a database file, we're done. */
	if (F_ISSET(dbp, DB_AM_INMEM))
		return (0);

	/* Flush any dirty pages from the cache to the backing file. */
	if ((ret = mpf->sync(dbp->mpf)) != 0)
		return (ret);

	/* Without extents there is nothing more to flush. */
	qp = (QUEUE *)dbp->q_internal;
	if (qp->page_ext == 0)
		return (0);

	/* We do this for the side effect of opening all active extents. */
	if ((ret = __qam_gen_filelist(dbp, &filelist)) != 0)
		return (ret);

	if (filelist == NULL)
		return (0);

	/* Only the side effect was wanted; the list itself is not used. */
	__os_free(dbp->dbenv, filelist);

	done = 0;
	qp = (QUEUE *)dbp->q_internal;
	array = &qp->array1;

	/* Sync every open extent in array1, then (via "again") array2. */
	MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
again:
	mpfp = array->mpfarray;
	for (i = array->low_extent; i <= array->hi_extent; i++, mpfp++)
		if ((mpf = mpfp->mpf) != NULL) {
			if ((ret = mpf->sync(mpf)) != 0)
				goto err;
			/*
			 * If we are the only ones with this file open
			 * then close it so it might be removed.
			 */
			if (mpfp->pinref == 0) {
				mpfp->mpf = NULL;
				if ((ret = mpf->close(mpf, 0)) != 0)
					goto err;
			}
		}

	if (done == 0 && qp->array2.n_extent != 0) {
		array = &qp->array2;
		done = 1;
		goto again;
	}

err:
	MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
	return (ret);
}
+
+/*
+ * __qam_gen_filelist -- generate a list of extent files.
+ * Another thread may close the handle so this should only
+ * be used single threaded or with care.
+ *
+ * PUBLIC: int __qam_gen_filelist __P(( DB *, QUEUE_FILELIST **));
+ */
int
__qam_gen_filelist(dbp, filelistp)
	DB *dbp;
	QUEUE_FILELIST **filelistp;
{
	DB_ENV *dbenv;
	DB_MPOOLFILE *mpf;
	QUEUE *qp;
	QMETA *meta;
	db_pgno_t i, last, start;
	db_recno_t current, first;
	QUEUE_FILELIST *fp;
	int ret;

	dbenv = dbp->dbenv;
	mpf = dbp->mpf;
	qp = (QUEUE *)dbp->q_internal;
	*filelistp = NULL;

	/* No extents, no list. */
	if (qp->page_ext == 0)
		return (0);

	/* This may happen during metapage recovery. */
	if (qp->name == NULL)
		return (0);

	/* Find out the page number of the last page in the database. */
	i = PGNO_BASE_MD;
	if ((ret = mpf->get(mpf, &i, 0, &meta)) != 0)
		return (ret);

	/* Copy the record bounds out before releasing the page. */
	current = meta->cur_recno;
	first = meta->first_recno;

	if ((ret = mpf->put(mpf, meta, 0)) != 0)
		return (ret);

	last = QAM_RECNO_PAGE(dbp, current);
	start = QAM_RECNO_PAGE(dbp, first);

	/*
	 * Allocate the worst case plus 1 for null termination.
	 * NOTE: ret is briefly reused here as the element count before
	 * being overwritten with the calloc result.
	 */
	if (last >= start)
		ret = last - start + 2;
	else
		ret = last + (QAM_RECNO_PAGE(dbp, UINT32_T_MAX) - start) + 1;
	if ((ret = __os_calloc(dbenv,
	    ret, sizeof(QUEUE_FILELIST), filelistp)) != 0)
		return (ret);
	fp = *filelistp;
	i = start;

	/* Probe each extent; a missing one (ENOENT) is simply skipped. */
again:	for (; i <= last; i += qp->page_ext) {
		if ((ret =
		    __qam_fprobe(dbp, i, &fp->mpf, QAM_PROBE_MPF, 0)) != 0) {
			if (ret == ENOENT)
				continue;
			return (ret);
		}
		fp->id = (i - 1) / qp->page_ext;
		fp++;
	}

	/* The record space wrapped: scan the low half too. */
	if (last < start) {
		i = 1;
		start = 0;
		goto again;
	}

	return (0);
}
+
+/*
+ * __qam_extent_names -- generate a list of extent files names.
+ *
+ * PUBLIC: int __qam_extent_names __P((DB_ENV *, char *, char ***));
+ */
+int
+__qam_extent_names(dbenv, name, namelistp)
+ DB_ENV *dbenv;
+ char *name;
+ char ***namelistp;
+{
+ DB *dbp;
+ QUEUE *qp;
+ QUEUE_FILELIST *filelist, *fp;
+ char buf[MAXPATHLEN], *dir, **cp, *freep;
+ int cnt, len, ret;
+
+ *namelistp = NULL;
+ filelist = NULL;
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ if ((ret =
+ __db_open(dbp, NULL, name, NULL, DB_QUEUE, DB_RDONLY, 0)) != 0)
+ return (ret);
+ qp = dbp->q_internal;
+ if (qp->page_ext == 0)
+ goto done;
+
+ if ((ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ goto done;
+
+ if (filelist == NULL)
+ goto done;
+
+ cnt = 0;
+ for (fp = filelist; fp->mpf != NULL; fp++)
+ cnt++;
+ dir = ((QUEUE *)dbp->q_internal)->dir;
+ name = ((QUEUE *)dbp->q_internal)->name;
+
+ /* QUEUE_EXTENT contains extra chars, but add 6 anyway for the int. */
+ len = (u_int32_t)(cnt * (sizeof(**namelistp)
+ + strlen(QUEUE_EXTENT) + strlen(dir) + strlen(name) + 6));
+
+ if ((ret =
+ __os_malloc(dbp->dbenv, len, namelistp)) != 0)
+ goto done;
+ cp = *namelistp;
+ freep = (char *)(cp + cnt + 1);
+ for (fp = filelist; fp->mpf != NULL; fp++) {
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, dir, PATH_SEPARATOR[0], name, fp->id);
+ len = (u_int32_t)strlen(buf);
+ *cp++ = freep;
+ strcpy(freep, buf);
+ freep += len + 1;
+ }
+ *cp = NULL;
+
+done:
+ if (filelist != NULL)
+ __os_free(dbp->dbenv, filelist);
+ (void)dbp->close(dbp, DB_NOSYNC);
+
+ return (ret);
+}
+
+/*
+ * __qam_exid --
+ * Generate a fileid for an extent based on the fileid of the main
+ * file. Since we do not log schema creates/deletes explicitly, the log
+ * never captures the fileid of an extent file. In order that masters and
+ * replicas have the same fileids (so they can explicitly delete them), we
+ * use computed fileids for the extent files of Queue files.
+ *
+ * An extent file id retains the low order 12 bytes of the file id and
+ * overwrites the dev/inode fields, placing a 0 in the inode field, and
+ * the extent number in the dev field.
+ *
+ * PUBLIC: void __qam_exid __P((DB *, u_int8_t *, u_int32_t));
+ */
+void
+__qam_exid(dbp, fidp, exnum)
+ DB *dbp;
+ u_int8_t *fidp;
+ u_int32_t exnum;
+{
+ int i;
+ u_int8_t *p;
+
+ /* Copy the fileid from the master. */
+ memcpy(fidp, dbp->fileid, DB_FILE_ID_LEN);
+
+ /* The first four bytes are the inode or the FileIndexLow; 0 it. */
+ for (i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = 0;
+
+ /* The next four bytes are the dev/FileIndexHigh; insert the exnum . */
+ for (p = (u_int8_t *)&exnum, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+}
diff --git a/storage/bdb/qam/qam_method.c b/storage/bdb/qam/qam_method.c
new file mode 100644
index 00000000000..5415fc5d00c
--- /dev/null
+++ b/storage/bdb/qam/qam_method.c
@@ -0,0 +1,413 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_method.c,v 11.55 2002/08/26 17:52:19 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/fop.h"
+#include "dbinc/lock.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __qam_set_extentsize __P((DB *, u_int32_t));
+
+struct __qam_cookie {
+ DB_LSN lsn;
+ QUEUE_FILELIST *filelist;
+};
+
+/*
+ * __qam_db_create --
+ * Queue specific initialization of the DB structure.
+ *
+ * PUBLIC: int __qam_db_create __P((DB *));
+ */
+int
+__qam_db_create(dbp)
+ DB *dbp;
+{
+ QUEUE *t;
+ int ret;
+
+ /* Allocate and initialize the private queue structure. */
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(QUEUE), &t)) != 0)
+ return (ret);
+ dbp->q_internal = t;
+ dbp->set_q_extentsize = __qam_set_extentsize;
+
+ t->re_pad = ' ';
+
+ return (0);
+}
+
+/*
+ * __qam_db_close --
+ * Queue specific discard of the DB structure.
+ *
+ * PUBLIC: int __qam_db_close __P((DB *));
+ */
int
__qam_db_close(dbp)
	DB *dbp;
{
	DB_MPOOLFILE *mpf;
	MPFARRAY *array;
	QUEUE *t;
	struct __qmpf *mpfp;
	u_int32_t i;
	int ret, t_ret;

	ret = 0;
	/* Nothing allocated yet -- nothing to tear down. */
	if ((t = dbp->q_internal) == NULL)
		return (0);

	/*
	 * Close every open extent in array1, then (via "again") in
	 * array2.  The first close failure is preserved in ret; later
	 * extents are still closed.
	 */
	array = &t->array1;
again:
	mpfp = array->mpfarray;
	if (mpfp != NULL) {
		for (i = array->low_extent;
		    i <= array->hi_extent; i++, mpfp++) {
			mpf = mpfp->mpf;
			mpfp->mpf = NULL;
			if (mpf != NULL &&
			    (t_ret = mpf->close(mpf, 0)) != 0 && ret == 0)
				ret = t_ret;
		}
		__os_free(dbp->dbenv, array->mpfarray);
	}
	if (t->array2.n_extent != 0) {
		array = &t->array2;
		/* Mark array2 done so we don't loop a third time. */
		array->n_extent = 0;
		goto again;
	}

	/* Release the path copy and the QUEUE structure itself. */
	if (t->path != NULL)
		__os_free(dbp->dbenv, t->path);
	__os_free(dbp->dbenv, t);
	dbp->q_internal = NULL;

	return (ret);
}
+
+static int
+__qam_set_extentsize(dbp, extentsize)
+ DB *dbp;
+ u_int32_t extentsize;
+{
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_extentsize");
+
+ if (extentsize < 1) {
+ __db_err(dbp->dbenv, "Extent size must be at least 1");
+ return (EINVAL);
+ }
+
+ ((QUEUE*)dbp->q_internal)->page_ext = extentsize;
+
+ return (0);
+}
+
+/*
+ * __db_prqueue --
+ * Print out a queue
+ *
+ * PUBLIC: int __db_prqueue __P((DB *, FILE *, u_int32_t));
+ */
int
__db_prqueue(dbp, fp, flags)
	DB *dbp;
	FILE *fp;
	u_int32_t flags;
{
	DB_MPOOLFILE *mpf;
	PAGE *h;
	QMETA *meta;
	db_pgno_t first, i, last, pg_ext, stop;
	int ret, t_ret;

	mpf = dbp->mpf;

	/* Find out the page number of the last page in the database. */
	i = PGNO_BASE_MD;
	if ((ret = mpf->get(mpf, &i, 0, &meta)) != 0)
		return (ret);

	first = QAM_RECNO_PAGE(dbp, meta->first_recno);
	last = QAM_RECNO_PAGE(dbp, meta->cur_recno);

	/* Print the meta page itself, then release it. */
	ret = __db_prpage(dbp, (PAGE *)meta, fp, flags);
	if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
		ret = t_ret;

	if (ret != 0)
		return (ret);

	/*
	 * If the record space wrapped, dump from first to the top of
	 * the page space now, then restart at page 1 below.
	 */
	i = first;
	if (first > last)
		stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX);
	else
		stop = last;

	/* Dump each page. */
begin:
	for (; i <= stop; ++i) {
		if ((ret = __qam_fget(dbp, &i, 0, &h)) != 0) {
			pg_ext = ((QUEUE *)dbp->q_internal)->page_ext;
			if (pg_ext == 0) {
				if (ret == DB_PAGE_NOTFOUND && first == last)
					return (0);
				return (ret);
			}
			/* A whole extent may be missing: skip past it. */
			if (ret == ENOENT || ret == DB_PAGE_NOTFOUND) {
				i += pg_ext - ((i - 1) % pg_ext) - 1;
				continue;
			}
			return (ret);
		}
		(void)__db_prpage(dbp, h, fp, flags);
		if ((ret = __qam_fput(dbp, i, h, 0)) != 0)
			return (ret);
	}

	/* Second pass for the wrapped region. */
	if (first > last) {
		i = 1;
		stop = last;
		first = last;
		goto begin;
	}
	return (0);
}
+
+/*
+ * __qam_remove
+ * Remove method for a Queue.
+ *
+ * PUBLIC: int __qam_remove __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, const char *, DB_LSN *));
+ */
int
__qam_remove(dbp, txn, name, subdb, lsnp)
	DB *dbp;
	DB_TXN *txn;
	const char *name, *subdb;
	DB_LSN *lsnp;
{
	DB_ENV *dbenv;
	DB *tmpdbp;
	MPFARRAY *ap;
	QUEUE *qp;
	QUEUE_FILELIST *filelist, *fp;
	int ret, needclose, t_ret;
	char buf[MAXPATHLEN];
	u_int8_t fid[DB_FILE_ID_LEN];

	COMPQUIET(lsnp, NULL);

	dbenv = dbp->dbenv;
	ret = 0;
	filelist = NULL;
	needclose = 0;

	PANIC_CHECK(dbenv);

	/*
	 * Subdatabases.
	 */
	if (subdb != NULL) {
		__db_err(dbenv,
		    "Queue does not support multiple databases per file");
		ret = EINVAL;
		goto err;
	}

	/*
	 * Since regular remove no longer opens the database, we may have
	 * to do it here.
	 */
	if (F_ISSET(dbp, DB_AM_OPEN_CALLED))
		tmpdbp = dbp;
	else {
		if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
			return (ret);
		/*
		 * We need to make sure we don't self-deadlock, so give
		 * this dbp the same locker as the incoming one.
		 */
		tmpdbp->lid = dbp->lid;

		/*
		 * If this is a transactional dbp and the open fails, then
		 * the transactional abort will close the dbp.  If it's not
		 * a transactional open, then we always have to close it
		 * even if the open fails.  Once the open has succeeded,
		 * then we will always want to close it.
		 */
		if (txn == NULL)
			needclose = 1;
		if ((ret = tmpdbp->open(tmpdbp,
		    txn, name, NULL, DB_QUEUE, 0, 0)) != 0)
			goto err;
		needclose = 1;
	}

	qp = (QUEUE *)tmpdbp->q_internal;

	if (qp->page_ext != 0 &&
	    (ret = __qam_gen_filelist(tmpdbp, &filelist)) != 0)
		goto err;

	/* No extents (or no extent support): nothing extra to remove. */
	if (filelist == NULL)
		goto err;

	/*
	 * For each extent: close its mpool file, clear its slot in the
	 * tracking array, and log/perform the file removal.
	 */
	for (fp = filelist; fp->mpf != NULL; fp++) {
		snprintf(buf, sizeof(buf),
		    QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, fp->id);
		if ((ret = fp->mpf->close(fp->mpf, DB_MPOOL_DISCARD)) != 0)
			goto err;
		if (qp->array2.n_extent == 0 || qp->array2.low_extent > fp->id)
			ap = &qp->array1;
		else
			ap = &qp->array2;
		ap->mpfarray[fp->id - ap->low_extent].mpf = NULL;

		/* Take care of object reclamation. */
		__qam_exid(tmpdbp, fid, fp->id);
		if ((ret = __fop_remove(dbenv,
		    txn, fid, buf, DB_APP_DATA)) != 0)
			goto err;
	}

err:	if (filelist != NULL)
		__os_free(dbenv, filelist);
	if (needclose) {
		/*
		 * Since we copied the lid from the dbp, we'd better not
		 * free it here.
		 */
		tmpdbp->lid = DB_LOCK_INVALIDID;

		/* We need to remove the lockevent we associated with this. */
		if (txn != NULL)
			__txn_remlock(dbenv,
			    txn, &tmpdbp->handle_lock, DB_LOCK_INVALIDID);

		if ((t_ret =
		    __db_close_i(tmpdbp, txn, DB_NOSYNC)) != 0 && ret == 0)
			ret = t_ret;
	}

	return (ret);
}
+
+/*
+ * __qam_rename
+ * Rename method for Queue.
+ *
+ * PUBLIC: int __qam_rename __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, const char *));
+ */
int
__qam_rename(dbp, txn, filename, subdb, newname)
	DB *dbp;
	DB_TXN *txn;
	const char *filename, *subdb, *newname;
{
	DB_ENV *dbenv;
	DB *tmpdbp;
	MPFARRAY *ap;
	QUEUE *qp;
	QUEUE_FILELIST *fp, *filelist;
	char buf[MAXPATHLEN], nbuf[MAXPATHLEN];
	char *namep;
	int ret, needclose, t_ret;
	u_int8_t fid[DB_FILE_ID_LEN], *fidp;

	dbenv = dbp->dbenv;
	ret = 0;
	filelist = NULL;
	needclose = 0;

	/* Queue has no subdatabases. */
	if (subdb != NULL) {
		__db_err(dbenv,
		    "Queue does not support multiple databases per file");
		ret = EINVAL;
		goto err;
	}

	/*
	 * Since regular rename no longer opens the database, we may have
	 * to do it here.
	 */
	if (F_ISSET(dbp, DB_AM_OPEN_CALLED))
		tmpdbp = dbp;
	else {
		if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
			return (ret);
		/* Copy the incoming locker so we don't self-deadlock. */
		tmpdbp->lid = dbp->lid;
		needclose = 1;
		if ((ret = tmpdbp->open(tmpdbp, txn, filename, NULL,
		    DB_QUEUE, 0, 0)) != 0)
			goto err;
	}

	qp = (QUEUE *)tmpdbp->q_internal;

	if (qp->page_ext != 0 &&
	    (ret = __qam_gen_filelist(tmpdbp, &filelist)) != 0)
		goto err;
	/* Strip any leading path from the new name. */
	if ((namep = __db_rpath(newname)) != NULL)
		newname = namep + 1;

	/*
	 * For each extent: capture its fileid, close it, clear its
	 * tracking slot, and rename old extent file to new.
	 */
	fidp = fid;
	for (fp = filelist; fp != NULL && fp->mpf != NULL; fp++) {
		fp->mpf->get_fileid(fp->mpf, fidp);
		if ((ret = fp->mpf->close(fp->mpf, DB_MPOOL_DISCARD)) != 0)
			goto err;
		if (qp->array2.n_extent == 0 || qp->array2.low_extent > fp->id)
			ap = &qp->array1;
		else
			ap = &qp->array2;
		ap->mpfarray[fp->id - ap->low_extent].mpf = NULL;
		snprintf(buf, sizeof(buf),
		    QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, fp->id);
		snprintf(nbuf, sizeof(nbuf),
		    QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], newname, fp->id);
		if ((ret = __fop_rename(dbenv,
		    txn, buf, nbuf, fidp, DB_APP_DATA)) != 0)
			goto err;
	}

err:	if (filelist != NULL)
		__os_free(dbenv, filelist);
	if (needclose) {
		/* We copied this, so we mustn't free it. */
		tmpdbp->lid = DB_LOCK_INVALIDID;

		/* We need to remove the lockevent we associated with this. */
		if (txn != NULL)
			__txn_remlock(dbenv,
			    txn, &tmpdbp->handle_lock, DB_LOCK_INVALIDID);

		if ((t_ret =
		    __db_close_i(tmpdbp, txn, DB_NOSYNC)) != 0 && ret == 0)
			ret = t_ret;
	}
	return (ret);
}
diff --git a/storage/bdb/qam/qam_open.c b/storage/bdb/qam/qam_open.c
new file mode 100644
index 00000000000..efe4dfc540e
--- /dev/null
+++ b/storage/bdb/qam/qam_open.c
@@ -0,0 +1,331 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_open.c,v 11.55 2002/09/04 19:06:45 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/qam.h"
+#include "dbinc/fop.h"
+
+static int __qam_init_meta __P((DB *, QMETA *));
+
+/*
+ * __qam_open
+ *
+ * PUBLIC: int __qam_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, db_pgno_t, int, u_int32_t));
+ */
+int
+__qam_open(dbp, txn, name, base_pgno, mode, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ db_pgno_t base_pgno;
+ int mode;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ QMETA *qmeta;
+ QUEUE *t;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ t = dbp->q_internal;
+ ret = 0;
+ qmeta = NULL;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->stat = __qam_stat;
+ dbp->sync = __qam_sync;
+ dbp->db_am_remove = __qam_remove;
+ dbp->db_am_rename = __qam_rename;
+
+ /*
+ * Get a cursor. If DB_CREATE is specified, we may be creating
+ * pages, and to do that safely in CDB we need a write cursor.
+ * In STD_LOCKING mode, we'll synchronize using the meta page
+ * lock instead.
+ */
+ if ((ret = dbp->cursor(dbp, txn, &dbc,
+ LF_ISSET(DB_CREATE) && CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0))
+ != 0)
+ return (ret);
+
+ /*
+ * Get the meta data page. It must exist, because creates of
+ * files/databases come in through the __qam_new_file interface
+ * and queue doesn't support subdatabases.
+ */
+ if ((ret =
+ __db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0)
+ goto err;
+ if ((ret =
+ mpf->get(mpf, &base_pgno, 0, (PAGE **)&qmeta)) != 0)
+ goto err;
+
+ /* If the magic number is incorrect, that's a fatal error. */
+ if (qmeta->dbmeta.magic != DB_QAMMAGIC) {
+ __db_err(dbenv, "%s: unexpected file type or format", name);
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* Setup information needed to open extents. */
+ t->page_ext = qmeta->page_ext;
+
+ if (t->page_ext != 0) {
+ t->pginfo.db_pagesize = dbp->pgsize;
+ t->pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ t->pginfo.type = dbp->type;
+ t->pgcookie.data = &t->pginfo;
+ t->pgcookie.size = sizeof(DB_PGINFO);
+
+ if ((ret = __os_strdup(dbp->dbenv, name, &t->path)) != 0)
+ return (ret);
+ t->dir = t->path;
+ if ((t->name = __db_rpath(t->path)) == NULL) {
+ t->name = t->path;
+ t->dir = PATH_DOT;
+ } else
+ *t->name++ = '\0';
+
+ if (mode == 0)
+ mode = __db_omode("rwrw--");
+ t->mode = mode;
+ }
+
+ if (name == NULL && t->page_ext != 0) {
+ __db_err(dbenv,
+ "Extent size may not be specified for in-memory queue database");
+ return (EINVAL);
+ }
+
+ t->re_pad = qmeta->re_pad;
+ t->re_len = qmeta->re_len;
+ t->rec_page = qmeta->rec_page;
+
+ t->q_meta = base_pgno;
+ t->q_root = base_pgno + 1;
+
+err: if (qmeta != NULL && (t_ret = mpf->put(mpf, qmeta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Don't hold the meta page long term. */
+ (void)__LPUT(dbc, metalock);
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __qam_metachk --
+ *
+ * PUBLIC: int __qam_metachk __P((DB *, const char *, QMETA *));
+ */
int
__qam_metachk(dbp, name, qmeta)
	DB *dbp;
	const char *name;
	QMETA *qmeta;
{
	DB_ENV *dbenv;
	u_int32_t vers;
	int ret;

	dbenv = dbp->dbenv;
	ret = 0;

	/*
	 * At this point, all we know is that the magic number is for a Queue.
	 * Check the version, the database may be out of date.
	 */
	vers = qmeta->dbmeta.version;
	if (F_ISSET(dbp, DB_AM_SWAP))
		M_32_SWAP(vers);
	switch (vers) {
	case 1:
	case 2:
		__db_err(dbenv,
		    "%s: queue version %lu requires a version upgrade",
		    name, (u_long)vers);
		return (DB_OLD_VERSION);
	case 3:
	case 4:
		/* Supported on-disk versions. */
		break;
	default:
		__db_err(dbenv,
		    "%s: unsupported qam version: %lu", name, (u_long)vers);
		return (EINVAL);
	}

	/* Swap the page if we need to. */
	if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __qam_mswap((PAGE *)qmeta)) != 0)
		return (ret);

	/* Check the type. */
	if (dbp->type != DB_QUEUE && dbp->type != DB_UNKNOWN)
		return (EINVAL);
	dbp->type = DB_QUEUE;
	DB_ILLEGAL_METHOD(dbp, DB_OK_QUEUE);

	/* Set the page size. */
	dbp->pgsize = qmeta->dbmeta.pagesize;

	/* Copy the file's ID. */
	memcpy(dbp->fileid, qmeta->dbmeta.uid, DB_FILE_ID_LEN);

	/* Set up AM-specific methods that do not require an open. */
	dbp->db_am_rename = __qam_rename;
	dbp->db_am_remove = __qam_remove;

	return (ret);
}
+
+/*
+ * __qam_init_meta --
+ * Initialize the meta-data for a Queue database.
+ */
static int
__qam_init_meta(dbp, meta)
	DB *dbp;
	QMETA *meta;
{
	QUEUE *t;

	t = dbp->q_internal;

	/* Start from a zeroed page and fill in the common header. */
	memset(meta, 0, sizeof(QMETA));
	LSN_NOT_LOGGED(meta->dbmeta.lsn);
	meta->dbmeta.pgno = PGNO_BASE_MD;
	meta->dbmeta.last_pgno = 0;
	meta->dbmeta.magic = DB_QAMMAGIC;
	meta->dbmeta.version = DB_QAMVERSION;
	meta->dbmeta.pagesize = dbp->pgsize;
	if (F_ISSET(dbp, DB_AM_CHKSUM))
		FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
	if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
		meta->dbmeta.encrypt_alg =
		    ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
		DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
		/* Kept in the clear so decryption can be verified. */
		meta->crypto_magic = meta->dbmeta.magic;
	}
	meta->dbmeta.type = P_QAMMETA;
	/* Queue-specific layout: record size/pad and extent size. */
	meta->re_pad = t->re_pad;
	meta->re_len = t->re_len;
	meta->rec_page = CALC_QAM_RECNO_PER_PAGE(dbp);
	meta->cur_recno = 1;
	meta->first_recno = 1;
	meta->page_ext = t->page_ext;
	t->rec_page = meta->rec_page;
	memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);

	/* Verify that we can fit at least one record per page. */
	if (QAM_RECNO_PER_PAGE(dbp) < 1) {
		__db_err(dbp->dbenv,
		    "Record size of %lu too large for page size of %lu",
		    (u_long)t->re_len, (u_long)dbp->pgsize);
		return (EINVAL);
	}

	return (0);
}
+
+/*
+ * __qam_new_file --
+ * Create the necessary pages to begin a new queue database file.
+ *
+ * This code appears more complex than it is because of the two cases (named
+ * and unnamed). The way to read the code is that for each page being created,
+ * there are three parts: 1) a "get page" chunk (which either uses malloc'd
+ * memory or calls mpf->get), 2) the initialization, and 3) the "put page"
+ * chunk which either does a fop write or an mpf->put.
+ *
+ * PUBLIC: int __qam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
int
__qam_new_file(dbp, txn, fhp, name)
	DB *dbp;
	DB_TXN *txn;
	DB_FH *fhp;
	const char *name;
{
	QMETA *meta;
	DB_ENV *dbenv;
	DB_MPOOLFILE *mpf;
	DB_PGINFO pginfo;
	DBT pdbt;
	db_pgno_t pgno;
	int ret;
	void *buf;

	dbenv = dbp->dbenv;
	mpf = dbp->mpf;
	buf = NULL;
	meta = NULL;

	/* Build meta-data page. */

	/*
	 * Unnamed (in-memory) databases get the page straight from the
	 * mpool; named files format a private buffer and write it out
	 * through the fileop layer so the create can be logged.
	 */
	if (name == NULL) {
		pgno = PGNO_BASE_MD;
		ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &meta);
	} else {
		ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
		meta = (QMETA *)buf;
	}
	if (ret != 0)
		return (ret);

	if ((ret = __qam_init_meta(dbp, meta)) != 0)
		goto err;

	if (name == NULL)
		ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
	else {
		/* Run the page through pgout (swap/checksum/encrypt). */
		pginfo.db_pagesize = dbp->pgsize;
		pginfo.flags =
		    F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
		pginfo.type = DB_QUEUE;
		pdbt.data = &pginfo;
		pdbt.size = sizeof(pginfo);
		if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
			goto err;
		ret = __fop_write(dbenv,
		    txn, name, DB_APP_DATA, fhp, 0, buf, dbp->pgsize, 1);
	}
	if (ret != 0)
		goto err;
	/* The page has been released; don't put it again below. */
	meta = NULL;

err:	if (name != NULL)
		__os_free(dbenv, buf);
	else if (meta != NULL)
		(void)mpf->put(mpf, meta, 0);
	return (ret);
}
diff --git a/storage/bdb/qam/qam_rec.c b/storage/bdb/qam/qam_rec.c
new file mode 100644
index 00000000000..2c0f1227752
--- /dev/null
+++ b/storage/bdb/qam/qam_rec.c
@@ -0,0 +1,568 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_rec.c,v 11.69 2002/08/06 06:17:10 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+/*
+ * __qam_incfirst_recover --
+ * Recovery function for incfirst.
+ *
+ * PUBLIC: int __qam_incfirst_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_incfirst_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ QUEUE_CURSOR *cp;
+ db_pgno_t metapg;
+ int exact, modified, ret, rec_ext;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_incfirst_print);
+ REC_INTRO(__qam_incfirst_read, 1);
+
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto done;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &metapg, DB_MPOOL_CREATE, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ meta->dbmeta.pgno = metapg;
+ meta->dbmeta.type = P_QAMMETA;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ }
+
+ modified = 0;
+
+ /*
+ * Only move first_recno backwards so we pick up the aborted delete.
+ * When going forward we need to be careful since
+ * we may have bumped over a locked record.
+ */
+ if (DB_UNDO(op)) {
+ if (QAM_BEFORE_FIRST(meta, argp->recno)) {
+ meta->first_recno = argp->recno;
+ modified = 1;
+ }
+ } else {
+ if (log_compare(&LSN(meta), lsnp) < 0) {
+ LSN(meta) = *lsnp;
+ modified = 1;
+ }
+ rec_ext = 0;
+ if (meta->page_ext != 0)
+ rec_ext = meta->page_ext * meta->rec_page;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ if (meta->first_recno == RECNO_OOB)
+ meta->first_recno++;
+ while (meta->first_recno != meta->cur_recno &&
+ !QAM_BEFORE_FIRST(meta, argp->recno + 1)) {
+ if ((ret = __qam_position(dbc,
+ &meta->first_recno, QAM_READ, &exact)) != 0)
+ goto err;
+ if (cp->page != NULL)
+ __qam_fput(file_dbp, cp->pgno, cp->page, 0);
+
+ if (exact == 1)
+ break;
+ if (cp->page != NULL &&
+ rec_ext != 0 && meta->first_recno % rec_ext == 0)
+ if ((ret =
+ __qam_fremove(file_dbp, cp->pgno)) != 0)
+ goto err;
+ meta->first_recno++;
+ if (meta->first_recno == RECNO_OOB)
+ meta->first_recno++;
+ modified = 1;
+ }
+ }
+
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto err1;
+
+ (void)__LPUT(dbc, lock);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)mpf->put(mpf, meta, 0);
+err1: (void)__LPUT(dbc, lock);
+ }
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_mvptr_recover --
+ * Recovery function for mvptr.
+ *
+ * PUBLIC: int __qam_mvptr_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_mvptr_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ db_pgno_t metapg;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_mvptr_print);
+ REC_INTRO(__qam_mvptr_read, 1);
+
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto done;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &metapg, DB_MPOOL_CREATE, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ meta->dbmeta.pgno = metapg;
+ meta->dbmeta.type = P_QAMMETA;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ (void)__LPUT(dbc, lock);
+ goto out;
+ }
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->metalsn);
+
+ /*
+ * Under normal circumstances, we never undo a movement of one of
+ * the pointers. Just move them along regardless of abort/commit.
+ *
+ * If we're undoing a truncate, we need to reset the pointers to
+ * their state before the truncate.
+ */
+ if (DB_UNDO(op) && (argp->opcode & QAM_TRUNCATE)) {
+ if (cmp_n == 0) {
+ meta->first_recno = argp->old_first;
+ meta->cur_recno = argp->old_cur;
+ modified = 1;
+ meta->dbmeta.lsn = argp->metalsn;
+ }
+ } else if (cmp_p == 0) {
+ if (argp->opcode & QAM_SETFIRST)
+ meta->first_recno = argp->new_first;
+
+ if (argp->opcode & QAM_SETCUR)
+ meta->cur_recno = argp->new_cur;
+
+ modified = 1;
+ meta->dbmeta.lsn = *lsnp;
+ }
+
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ (void)__LPUT(dbc, lock);
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_del_recover --
+ * Recovery function for del.
+ * Non-extent version or if there is no data (zero len).
+ *
+ * PUBLIC: int __qam_del_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_del_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_del_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QAMDATA *qp;
+ QMETA *meta;
+ QPAGE *pagep;
+ db_pgno_t metapg;
+ int cmp_n, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_del_print);
+ REC_INTRO(__qam_del_read, 1);
+
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+
+ modified = 0;
+ if (pagep->pgno == PGNO_INVALID) {
+ pagep->pgno = argp->pgno;
+ pagep->type = P_QAMDATA;
+ modified = 1;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ if (DB_UNDO(op)) {
+ /* make sure first is behind us */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+ if (meta->first_recno == RECNO_OOB ||
+ (QAM_BEFORE_FIRST(meta, argp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ meta->first_recno -
+ argp->recno < argp->recno - meta->cur_recno))) {
+ meta->first_recno = argp->recno;
+ (void)mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ } else
+ (void)mpf->put(mpf, meta, 0);
+ (void)__LPUT(dbc, lock);
+
+ /* Need to undo delete - mark the record as present */
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ F_SET(qp, QAM_VALID);
+
+ /*
+ * Move the LSN back to this point; do not move it forward.
+ * Only move it back if we're in recovery. If we're in
+ * an abort, because we don't hold a page lock, we could
+ * foul up a concurrent put. Having too late an LSN
+ * is harmless in queue except when we're determining
+ * what we need to roll forward during recovery. [#2588]
+ */
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ } else if (cmp_n > 0 && DB_REDO(op)) {
+ /* Need to redo delete - clear the valid bit */
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ F_CLR(qp, QAM_VALID);
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ }
+ if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)__qam_fput(file_dbp, argp->pgno, pagep, 0);
+ }
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_delext_recover --
+ * Recovery function for del in an extent based queue.
+ *
+ * PUBLIC: int __qam_delext_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_delext_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_delext_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ QAMDATA *qp;
+ QMETA *meta;
+ QPAGE *pagep;
+ db_pgno_t metapg;
+ int cmp_n, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_delext_print);
+ REC_INTRO(__qam_delext_read, 1);
+
+ if ((ret = __qam_fget(file_dbp, &argp->pgno, 0, &pagep)) != 0) {
+ if (ret != DB_PAGE_NOTFOUND && ret != ENOENT)
+ goto out;
+ /*
+ * If we are redoing a delete and the page is not there
+ * we are done.
+ */
+ if (DB_REDO(op))
+ goto done;
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ modified = 0;
+ if (pagep->pgno == PGNO_INVALID) {
+ pagep->pgno = argp->pgno;
+ pagep->type = P_QAMDATA;
+ modified = 1;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ if (DB_UNDO(op)) {
+ /* make sure first is behind us */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ if ((ret = __db_lget(dbc,
+ LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
+ (void)__LPUT(dbc, lock);
+ goto err;
+ }
+ if (meta->first_recno == RECNO_OOB ||
+ (QAM_BEFORE_FIRST(meta, argp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ meta->first_recno -
+ argp->recno < argp->recno - meta->cur_recno))) {
+ meta->first_recno = argp->recno;
+ (void)mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ } else
+ (void)mpf->put(mpf, meta, 0);
+ (void)__LPUT(dbc, lock);
+
+ if ((ret = __qam_pitem(dbc, pagep,
+ argp->indx, argp->recno, &argp->data)) != 0)
+ goto err;
+
+ /*
+ * Move the LSN back to this point; do not move it forward.
+ * Only move it back if we're in recovery. If we're in
+ * an abort, because we don't hold a page lock, we could
+ * foul up a concurrent put. Having too late an LSN
+ * is harmless in queue except when we're determining
+ * what we need to roll forward during recovery. [#2588]
+ */
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
+ LSN(pagep) = argp->lsn;
+ modified = 1;
+ } else if (cmp_n > 0 && DB_REDO(op)) {
+ /* Need to redo delete - clear the valid bit */
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ F_CLR(qp, QAM_VALID);
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ }
+ if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)__qam_fput(file_dbp, argp->pgno, pagep, 0);
+ }
+out: REC_CLOSE;
+}
+
+/*
+ * __qam_add_recover --
+ * Recovery function for add.
+ *
+ * PUBLIC: int __qam_add_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__qam_add_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __qam_add_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ QAMDATA *qp;
+ QMETA *meta;
+ QPAGE *pagep;
+ db_pgno_t metapg;
+ int cmp_n, meta_dirty, modified, ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__qam_add_print);
+ REC_INTRO(__qam_add_read, 1);
+
+ modified = 0;
+ if ((ret = __qam_fget(file_dbp, &argp->pgno, 0, &pagep)) != 0) {
+ if (ret != DB_PAGE_NOTFOUND && ret != ENOENT)
+ goto out;
+ /*
+ * If we are undoing an append and the page is not there
+ * we are done.
+ */
+ if (DB_UNDO(op))
+ goto done;
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
+
+ if (pagep->pgno == PGNO_INVALID) {
+ pagep->pgno = argp->pgno;
+ pagep->type = P_QAMDATA;
+ modified = 1;
+ }
+
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ if (DB_REDO(op)) {
+ /* Fix meta-data page. */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0)
+ goto err;
+ meta_dirty = 0;
+ if (QAM_BEFORE_FIRST(meta, argp->recno)) {
+ meta->first_recno = argp->recno;
+ meta_dirty = 1;
+ }
+ if (argp->recno == meta->cur_recno ||
+ QAM_AFTER_CURRENT(meta, argp->recno)) {
+ meta->cur_recno = argp->recno + 1;
+ meta_dirty = 1;
+ }
+ if ((ret =
+ mpf->put(mpf, meta, meta_dirty? DB_MPOOL_DIRTY : 0)) != 0)
+ goto err;
+
+ /* Now update the actual page if necessary. */
+ if (cmp_n > 0) {
+ /* Need to redo add - put the record on page */
+ if ((ret = __qam_pitem(dbc,
+ pagep, argp->indx, argp->recno, &argp->data)) != 0)
+ goto err;
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ /* Make sure pointers include this record. */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ }
+ } else if (DB_UNDO(op)) {
+ /*
+ * Need to undo add
+ * If this was an overwrite, put old record back.
+ * Otherwise just clear the valid bit
+ */
+ if (argp->olddata.size != 0) {
+ if ((ret = __qam_pitem(dbc, pagep,
+ argp->indx, argp->recno, &argp->olddata)) != 0)
+ goto err;
+
+ if (!(argp->vflag & QAM_VALID)) {
+ qp = QAM_GET_RECORD(
+ file_dbp, pagep, argp->indx);
+ F_CLR(qp, QAM_VALID);
+ }
+ modified = 1;
+ } else {
+ qp = QAM_GET_RECORD(file_dbp, pagep, argp->indx);
+ qp->flags = 0;
+ modified = 1;
+ }
+
+ /*
+ * Move the LSN back to this point; do not move it forward.
+ * Only move it back if we're in recovery. If we're in
+ * an abort, because we don't hold a page lock, we could
+ * foul up a concurrent put. Having too late an LSN
+ * is harmless in queue except when we're determining
+ * what we need to roll forward during recovery. [#2588]
+ */
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
+ LSN(pagep) = argp->lsn;
+ }
+
+ if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+ if (0) {
+err: (void)__qam_fput(file_dbp, argp->pgno, pagep, 0);
+ }
+
+out: REC_CLOSE;
+}
diff --git a/storage/bdb/qam/qam_stat.c b/storage/bdb/qam/qam_stat.c
new file mode 100644
index 00000000000..57c67da4292
--- /dev/null
+++ b/storage/bdb/qam/qam_stat.c
@@ -0,0 +1,203 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_stat.c,v 11.32 2002/05/11 13:40:11 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+/*
+ * __qam_stat --
+ * Gather/print the qam statistics
+ *
+ * PUBLIC: int __qam_stat __P((DB *, void *, u_int32_t));
+ */
+int
+__qam_stat(dbp, spp, flags)
+ DB *dbp;
+ void *spp;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
+ DB_QUEUE_STAT *sp;
+ PAGE *h;
+ QAMDATA *qp, *ep;
+ QMETA *meta;
+ QUEUE *t;
+ db_indx_t indx;
+ db_pgno_t first, last, pgno, pg_ext, stop;
+ u_int32_t re_len;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ LOCK_INIT(lock);
+ mpf = dbp->mpf;
+ sp = NULL;
+ t = dbp->q_internal;
+
+ /* Check for invalid flags. */
+ if ((ret = __db_statchk(dbp, flags)) != 0)
+ return (ret);
+
+ if (spp == NULL)
+ return (0);
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ return (ret);
+
+ DEBUG_LWRITE(dbc, NULL, "qam_stat", NULL, NULL, flags);
+
+ /* Allocate and clear the structure. */
+ if ((ret = __os_umalloc(dbp->dbenv, sizeof(*sp), &sp)) != 0)
+ goto err;
+ memset(sp, 0, sizeof(*sp));
+
+ re_len = ((QUEUE *)dbp->q_internal)->re_len;
+
+ /* Determine the last page of the database. */
+ if ((ret = __db_lget(dbc,
+ 0, t->q_meta, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ if (flags == DB_FAST_STAT || flags == DB_CACHED_COUNTS) {
+ sp->qs_nkeys = meta->dbmeta.key_count;
+ sp->qs_ndata = meta->dbmeta.record_count;
+ goto meta_only;
+ }
+
+ first = QAM_RECNO_PAGE(dbp, meta->first_recno);
+ last = QAM_RECNO_PAGE(dbp, meta->cur_recno);
+
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ (void)__LPUT(dbc, lock);
+
+ pgno = first;
+ if (first > last)
+ stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX);
+ else
+ stop = last;
+
+ /* Dump each page. */
+ pg_ext = ((QUEUE *)dbp->q_internal)->page_ext;
+begin:
+ /* Walk through the pages and count. */
+ for (; pgno <= stop; ++pgno) {
+ if ((ret =
+ __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ ret = __qam_fget(dbp, &pgno, 0, &h);
+ if (ret == ENOENT) {
+ pgno += pg_ext - 1;
+ continue;
+ }
+ if (ret == DB_PAGE_NOTFOUND) {
+ if (pg_ext == 0) {
+ if (pgno != stop && first != last)
+ goto err;
+ ret = 0;
+ break;
+ }
+ pgno += pg_ext - ((pgno - 1) % pg_ext) - 1;
+ continue;
+ }
+ if (ret != 0)
+ goto err;
+
+ ++sp->qs_pages;
+
+ ep = (QAMDATA *)((u_int8_t *)h + dbp->pgsize - re_len);
+ for (indx = 0, qp = QAM_GET_RECORD(dbp, h, indx);
+ qp <= ep;
+ ++indx, qp = QAM_GET_RECORD(dbp, h, indx)) {
+ if (F_ISSET(qp, QAM_VALID))
+ sp->qs_ndata++;
+ else
+ sp->qs_pgfree += re_len;
+ }
+
+ if ((ret = __qam_fput(dbp, pgno, h, 0)) != 0)
+ goto err;
+ (void)__LPUT(dbc, lock);
+ }
+
+ (void)__LPUT(dbc, lock);
+ if (first > last) {
+ pgno = 1;
+ stop = last;
+ first = last;
+ goto begin;
+ }
+
+ /* Get the meta-data page. */
+ if ((ret = __db_lget(dbc,
+ 0, t->q_meta, F_ISSET(dbp, DB_AM_RDONLY) ?
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
+ goto err;
+
+ if (!F_ISSET(dbp, DB_AM_RDONLY))
+ meta->dbmeta.key_count =
+ meta->dbmeta.record_count = sp->qs_ndata;
+ sp->qs_nkeys = sp->qs_ndata;
+
+meta_only:
+ /* Get the metadata fields. */
+ sp->qs_magic = meta->dbmeta.magic;
+ sp->qs_version = meta->dbmeta.version;
+ sp->qs_metaflags = meta->dbmeta.flags;
+ sp->qs_pagesize = meta->dbmeta.pagesize;
+ sp->qs_extentsize = meta->page_ext;
+ sp->qs_re_len = meta->re_len;
+ sp->qs_re_pad = meta->re_pad;
+ sp->qs_first_recno = meta->first_recno;
+ sp->qs_cur_recno = meta->cur_recno;
+
+ /* Discard the meta-data page. */
+ if ((ret = mpf->put(mpf,
+ meta, F_ISSET(dbp, DB_AM_RDONLY) ? 0 : DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ (void)__LPUT(dbc, lock);
+
+ *(DB_QUEUE_STAT **)spp = sp;
+ ret = 0;
+
+ if (0) {
+err: if (sp != NULL)
+ __os_ufree(dbp->dbenv, sp);
+ }
+
+ (void)__LPUT(dbc, lock);
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/storage/bdb/qam/qam_upgrade.c b/storage/bdb/qam/qam_upgrade.c
new file mode 100644
index 00000000000..6bd79fc948a
--- /dev/null
+++ b/storage/bdb/qam/qam_upgrade.c
@@ -0,0 +1,108 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_upgrade.c,v 11.12 2002/03/29 20:46:48 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_upgrade.h"
+
+/*
+ * __qam_31_qammeta --
+ * Upgrade the database from version 1 to version 2.
+ *
+ * PUBLIC: int __qam_31_qammeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__qam_31_qammeta(dbp, real_name, buf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *buf;
+{
+ QMETA31 *newmeta;
+ QMETA30 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+
+ newmeta = (QMETA31 *)buf;
+ oldmeta = (QMETA30 *)buf;
+
+ /*
+ * Copy the fields to their new locations.
+ * They may overlap so start at the bottom and use memmove().
+ */
+ newmeta->rec_page = oldmeta->rec_page;
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->cur_recno = oldmeta->cur_recno;
+ newmeta->first_recno = oldmeta->first_recno;
+ newmeta->start = oldmeta->start;
+ memmove(newmeta->dbmeta.uid,
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
+ newmeta->dbmeta.record_count = 0;
+ newmeta->dbmeta.key_count = 0;
+ ZERO_LSN(newmeta->dbmeta.unused3);
+
+ /* Update the version. */
+ newmeta->dbmeta.version = 2;
+
+ return (0);
+}
+
+/*
+ * __qam_32_qammeta --
+ * Upgrade the database from version 2 to version 3.
+ *
+ * PUBLIC: int __qam_32_qammeta __P((DB *, char *, u_int8_t *));
+ */
+int
+__qam_32_qammeta(dbp, real_name, buf)
+ DB *dbp;
+ char *real_name;
+ u_int8_t *buf;
+{
+ QMETA32 *newmeta;
+ QMETA31 *oldmeta;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(real_name, NULL);
+
+ newmeta = (QMETA32 *)buf;
+ oldmeta = (QMETA31 *)buf;
+
+ /*
+ * Copy the fields to their new locations.
+ * We are dropping the first field so move
+ * from the top.
+ */
+ newmeta->first_recno = oldmeta->first_recno;
+ newmeta->cur_recno = oldmeta->cur_recno;
+ newmeta->re_len = oldmeta->re_len;
+ newmeta->re_pad = oldmeta->re_pad;
+ newmeta->rec_page = oldmeta->rec_page;
+ newmeta->page_ext = 0;
+ /* cur_recno now points to the first free slot. */
+ newmeta->cur_recno++;
+ if (newmeta->first_recno == 0)
+ newmeta->first_recno = 1;
+
+ /* Update the version. */
+ newmeta->dbmeta.version = 3;
+
+ return (0);
+}
diff --git a/storage/bdb/qam/qam_verify.c b/storage/bdb/qam/qam_verify.c
new file mode 100644
index 00000000000..5b020c2c335
--- /dev/null
+++ b/storage/bdb/qam/qam_verify.c
@@ -0,0 +1,200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: qam_verify.c,v 1.30 2002/06/26 20:49:27 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/qam.h"
+#include "dbinc/db_am.h"
+
+/*
+ * __qam_vrfy_meta --
+ * Verify the queue-specific part of a metadata page.
+ *
+ * PUBLIC: int __qam_vrfy_meta __P((DB *, VRFY_DBINFO *, QMETA *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__qam_vrfy_meta(dbp, vdp, meta, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ QMETA *meta;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ int isbad, ret, t_ret;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ return (ret);
+ isbad = 0;
+
+ /*
+ * Queue can't be used in subdatabases, so if this isn't set
+ * something very odd is going on.
+ */
+ if (!F_ISSET(pip, VRFY_INCOMPLETE))
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue databases must be one-per-file",
+ (u_long)pgno));
+
+ /*
+ * cur_recno/rec_page
+ * Cur_recno may be one beyond the end of the page and
+ * we start numbering from 1.
+ */
+ if (vdp->last_pgno > 0 && meta->cur_recno > 0 &&
+ meta->cur_recno - 1 > meta->rec_page * vdp->last_pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: current recno %lu references record past last page number %lu",
+ (u_long)pgno,
+ (u_long)meta->cur_recno, (u_long)vdp->last_pgno));
+ isbad = 1;
+ }
+
+ /*
+ * re_len: If this is bad, we can't safely verify queue data pages, so
+ * return DB_VERIFY_FATAL
+ */
+ if (ALIGN(meta->re_len + sizeof(QAMDATA) - 1, sizeof(u_int32_t)) *
+ meta->rec_page + QPAGE_SZ(dbp) > dbp->pgsize) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue record length %lu too high for page size and recs/page",
+ (u_long)pgno, (u_long)meta->re_len));
+ ret = DB_VERIFY_FATAL;
+ goto err;
+ } else {
+ vdp->re_len = meta->re_len;
+ vdp->rec_page = meta->rec_page;
+ }
+
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
+}
+
+/*
+ * __qam_vrfy_data --
+ * Verify a queue data page.
+ *
+ * PUBLIC: int __qam_vrfy_data __P((DB *, VRFY_DBINFO *, QPAGE *,
+ * PUBLIC: db_pgno_t, u_int32_t));
+ */
+int
+__qam_vrfy_data(dbp, vdp, h, pgno, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ QPAGE *h;
+ db_pgno_t pgno;
+ u_int32_t flags;
+{
+ DB fakedb;
+ struct __queue fakeq;
+ QAMDATA *qp;
+ db_recno_t i;
+ u_int8_t qflags;
+
+ /*
+ * Not much to do here, except make sure that flags are reasonable.
+ *
+ * QAM_GET_RECORD assumes a properly initialized q_internal
+ * structure, however, and we don't have one, so we play
+ * some gross games to fake it out.
+ */
+ fakedb.q_internal = &fakeq;
+ fakedb.flags = dbp->flags;
+ fakeq.re_len = vdp->re_len;
+
+ for (i = 0; i < vdp->rec_page; i++) {
+ qp = QAM_GET_RECORD(&fakedb, h, i);
+ if ((u_int8_t *)qp >= (u_int8_t *)h + dbp->pgsize) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue record %lu extends past end of page",
+ (u_long)pgno, (u_long)i));
+ return (DB_VERIFY_BAD);
+ }
+
+ qflags = qp->flags;
+ qflags &= !(QAM_VALID | QAM_SET);
+ if (qflags != 0) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue record %lu has bad flags",
+ (u_long)pgno, (u_long)i));
+ return (DB_VERIFY_BAD);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * __qam_vrfy_structure --
+ * Verify a queue database structure, such as it is.
+ *
+ * PUBLIC: int __qam_vrfy_structure __P((DB *, VRFY_DBINFO *, u_int32_t));
+ */
+int
+__qam_vrfy_structure(dbp, vdp, flags)
+ DB *dbp;
+ VRFY_DBINFO *vdp;
+ u_int32_t flags;
+{
+ VRFY_PAGEINFO *pip;
+ db_pgno_t i;
+ int ret, isbad;
+
+ isbad = 0;
+
+ if ((ret = __db_vrfy_getpageinfo(vdp, PGNO_BASE_MD, &pip)) != 0)
+ return (ret);
+
+ if (pip->type != P_QAMMETA) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue database has no meta page",
+ (u_long)PGNO_BASE_MD));
+ isbad = 1;
+ goto err;
+ }
+
+ if ((ret = __db_vrfy_pgset_inc(vdp->pgset, 0)) != 0)
+ goto err;
+
+ for (i = 1; i <= vdp->last_pgno; i++) {
+ /* Send feedback to the application about our progress. */
+ if (!LF_ISSET(DB_SALVAGE))
+ __db_vrfy_struct_feedback(dbp, vdp);
+
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 ||
+ (ret = __db_vrfy_getpageinfo(vdp, i, &pip)) != 0)
+ return (ret);
+ if (!F_ISSET(pip, VRFY_IS_ALLZEROES) &&
+ pip->type != P_QAMDATA) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue database page of incorrect type %lu",
+ (u_long)i, (u_long)pip->type));
+ isbad = 1;
+ goto err;
+ } else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, i)) != 0)
+ goto err;
+ }
+
+err: if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0)
+ return (ret);
+ return (isbad == 1 ? DB_VERIFY_BAD : 0);
+}
diff --git a/storage/bdb/rep/rep_method.c b/storage/bdb/rep/rep_method.c
new file mode 100644
index 00000000000..6773a537f4f
--- /dev/null
+++ b/storage/bdb/rep/rep_method.c
@@ -0,0 +1,1144 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: rep_method.c,v 1.78 2002/09/10 12:58:07 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __rep_abort_prepared __P((DB_ENV *));
+static int __rep_bt_cmp __P((DB *, const DBT *, const DBT *));
+static int __rep_client_dbinit __P((DB_ENV *, int));
+static int __rep_elect __P((DB_ENV *, int, int, u_int32_t, int *));
+static int __rep_elect_init __P((DB_ENV *, DB_LSN *, int, int, int, int *));
+static int __rep_flush __P((DB_ENV *));
+static int __rep_restore_prepared __P((DB_ENV *));
+static int __rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t));
+static int __rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t));
+static int __rep_set_rep_transport __P((DB_ENV *, int,
+ int (*)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+static int __rep_start __P((DB_ENV *, DBT *, u_int32_t));
+static int __rep_stat __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+static int __rep_wait __P((DB_ENV *, u_int32_t, int *, u_int32_t));
+
+/*
+ * __rep_dbenv_create --
+ * Replication-specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: int __rep_dbenv_create __P((DB_ENV *));
+ */
+int
+__rep_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ int ret;
+
+#ifdef HAVE_RPC
+ /*
+ * An RPC client only wires up the client-side stubs; note that no
+ * local DB_REP structure is allocated in that case (rep_handle is
+ * left untouched).
+ */
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ COMPQUIET(db_rep, NULL);
+ COMPQUIET(ret, 0);
+ dbenv->rep_elect = __dbcl_rep_elect;
+ dbenv->rep_flush = __dbcl_rep_flush;
+ dbenv->rep_process_message = __dbcl_rep_process_message;
+ dbenv->rep_start = __dbcl_rep_start;
+ dbenv->rep_stat = __dbcl_rep_stat;
+ dbenv->set_rep_limit = __dbcl_rep_set_limit;
+ dbenv->set_rep_request = __dbcl_rep_set_request;
+ dbenv->set_rep_transport = __dbcl_rep_set_rep_transport;
+
+ } else
+#endif
+ {
+ /* Wire up the real (local) replication methods. */
+ dbenv->rep_elect = __rep_elect;
+ dbenv->rep_flush = __rep_flush;
+ dbenv->rep_process_message = __rep_process_message;
+ dbenv->rep_start = __rep_start;
+ dbenv->rep_stat = __rep_stat;
+ dbenv->set_rep_limit = __rep_set_limit;
+ dbenv->set_rep_request = __rep_set_request;
+ dbenv->set_rep_transport = __rep_set_rep_transport;
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ */
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_REP), &db_rep)) != 0)
+ return (ret);
+ dbenv->rep_handle = db_rep;
+
+ /* Initialize the per-process replication structure. */
+ db_rep->rep_send = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_start --
+ * Become a master or client, and start sending messages to participate
+ * in the replication environment. Must be called after the environment
+ * is open.
+ */
+static int
+__rep_start(dbenv, dbt, flags)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_LSN lsn;
+ DB_REP *db_rep;
+ REP *rep;
+ int announce, init_db, redo_prepared, ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "rep_start");
+ /* Fix: report the correct method name (was a "rep_stat" copy-paste). */
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_start", DB_INIT_TXN);
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ if ((ret = __db_fchk(dbenv, "DB_ENV->rep_start", flags,
+ DB_REP_CLIENT | DB_REP_LOGSONLY | DB_REP_MASTER)) != 0)
+ return (ret);
+
+ /* Exactly one of CLIENT and MASTER must be specified. */
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->rep_start", flags, DB_REP_CLIENT, DB_REP_MASTER)) != 0)
+ return (ret);
+ if (!LF_ISSET(DB_REP_CLIENT | DB_REP_MASTER | DB_REP_LOGSONLY)) {
+ __db_err(dbenv,
+ "DB_ENV->rep_start: replication mode must be specified");
+ return (EINVAL);
+ }
+
+ /* Masters can't be logs-only. */
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->rep_start", flags, DB_REP_LOGSONLY, DB_REP_MASTER)) != 0)
+ return (ret);
+
+ /* We need a transport function. */
+ if (db_rep->rep_send == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport must be called before DB_ENV->rep_start");
+ return (EINVAL);
+ }
+
+ /* We'd better not have any logged files open if we are a client. */
+ if (LF_ISSET(DB_REP_CLIENT) && (ret = __dbreg_nofiles(dbenv)) != 0) {
+ __db_err(dbenv, "DB_ENV->rep_start called with open files");
+ return (ret);
+ }
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ if (rep->eid == DB_EID_INVALID)
+ rep->eid = dbenv->rep_eid;
+
+ if (LF_ISSET(DB_REP_MASTER)) {
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT)) {
+ /*
+ * If we're upgrading from having been a client,
+ * preclose, so that we close our temporary database.
+ *
+ * Do not close files that we may have opened while
+ * doing a rep_apply; they'll get closed when we
+ * finally close the environment, but for now, leave
+ * them open, as we don't want to recycle their
+ * fileids, and we may need the handles again if
+ * we become a client and the original master
+ * that opened them becomes a master again.
+ */
+ if ((ret = __rep_preclose(dbenv, 0)) != 0) {
+ /*
+ * Bug fix: we hold the replication mutex
+ * here; the original code returned without
+ * releasing it, wedging all future
+ * replication calls in this environment.
+ */
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+ }
+
+ /*
+ * Now write a __txn_recycle record so that
+ * clients don't get confused with our txnids
+ * and txnids of previous masters.
+ */
+ F_CLR(dbenv, DB_ENV_REP_CLIENT);
+ if ((ret = __txn_reset(dbenv)) != 0) {
+ /* Same mutex-leak fix as above. */
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+ }
+ }
+
+ redo_prepared = 0;
+ if (!F_ISSET(rep, REP_F_MASTER)) {
+ /* Master is not yet set. */
+ if (F_ISSET(rep, REP_ISCLIENT)) {
+ F_CLR(rep, REP_ISCLIENT);
+ rep->gen = ++rep->w_gen;
+ redo_prepared = 1;
+ } else if (rep->gen == 0)
+ rep->gen = 1;
+ }
+
+ F_SET(rep, REP_F_MASTER);
+ F_SET(dbenv, DB_ENV_REP_MASTER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ /* Snapshot our current end-of-log LSN for the NEWMASTER message. */
+ dblp = (DB_LOG *)dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * Send the NEWMASTER message, then restore prepared txns
+ * if and only if we just upgraded from being a client.
+ */
+ if ((ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0)) == 0 &&
+ redo_prepared)
+ ret = __rep_restore_prepared(dbenv);
+ } else {
+ F_CLR(dbenv, DB_ENV_REP_MASTER);
+ F_SET(dbenv, DB_ENV_REP_CLIENT);
+ if (LF_ISSET(DB_REP_LOGSONLY))
+ F_SET(dbenv, DB_ENV_REP_LOGSONLY);
+
+ /* Only announce ourselves if we were not already a client. */
+ announce = !F_ISSET(rep, REP_ISCLIENT) ||
+ rep->master_id == DB_EID_INVALID;
+ init_db = 0;
+ if (!F_ISSET(rep, REP_ISCLIENT)) {
+ F_CLR(rep, REP_F_MASTER);
+ if (LF_ISSET(DB_REP_LOGSONLY))
+ F_SET(rep, REP_F_LOGSONLY);
+ else
+ F_SET(rep, REP_F_UPGRADE);
+
+ /*
+ * We initialize the client's generation number to 0.
+ * Upon startup, it looks for a master and updates the
+ * generation number as necessary, exactly as it does
+ * during normal operation and a master failure.
+ */
+ rep->gen = 0;
+ rep->master_id = DB_EID_INVALID;
+ init_db = 1;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * Abort any prepared transactions that were restored
+ * by recovery. We won't be able to create any txns of
+ * our own until they're resolved, but we can't resolve
+ * them ourselves; the master has to. If any get
+ * resolved as commits, we'll redo them when commit
+ * records come in. Aborts will simply be ignored.
+ */
+ if ((ret = __rep_abort_prepared(dbenv)) != 0)
+ return (ret);
+
+ if ((ret = __rep_client_dbinit(dbenv, init_db)) != 0)
+ return (ret);
+
+ /*
+ * If this client created a newly replicated environment,
+ * then announce the existence of this client. The master
+ * should respond with a message that will tell this client
+ * the current generation number and the current LSN. This
+ * will allow the client to either perform recovery or
+ * simply join in.
+ */
+ if (announce)
+ ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWCLIENT, NULL, dbt, 0);
+ }
+ return (ret);
+}
+
+/*
+ * __rep_client_dbinit --
+ *
+ * Initialize the LSN database on the client side. This is called from the
+ * client initialization code. The startup flag value indicates if
+ * this is the first thread/process starting up and therefore should create
+ * the LSN database. This routine must be called once by each process acting
+ * as a client.
+ */
+static int
+__rep_client_dbinit(dbenv, startup)
+ DB_ENV *dbenv;
+ int startup;
+{
+ DB_REP *db_rep;
+ DB *dbp;
+ int ret, t_ret;
+ u_int32_t flags;
+
+ PANIC_CHECK(dbenv);
+ db_rep = dbenv->rep_handle;
+ dbp = NULL;
+
+#define REPDBNAME "__db.rep.db"
+
+ /* Check if this has already been called on this environment. */
+ if (db_rep->rep_db != NULL)
+ return (0);
+
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+
+ if (startup) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+ /*
+ * Ignore errors, because if the file doesn't exist, this
+ * is perfectly OK.
+ */
+ (void)dbp->remove(dbp, REPDBNAME, NULL, 0);
+ }
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+ /* Order records by the LSN embedded in the control structure. */
+ if ((ret = dbp->set_bt_compare(dbp, __rep_bt_cmp)) != 0)
+ goto err;
+
+ /* Allow writes to this database on a client. */
+ F_SET(dbp, DB_AM_CL_WRITER);
+
+ flags = (F_ISSET(dbenv, DB_ENV_THREAD) ? DB_THREAD : 0) |
+ (startup ? DB_CREATE : 0);
+ /*
+ * Consistency fix: open via the REPDBNAME macro instead of
+ * repeating the "__db.rep.db" literal, so the remove and open
+ * paths can never drift apart. (Expands to the same string.)
+ */
+ if ((ret = dbp->open(dbp, NULL,
+ REPDBNAME, NULL, DB_BTREE, flags, 0)) != 0)
+ goto err;
+
+ db_rep->rep_db = dbp;
+
+ if (0) {
+err: if (dbp != NULL &&
+ (t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+ db_rep->rep_db = NULL;
+ }
+
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ return (ret);
+}
+
+/*
+ * __rep_bt_cmp --
+ *
+ * Comparison function for the LSN table. We use the entire control
+ * structure as a key (for simplicity, so we don't have to merge the
+ * other fields in the control with the data field), but really only
+ * care about the LSNs.
+ */
+static int
+__rep_bt_cmp(dbp, dbt1, dbt2)
+ DB *dbp;
+ const DBT *dbt1, *dbt2;
+{
+ DB_LSN a, b;
+ REP_CONTROL *c1, *c2;
+
+ COMPQUIET(dbp, NULL);
+
+ /*
+ * Each key is a full REP_CONTROL; only the embedded LSN matters
+ * for ordering. Copy via __ua_memcpy since the data may be
+ * unaligned inside the DBT.
+ */
+ c1 = dbt1->data;
+ c2 = dbt2->data;
+ __ua_memcpy(&a, &c1->lsn, sizeof(DB_LSN));
+ __ua_memcpy(&b, &c2->lsn, sizeof(DB_LSN));
+
+ /* Order by log file number first, then by offset within the file. */
+ if (a.file != b.file)
+ return (a.file > b.file ? 1 : -1);
+ if (a.offset != b.offset)
+ return (a.offset > b.offset ? 1 : -1);
+ return (0);
+}
+
+/*
+ * __rep_abort_prepared --
+ * Abort any prepared transactions that recovery restored.
+ *
+ * This is used by clients that have just run recovery, since
+ * they cannot/should not call txn_recover and handle prepared transactions
+ * themselves.
+ */
+static int
+__rep_abort_prepared(dbenv)
+ DB_ENV *dbenv;
+{
+#define PREPLISTSIZE 50
+ DB_PREPLIST prep[PREPLISTSIZE], *p;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ int do_aborts, ret;
+ long count, i;
+ u_int32_t op;
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ /* Only scan if recovery actually restored any prepared txns. */
+ do_aborts = 0;
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (region->stat.st_nrestores != 0)
+ do_aborts = 1;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (do_aborts) {
+ /*
+ * Walk the prepared list in batches of PREPLISTSIZE,
+ * aborting each entry; a short batch means we are done.
+ */
+ op = DB_FIRST;
+ do {
+ if ((ret = dbenv->txn_recover(dbenv,
+ prep, PREPLISTSIZE, &count, op)) != 0)
+ return (ret);
+ for (i = 0; i < count; i++) {
+ p = &prep[i];
+ if ((ret = p->txn->abort(p->txn)) != 0)
+ return (ret);
+ }
+ op = DB_NEXT;
+ } while (count == PREPLISTSIZE);
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_restore_prepared --
+ * Restore to a prepared state any prepared but not yet committed
+ * transactions.
+ *
+ * This performs, in effect, a "mini-recovery"; it is called from
+ * __rep_start by newly upgraded masters. There may be transactions that an
+ * old master prepared but did not resolve, which we need to restore to an
+ * active state.
+ */
+static int
+__rep_restore_prepared(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOGC *logc;
+ DB_LSN ckp_lsn, lsn;
+ DBT rec;
+ __txn_ckp_args *ckp_args;
+ __txn_regop_args *regop_args;
+ __txn_xa_regop_args *prep_args;
+ int ret, t_ret;
+ u_int32_t hi_txn, low_txn, rectype;
+ void *txninfo;
+
+ /* NULL/zero everything so the cleanup path is always safe. */
+ txninfo = NULL;
+ ckp_args = NULL;
+ prep_args = NULL;
+ regop_args = NULL;
+ ZERO_LSN(ckp_lsn);
+ ZERO_LSN(lsn);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ /*
+ * We need to consider the set of records between the most recent
+ * checkpoint LSN and the end of the log; any txn in that
+ * range, and only txns in that range, could still have been
+ * active, and thus prepared but not yet committed (PBNYC),
+ * when the old master died.
+ *
+ * Find the most recent checkpoint LSN, and get the record there.
+ * If there is no checkpoint in the log, start off by getting
+ * the very first record in the log instead.
+ */
+ memset(&rec, 0, sizeof(DBT));
+ if ((ret = __txn_getckp(dbenv, &lsn)) == 0) {
+ if ((ret = logc->get(logc, &lsn, &rec, DB_SET)) != 0) {
+ __db_err(dbenv,
+ "Checkpoint record at LSN [%lu][%lu] not found",
+ (u_long)lsn.file, (u_long)lsn.offset);
+ goto err;
+ }
+
+ if ((ret = __txn_ckp_read(dbenv, rec.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%lu][%lu]",
+ (u_long)lsn.file, (u_long)lsn.offset);
+ goto err;
+ }
+
+ ckp_lsn = ckp_args->ckp_lsn;
+ __os_free(dbenv, ckp_args);
+
+ if ((ret = logc->get(logc, &ckp_lsn, &rec, DB_SET)) != 0) {
+ __db_err(dbenv,
+ "Checkpoint LSN record [%lu][%lu] not found",
+ (u_long)ckp_lsn.file, (u_long)ckp_lsn.offset);
+ goto err;
+ }
+ } else if ((ret = logc->get(logc, &lsn, &rec, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND) {
+ /* An empty log means no PBNYC txns. */
+ ret = 0;
+ goto done;
+ }
+ __db_err(dbenv, "Attempt to get first log record failed");
+ goto err;
+ }
+
+ /*
+ * We use the same txnlist infrastructure that recovery does;
+ * it demands an estimate of the high and low txnids for
+ * initialization.
+ *
+ * First, the low txnid.
+ */
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&low_txn,
+ (u_int8_t *)rec.data + sizeof(u_int32_t), sizeof(low_txn));
+ if (low_txn != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &rec, DB_NEXT)) == 0);
+
+ /* If there are no txns, there are no PBNYC txns. */
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ goto done;
+ } else if (ret != 0)
+ goto err;
+
+ /* Now, the high txnid. */
+ if ((ret = logc->get(logc, &lsn, &rec, DB_LAST)) != 0) {
+ /*
+ * Note that DB_NOTFOUND is unacceptable here because we
+ * had to have looked at some log record to get this far.
+ */
+ __db_err(dbenv, "Final log record not found");
+ goto err;
+ }
+ /* Scan backward for the last record with a non-zero txnid. */
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&hi_txn,
+ (u_int8_t *)rec.data + sizeof(u_int32_t), sizeof(hi_txn));
+ if (hi_txn != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &rec, DB_PREV)) == 0);
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ goto done;
+ } else if (ret != 0)
+ goto err;
+
+ /* We have a high and low txnid. Initialise the txn list. */
+ if ((ret =
+ __db_txnlist_init(dbenv, low_txn, hi_txn, NULL, &txninfo)) != 0)
+ goto err;
+
+ /*
+ * Now, walk backward from the end of the log to ckp_lsn. Any
+ * prepares that we hit without first hitting a commit or
+ * abort belong to PBNYC txns, and we need to apply them and
+ * restore them to a prepared state.
+ *
+ * Note that we wind up applying transactions out of order.
+ * Since all PBNYC txns still held locks on the old master and
+ * were isolated, this should be safe.
+ */
+ for (ret = logc->get(logc, &lsn, &rec, DB_LAST);
+ ret == 0 && log_compare(&lsn, &ckp_lsn) > 0;
+ ret = logc->get(logc, &lsn, &rec, DB_PREV)) {
+ memcpy(&rectype, rec.data, sizeof(rectype));
+ switch (rectype) {
+ case DB___txn_regop:
+ /*
+ * It's a commit or abort--but we don't care
+ * which! Just add it to the list of txns
+ * that are resolved.
+ */
+ if ((ret = __txn_regop_read(dbenv, rec.data,
+ &regop_args)) != 0)
+ goto err;
+
+ ret = __db_txnlist_find(dbenv,
+ txninfo, regop_args->txnid->txnid);
+ if (ret == DB_NOTFOUND)
+ ret = __db_txnlist_add(dbenv, txninfo,
+ regop_args->txnid->txnid,
+ regop_args->opcode, &lsn);
+ __os_free(dbenv, regop_args);
+ break;
+ case DB___txn_xa_regop:
+ /*
+ * It's a prepare. If we haven't put the
+ * txn on our list yet, it hasn't been
+ * resolved, so apply and restore it.
+ */
+ if ((ret = __txn_xa_regop_read(dbenv, rec.data,
+ &prep_args)) != 0)
+ goto err;
+ ret = __db_txnlist_find(dbenv, txninfo,
+ prep_args->txnid->txnid);
+ if (ret == DB_NOTFOUND)
+ if ((ret = __rep_process_txn(dbenv, &rec)) == 0)
+ ret = __txn_restore_txn(dbenv,
+ &lsn, prep_args);
+ __os_free(dbenv, prep_args);
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* It's not an error to have hit the beginning of the log. */
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ /* "done" falls through to the common cursor-close cleanup. */
+done:
+err: t_ret = logc->close(logc, 0);
+
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+
+ return (ret == 0 ? t_ret : ret);
+}
+
+/*
+ * __rep_set_limit --
+ * Set a limit on the amount of data that will be sent during a single
+ * invocation of __rep_process_message.
+ */
+static int
+__rep_set_limit(dbenv, gbytes, bytes)
+ DB_ENV *dbenv;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+{
+ DB_REP *db_rep;
+ REP *rep;
+
+ PANIC_CHECK(dbenv);
+
+ if ((db_rep = dbenv->rep_handle) == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_limit: database environment not properly initialized");
+ return (__db_panic(dbenv, EINVAL));
+ }
+ rep = db_rep->region;
+
+ /* Carry any whole gigabytes in "bytes" over into "gbytes". */
+ if (bytes > GIGABYTE) {
+ gbytes += bytes / GIGABYTE;
+ bytes %= GIGABYTE;
+ }
+
+ /* Publish the normalized limit under the replication mutex. */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ rep->gbytes = gbytes;
+ rep->bytes = bytes;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ return (0);
+}
+
+/*
+ * __rep_set_request --
+ * Set the minimum and maximum number of log records that we wait
+ * before retransmitting.
+ * UNDOCUMENTED.
+ */
+static int
+__rep_set_request(dbenv, min, max)
+ DB_ENV *dbenv;
+ u_int32_t min;
+ u_int32_t max;
+{
+ DB_LOG *dblp;
+ DB_REP *db_rep;
+ LOG *lp;
+ REP *rep;
+
+ PANIC_CHECK(dbenv);
+
+ if ((db_rep = dbenv->rep_handle) == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_request: database environment not properly initialized");
+ return (__db_panic(dbenv, EINVAL));
+ }
+ rep = db_rep->region;
+
+ /* Record the new gap thresholds under the replication mutex. */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ rep->request_gap = min;
+ rep->max_gap = max;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /* Zero the log gap counters so the new settings take effect now. */
+ dblp = dbenv->lg_handle;
+ if (dblp != NULL && (lp = dblp->reginfo.primary) != NULL) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->wait_recs = 0;
+ lp->rcvd_recs = 0;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_set_rep_transport --
+ * Set the transport function for replication.
+ */
+static int
+__rep_set_rep_transport(dbenv, eid, callback)
+ DB_ENV *dbenv;
+ int eid;
+ int (*callback) __P((DB_ENV *, const DBT *, const DBT *, int, u_int32_t));
+{
+ DB_REP *db_rep;
+
+ PANIC_CHECK(dbenv);
+
+ /* The replication handle must exist before a transport can be set. */
+ if ((db_rep = dbenv->rep_handle) == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport: database environment not properly initialized");
+ return (__db_panic(dbenv, EINVAL));
+ }
+
+ /* A NULL send function would make replication useless; reject it. */
+ if (callback == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport: no send function specified");
+ return (EINVAL);
+ }
+
+ /* Environment ids are non-negative by definition. */
+ if (eid < 0) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport: eid must be greater than or equal to 0");
+ return (EINVAL);
+ }
+
+ db_rep->rep_send = callback;
+ dbenv->rep_eid = eid;
+ return (0);
+}
+
+/*
+ * __rep_elect --
+ * Called after master failure to hold/participate in an election for
+ * a new master.
+ */
+static int
+__rep_elect(dbenv, nsites, priority, timeout, eidp)
+ DB_ENV *dbenv;
+ int nsites, priority;
+ u_int32_t timeout;
+ int *eidp;
+{
+ DB_LOG *dblp;
+ DB_LSN lsn;
+ DB_REP *db_rep;
+ REP *rep;
+ int in_progress, ret, send_vote, tiebreaker;
+ u_int32_t pid, sec, usec;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_elect", DB_INIT_TXN);
+
+ /* Error checking. */
+ if (nsites <= 0) {
+ __db_err(dbenv,
+ "DB_ENV->rep_elect: nsites must be greater than 0");
+ return (EINVAL);
+ }
+ if (priority < 0) {
+ __db_err(dbenv,
+ "DB_ENV->rep_elect: priority may not be negative");
+ return (EINVAL);
+ }
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+
+ /* Our end-of-log LSN is our credential when voting. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /* Generate a randomized tiebreaker value. */
+ __os_id(&pid);
+ if ((ret = __os_clock(dbenv, &sec, &usec)) != 0)
+ return (ret);
+ tiebreaker = pid ^ sec ^ usec ^ (u_int)rand() ^ P_TO_UINT32(&pid);
+
+ /* DB_REP_NEWMASTER means we are already master; report ourselves. */
+ if ((ret = __rep_elect_init(dbenv,
+ &lsn, nsites, priority, tiebreaker, &in_progress)) != 0) {
+ if (ret == DB_REP_NEWMASTER) {
+ ret = 0;
+ *eidp = dbenv->rep_eid;
+ }
+ return (ret);
+ }
+
+ /* Only the site that starts the election broadcasts REP_ELECT. */
+ if (!in_progress) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Beginning an election");
+#endif
+ if ((ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_ELECT, NULL, NULL, 0)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTSEND, ret, NULL);
+ }
+
+ /* Now send vote */
+ if ((ret =
+ __rep_send_vote(dbenv, &lsn, nsites, priority, tiebreaker)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTVOTE1, ret, NULL);
+
+ /* Phase 1: wait for enough VOTE1s to pick a winner. */
+ ret = __rep_wait(dbenv, timeout, eidp, REP_F_EPHASE1);
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTWAIT1, ret, NULL);
+ switch (ret) {
+ case 0:
+ /* Check if election complete or phase complete. */
+ if (*eidp != DB_EID_INVALID)
+ return (0);
+ goto phase2;
+ case DB_TIMEOUT:
+ break;
+ default:
+ goto err;
+ }
+ /*
+ * If we got here, we haven't heard from everyone, but we've
+ * run out of time, so it's time to decide if we have enough
+ * votes to pick a winner and if so, to send out a vote to
+ * the winner.
+ */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ send_vote = DB_EID_INVALID;
+ if (rep->sites > rep->nsites / 2) {
+ /* We think we've seen enough to cast a vote. */
+ send_vote = rep->winner;
+ if (rep->winner == rep->eid)
+ rep->votes++;
+ F_CLR(rep, REP_F_EPHASE1);
+ F_SET(rep, REP_F_EPHASE2);
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (send_vote == DB_EID_INVALID) {
+ /* We do not have enough votes to elect. */
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv,
+ "Not enough votes to elect: received %d of %d",
+ rep->sites, rep->nsites);
+#endif
+ ret = DB_REP_UNAVAIL;
+ goto err;
+
+ }
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION) &&
+ send_vote != rep->eid)
+ __db_err(dbenv, "Sending vote");
+#endif
+
+ /* If we voted for ourselves above, there is nothing to send. */
+ if (send_vote != rep->eid && (ret = __rep_send_message(dbenv,
+ send_vote, REP_VOTE2, NULL, NULL, 0)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTVOTE2, ret, NULL);
+
+ /* Phase 2: wait for the NEWMASTER announcement. */
+phase2: ret = __rep_wait(dbenv, timeout, eidp, REP_F_EPHASE2);
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTWAIT2, ret, NULL);
+ switch (ret) {
+ case 0:
+ return (0);
+ case DB_TIMEOUT:
+ ret = DB_REP_UNAVAIL;
+ break;
+ default:
+ goto err;
+ }
+
+DB_TEST_RECOVERY_LABEL
+err: MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ELECTION_DONE(rep);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Ended election with %d", ret);
+#endif
+ return (ret);
+}
+
+/*
+ * __rep_elect_init
+ * Initialize an election. Sets beginp non-zero if the election is
+ * already in progress; makes it 0 otherwise.
+ */
+static int
+__rep_elect_init(dbenv, lsnp, nsites, priority, tiebreaker, beginp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ int nsites, priority, tiebreaker, *beginp;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int ret, *tally;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ ret = 0;
+
+ /* We may miscount, as we don't hold the replication mutex here. */
+ rep->stat.st_elections++;
+
+ /* If we are already a master; simply broadcast that fact and return. */
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ (void)__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWMASTER, lsnp, NULL, 0);
+ rep->stat.st_elections_won++;
+ return (DB_REP_NEWMASTER);
+ }
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ *beginp = IN_ELECTION(rep);
+ if (!*beginp) {
+ /*
+ * Make sure that we always initialize all the election fields
+ * before putting ourselves in an election state. That means
+ * issuing calls that can fail (allocation) before setting all
+ * the variables.
+ */
+ if (nsites > rep->asites &&
+ (ret = __rep_grow_sites(dbenv, nsites)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTINIT, ret, NULL);
+ rep->nsites = nsites;
+ rep->priority = priority;
+ rep->votes = 0;
+ rep->master_id = DB_EID_INVALID;
+ F_SET(rep, REP_F_EPHASE1);
+
+ /* We have always heard from ourselves. */
+ rep->sites = 1;
+ tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
+ tally[0] = rep->eid;
+
+ if (priority != 0) {
+ /* Make ourselves the winner to start. */
+ rep->winner = rep->eid;
+ rep->w_priority = priority;
+ rep->w_gen = rep->gen;
+ rep->w_lsn = *lsnp;
+ rep->w_tiebreaker = tiebreaker;
+ } else {
+ /* Priority 0 sites never stand for election. */
+ rep->winner = DB_EID_INVALID;
+ rep->w_priority = 0;
+ rep->w_gen = 0;
+ ZERO_LSN(rep->w_lsn);
+ rep->w_tiebreaker = 0;
+ }
+ }
+DB_TEST_RECOVERY_LABEL
+err: MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+/*
+ * __rep_wait --
+ * Poll until the election phase named by "flags" completes and a
+ * master is known, or the timeout expires. Returns 0 with *eidp set
+ * to the master's id on success, DB_TIMEOUT otherwise.
+ */
+static int
+__rep_wait(dbenv, timeout, eidp, flags)
+ DB_ENV *dbenv;
+ u_int32_t timeout;
+ int *eidp;
+ u_int32_t flags;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int finished, ret;
+ u_int32_t interval;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ /*
+ * The user specifies an overall timeout, but checking is cheap
+ * and the timeout may be a generous upper bound, so poll at the
+ * smaller of .5s and timeout/10 (at least one microsecond).
+ */
+ interval = timeout > 5000000 ? 500000 : timeout / 10;
+ if (interval == 0)
+ interval = 1;
+
+ for (; timeout > 0;
+ timeout = timeout > interval ? timeout - interval : 0) {
+ if ((ret = __os_sleep(dbenv, 0, interval)) != 0)
+ return (ret);
+
+ /* Sample the election state under the replication mutex. */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ finished =
+ !F_ISSET(rep, flags) && rep->master_id != DB_EID_INVALID;
+ *eidp = rep->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (finished)
+ return (0);
+ }
+ return (DB_TIMEOUT);
+}
+
+/*
+ * __rep_flush --
+ * Re-push the last log record to all clients, in case they've lost
+ * messages and don't know it.
+ */
+static int
+__rep_flush(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOGC *logc;
+ DB_LSN lsn;
+ DBT rec;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_stat", DB_INIT_TXN);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ memset(&rec, 0, sizeof(rec));
+ memset(&lsn, 0, sizeof(lsn));
+
+ /* Rebroadcast the final log record to every client. */
+ if ((ret = logc->get(logc, &lsn, &rec, DB_LAST)) == 0)
+ ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_LOG, &lsn, &rec, 0);
+
+ /* Always close the cursor; preserve the first failure seen. */
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __rep_stat --
+ * Fetch replication statistics.
+ */
+static int
+__rep_stat(dbenv, statp, flags)
+ DB_ENV *dbenv;
+ DB_REP_STAT **statp;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_REP *db_rep;
+ DB_REP_STAT *stats;
+ LOG *lp;
+ REP *rep;
+ u_int32_t queued;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_stat", DB_INIT_TXN);
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ *statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->rep_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ /* Allocate a stat struct to return to the user. */
+ if ((ret = __os_umalloc(dbenv, sizeof(DB_REP_STAT), &stats)) != 0)
+ return (ret);
+
+ /* Snapshot the counters under the replication mutex. */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ memcpy(stats, &rep->stat, sizeof(*stats));
+
+ /* Copy out election stats. */
+ if (IN_ELECTION(rep)) {
+ if (F_ISSET(rep, REP_F_EPHASE1))
+ stats->st_election_status = 1;
+ else if (F_ISSET(rep, REP_F_EPHASE2))
+ stats->st_election_status = 2;
+
+ stats->st_election_nsites = rep->sites;
+ stats->st_election_cur_winner = rep->winner;
+ stats->st_election_priority = rep->w_priority;
+ stats->st_election_gen = rep->w_gen;
+ stats->st_election_lsn = rep->w_lsn;
+ stats->st_election_votes = rep->votes;
+ stats->st_election_tiebreaker = rep->w_tiebreaker;
+ }
+
+ /* Copy out other info that's protected by the rep mutex. */
+ stats->st_env_id = rep->eid;
+ stats->st_env_priority = rep->priority;
+ stats->st_nsites = rep->nsites;
+ stats->st_master = rep->master_id;
+ stats->st_gen = rep->gen;
+
+ if (F_ISSET(rep, REP_F_MASTER))
+ stats->st_status = DB_REP_MASTER;
+ else if (F_ISSET(rep, REP_F_LOGSONLY))
+ stats->st_status = DB_REP_LOGSONLY;
+ else if (F_ISSET(rep, REP_F_UPGRADE))
+ stats->st_status = DB_REP_CLIENT;
+ else
+ stats->st_status = 0;
+
+ /* On DB_STAT_CLEAR, keep only the current queued count alive. */
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ queued = rep->stat.st_log_queued;
+ memset(&rep->stat, 0, sizeof(rep->stat));
+ rep->stat.st_log_queued = rep->stat.st_log_queued_total =
+ rep->stat.st_log_queued_max = queued;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * Log-related replication info is stored in the log system and
+ * protected by the log region lock.
+ * NOTE(review): rep's flags are re-read here after the rep mutex
+ * was dropped, so the LSN fields could reflect a slightly newer
+ * state than the counters above -- presumably acceptable for stats.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (F_ISSET(rep, REP_ISCLIENT)) {
+ stats->st_next_lsn = lp->ready_lsn;
+ stats->st_waiting_lsn = lp->waiting_lsn;
+ } else {
+ if (F_ISSET(rep, REP_F_MASTER))
+ stats->st_next_lsn = lp->lsn;
+ else
+ ZERO_LSN(stats->st_next_lsn);
+ ZERO_LSN(stats->st_waiting_lsn);
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ *statp = stats;
+ return (0);
+}
diff --git a/storage/bdb/rep/rep_record.c b/storage/bdb/rep/rep_record.c
new file mode 100644
index 00000000000..d3619f509b4
--- /dev/null
+++ b/storage/bdb/rep/rep_record.c
@@ -0,0 +1,1513 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: rep_record.c,v 1.111 2002/09/11 19:39:11 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __rep_apply __P((DB_ENV *, REP_CONTROL *, DBT *));
+static int __rep_collect_txn __P((DB_ENV *, DB_LSN *, LSN_COLLECTION *));
+static int __rep_lsn_cmp __P((const void *, const void *));
+static int __rep_newfile __P((DB_ENV *, REP_CONTROL *, DBT *, DB_LSN *));
+
+#define IS_SIMPLE(R) ((R) != DB___txn_regop && \
+ (R) != DB___txn_ckp && (R) != DB___dbreg_register)
+
+/*
+ * __rep_process_message --
+ *
+ * This routine takes an incoming message and processes it.
+ *
+ * control: contains the control fields from the record
+ * rec: contains the actual record
+ * eidp: contains the machine id of the sender of the message;
+ * in the case of a DB_NEWMASTER message, returns the eid
+ * of the new master.
+ *
+ * PUBLIC: int __rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+ */
+int
+__rep_process_message(dbenv, control, rec, eidp)
+ DB_ENV *dbenv;
+ DBT *control, *rec;
+ int *eidp;
+{
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN init_lsn, lsn, newfilelsn, oldfilelsn;
+ DB_REP *db_rep;
+ DBT *d, data_dbt, lsndbt, mylog;
+ LOG *lp;
+ REP *rep;
+ REP_CONTROL *rp;
+ REP_VOTE_INFO *vi;
+ u_int32_t bytes, gen, gbytes, type, unused;
+ int check_limit, cmp, done, do_req, i;
+ int master, old, recovering, ret, t_ret, *tally;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_stat", DB_INIT_TXN);
+
+ /* Control argument must be non-Null. */
+ if (control == NULL || control->size == 0) {
+ __db_err(dbenv,
+ "DB_ENV->rep_process_message: control argument must be specified");
+ return (EINVAL);
+ }
+
+ ret = 0;
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ gen = rep->gen;
+ recovering = F_ISSET(rep, REP_F_RECOVER);
+
+ rep->stat.st_msgs_processed++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ rp = (REP_CONTROL *)control->data;
+
+#if 0
+ __rep_print_message(dbenv, *eidp, rp, "rep_process_message");
+#endif
+
+ /* Complain if we see an improper version number. */
+ if (rp->rep_version != DB_REPVERSION) {
+ __db_err(dbenv,
+ "unexpected replication message version %d, expected %d",
+ rp->rep_version, DB_REPVERSION);
+ return (EINVAL);
+ }
+ if (rp->log_version != DB_LOGVERSION) {
+ __db_err(dbenv,
+ "unexpected log record version %d, expected %d",
+ rp->log_version, DB_LOGVERSION);
+ return (EINVAL);
+ }
+
+ /*
+ * Check for generation number matching. Ignore any old messages
+ * except requests that are indicative of a new client that needs
+ * to get in sync.
+ */
+ if (rp->gen < gen && rp->rectype != REP_ALIVE_REQ &&
+ rp->rectype != REP_NEWCLIENT && rp->rectype != REP_MASTER_REQ) {
+ /*
+ * We don't hold the rep mutex, and could miscount if we race.
+ */
+ rep->stat.st_msgs_badgen++;
+ return (0);
+ }
+ if (rp->gen > gen && rp->rectype != REP_ALIVE &&
+ rp->rectype != REP_NEWMASTER)
+ return (__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0));
+
+ /*
+ * We need to check if we're in recovery and if we are
+ * then we need to ignore any messages except VERIFY, VOTE,
+ * ELECT (the master might fail while we are recovering), and
+ * ALIVE_REQ.
+ */
+ if (recovering)
+ switch(rp->rectype) {
+ case REP_ALIVE:
+ case REP_ALIVE_REQ:
+ case REP_ELECT:
+ case REP_NEWCLIENT:
+ case REP_NEWMASTER:
+ case REP_NEWSITE:
+ case REP_VERIFY:
+ R_LOCK(dbenv, &dblp->reginfo);
+ cmp = log_compare(&lp->verify_lsn, &rp->lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (cmp != 0)
+ goto skip;
+ /* FALLTHROUGH */
+ case REP_VOTE1:
+ case REP_VOTE2:
+ break;
+ default:
+skip: /*
+ * We don't hold the rep mutex, and could
+ * miscount if we race.
+ */
+ rep->stat.st_msgs_recover++;
+
+ /* Check for need to retransmit. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ do_req = *eidp == rep->master_id &&
+ ++lp->rcvd_recs >= lp->wait_recs;
+ if (do_req) {
+ lp->wait_recs *= 2;
+ if (lp->wait_recs + rep->max_gap)
+ lp->wait_recs = rep->max_gap;
+ lp->rcvd_recs = 0;
+ lsn = lp->verify_lsn;
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (do_req)
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_VERIFY_REQ, &lsn, NULL, 0);
+
+ return (ret);
+ }
+
+ switch(rp->rectype) {
+ case REP_ALIVE:
+ ANYSITE(dbenv);
+ if (rp->gen > gen && rp->flags)
+ return (__rep_new_master(dbenv, rp, *eidp));
+ break;
+ case REP_ALIVE_REQ:
+ ANYSITE(dbenv);
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_ALIVE, &lsn, NULL,
+ F_ISSET(dbenv, DB_ENV_REP_MASTER) ? 1 : 0));
+ case REP_ALL_REQ:
+ MASTER_ONLY(dbenv);
+ gbytes = bytes = 0;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ gbytes = rep->gbytes;
+ bytes = rep->bytes;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ check_limit = gbytes != 0 || bytes != 0;
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ oldfilelsn = lsn = rp->lsn;
+ type = REP_LOG;
+ for (ret = logc->get(logc, &rp->lsn, &data_dbt, DB_SET);
+ ret == 0 && type == REP_LOG;
+ ret = logc->get(logc, &lsn, &data_dbt, DB_NEXT)) {
+ /*
+ * lsn.offset will only be 0 if this is the
+ * beginning of the log; DB_SET, but not DB_NEXT,
+ * can set the log cursor to [n][0].
+ */
+ if (lsn.offset == 0)
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_NEWFILE, &lsn, NULL, 0);
+ else {
+ /*
+ * DB_NEXT will never run into offsets
+ * of 0; thus, when a log file changes,
+ * we'll have a real log record with
+ * some lsn [n][m], and we'll also want to send
+ * a NEWFILE message with lsn [n][0].
+ * So that the client can detect gaps,
+ * send in the rec parameter the
+ * last LSN in the old file.
+ */
+ if (lsn.file != oldfilelsn.file) {
+ newfilelsn.file = lsn.file;
+ newfilelsn.offset = 0;
+
+ memset(&lsndbt, 0, sizeof(DBT));
+ lsndbt.size = sizeof(DB_LSN);
+ lsndbt.data = &oldfilelsn;
+
+ if ((ret = __rep_send_message(dbenv,
+ *eidp, REP_NEWFILE, &newfilelsn,
+ &lsndbt, 0)) != 0)
+ break;
+ }
+ if (check_limit) {
+ /*
+ * data_dbt.size is only the size of
+ * the log record; it doesn't count
+ * the size of the control structure.
+ * Factor that in as well so we're
+ * not off by a lot if our log
+ * records are small.
+ */
+ while (bytes < data_dbt.size +
+ sizeof(REP_CONTROL)) {
+ if (gbytes > 0) {
+ bytes += GIGABYTE;
+ --gbytes;
+ continue;
+ }
+ /*
+ * We don't hold the rep mutex,
+ * and may miscount.
+ */
+ rep->stat.st_nthrottles++;
+ type = REP_LOG_MORE;
+ goto send;
+ }
+ bytes -= (data_dbt.size +
+ sizeof(REP_CONTROL));
+ }
+send: ret = __rep_send_message(dbenv, *eidp,
+ type, &lsn, &data_dbt, 0);
+ }
+
+ /*
+ * In case we're about to change files and need it
+ * for a NEWFILE message, save the current LSN.
+ */
+ oldfilelsn = lsn;
+ }
+
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_ELECT:
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ rep->gen++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ret = IN_ELECTION(rep) ? 0 : DB_REP_HOLDELECTION;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+#ifdef NOTYET
+ case REP_FILE: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_FILE_REQ:
+ MASTER_ONLY(dbenv);
+ return (__rep_send_file(dbenv, rec, *eidp));
+ break;
+#endif
+ case REP_LOG:
+ case REP_LOG_MORE:
+ CLIENT_ONLY(dbenv);
+ if ((ret = __rep_apply(dbenv, rp, rec)) != 0)
+ return (ret);
+ if (rp->rectype == REP_LOG_MORE) {
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ master = rep->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ ret = __rep_send_message(dbenv, master,
+ REP_ALL_REQ, &lsn, NULL, 0);
+ }
+ return (ret);
+ case REP_LOG_REQ:
+ MASTER_ONLY(dbenv);
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ lsn = rp->lsn;
+
+ /*
+ * There are three different cases here.
+ * 1. We asked for a particular LSN and got it.
+ * 2. We asked for an LSN of X,0 which is invalid and got the
+ * first log record in a particular file.
+ * 3. We asked for an LSN and it's not found because it is
+ * beyond the end of a log file and we need a NEWFILE msg.
+ */
+ ret = logc->get(logc, &rp->lsn, &data_dbt, DB_SET);
+ cmp = log_compare(&lsn, &rp->lsn);
+
+ if (ret == 0 && cmp == 0) /* Case 1 */
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_LOG, &rp->lsn, &data_dbt, 0);
+ else if (ret == DB_NOTFOUND ||
+ (ret == 0 && cmp < 0 && rp->lsn.offset == 0))
+ /* Cases 2 and 3: Send a NEWFILE message. */
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_NEWFILE, &lsn, NULL, 0);
+
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_NEWSITE:
+ /* We don't hold the rep mutex, and may miscount. */
+ rep->stat.st_newsites++;
+
+ /* This is a rebroadcast; simply tell the application. */
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ (void)__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0);
+ }
+ return (DB_REP_NEWSITE);
+ case REP_NEWCLIENT:
+ /*
+ * This message was received and should have resulted in the
+ * application entering the machine ID in its machine table.
+ * We respond to this with an ALIVE to send relevant information
+ * to the new client. But first, broadcast the new client's
+ * record to all the clients.
+ */
+ if ((ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWSITE, &rp->lsn, rec, 0)) != 0)
+ return (ret);
+
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
+
+ /* FALLTHROUGH */
+ case REP_MASTER_REQ:
+ ANYSITE(dbenv);
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ case REP_NEWFILE:
+ CLIENT_ONLY(dbenv);
+ return (__rep_apply(dbenv, rp, rec));
+ case REP_NEWMASTER:
+ ANYSITE(dbenv);
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER) &&
+ *eidp != dbenv->rep_eid) {
+ /* We don't hold the rep mutex, and may miscount. */
+ rep->stat.st_dupmasters++;
+ return (DB_REP_DUPMASTER);
+ }
+ return (__rep_new_master(dbenv, rp, *eidp));
+ case REP_PAGE: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_PAGE_REQ: /* TODO */
+ MASTER_ONLY(dbenv);
+ break;
+ case REP_PLIST: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_PLIST_REQ: /* TODO */
+ MASTER_ONLY(dbenv);
+ break;
+ case REP_VERIFY:
+ CLIENT_ONLY(dbenv);
+ DB_ASSERT((F_ISSET(rep, REP_F_RECOVER) &&
+ !IS_ZERO_LSN(lp->verify_lsn)) ||
+ (!F_ISSET(rep, REP_F_RECOVER) &&
+ IS_ZERO_LSN(lp->verify_lsn)));
+ if (IS_ZERO_LSN(lp->verify_lsn))
+ return (0);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&mylog, 0, sizeof(mylog));
+ if ((ret = logc->get(logc, &rp->lsn, &mylog, DB_SET)) != 0)
+ goto rep_verify_err;
+ if (mylog.size == rec->size &&
+ memcmp(mylog.data, rec->data, rec->size) == 0) {
+ /*
+ * If we're a logs-only client, we can simply truncate
+ * the log to the point where it last agreed with the
+ * master's; otherwise, recover to that point.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ ZERO_LSN(lp->verify_lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (F_ISSET(dbenv, DB_ENV_REP_LOGSONLY)) {
+ INIT_LSN(init_lsn);
+ if ((ret = dbenv->log_flush(dbenv,
+ &rp->lsn)) != 0 ||
+ (ret = __log_vtruncate(dbenv,
+ &rp->lsn, &init_lsn)) != 0)
+ goto rep_verify_err;
+ } else if ((ret = __db_apprec(dbenv, &rp->lsn, 0)) != 0)
+ goto rep_verify_err;
+
+ /*
+ * The log has been truncated (either by __db_apprec or
+ * directly). We want to make sure we're waiting for
+ * the LSN at the new end-of-log, not some later point.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->ready_lsn = lp->lsn;
+ ZERO_LSN(lp->waiting_lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * Discard any log records we have queued; we're
+ * about to re-request them, and can't trust the
+ * ones in the queue.
+ */
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ if ((ret = db_rep->rep_db->truncate(db_rep->rep_db,
+ NULL, &unused, 0)) != 0) {
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ goto rep_verify_err;
+ }
+ rep->stat.st_log_queued = 0;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ F_CLR(rep, REP_F_RECOVER);
+
+ /*
+ * If the master_id is invalid, this means that since
+ * the last record was sent, somebody declared an
+ * election and we may not have a master to request
+ * things of.
+ *
+ * This is not an error; when we find a new master,
+ * we'll re-negotiate where the end of the log is and
+ * try to bring ourselves up to date again anyway.
+ */
+ if ((master = rep->master_id) == DB_EID_INVALID) {
+ DB_ASSERT(IN_ELECTION(rep));
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = 0;
+ } else {
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = __rep_send_message(dbenv, master,
+ REP_ALL_REQ, &rp->lsn, NULL, 0);
+ }
+ } else if ((ret =
+ logc->get(logc, &lsn, &mylog, DB_PREV)) == 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->verify_lsn = lsn;
+ lp->rcvd_recs = 0;
+ lp->wait_recs = rep->request_gap;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ ret = __rep_send_message(dbenv,
+ *eidp, REP_VERIFY_REQ, &lsn, NULL, 0);
+ }
+
+rep_verify_err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_VERIFY_FAIL:
+ rep->stat.st_outdated++;
+ return (DB_REP_OUTDATED);
+ case REP_VERIFY_REQ:
+ MASTER_ONLY(dbenv);
+ type = REP_VERIFY;
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ d = &data_dbt;
+ memset(d, 0, sizeof(data_dbt));
+ F_SET(logc, DB_LOG_SILENT_ERR);
+ ret = logc->get(logc, &rp->lsn, d, DB_SET);
+ /*
+ * If the LSN was invalid, then we might get a not
+ * found, we might get an EIO, we could get anything.
+ * If we get a DB_NOTFOUND, then there is a chance that
+ * the LSN comes before the first file present in which
+ * case we need to return a fail so that the client can return
+ * a DB_OUTDATED.
+ */
+ if (ret == DB_NOTFOUND &&
+ __log_is_outdated(dbenv, rp->lsn.file, &old) == 0 &&
+ old != 0)
+ type = REP_VERIFY_FAIL;
+
+ if (ret != 0)
+ d = NULL;
+
+ ret = __rep_send_message(dbenv, *eidp, type, &rp->lsn, d, 0);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_VOTE1:
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Master received vote");
+#endif
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+
+ vi = (REP_VOTE_INFO *)rec->data;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * If you get a vote and you're not in an election, simply
+ * return an indicator to hold an election which will trigger
+ * this site to send its vote again.
+ */
+ if (!IN_ELECTION(rep)) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv,
+ "Not in election, but received vote1");
+#endif
+ ret = DB_REP_HOLDELECTION;
+ goto unlock;
+ }
+
+ if (F_ISSET(rep, REP_F_EPHASE2))
+ goto unlock;
+
+ /* Check if this site knows about more sites than we do. */
+ if (vi->nsites > rep->nsites)
+ rep->nsites = vi->nsites;
+
+ /* Check if we've heard from this site already. */
+ tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
+ for (i = 0; i < rep->sites; i++) {
+ if (tally[i] == *eidp)
+ /* Duplicate vote. */
+ goto unlock;
+ }
+
+ /*
+ * We are keeping vote, let's see if that changes our count of
+ * the number of sites.
+ */
+ if (rep->sites + 1 > rep->nsites)
+ rep->nsites = rep->sites + 1;
+ if (rep->nsites > rep->asites &&
+ (ret = __rep_grow_sites(dbenv, rep->nsites)) != 0)
+ goto unlock;
+
+ tally[rep->sites] = *eidp;
+ rep->sites++;
+
+ /*
+ * Change winners if the incoming record has a higher
+ * priority, or an equal priority but a larger LSN, or
+ * an equal priority and LSN but higher "tiebreaker" value.
+ */
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) {
+ __db_err(dbenv,
+ "%s(eid)%d (pri)%d (gen)%d (sites)%d [%d,%d]",
+ "Existing vote: ",
+ rep->winner, rep->w_priority, rep->w_gen,
+ rep->sites, rep->w_lsn.file, rep->w_lsn.offset);
+ __db_err(dbenv,
+ "Incoming vote: (eid)%d (pri)%d (gen)%d [%d,%d]",
+ *eidp, vi->priority, rp->gen, rp->lsn.file,
+ rp->lsn.offset);
+ }
+#endif
+ cmp = log_compare(&rp->lsn, &rep->w_lsn);
+ if (vi->priority > rep->w_priority ||
+ (vi->priority != 0 && vi->priority == rep->w_priority &&
+ (cmp > 0 ||
+ (cmp == 0 && vi->tiebreaker > rep->w_tiebreaker)))) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Accepting new vote");
+#endif
+ rep->winner = *eidp;
+ rep->w_priority = vi->priority;
+ rep->w_lsn = rp->lsn;
+ rep->w_gen = rp->gen;
+ }
+ master = rep->winner;
+ lsn = rep->w_lsn;
+ done = rep->sites == rep->nsites && rep->w_priority != 0;
+ if (done) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) {
+ __db_err(dbenv, "Phase1 election done");
+ __db_err(dbenv, "Voting for %d%s",
+ master, master == rep->eid ? "(self)" : "");
+ }
+#endif
+ F_CLR(rep, REP_F_EPHASE1);
+ F_SET(rep, REP_F_EPHASE2);
+ }
+
+ if (done && master == rep->eid) {
+ rep->votes++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (0);
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /* Vote for someone else. */
+ if (done)
+ return (__rep_send_message(dbenv,
+ master, REP_VOTE2, NULL, NULL, 0));
+
+ /* Election is still going on. */
+ break;
+ case REP_VOTE2:
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "We received a vote%s",
+ F_ISSET(dbenv, DB_ENV_REP_MASTER) ?
+ " (master)" : "");
+#endif
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ rep->stat.st_elections_won++;
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+
+ /* If we have priority 0, we should never get a vote. */
+ DB_ASSERT(rep->priority != 0);
+
+ if (!IN_ELECTION(rep)) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Not in election, got vote");
+#endif
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (DB_REP_HOLDELECTION);
+ }
+ /* avoid counting duplicates. */
+ rep->votes++;
+ done = rep->votes > rep->nsites / 2;
+ if (done) {
+ rep->master_id = rep->eid;
+ rep->gen = rep->w_gen + 1;
+ ELECTION_DONE(rep);
+ F_CLR(rep, REP_F_UPGRADE);
+ F_SET(rep, REP_F_MASTER);
+ *eidp = rep->master_id;
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv,
+ "Got enough votes to win; election done; winner is %d",
+ rep->master_id);
+#endif
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (done) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /* Declare me the winner. */
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "I won, sending NEWMASTER");
+#endif
+ rep->stat.st_elections_won++;
+ if ((ret = __rep_send_message(dbenv, DB_EID_BROADCAST,
+ REP_NEWMASTER, &lsn, NULL, 0)) != 0)
+ break;
+ return (DB_REP_NEWMASTER);
+ }
+ break;
+ default:
+ __db_err(dbenv,
+ "DB_ENV->rep_process_message: unknown replication message: type %lu",
+ (u_long)rp->rectype);
+ return (EINVAL);
+ }
+
+ return (0);
+
+unlock: MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+/*
+ * __rep_apply --
+ *
+ * Handle incoming log records on a client, applying when possible and
+ * entering into the bookkeeping table otherwise. This is the guts of
+ * the routine that handles the state machine that describes how we
+ * process and manage incoming log records.
+ */
+static int
+__rep_apply(dbenv, rp, rec)
+ DB_ENV *dbenv;
+ REP_CONTROL *rp;
+ DBT *rec;
+{
+ __dbreg_register_args dbreg_args;
+ __txn_ckp_args ckp_args;
+ DB_REP *db_rep;
+ DBT control_dbt, key_dbt, lsn_dbt, nextrec_dbt, rec_dbt;
+ DB *dbp;
+ DBC *dbc;
+ DB_LOG *dblp;
+ DB_LSN ckp_lsn, lsn, newfile_lsn, next_lsn, waiting_lsn;
+ LOG *lp;
+ REP *rep;
+ REP_CONTROL lsn_rc;
+ u_int32_t rectype, txnid;
+ int cmp, do_req, eid, have_mutex, ret, t_ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dbp = db_rep->rep_db;
+ dbc = NULL;
+ have_mutex = ret = 0;
+ memset(&control_dbt, 0, sizeof(control_dbt));
+ memset(&rec_dbt, 0, sizeof(rec_dbt));
+
+ /*
+ * If this is a log record and it's the next one in line, simply
+ * write it to the log. If it's a "normal" log record, i.e., not
+ * a COMMIT or CHECKPOINT or something that needs immediate processing,
+ * just return. If it's a COMMIT, CHECKPOINT or LOG_REGISTER (i.e.,
+ * not SIMPLE), handle it now. If it's a NEWFILE record, then we
+ * have to be prepared to deal with a logfile change.
+ */
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp = dblp->reginfo.primary;
+ cmp = log_compare(&rp->lsn, &lp->ready_lsn);
+
+ /*
+ * This is written to assume that you don't end up with a lot of
+ * records after a hole. That is, it optimizes for the case where
+ * there is only a record or two after a hole. If you have a lot
+ * of records after a hole, what you'd really want to do is write
+ * all of them and then process all the commits, checkpoints, etc.
+ * together. That is more complicated processing that we can add
+ * later if necessary.
+ *
+ * That said, I really don't want to do db operations holding the
+ * log mutex, so the synchronization here is tricky.
+ */
+ if (cmp == 0) {
+ /* We got the log record that we are expecting. */
+ if (rp->rectype == REP_NEWFILE) {
+newfile: ret = __rep_newfile(dbenv, rp, rec, &lp->ready_lsn);
+
+ /* Make this evaluate to a simple rectype. */
+ rectype = 0;
+ } else {
+ DB_ASSERT(log_compare(&rp->lsn, &lp->lsn) == 0);
+ ret = __log_rep_put(dbenv, &rp->lsn, rec);
+ lp->ready_lsn = lp->lsn;
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ if (ret == 0)
+ /*
+ * We may miscount if we race, since we
+ * don't currently hold the rep mutex.
+ */
+ rep->stat.st_log_records++;
+ }
+ while (ret == 0 && IS_SIMPLE(rectype) &&
+ log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0) {
+ /*
+ * We just filled in a gap in the log record stream.
+ * Write subsequent records to the log.
+ */
+gap_check: lp->wait_recs = 0;
+ lp->rcvd_recs = 0;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (have_mutex == 0) {
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ have_mutex = 1;
+ }
+ if (dbc == NULL &&
+ (ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ goto err;
+
+ /* The DBTs need to persist through another call. */
+ F_SET(&control_dbt, DB_DBT_REALLOC);
+ F_SET(&rec_dbt, DB_DBT_REALLOC);
+ if ((ret = dbc->c_get(dbc,
+ &control_dbt, &rec_dbt, DB_RMW | DB_FIRST)) != 0)
+ goto err;
+
+ rp = (REP_CONTROL *)control_dbt.data;
+ rec = &rec_dbt;
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ R_LOCK(dbenv, &dblp->reginfo);
+ /*
+ * We need to check again, because it's possible that
+ * some other thread of control changed the waiting_lsn
+ * or removed that record from the database.
+ */
+ if (log_compare(&lp->ready_lsn, &rp->lsn) == 0) {
+ if (rp->rectype != REP_NEWFILE) {
+ DB_ASSERT(log_compare
+ (&rp->lsn, &lp->lsn) == 0);
+ ret = __log_rep_put(dbenv,
+ &rp->lsn, rec);
+ lp->ready_lsn = lp->lsn;
+
+ /*
+ * We may miscount if we race, since we
+ * don't currently hold the rep mutex.
+ */
+ if (ret == 0)
+ rep->stat.st_log_records++;
+ } else {
+ ret = __rep_newfile(dbenv,
+ rp, rec, &lp->ready_lsn);
+ rectype = 0;
+ }
+ waiting_lsn = lp->waiting_lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+
+ /*
+ * We may miscount, as we don't hold the rep
+ * mutex.
+ */
+ --rep->stat.st_log_queued;
+
+ /*
+ * Update waiting_lsn. We need to move it
+ * forward to the LSN of the next record
+ * in the queue.
+ */
+ memset(&lsn_dbt, 0, sizeof(lsn_dbt));
+ F_SET(&lsn_dbt, DB_DBT_USERMEM);
+ lsn_dbt.data = &lsn_rc;
+ lsn_dbt.ulen = sizeof(lsn_rc);
+ memset(&lsn_rc, 0, sizeof(lsn_rc));
+
+ /*
+ * If the next item in the database is a log
+ * record--the common case--we're not
+ * interested in its contents, just in its LSN.
+ * If it's a newfile message, though, the
+ * data field may be the LSN of the last
+ * record in the old file, and we need to use
+ * that to determine whether or not there's
+ * a gap.
+ *
+ * Optimize both these cases by doing a partial
+ * get of the data item. If it's a newfile
+ * record, we'll get the whole LSN, and if
+ * it's not, we won't waste time allocating.
+ */
+ memset(&nextrec_dbt, 0, sizeof(nextrec_dbt));
+ F_SET(&nextrec_dbt,
+ DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ nextrec_dbt.ulen =
+ nextrec_dbt.dlen = sizeof(newfile_lsn);
+ ZERO_LSN(newfile_lsn);
+ nextrec_dbt.data = &newfile_lsn;
+
+ ret = dbc->c_get(dbc,
+ &lsn_dbt, &nextrec_dbt, DB_NEXT);
+ if (ret != DB_NOTFOUND && ret != 0)
+ goto err;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (ret == DB_NOTFOUND) {
+ /*
+ * Do a quick double-check to make
+ * sure waiting_lsn hasn't changed.
+ * It's possible that between the
+ * DB_NOTFOUND return and the R_LOCK,
+ * some record was added to the
+ * database, and we don't want to lose
+ * sight of the fact that it's there.
+ */
+ if (log_compare(&waiting_lsn,
+ &lp->waiting_lsn) == 0)
+ ZERO_LSN(
+ lp->waiting_lsn);
+
+ /*
+ * Whether or not the current record is
+ * simple, there's no next one, and
+ * therefore we haven't got anything
+ * else to do right now. Break out.
+ */
+ break;
+ }
+
+ DB_ASSERT(lsn_dbt.size == sizeof(lsn_rc));
+
+ /*
+ * NEWFILE records have somewhat convoluted
+ * semantics, so there are five cases
+ * pertaining to what the newly-gotten record
+ * is and what we want to do about it.
+ *
+ * 1) This isn't a NEWFILE record. Advance
+ * waiting_lsn and proceed.
+ *
+ * 2) NEWFILE, no LSN stored as the datum,
+ * lsn_rc.lsn == ready_lsn. The NEWFILE
+ * record is next, so set waiting_lsn =
+ * ready_lsn.
+ *
+ * 3) NEWFILE, no LSN stored as the datum, but
+ * lsn_rc.lsn > ready_lsn. There's still a
+ * gap; set waiting_lsn = lsn_rc.lsn.
+ *
+ * 4) NEWFILE, newfile_lsn in datum, and it's <
+ * ready_lsn. (If the datum is non-empty,
+ * it's the LSN of the last record in a log
+ * file, not the end of the log, and
+ * lsn_rc.lsn is the LSN of the start of
+ * the new file--we didn't have the end of
+ * the old log handy when we sent the
+ * record.) No gap--we're ready to
+ * proceed. Set both waiting and ready_lsn
+ * to lsn_rc.lsn.
+ *
+ * 5) NEWFILE, newfile_lsn in datum, and it's >=
+ * ready_lsn. We're still missing at
+ * least one record; set waiting_lsn,
+ * but not ready_lsn, to lsn_rc.lsn.
+ */
+ if (lsn_rc.rectype == REP_NEWFILE &&
+ nextrec_dbt.size > 0 && log_compare(
+ &newfile_lsn, &lp->ready_lsn) < 0)
+ /* Case 4. */
+ lp->ready_lsn =
+ lp->waiting_lsn = lsn_rc.lsn;
+ else {
+ /* Cases 1, 2, 3, and 5. */
+ DB_ASSERT(log_compare(&lsn_rc.lsn,
+ &lp->ready_lsn) >= 0);
+ lp->waiting_lsn = lsn_rc.lsn;
+ }
+
+ /*
+ * If the current rectype is simple, we're
+ * done with it, and we should check and see
+ * whether the next record queued is the next
+ * one we're ready for. This is just the loop
+ * condition, so we continue.
+ *
+ * Otherwise, we need to break out of this loop
+ * and process this record first.
+ */
+ if (!IS_SIMPLE(rectype))
+ break;
+ }
+ }
+
+ /*
+ * Check if we're at a gap in the table and if so, whether we
+ * need to ask for any records.
+ */
+ do_req = 0;
+ if (!IS_ZERO_LSN(lp->waiting_lsn) &&
+ log_compare(&lp->ready_lsn, &lp->waiting_lsn) != 0) {
+ next_lsn = lp->ready_lsn;
+ do_req = ++lp->rcvd_recs >= lp->wait_recs;
+ if (do_req) {
+ lp->wait_recs = rep->request_gap;
+ lp->rcvd_recs = 0;
+ }
+ }
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (dbc != NULL) {
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ have_mutex = 0;
+ }
+ dbc = NULL;
+
+ if (do_req) {
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ eid = db_rep->region->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (eid != DB_EID_INVALID) {
+ rep->stat.st_log_requested++;
+ if ((ret = __rep_send_message(dbenv,
+ eid, REP_LOG_REQ, &next_lsn, NULL, 0)) != 0)
+ goto err;
+ }
+ }
+ } else if (cmp > 0) {
+ /*
+ * The LSN is higher than the one we were waiting for.
+ * If it is a NEWFILE message, this may not mean that
+ * there's a gap; in some cases, NEWFILE messages contain
+ * the LSN of the beginning of the new file instead
+ * of the end of the old.
+ *
+ * In these cases, the rec DBT will contain the last LSN
+ * of the old file, so we can tell whether there's a gap.
+ */
+ if (rp->rectype == REP_NEWFILE &&
+ rp->lsn.file == lp->ready_lsn.file + 1 &&
+ rp->lsn.offset == 0) {
+ DB_ASSERT(rec != NULL && rec->data != NULL &&
+ rec->size == sizeof(DB_LSN));
+ memcpy(&lsn, rec->data, sizeof(DB_LSN));
+ if (log_compare(&lp->ready_lsn, &lsn) > 0)
+ /*
+ * The last LSN in the old file is smaller
+ * than the one we're expecting, so there's
+ * no gap--the one we're expecting just
+ * doesn't exist.
+ */
+ goto newfile;
+ }
+
+ /*
+ * This record isn't in sequence; add it to the table and
+ * update waiting_lsn if necessary.
+ */
+ memset(&key_dbt, 0, sizeof(key_dbt));
+ key_dbt.data = rp;
+ key_dbt.size = sizeof(*rp);
+ next_lsn = lp->lsn;
+ do_req = 0;
+ if (lp->wait_recs == 0) {
+ /*
+ * This is a new gap. Initialize the number of
+ * records that we should wait before requesting
+ * that it be resent. We grab the limits out of
+ * the rep without the mutex.
+ */
+ lp->wait_recs = rep->request_gap;
+ lp->rcvd_recs = 0;
+ }
+
+ if (++lp->rcvd_recs >= lp->wait_recs) {
+ /*
+ * If we've waited long enough, request the record
+ * and double the wait interval.
+ */
+ do_req = 1;
+ lp->wait_recs <<= 1;
+ lp->rcvd_recs = 0;
+ if (lp->wait_recs > rep->max_gap)
+ lp->wait_recs = rep->max_gap;
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ ret = dbp->put(dbp, NULL, &key_dbt, rec, 0);
+ rep->stat.st_log_queued++;
+ rep->stat.st_log_queued_total++;
+ if (rep->stat.st_log_queued_max < rep->stat.st_log_queued)
+ rep->stat.st_log_queued_max = rep->stat.st_log_queued;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ if (ret != 0)
+ return (ret);
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (IS_ZERO_LSN(lp->waiting_lsn) ||
+ log_compare(&rp->lsn, &lp->waiting_lsn) < 0)
+ lp->waiting_lsn = rp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ if (do_req) {
+ /* Request the LSN we are still waiting for. */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+
+ /* May as well do this after we grab the mutex. */
+ eid = db_rep->region->master_id;
+
+ /*
+ * If the master_id is invalid, this means that since
+ * the last record was sent, somebody declared an
+ * election and we may not have a master to request
+ * things of.
+ *
+ * This is not an error; when we find a new master,
+ * we'll re-negotiate where the end of the log is and
+ * try to to bring ourselves up to date again anyway.
+ */
+ if (eid != DB_EID_INVALID) {
+ rep->stat.st_log_requested++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = __rep_send_message(dbenv,
+ eid, REP_LOG_REQ, &next_lsn, NULL, 0);
+ } else
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ }
+ return (ret);
+ } else {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * We may miscount if we race, since we
+ * don't currently hold the rep mutex.
+ */
+ rep->stat.st_log_duplicated++;
+ }
+ if (ret != 0 || cmp < 0 || (cmp == 0 && IS_SIMPLE(rectype)))
+ goto done;
+
+ /*
+ * If we got here, then we've got a log record in rp and rec that
+ * we need to process.
+ */
+ switch(rectype) {
+ case DB___dbreg_register:
+ /*
+ * DB opens occur in the context of a transaction, so we can
+ * simply handle them when we process the transaction. Closes,
+ * however, are not transaction-protected, so we have to
+ * handle them here.
+ *
+ * Note that it should be unsafe for the master to do a close
+ * of a file that was opened in an active transaction, so we
+ * should be guaranteed to get the ordering right.
+ */
+ memcpy(&txnid, (u_int8_t *)rec->data +
+ ((u_int8_t *)&dbreg_args.txnid - (u_int8_t *)&dbreg_args),
+ sizeof(u_int32_t));
+ if (txnid == TXN_INVALID &&
+ !F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, rec, &rp->lsn,
+ DB_TXN_APPLY, NULL);
+ break;
+ case DB___txn_ckp:
+ /* Sync the memory pool. */
+ memcpy(&ckp_lsn, (u_int8_t *)rec->data +
+ ((u_int8_t *)&ckp_args.ckp_lsn - (u_int8_t *)&ckp_args),
+ sizeof(DB_LSN));
+ if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ ret = dbenv->memp_sync(dbenv, &ckp_lsn);
+ else
+ /*
+ * We ought to make sure the logs on a logs-only
+ * replica get flushed now and again.
+ */
+ ret = dbenv->log_flush(dbenv, &ckp_lsn);
+ /* Update the last_ckp in the txn region. */
+ if (ret == 0)
+ __txn_updateckp(dbenv, &rp->lsn);
+ break;
+ case DB___txn_regop:
+ if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ do {
+ /*
+ * If an application is doing app-specific
+ * recovery and acquires locks while applying
+ * a transaction, it can deadlock. Any other
+ * locks held by this thread should have been
+ * discarded in the __rep_process_txn error
+ * path, so if we simply retry, we should
+ * eventually succeed.
+ */
+ ret = __rep_process_txn(dbenv, rec);
+ } while (ret == DB_LOCK_DEADLOCK);
+ break;
+ default:
+ goto err;
+ }
+
+ /* Check if we need to go back into the table. */
+ if (ret == 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0)
+ goto gap_check;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+done:
+err: if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (have_mutex)
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ if (control_dbt.data != NULL)
+ __os_ufree(dbenv, control_dbt.data);
+ if (rec_dbt.data != NULL)
+ __os_ufree(dbenv, rec_dbt.data);
+
+ return (ret);
+}
+
+/*
+ * __rep_process_txn --
+ *
+ * This is the routine that actually gets a transaction ready for
+ * processing.
+ *
+ * PUBLIC: int __rep_process_txn __P((DB_ENV *, DBT *));
+ */
+int
+__rep_process_txn(dbenv, rec)
+ DB_ENV *dbenv;
+ DBT *rec;
+{
+ DBT data_dbt;
+ DB_LOCKREQ req, *lvp;
+ DB_LOGC *logc;
+ DB_LSN prev_lsn, *lsnp;
+ DB_REP *db_rep;
+ LSN_COLLECTION lc;
+ REP *rep;
+ __txn_regop_args *txn_args;
+ __txn_xa_regop_args *prep_args;
+ u_int32_t lockid, op, rectype;
+ int i, ret, t_ret;
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ void *txninfo;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ logc = NULL;
+ txninfo = NULL;
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&data_dbt, DB_DBT_REALLOC);
+
+ /*
+ * There are two phases: First, we have to traverse
+ * backwards through the log records gathering the list
+ * of all LSNs in the transaction. Once we have this information,
+ * we can loop through, acquire the locks we need for each record,
+ * and then apply it.
+ */
+ dtab = NULL;
+
+ /*
+ * We may be passed a prepare (if we're restoring a prepare
+ * on upgrade) instead of a commit (the common case).
+ * Check which and behave appropriately.
+ */
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ memset(&lc, 0, sizeof(lc));
+ if (rectype == DB___txn_regop) {
+ /*
+ * We're the end of a transaction. Make sure this is
+ * really a commit and not an abort!
+ */
+ if ((ret = __txn_regop_read(dbenv, rec->data, &txn_args)) != 0)
+ return (ret);
+ op = txn_args->opcode;
+ prev_lsn = txn_args->prev_lsn;
+ __os_free(dbenv, txn_args);
+ if (op != TXN_COMMIT)
+ return (0);
+ } else {
+ /* We're a prepare. */
+ DB_ASSERT(rectype == DB___txn_xa_regop);
+
+ if ((ret =
+ __txn_xa_regop_read(dbenv, rec->data, &prep_args)) != 0)
+ return (ret);
+ prev_lsn = prep_args->prev_lsn;
+ __os_free(dbenv, prep_args);
+ }
+
+ /* Phase 1. Get a list of the LSNs in this transaction, and sort it. */
+ if ((ret = __rep_collect_txn(dbenv, &prev_lsn, &lc)) != 0)
+ return (ret);
+ qsort(lc.array, lc.nlsns, sizeof(DB_LSN), __rep_lsn_cmp);
+
+ if ((ret = dbenv->lock_id(dbenv, &lockid)) != 0)
+ goto err;
+
+ /* Initialize the getpgno dispatch table. */
+ if ((ret = __rep_lockpgno_init(dbenv, &dtab, &dtabsize)) != 0)
+ goto err;
+
+ /*
+ * The set of records for a transaction may include dbreg_register
+ * records. Create a txnlist so that they can keep track of file
+ * state between records.
+ */
+ if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txninfo)) != 0)
+ goto err;
+
+ /* Phase 2: Apply updates. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ for (lsnp = &lc.array[0], i = 0; i < lc.nlsns; i++, lsnp++) {
+ if ((ret = __rep_lockpages(dbenv,
+ dtab, dtabsize, lsnp, NULL, NULL, lockid)) != 0)
+ goto err;
+ if ((ret = logc->get(logc, lsnp, &data_dbt, DB_SET)) != 0)
+ goto err;
+ if ((ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &data_dbt, lsnp,
+ DB_TXN_APPLY, txninfo)) != 0)
+ goto err;
+ }
+
+err: memset(&req, 0, sizeof(req));
+ req.op = DB_LOCK_PUT_ALL;
+ if ((t_ret = dbenv->lock_vec(dbenv, lockid,
+ DB_LOCK_FREE_LOCKER, &req, 1, &lvp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (lc.nalloc != 0)
+ __os_free(dbenv, lc.array);
+
+ if ((t_ret =
+ dbenv->lock_id_free(dbenv, lockid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+
+ if (F_ISSET(&data_dbt, DB_DBT_REALLOC) && data_dbt.data != NULL)
+ __os_ufree(dbenv, data_dbt.data);
+
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+
+ if (ret == 0)
+ /*
+ * We don't hold the rep mutex, and could miscount if we race.
+ */
+ rep->stat.st_txns_applied++;
+
+ return (ret);
+}
+
+/*
+ * __rep_collect_txn
+ * Recursive function that will let us visit every entry in a transaction
+ * chain including all child transactions so that we can then apply
+ * the entire transaction family at once.
+ */
+static int
+__rep_collect_txn(dbenv, lsnp, lc)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ LSN_COLLECTION *lc;
+{
+ __txn_child_args *argp;
+ DB_LOGC *logc;
+ DB_LSN c_lsn;
+ DBT data;
+ u_int32_t rectype;
+ int nalloc, ret, t_ret;
+
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_REALLOC);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ while (!IS_ZERO_LSN(*lsnp) &&
+ (ret = logc->get(logc, lsnp, &data, DB_SET)) == 0) {
+ memcpy(&rectype, data.data, sizeof(rectype));
+ if (rectype == DB___txn_child) {
+ if ((ret = __txn_child_read(dbenv,
+ data.data, &argp)) != 0)
+ goto err;
+ c_lsn = argp->c_lsn;
+ *lsnp = argp->prev_lsn;
+ __os_free(dbenv, argp);
+ ret = __rep_collect_txn(dbenv, &c_lsn, lc);
+ } else {
+ if (lc->nalloc < lc->nlsns + 1) {
+ nalloc = lc->nalloc == 0 ? 20 : lc->nalloc * 2;
+ if ((ret = __os_realloc(dbenv,
+ nalloc * sizeof(DB_LSN), &lc->array)) != 0)
+ goto err;
+ lc->nalloc = nalloc;
+ }
+ lc->array[lc->nlsns++] = *lsnp;
+
+ /*
+ * Explicitly copy the previous lsn. The record
+ * starts with a u_int32_t record type, a u_int32_t
+ * txn id, and then the DB_LSN (prev_lsn) that we
+ * want. We copy explicitly because we have no idea
+ * what kind of record this is.
+ */
+ memcpy(lsnp, (u_int8_t *)data.data +
+ sizeof(u_int32_t) + sizeof(u_int32_t),
+ sizeof(DB_LSN));
+ }
+
+ if (ret != 0)
+ goto err;
+ }
+
+err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (data.data != NULL)
+ __os_ufree(dbenv, data.data);
+ return (ret);
+}
+
+/*
+ * __rep_lsn_cmp --
+ * qsort-type-compatible wrapper for log_compare.
+ */
+static int
+__rep_lsn_cmp(lsn1, lsn2)
+ const void *lsn1, *lsn2;
+{
+
+ return (log_compare((DB_LSN *)lsn1, (DB_LSN *)lsn2));
+}
+
+/*
+ * __rep_newfile --
+ * NEWFILE messages can contain either the last LSN of the old file
+ * or the first LSN of the new one, depending on which we have available
+ * when the message is sent. When applying a NEWFILE message, make sure
+ * we haven't already swapped files, as it's possible (given the right sequence
+ * of out-of-order messages) to wind up with a NEWFILE message of each
+ * variety, and __rep_apply won't detect the two as duplicates of each other.
+ */
+static int
+__rep_newfile(dbenv, rc, msgdbt, lsnp)
+ DB_ENV *dbenv;
+ REP_CONTROL *rc;
+ DBT *msgdbt;
+ DB_LSN *lsnp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ u_int32_t newfile;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * A NEWFILE message containing the old file's LSN will be
+ * accompanied by a NULL rec DBT; one containing the new one's LSN
+ * will need to supply the last record in the old file by
+ * sending it in the rec DBT.
+ */
+ if (msgdbt == NULL || msgdbt->size == 0)
+ newfile = rc->lsn.file + 1;
+ else
+ newfile = rc->lsn.file;
+
+ if (newfile > lp->lsn.file)
+ return (__log_newfile(dblp, lsnp));
+ else {
+ /* We've already applied this NEWFILE. Just ignore it. */
+ *lsnp = lp->lsn;
+ return (0);
+ }
+}
diff --git a/storage/bdb/rep/rep_region.c b/storage/bdb/rep/rep_region.c
new file mode 100644
index 00000000000..1ac3fb8a20c
--- /dev/null
+++ b/storage/bdb/rep/rep_region.c
@@ -0,0 +1,187 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: rep_region.c,v 1.29 2002/08/06 04:50:36 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#endif
+
+#include <string.h>
+
+#include "db_int.h"
+#include "dbinc/rep.h"
+#include "dbinc/log.h"
+
+/*
+ * __rep_region_init --
+ * Initialize the shared memory state for the replication system.
+ *
+ * PUBLIC: int __rep_region_init __P((DB_ENV *));
+ */
+int
+__rep_region_init(dbenv)
+ DB_ENV *dbenv;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ DB_MUTEX *db_mutexp;
+ DB_REP *db_rep;
+ REP *rep;
+ int ret;
+
+ db_rep = dbenv->rep_handle;
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ ret = 0;
+
+ MUTEX_LOCK(dbenv, &renv->mutex);
+ if (renv->rep_off == INVALID_ROFF) {
+ /* Must create the region. */
+ if ((ret = __db_shalloc(infop->addr,
+ sizeof(REP), MUTEX_ALIGN, &rep)) != 0)
+ goto err;
+ memset(rep, 0, sizeof(*rep));
+ rep->tally_off = INVALID_ROFF;
+ renv->rep_off = R_OFFSET(infop, rep);
+
+ if ((ret = __db_mutex_setup(dbenv, infop, &rep->mutex,
+ MUTEX_NO_RECORD)) != 0)
+ goto err;
+
+ /*
+ * We must create a place for the db_mutex separately;
+ * mutexes have to be aligned to MUTEX_ALIGN, and the only way
+ * to guarantee that is to make sure they're at the beginning
+ * of a shalloc'ed chunk.
+ */
+ if ((ret = __db_shalloc(infop->addr, sizeof(DB_MUTEX),
+ MUTEX_ALIGN, &db_mutexp)) != 0)
+ goto err;
+ rep->db_mutex_off = R_OFFSET(infop, db_mutexp);
+
+ /*
+ * Because we have no way to prevent deadlocks and cannot log
+ * changes made to it, we single-thread access to the client
+ * bookkeeping database. This is suboptimal, but it only gets
+ * accessed when messages arrive out-of-order, so it should
+ * stay small and not be used in a high-performance app.
+ */
+ if ((ret = __db_mutex_setup(dbenv, infop, db_mutexp,
+ MUTEX_NO_RECORD)) != 0)
+ goto err;
+
+ /* We have the region; fill in the values. */
+ rep->eid = DB_EID_INVALID;
+ rep->master_id = DB_EID_INVALID;
+ rep->gen = 0;
+
+ /*
+ * Set default values for the min and max log records that we
+ * wait before requesting a missing log record.
+ */
+ rep->request_gap = DB_REP_REQUEST_GAP;
+ rep->max_gap = DB_REP_MAX_GAP;
+ } else
+ rep = R_ADDR(infop, renv->rep_off);
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ db_rep->mutexp = &rep->mutex;
+ db_rep->db_mutexp = R_ADDR(infop, rep->db_mutex_off);
+ db_rep->region = rep;
+
+ return (0);
+
+err: MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+}
+
+/*
+ * __rep_region_destroy --
+ * Destroy any system resources allocated in the replication region.
+ *
+ * PUBLIC: int __rep_region_destroy __P((DB_ENV *));
+ */
+int
+__rep_region_destroy(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ int ret, t_ret;
+
+ ret = t_ret = 0;
+ db_rep = (DB_REP *)dbenv->rep_handle;
+
+ if (db_rep != NULL) {
+ if (db_rep->mutexp != NULL)
+ ret = __db_mutex_destroy(db_rep->mutexp);
+ if (db_rep->db_mutexp != NULL)
+ t_ret = __db_mutex_destroy(db_rep->db_mutexp);
+ }
+
+ return (ret == 0 ? t_ret : ret);
+}
+
+/*
+ * __rep_dbenv_close --
+ * Replication-specific destruction of the DB_ENV structure.
+ *
+ * PUBLIC: int __rep_dbenv_close __P((DB_ENV *));
+ */
+int
+__rep_dbenv_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+
+ db_rep = (DB_REP *)dbenv->rep_handle;
+
+ if (db_rep != NULL) {
+ __os_free(dbenv, db_rep);
+ dbenv->rep_handle = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_preclose --
+ * If we are a client, shut down our client database and, if we're
+ * actually closing the environment, close all databases we've opened
+ * while applying messages.
+ *
+ * PUBLIC: int __rep_preclose __P((DB_ENV *, int));
+ */
+int
+__rep_preclose(dbenv, do_closefiles)
+ DB_ENV *dbenv;
+ int do_closefiles;
+{
+ DB *dbp;
+ DB_REP *db_rep;
+ int ret, t_ret;
+
+ ret = t_ret = 0;
+
+ /* If replication is not initialized, we have nothing to do. */
+ if ((db_rep = (DB_REP *)dbenv->rep_handle) == NULL)
+ return (0);
+
+ if ((dbp = db_rep->rep_db) != NULL) {
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ ret = dbp->close(dbp, 0);
+ db_rep->rep_db = NULL;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ }
+
+ if (do_closefiles)
+ t_ret = __dbreg_close_files(dbenv);
+
+ return (ret == 0 ? t_ret : ret);
+}
diff --git a/storage/bdb/rep/rep_util.c b/storage/bdb/rep/rep_util.c
new file mode 100644
index 00000000000..9c99d33ed4a
--- /dev/null
+++ b/storage/bdb/rep/rep_util.c
@@ -0,0 +1,867 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: rep_util.c,v 1.51 2002/09/05 02:30:00 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+/*
+ * rep_util.c:
+ * Miscellaneous replication-related utility functions, including
+ * those called by other subsystems.
+ */
+static int __rep_cmp_bylsn __P((const void *, const void *));
+static int __rep_cmp_bypage __P((const void *, const void *));
+
+#ifdef REP_DIAGNOSTIC
+static void __rep_print_logmsg __P((DB_ENV *, const DBT *, DB_LSN *));
+#endif
+
+/*
+ * __rep_check_alloc --
+ * Make sure the array of TXN_REC entries is of at least size n.
+ * (This function is called by the __*_getpgnos() functions in
+ * *.src.)
+ *
+ * PUBLIC: int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int));
+ */
+int
+__rep_check_alloc(dbenv, r, n)
+ DB_ENV *dbenv;
+ TXN_RECS *r;
+ int n;
+{
+ int nalloc, ret;
+
+ while (r->nalloc < r->npages + n) {
+ nalloc = r->nalloc == 0 ? 20 : r->nalloc * 2;
+
+ if ((ret = __os_realloc(dbenv, nalloc * sizeof(LSN_PAGE),
+ &r->array)) != 0)
+ return (ret);
+
+ r->nalloc = nalloc;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_send_message --
+ * This is a wrapper for sending a message. It takes care of constructing
+ * the REP_CONTROL structure and calling the user's specified send function.
+ *
+ * PUBLIC: int __rep_send_message __P((DB_ENV *, int,
+ * PUBLIC: u_int32_t, DB_LSN *, const DBT *, u_int32_t));
+ */
+int
+__rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
+ DB_ENV *dbenv;
+ int eid;
+ u_int32_t rtype;
+ DB_LSN *lsnp;
+ const DBT *dbtp;
+ u_int32_t flags;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ DBT cdbt, scrap_dbt;
+ REP_CONTROL cntrl;
+ u_int32_t send_flags;
+ int ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ /* Set up control structure. */
+ memset(&cntrl, 0, sizeof(cntrl));
+ if (lsnp == NULL)
+ ZERO_LSN(cntrl.lsn);
+ else
+ cntrl.lsn = *lsnp;
+ cntrl.rectype = rtype;
+ cntrl.flags = flags;
+ cntrl.rep_version = DB_REPVERSION;
+ cntrl.log_version = DB_LOGVERSION;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ cntrl.gen = rep->gen;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ memset(&cdbt, 0, sizeof(cdbt));
+ cdbt.data = &cntrl;
+ cdbt.size = sizeof(cntrl);
+
+ /* Don't assume the send function will be tolerant of NULL records. */
+ if (dbtp == NULL) {
+ memset(&scrap_dbt, 0, sizeof(DBT));
+ dbtp = &scrap_dbt;
+ }
+
+ send_flags = (LF_ISSET(DB_PERMANENT) ? DB_REP_PERMANENT : 0);
+
+#if 0
+ __rep_print_message(dbenv, eid, &cntrl, "rep_send_message");
+#endif
+#ifdef REP_DIAGNOSTIC
+ if (rtype == REP_LOG)
+ __rep_print_logmsg(dbenv, dbtp, lsnp);
+#endif
+ ret = db_rep->rep_send(dbenv, &cdbt, dbtp, eid, send_flags);
+
+ /*
+ * We don't hold the rep lock, so this could miscount if we race.
+ * I don't think it's worth grabbing the mutex for that bit of
+ * extra accuracy.
+ */
+ if (ret == 0)
+ rep->stat.st_msgs_sent++;
+ else
+ rep->stat.st_msgs_send_failures++;
+
+ return (ret);
+}
+
+#ifdef REP_DIAGNOSTIC
+
+/*
+ * __rep_print_logmsg --
+ * This is a debugging routine for printing out log records that
+ * we are about to transmit to a client.
+ */
+
+static void
+__rep_print_logmsg(dbenv, logdbt, lsnp)
+ DB_ENV *dbenv;
+ const DBT *logdbt;
+ DB_LSN *lsnp;
+{
+ /* Static structures to hold the printing functions. */
+ static int (**ptab)__P((DB_ENV *,
+ DBT *, DB_LSN *, db_recops, void *)) = NULL;
+ size_t ptabsize = 0;
+
+ if (ptabsize == 0) {
+ /* Initialize the table. */
+ (void)__bam_init_print(dbenv, &ptab, &ptabsize);
+ (void)__crdel_init_print(dbenv, &ptab, &ptabsize);
+ (void)__db_init_print(dbenv, &ptab, &ptabsize);
+ (void)__dbreg_init_print(dbenv, &ptab, &ptabsize);
+ (void)__fop_init_print(dbenv, &ptab, &ptabsize);
+ (void)__qam_init_print(dbenv, &ptab, &ptabsize);
+ (void)__ham_init_print(dbenv, &ptab, &ptabsize);
+ (void)__txn_init_print(dbenv, &ptab, &ptabsize);
+ }
+
+ (void)__db_dispatch(dbenv,
+ ptab, ptabsize, (DBT *)logdbt, lsnp, DB_TXN_PRINT, NULL);
+}
+
+#endif
+/*
+ * __rep_new_master --
+ * Called after a master election to sync back up with a new master.
+ * It's possible that we already know of this new master in which case
+ * we don't need to do anything.
+ *
+ * This is written assuming that this message came from the master; we
+ * need to enforce that in __rep_process_record, but right now, we have
+ * no way to identify the master.
+ *
+ * PUBLIC: int __rep_new_master __P((DB_ENV *, REP_CONTROL *, int));
+ */
+int
+__rep_new_master(dbenv, cntrl, eid)
+ DB_ENV *dbenv;
+ REP_CONTROL *cntrl;
+ int eid;
+{
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN last_lsn, lsn;
+ DB_REP *db_rep;
+ DBT dbt;
+ LOG *lp;
+ REP *rep;
+ int change, ret, t_ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ELECTION_DONE(rep);
+ change = rep->gen != cntrl->gen || rep->master_id != eid;
+ if (change) {
+ rep->gen = cntrl->gen;
+ rep->master_id = eid;
+ F_SET(rep, REP_F_RECOVER);
+ rep->stat.st_master_changes++;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (!change)
+ return (0);
+
+ /*
+ * If the master changed, we need to start the process of
+ * figuring out what our last valid log record is. However,
+ * if both the master and we agree that the max LSN is 0,0,
+ * then there is no recovery to be done. If we are at 0 and
+ * the master is not, then we just need to request all the log
+ * records from the master.
+ */
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ last_lsn = lsn = lp->lsn;
+ if (last_lsn.offset > sizeof(LOGP))
+ last_lsn.offset -= lp->len;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (IS_INIT_LSN(lsn) || IS_ZERO_LSN(lsn)) {
+empty: MUTEX_LOCK(dbenv, db_rep->mutexp);
+ F_CLR(rep, REP_F_RECOVER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (IS_INIT_LSN(cntrl->lsn))
+ ret = 0;
+ else
+ ret = __rep_send_message(dbenv, rep->master_id,
+ REP_ALL_REQ, &lsn, NULL, 0);
+
+ if (ret == 0)
+ ret = DB_REP_NEWMASTER;
+ return (ret);
+ } else if (last_lsn.offset <= sizeof(LOGP)) {
+ /*
+		 * We have just changed log files and need to set last_lsn
+		 * to the last record in the previous log file.
+ */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&dbt, 0, sizeof(dbt));
+ ret = logc->get(logc, &last_lsn, &dbt, DB_LAST);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret == DB_NOTFOUND)
+ goto empty;
+ if (ret != 0)
+ return (ret);
+ }
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->verify_lsn = last_lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if ((ret = __rep_send_message(dbenv,
+ eid, REP_VERIFY_REQ, &last_lsn, NULL, 0)) != 0)
+ return (ret);
+
+ return (DB_REP_NEWMASTER);
+}
+
+/*
+ * __rep_lockpgno_init
+ * Create a dispatch table for acquiring locks on each log record.
+ *
+ * PUBLIC: int __rep_lockpgno_init __P((DB_ENV *,
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t *));
+ */
+int
+__rep_lockpgno_init(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ /* Initialize dispatch table. */
+ *dtabsizep = 0;
+ *dtabp = NULL;
+ if ((ret = __bam_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __crdel_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __db_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __dbreg_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __fop_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __qam_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __ham_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __txn_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __rep_unlockpages --
+ * Unlock the pages locked in __rep_lockpages.
+ *
+ * PUBLIC: int __rep_unlockpages __P((DB_ENV *, u_int32_t));
+ */
+int
+__rep_unlockpages(dbenv, lid)
+ DB_ENV *dbenv;
+ u_int32_t lid;
+{
+ DB_LOCKREQ req, *lvp;
+
+ req.op = DB_LOCK_PUT_ALL;
+ return (dbenv->lock_vec(dbenv, lid, 0, &req, 1, &lvp));
+}
+
+/*
+ * __rep_lockpages --
+ * Called to gather and lock pages in preparation for both
+ * single transaction apply as well as client synchronization
+ * with a new master. A non-NULL key_lsn means that we're locking
+ * in order to apply a single log record during client recovery
+ * to the joint LSN. A non-NULL max_lsn means that we are applying
+ * a transaction whose commit is at max_lsn.
+ *
+ * PUBLIC: int __rep_lockpages __P((DB_ENV *,
+ * PUBLIC: int (**)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t, DB_LSN *, DB_LSN *, TXN_RECS *, u_int32_t));
+ */
+int
+__rep_lockpages(dbenv, dtab, dtabsize, key_lsn, max_lsn, recs, lid)
+ DB_ENV *dbenv;
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ DB_LSN *key_lsn, *max_lsn;
+ TXN_RECS *recs;
+ u_int32_t lid;
+{
+ DBT data_dbt, lo;
+ DB_LOCK l;
+ DB_LOCKREQ *lvp;
+ DB_LOGC *logc;
+ DB_LSN tmp_lsn;
+ TXN_RECS tmp, *t;
+ db_pgno_t cur_pgno;
+ linfo_t locks;
+ int i, ret, t_ret, unique;
+ u_int32_t cur_fid;
+
+ /*
+ * There are two phases: First, we have to traverse backwards through
+ * the log records gathering the list of all the pages accessed. Once
+ * we have this information we can acquire all the locks we need.
+ */
+
+ /* Initialization */
+ memset(&locks, 0, sizeof(locks));
+ ret = 0;
+
+ t = recs != NULL ? recs : &tmp;
+ t->npages = t->nalloc = 0;
+ t->array = NULL;
+
+ /*
+ * We've got to be in one mode or the other; else life will either
+ * be excessively boring or overly exciting.
+ */
+ DB_ASSERT(key_lsn != NULL || max_lsn != NULL);
+ DB_ASSERT(key_lsn == NULL || max_lsn == NULL);
+
+ /*
+ * Phase 1: Fill in the pgno array.
+ */
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&data_dbt, DB_DBT_REALLOC);
+
+ /* Single transaction apply. */
+ if (max_lsn != NULL) {
+ DB_ASSERT(0); /* XXX */
+ /*
+ tmp_lsn = *max_lsn;
+ if ((ret = __rep_apply_thread(dbenv, dtab, dtabsize,
+ &data_dbt, &tmp_lsn, t)) != 0)
+ goto err;
+ */
+ }
+
+ /* In recovery. */
+ if (key_lsn != NULL) {
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ ret = logc->get(logc, key_lsn, &data_dbt, DB_SET);
+
+ /* Save lsn values, since dispatch functions can change them. */
+ tmp_lsn = *key_lsn;
+ ret = __db_dispatch(dbenv,
+ dtab, dtabsize, &data_dbt, &tmp_lsn, DB_TXN_GETPGNOS, t);
+
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * If ret == DB_DELETED, this record refers to a temporary
+ * file and there's nothing to apply.
+ */
+ if (ret == DB_DELETED) {
+ ret = 0;
+ goto out;
+ } else if (ret != 0)
+ goto err;
+ }
+
+ if (t->npages == 0)
+ goto out;
+
+ /* Phase 2: Write lock all the pages. */
+
+ /* Sort the entries in the array by page number. */
+ qsort(t->array, t->npages, sizeof(LSN_PAGE), __rep_cmp_bypage);
+
+ /* Count the number of unique pages. */
+ cur_fid = DB_LOGFILEID_INVALID;
+ cur_pgno = PGNO_INVALID;
+ unique = 0;
+ for (i = 0; i < t->npages; i++) {
+ if (F_ISSET(&t->array[i], LSN_PAGE_NOLOCK))
+ continue;
+ if (t->array[i].pgdesc.pgno != cur_pgno ||
+ t->array[i].fid != cur_fid) {
+ cur_pgno = t->array[i].pgdesc.pgno;
+ cur_fid = t->array[i].fid;
+ unique++;
+ }
+ }
+
+ if (unique == 0)
+ goto out;
+
+ /* Handle single lock case specially, else allocate space for locks. */
+ if (unique == 1) {
+ memset(&lo, 0, sizeof(lo));
+ lo.data = &t->array[0].pgdesc;
+ lo.size = sizeof(t->array[0].pgdesc);
+ ret = dbenv->lock_get(dbenv, lid, 0, &lo, DB_LOCK_WRITE, &l);
+ goto out2;
+ }
+
+ /* Multi-lock case. */
+ locks.n = unique;
+ if ((ret = __os_calloc(dbenv,
+ unique, sizeof(DB_LOCKREQ), &locks.reqs)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, unique, sizeof(DBT), &locks.objs)) != 0)
+ goto err;
+
+ unique = 0;
+ cur_fid = DB_LOGFILEID_INVALID;
+ cur_pgno = PGNO_INVALID;
+ for (i = 0; i < t->npages; i++) {
+ if (F_ISSET(&t->array[i], LSN_PAGE_NOLOCK))
+ continue;
+ if (t->array[i].pgdesc.pgno != cur_pgno ||
+ t->array[i].fid != cur_fid) {
+ cur_pgno = t->array[i].pgdesc.pgno;
+ cur_fid = t->array[i].fid;
+ locks.reqs[unique].op = DB_LOCK_GET;
+ locks.reqs[unique].mode = DB_LOCK_WRITE;
+ locks.reqs[unique].obj = &locks.objs[unique];
+ locks.objs[unique].data = &t->array[i].pgdesc;
+ locks.objs[unique].size = sizeof(t->array[i].pgdesc);
+ unique++;
+ }
+ }
+
+ /* Finally, get the locks. */
+ if ((ret =
+ dbenv->lock_vec(dbenv, lid, 0, locks.reqs, unique, &lvp)) != 0) {
+ /*
+ * If we were unsuccessful, unlock any locks we acquired before
+ * the error and return the original error value.
+ */
+ (void)__rep_unlockpages(dbenv, lid);
+ }
+
+err:
+out: if (locks.objs != NULL)
+ __os_free(dbenv, locks.objs);
+ if (locks.reqs != NULL)
+ __os_free(dbenv, locks.reqs);
+
+ /*
+ * Before we return, sort by LSN so that we apply records in the
+ * right order.
+ */
+ qsort(t->array, t->npages, sizeof(LSN_PAGE), __rep_cmp_bylsn);
+
+out2: if ((ret != 0 || recs == NULL) && t->nalloc != 0) {
+ __os_free(dbenv, t->array);
+ t->array = NULL;
+ t->npages = t->nalloc = 0;
+ }
+
+ if (F_ISSET(&data_dbt, DB_DBT_REALLOC) && data_dbt.data != NULL)
+ __os_ufree(dbenv, data_dbt.data);
+
+ return (ret);
+}
+
+/*
+ * __rep_cmp_bypage and __rep_cmp_bylsn --
+ * Sort functions for qsort. "bypage" sorts first by page numbers and
+ * then by the LSN. "bylsn" sorts first by the LSN, then by page numbers.
+ */
+static int
+__rep_cmp_bypage(a, b)
+ const void *a, *b;
+{
+ LSN_PAGE *ap, *bp;
+
+ ap = (LSN_PAGE *)a;
+ bp = (LSN_PAGE *)b;
+
+ if (ap->fid < bp->fid)
+ return (-1);
+
+ if (ap->fid > bp->fid)
+ return (1);
+
+ if (ap->pgdesc.pgno < bp->pgdesc.pgno)
+ return (-1);
+
+ if (ap->pgdesc.pgno > bp->pgdesc.pgno)
+ return (1);
+
+ if (ap->lsn.file < bp->lsn.file)
+ return (-1);
+
+ if (ap->lsn.file > bp->lsn.file)
+ return (1);
+
+ if (ap->lsn.offset < bp->lsn.offset)
+ return (-1);
+
+ if (ap->lsn.offset > bp->lsn.offset)
+ return (1);
+
+ return (0);
+}
+
+static int
+__rep_cmp_bylsn(a, b)
+ const void *a, *b;
+{
+ LSN_PAGE *ap, *bp;
+
+ ap = (LSN_PAGE *)a;
+ bp = (LSN_PAGE *)b;
+
+ if (ap->lsn.file < bp->lsn.file)
+ return (-1);
+
+ if (ap->lsn.file > bp->lsn.file)
+ return (1);
+
+ if (ap->lsn.offset < bp->lsn.offset)
+ return (-1);
+
+ if (ap->lsn.offset > bp->lsn.offset)
+ return (1);
+
+ if (ap->fid < bp->fid)
+ return (-1);
+
+ if (ap->fid > bp->fid)
+ return (1);
+
+ if (ap->pgdesc.pgno < bp->pgdesc.pgno)
+ return (-1);
+
+ if (ap->pgdesc.pgno > bp->pgdesc.pgno)
+ return (1);
+
+ return (0);
+}
+
+/*
+ * __rep_is_client
+ * Used by other subsystems to figure out if this is a replication
+ *	client site.
+ *
+ * PUBLIC: int __rep_is_client __P((DB_ENV *));
+ */
+int
+__rep_is_client(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int ret;
+
+ if ((db_rep = dbenv->rep_handle) == NULL)
+ return (0);
+ rep = db_rep->region;
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ret = F_ISSET(rep, REP_F_UPGRADE | REP_F_LOGSONLY);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+/*
+ * __rep_send_vote
+ * Send this site's vote for the election.
+ *
+ * PUBLIC: int __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int, int));
+ */
+int
+__rep_send_vote(dbenv, lsnp, nsites, pri, tiebreaker)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ int nsites, pri, tiebreaker;
+{
+ DBT vote_dbt;
+ REP_VOTE_INFO vi;
+
+ memset(&vi, 0, sizeof(vi));
+
+ vi.priority = pri;
+ vi.nsites = nsites;
+ vi.tiebreaker = tiebreaker;
+
+ memset(&vote_dbt, 0, sizeof(vote_dbt));
+ vote_dbt.data = &vi;
+ vote_dbt.size = sizeof(vi);
+
+ return (__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_VOTE1, lsnp, &vote_dbt, 0));
+}
+
+/*
+ * __rep_grow_sites --
+ * Called to allocate more space in the election tally information.
+ *	Called with the rep mutex held.  We need to acquire the region mutex, so
+ * we need to make sure that we *never* acquire those mutexes in the
+ * opposite order.
+ *
+ * PUBLIC: int __rep_grow_sites __P((DB_ENV *dbenv, int nsites));
+ */
+int
+__rep_grow_sites(dbenv, nsites)
+ DB_ENV *dbenv;
+ int nsites;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ REP *rep;
+ int nalloc, ret, *tally;
+
+ rep = ((DB_REP *)dbenv->rep_handle)->region;
+
+ /*
+ * Allocate either twice the current allocation or nsites,
+ * whichever is more.
+ */
+
+ nalloc = 2 * rep->asites;
+ if (nalloc < nsites)
+ nalloc = nsites;
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ MUTEX_LOCK(dbenv, &renv->mutex);
+ if ((ret = __db_shalloc(infop->addr,
+ sizeof(nalloc * sizeof(int)), sizeof(int), &tally)) == 0) {
+ if (rep->tally_off != INVALID_ROFF)
+ __db_shalloc_free(infop->addr,
+ R_ADDR(infop, rep->tally_off));
+ rep->asites = nalloc;
+ rep->nsites = nsites;
+ rep->tally_off = R_OFFSET(infop, tally);
+ }
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+}
+
+#ifdef NOTYET
+static int __rep_send_file __P((DB_ENV *, DBT *, u_int32_t));
+/*
+ * __rep_send_file --
+ * Send an entire file, one block at a time.
+ */
+static int
+__rep_send_file(dbenv, rec, eid)
+ DB_ENV *dbenv;
+ DBT *rec;
+ u_int32_t eid;
+{
+ DB *dbp;
+ DB_LOCK lk;
+ DB_MPOOLFILE *mpf;
+ DBC *dbc;
+ DBT rec_dbt;
+ PAGE *pagep;
+ db_pgno_t last_pgno, pgno;
+ int ret, t_ret;
+
+ dbp = NULL;
+ dbc = NULL;
+ pagep = NULL;
+ mpf = NULL;
+ LOCK_INIT(lk);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+ if ((ret = dbp->open(dbp, rec->data, NULL, DB_UNKNOWN, 0, 0)) != 0)
+ goto err;
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ goto err;
+ /*
+	 * Force last_pgno to some value that will let us read the meta-data
+ * page in the following loop.
+ */
+ memset(&rec_dbt, 0, sizeof(rec_dbt));
+ last_pgno = 1;
+ for (pgno = 0; pgno <= last_pgno; pgno++) {
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lk)) != 0)
+ goto err;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0)
+ goto err;
+
+ if (pgno == 0)
+ last_pgno = ((DBMETA *)pagep)->last_pgno;
+
+ rec_dbt.data = pagep;
+ rec_dbt.size = dbp->pgsize;
+ if ((ret = __rep_send_message(dbenv, eid,
+ REP_FILE, NULL, &rec_dbt, pgno == last_pgno)) != 0)
+ goto err;
+ ret = mpf->put(mpf, pagep, 0);
+ pagep = NULL;
+ if (ret != 0)
+ goto err;
+ ret = __LPUT(dbc, lk);
+ LOCK_INIT(lk);
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (LOCK_ISSET(lk) && (t_ret = __LPUT(dbc, lk)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pagep != NULL && (t_ret = mpf->put(mpf, pagep, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbp != NULL && (t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+#endif
+
+#if 0
+/*
+ * PUBLIC: void __rep_print_message __P((DB_ENV *, int, REP_CONTROL *, char *));
+ */
+void
+__rep_print_message(dbenv, eid, rp, str)
+ DB_ENV *dbenv;
+ int eid;
+ REP_CONTROL *rp;
+ char *str;
+{
+ char *type;
+ switch (rp->rectype) {
+ case REP_ALIVE:
+ type = "alive";
+ break;
+ case REP_ALIVE_REQ:
+ type = "alive_req";
+ break;
+ case REP_ALL_REQ:
+ type = "all_req";
+ break;
+ case REP_ELECT:
+ type = "elect";
+ break;
+ case REP_FILE:
+ type = "file";
+ break;
+ case REP_FILE_REQ:
+ type = "file_req";
+ break;
+ case REP_LOG:
+ type = "log";
+ break;
+ case REP_LOG_MORE:
+ type = "log_more";
+ break;
+ case REP_LOG_REQ:
+ type = "log_req";
+ break;
+ case REP_MASTER_REQ:
+ type = "master_req";
+ break;
+ case REP_NEWCLIENT:
+ type = "newclient";
+ break;
+ case REP_NEWFILE:
+ type = "newfile";
+ break;
+ case REP_NEWMASTER:
+ type = "newmaster";
+ break;
+ case REP_NEWSITE:
+ type = "newsite";
+ break;
+ case REP_PAGE:
+ type = "page";
+ break;
+ case REP_PAGE_REQ:
+ type = "page_req";
+ break;
+ case REP_PLIST:
+ type = "plist";
+ break;
+ case REP_PLIST_REQ:
+ type = "plist_req";
+ break;
+ case REP_VERIFY:
+ type = "verify";
+ break;
+ case REP_VERIFY_FAIL:
+ type = "verify_fail";
+ break;
+ case REP_VERIFY_REQ:
+ type = "verify_req";
+ break;
+ case REP_VOTE1:
+ type = "vote1";
+ break;
+ case REP_VOTE2:
+ type = "vote2";
+ break;
+ default:
+ type = "NOTYPE";
+ break;
+ }
+ printf("%s %s: gen = %d eid %d, type %s, LSN [%u][%u]\n",
+ dbenv->db_home, str, rp->gen, eid, type, rp->lsn.file,
+ rp->lsn.offset);
+}
+#endif
diff --git a/storage/bdb/rpc_client/client.c b/storage/bdb/rpc_client/client.c
new file mode 100644
index 00000000000..b6367e21449
--- /dev/null
+++ b/storage/bdb/rpc_client/client.c
@@ -0,0 +1,464 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: client.c,v 1.51 2002/08/06 06:18:15 bostic Exp $";
+#endif /* not lint */
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_VXWORKS
+#include <rpcLib.h>
+#endif
+#include <rpc/rpc.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/txn.h"
+
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+
+static int __dbcl_c_destroy __P((DBC *));
+static int __dbcl_txn_close __P((DB_ENV *));
+
+/*
+ * __dbcl_envrpcserver --
+ *	Initialize an environment's server.
+ *
+ *	Creates (or adopts) the RPC CLIENT handle used for all subsequent
+ *	server communication, then asks the server to create the remote
+ *	environment.  tsec is the per-call timeout used for the handle we
+ *	create; ssec is passed through to __dbcl_env_create (presumably the
+ *	server-side timeout -- confirm against the generated client code).
+ *
+ * PUBLIC: int __dbcl_envrpcserver
+ * PUBLIC: __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
+ */
+int
+__dbcl_envrpcserver(dbenv, clnt, host, tsec, ssec, flags)
+ DB_ENV *dbenv;
+ void *clnt;
+ const char *host;
+ long tsec, ssec;
+ u_int32_t flags;
+{
+ CLIENT *cl;
+ struct timeval tp;
+
+ COMPQUIET(flags, 0);
+
+#ifdef HAVE_VXWORKS
+ /* VxWorks requires per-task RPC initialization. */
+ if (rpcTaskInit() != 0) {
+ __db_err(dbenv, "Could not initialize VxWorks RPC");
+ return (ERROR);
+ }
+#endif
+ /* Refuse to overwrite an RPC handle that is already set. */
+ if (RPC_ON(dbenv)) {
+ __db_err(dbenv, "Already set an RPC handle");
+ return (EINVAL);
+ }
+ /*
+  * Only create the client and set its timeout if the user
+  * did not pass us a client structure to begin with.
+  */
+ if (clnt == NULL) {
+ if ((cl = clnt_create((char *)host, DB_RPC_SERVERPROG,
+ DB_RPC_SERVERVERS, "tcp")) == NULL) {
+ __db_err(dbenv, clnt_spcreateerror((char *)host));
+ return (DB_NOSERVER);
+ }
+ if (tsec != 0) {
+ tp.tv_sec = tsec;
+ tp.tv_usec = 0;
+ (void)clnt_control(cl, CLSET_TIMEOUT, (char *)&tp);
+ }
+ } else {
+ /*
+  * Caller owns the handle: remember that so __dbcl_refresh
+  * does not clnt_destroy() it.
+  */
+ cl = (CLIENT *)clnt;
+ F_SET(dbenv, DB_ENV_RPCCLIENT_GIVEN);
+ }
+ dbenv->cl_handle = cl;
+
+ return (__dbcl_env_create(dbenv, ssec));
+}
+
+/*
+ * __dbcl_env_open_wrap --
+ *	Wrapper function for DB_ENV->open function for clients.
+ *	We need a wrapper function to deal with DB_USE_ENVIRON* flags
+ *	and we don't want to complicate the generated code for env_open.
+ *
+ *	Rejects DB_THREAD (unsupported over RPC), resolves the home
+ *	directory locally via __db_home, then forwards the fully-resolved
+ *	path to the generated __dbcl_env_open stub.
+ *
+ * PUBLIC: int __dbcl_env_open_wrap
+ * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, int));
+ */
+int
+__dbcl_env_open_wrap(dbenv, home, flags, mode)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+ int mode;
+{
+ int ret;
+
+ if (LF_ISSET(DB_THREAD)) {
+ __db_err(dbenv, "DB_THREAD not allowed on RPC clients");
+ return (EINVAL);
+ }
+ /* Resolve DB_USE_ENVIRON* / relative paths into dbenv->db_home. */
+ if ((ret = __db_home(dbenv, home, flags)) != 0)
+ return (ret);
+ return (__dbcl_env_open(dbenv, dbenv->db_home, flags, mode));
+}
+
+/*
+ * __dbcl_db_open_wrap --
+ *	Wrapper function for DB->open function for clients.
+ *	We need a wrapper function to error on DB_THREAD flag.
+ *	and we don't want to complicate the generated code.
+ *
+ *	Pure pass-through to __dbcl_db_open after rejecting DB_THREAD.
+ *
+ * PUBLIC: int __dbcl_db_open_wrap
+ * PUBLIC: __P((DB *, DB_TXN *, const char *, const char *,
+ * PUBLIC: DBTYPE, u_int32_t, int));
+ */
+int
+__dbcl_db_open_wrap(dbp, txnp, name, subdb, type, flags, mode)
+ DB * dbp;
+ DB_TXN * txnp;
+ const char * name;
+ const char * subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ if (LF_ISSET(DB_THREAD)) {
+ __db_err(dbp->dbenv, "DB_THREAD not allowed on RPC clients");
+ return (EINVAL);
+ }
+ return (__dbcl_db_open(dbp, txnp, name, subdb, type, flags, mode));
+}
+
+/*
+ * __dbcl_refresh --
+ *	Clean up an environment.
+ *
+ *	Releases all client-side state: local transaction bookkeeping,
+ *	the RPC handle (only if we created it ourselves), and the cached
+ *	home path.  Server-side teardown is the caller's responsibility.
+ *
+ * PUBLIC: int __dbcl_refresh __P((DB_ENV *));
+ */
+int
+__dbcl_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ CLIENT *cl;
+ int ret;
+
+ cl = (CLIENT *)dbenv->cl_handle;
+
+ ret = 0;
+ if (dbenv->tx_handle != NULL) {
+ /*
+  * We only need to free up our stuff, the caller
+  * of this function will call the server who will
+  * do all the real work.
+  */
+ ret = __dbcl_txn_close(dbenv);
+ dbenv->tx_handle = NULL;
+ }
+ /* Only destroy the CLIENT handle if it was not supplied by the user. */
+ if (!F_ISSET(dbenv, DB_ENV_RPCCLIENT_GIVEN) && cl != NULL)
+ clnt_destroy(cl);
+ dbenv->cl_handle = NULL;
+ if (dbenv->db_home != NULL) {
+ __os_free(dbenv, dbenv->db_home);
+ dbenv->db_home = NULL;
+ }
+ return (ret);
+}
+
+/*
+ * __dbcl_retcopy --
+ *	Copy the returned data into the user's DBT, handling allocation flags,
+ *	but not DB_DBT_PARTIAL.
+ *
+ *	memp/memsize track the client-side scratch buffer reused across
+ *	calls (the my_r* fields on the DB/DBC handles).
+ *
+ * PUBLIC: int __dbcl_retcopy __P((DB_ENV *, DBT *,
+ * PUBLIC: void *, u_int32_t, void **, u_int32_t *));
+ */
+int
+__dbcl_retcopy(dbenv, dbt, data, len, memp, memsize)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ void *data;
+ u_int32_t len;
+ void **memp;
+ u_int32_t *memsize;
+{
+ int ret;
+ u_int32_t orig_flags;
+
+ /*
+  * The RPC server handles DB_DBT_PARTIAL, so we mask it out here to
+  * avoid the handling of partials in __db_retcopy.
+  */
+ orig_flags = dbt->flags;
+ F_CLR(dbt, DB_DBT_PARTIAL);
+ ret = __db_retcopy(dbenv, dbt, data, len, memp, memsize);
+ /* Restore the caller's original flags, including DB_DBT_PARTIAL. */
+ dbt->flags = orig_flags;
+ return (ret);
+}
+
+/*
+ * __dbcl_txn_close --
+ *	Clean up an environment's transactions.
+ *
+ *	Walks the local transaction chain, tears each one down via
+ *	__dbcl_txn_end, and frees the manager.  Always returns 0 in the
+ *	current implementation (ret is never set non-zero).
+ */
+int
+__dbcl_txn_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXN *txnp;
+ DB_TXNMGR *tmgrp;
+ int ret;
+
+ ret = 0;
+ tmgrp = dbenv->tx_handle;
+
+ /*
+  * This function can only be called once per process (i.e., not
+  * once per thread), so no synchronization is required.
+  * Also this function is called *after* the server has been called,
+  * so the server has already closed/aborted any transactions that
+  * were open on its side. We only need to do local cleanup.
+  */
+ while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL)
+ __dbcl_txn_end(txnp);
+
+ __os_free(dbenv, tmgrp);
+ return (ret);
+
+}
+
+/*
+ * __dbcl_txn_end --
+ *	Clean up an transaction.
+ *	RECURSIVE FUNCTION: Clean up nested transactions.
+ *
+ *	Unlinks txnp from its parent's kid list and from the manager's
+ *	chain, then frees it.  Recurses depth-first through child
+ *	transactions before releasing this one.
+ *
+ * PUBLIC: void __dbcl_txn_end __P((DB_TXN *));
+ */
+void
+__dbcl_txn_end(txnp)
+ DB_TXN *txnp;
+{
+ DB_ENV *dbenv;
+ DB_TXN *kids;
+ DB_TXNMGR *mgr;
+
+ mgr = txnp->mgrp;
+ dbenv = mgr->dbenv;
+
+ /*
+  * First take care of any kids we have
+  */
+ for (kids = TAILQ_FIRST(&txnp->kids);
+ kids != NULL;
+ kids = TAILQ_FIRST(&txnp->kids))
+ __dbcl_txn_end(kids);
+
+ /*
+  * We are ending this transaction no matter what the parent
+  * may eventually do, if we have a parent. All those details
+  * are taken care of by the server. We only need to make sure
+  * that we properly release resources.
+  */
+ if (txnp->parent != NULL)
+ TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks);
+ TAILQ_REMOVE(&mgr->txn_chain, txnp, links);
+ __os_free(dbenv, txnp);
+}
+
+/*
+ * __dbcl_txn_setup --
+ *	Setup a client transaction structure.
+ *
+ *	Links txn into the manager chain (and under parent, if any) and
+ *	installs the client-side method table.  id is the server-assigned
+ *	transaction id.
+ *
+ * PUBLIC: void __dbcl_txn_setup __P((DB_ENV *, DB_TXN *, DB_TXN *, u_int32_t));
+ */
+void
+__dbcl_txn_setup(dbenv, txn, parent, id)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_TXN *parent;
+ u_int32_t id;
+{
+ txn->mgrp = dbenv->tx_handle;
+ txn->parent = parent;
+ txn->txnid = id;
+
+ /*
+  * XXX
+  * In DB library the txn_chain is protected by the mgrp->mutexp.
+  * However, that mutex is implemented in the environments shared
+  * memory region. The client library does not support all of the
+  * region - that just get forwarded to the server. Therefore,
+  * the chain is unprotected here, but properly protected on the
+  * server.
+  */
+ TAILQ_INSERT_TAIL(&txn->mgrp->txn_chain, txn, links);
+
+ TAILQ_INIT(&txn->kids);
+
+ if (parent != NULL)
+ TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
+
+ /* Point the public DB_TXN methods at the RPC client stubs. */
+ txn->abort = __dbcl_txn_abort;
+ txn->commit = __dbcl_txn_commit;
+ txn->discard = __dbcl_txn_discard;
+ txn->id = __txn_id;
+ txn->prepare = __dbcl_txn_prepare;
+ txn->set_timeout = __dbcl_txn_timeout;
+
+ txn->flags = TXN_MALLOC;
+}
+
+/*
+ * __dbcl_c_destroy --
+ *	Destroy a cursor.
+ *
+ *	Removes the cursor from the db's free queue, releases the cached
+ *	return buffers, and frees the cursor itself.  Always returns 0.
+ */
+static int
+__dbcl_c_destroy(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+
+ dbp = dbc->dbp;
+
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ /* Discard any memory used to store returned data. */
+ if (dbc->my_rskey.data != NULL)
+ __os_free(dbc->dbp->dbenv, dbc->my_rskey.data);
+ if (dbc->my_rkey.data != NULL)
+ __os_free(dbc->dbp->dbenv, dbc->my_rkey.data);
+ if (dbc->my_rdata.data != NULL)
+ __os_free(dbc->dbp->dbenv, dbc->my_rdata.data);
+ __os_free(NULL, dbc);
+
+ return (0);
+}
+
+/*
+ * __dbcl_c_refresh --
+ *	Refresh a cursor. Move it from the active queue to the free queue.
+ *
+ *	Also clears the cursor's flags and server-side id so the struct
+ *	can be recycled by __dbcl_c_setup.
+ *
+ * PUBLIC: void __dbcl_c_refresh __P((DBC *));
+ */
+void
+__dbcl_c_refresh(dbc)
+ DBC *dbc;
+{
+ DB *dbp;
+
+ dbp = dbc->dbp;
+ dbc->flags = 0;
+ dbc->cl_id = 0;
+
+ /*
+  * If dbp->cursor fails locally, we use a local dbc so that
+  * we can close it. In that case, dbp will be NULL.
+  */
+ if (dbp != NULL) {
+ TAILQ_REMOVE(&dbp->active_queue, dbc, links);
+ TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links);
+ }
+}
+
+/*
+ * __dbcl_c_setup --
+ *	Allocate a cursor.
+ *
+ *	Recycles a cursor from the db's free queue when possible;
+ *	otherwise allocates a fresh one and installs the RPC client
+ *	method table.  On success the cursor joins the active queue
+ *	and carries the server-assigned id cl_id.
+ *
+ * PUBLIC: int __dbcl_c_setup __P((long, DB *, DBC **));
+ */
+int
+__dbcl_c_setup(cl_id, dbp, dbcp)
+ long cl_id;
+ DB *dbp;
+ DBC **dbcp;
+{
+ DBC *dbc, tmpdbc;
+ int ret;
+
+ if ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ TAILQ_REMOVE(&dbp->free_queue, dbc, links);
+ else {
+ if ((ret =
+ __os_calloc(dbp->dbenv, 1, sizeof(DBC), &dbc)) != 0) {
+ /*
+  * If we die here, set up a tmp dbc to call the
+  * server to shut down that cursor.
+  */
+ tmpdbc.dbp = NULL;
+ tmpdbc.cl_id = cl_id;
+ (void)__dbcl_dbc_close(&tmpdbc);
+ return (ret);
+ }
+ /* Fresh cursor: wire up the RPC client stubs once. */
+ dbc->c_close = __dbcl_dbc_close;
+ dbc->c_count = __dbcl_dbc_count;
+ dbc->c_del = __dbcl_dbc_del;
+ dbc->c_dup = __dbcl_dbc_dup;
+ dbc->c_get = __dbcl_dbc_get;
+ dbc->c_pget = __dbcl_dbc_pget;
+ dbc->c_put = __dbcl_dbc_put;
+ dbc->c_am_destroy = __dbcl_c_destroy;
+ }
+ dbc->cl_id = cl_id;
+ dbc->dbp = dbp;
+ TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links);
+ *dbcp = dbc;
+ return (0);
+}
+
+/*
+ * __dbcl_dbclose_common --
+ *	Common code for closing/cleaning a dbp.
+ *
+ *	Recycles and destroys every cursor, frees the db's cached return
+ *	buffers, scrubs the structure with CLEAR_BYTE and frees it.
+ *	Returns the first cursor-destroy error seen, if any.
+ *
+ * PUBLIC: int __dbcl_dbclose_common __P((DB *));
+ */
+int
+__dbcl_dbclose_common(dbp)
+ DB *dbp;
+{
+ int ret, t_ret;
+ DBC *dbc;
+
+ /*
+  * Go through the active cursors and call the cursor recycle routine,
+  * which resolves pending operations and moves the cursors onto the
+  * free list. Then, walk the free list and call the cursor destroy
+  * routine.
+  *
+  * NOTE: We do not need to use the join_queue for join cursors.
+  * See comment in __dbcl_dbjoin_ret.
+  */
+ ret = 0;
+ while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ __dbcl_c_refresh(dbc);
+ while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ if ((t_ret = __dbcl_c_destroy(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ TAILQ_INIT(&dbp->free_queue);
+ TAILQ_INIT(&dbp->active_queue);
+ /* Discard any memory used to store returned data. */
+ if (dbp->my_rskey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rskey.data);
+ if (dbp->my_rkey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rkey.data);
+ if (dbp->my_rdata.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rdata.data);
+
+ /* Poison the handle to catch use-after-close, then release it. */
+ memset(dbp, CLEAR_BYTE, sizeof(*dbp));
+ __os_free(NULL, dbp);
+ return (ret);
+}
+#endif /* HAVE_RPC */
diff --git a/storage/bdb/rpc_client/gen_client_ret.c b/storage/bdb/rpc_client/gen_client_ret.c
new file mode 100644
index 00000000000..f35589738cd
--- /dev/null
+++ b/storage/bdb/rpc_client/gen_client_ret.c
@@ -0,0 +1,824 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: gen_client_ret.c,v 1.57 2002/08/06 06:18:37 bostic Exp $";
+#endif /* not lint */
+
+#ifdef HAVE_RPC
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/txn.h"
+
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+
+/*
+ * __dbcl_env_close_ret --
+ *	Post-RPC hook for DB_ENV->close: tear down local client state and
+ *	free the handle.  A local cleanup error is only reported when the
+ *	server itself succeeded.
+ *
+ * PUBLIC: int __dbcl_env_close_ret
+ * PUBLIC: __P((DB_ENV *, u_int32_t, __env_close_reply *));
+ */
+int
+__dbcl_env_close_ret(dbenv, flags, replyp)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_refresh(dbenv);
+ __os_free(NULL, dbenv);
+ if (replyp->status == 0 && ret != 0)
+ return (ret);
+ else
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_env_create_ret --
+ *	Post-RPC hook for env create: record the server-assigned
+ *	environment client id on success.
+ *
+ * PUBLIC: int __dbcl_env_create_ret
+ * PUBLIC: __P((DB_ENV *, long, __env_create_reply *));
+ */
+int
+__dbcl_env_create_ret(dbenv, timeout, replyp)
+ DB_ENV * dbenv;
+ long timeout;
+ __env_create_reply *replyp;
+{
+
+ COMPQUIET(timeout, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbenv->cl_id = replyp->envcl_id;
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_env_open_ret --
+ *	Post-RPC hook for DB_ENV->open: record the server id and, when
+ *	DB_INIT_TXN was requested, build the local (client-side only)
+ *	transaction manager used for bookkeeping.
+ *
+ * PUBLIC: int __dbcl_env_open_ret __P((DB_ENV *,
+ * PUBLIC: const char *, u_int32_t, int, __env_open_reply *));
+ */
+int
+__dbcl_env_open_ret(dbenv, home, flags, mode, replyp)
+ DB_ENV *dbenv;
+ const char *home;
+ u_int32_t flags;
+ int mode;
+ __env_open_reply *replyp;
+{
+ DB_TXNMGR *tmgrp;
+ int ret;
+
+ COMPQUIET(home, NULL);
+ COMPQUIET(mode, 0);
+
+ /*
+  * If error, return it.
+  */
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv->cl_id = replyp->envcl_id;
+ /*
+  * If the user requested transactions, then we have some
+  * local client-side setup to do also.
+  */
+ if (LF_ISSET(DB_INIT_TXN)) {
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(DB_TXNMGR), &tmgrp)) != 0)
+ return (ret);
+ TAILQ_INIT(&tmgrp->txn_chain);
+ tmgrp->dbenv = dbenv;
+ dbenv->tx_handle = tmgrp;
+ }
+
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_env_remove_ret --
+ *	Post-RPC hook for DB_ENV->remove: like env_close_ret, releases
+ *	all local state and the handle itself; a local error is reported
+ *	only when the server succeeded.
+ *
+ * PUBLIC: int __dbcl_env_remove_ret
+ * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, __env_remove_reply *));
+ */
+int
+__dbcl_env_remove_ret(dbenv, home, flags, replyp)
+ DB_ENV *dbenv;
+ const char *home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(home, NULL);
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_refresh(dbenv);
+ __os_free(NULL, dbenv);
+ if (replyp->status == 0 && ret != 0)
+ return (ret);
+ else
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_txn_abort_ret --
+ *	Post-RPC hook for txn abort: the server already aborted; free the
+ *	local transaction structure unconditionally.
+ *
+ * PUBLIC: int __dbcl_txn_abort_ret __P((DB_TXN *, __txn_abort_reply *));
+ */
+int
+__dbcl_txn_abort_ret(txnp, replyp)
+ DB_TXN *txnp;
+ __txn_abort_reply *replyp;
+{
+ __dbcl_txn_end(txnp);
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_txn_begin_ret --
+ *	Post-RPC hook for txn begin: allocate the local DB_TXN shadow,
+ *	link it under parent, and hand it back via txnpp.
+ *
+ * PUBLIC: int __dbcl_txn_begin_ret __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB_TXN **, u_int32_t, __txn_begin_reply *));
+ */
+int
+__dbcl_txn_begin_ret(envp, parent, txnpp, flags, replyp)
+ DB_ENV *envp;
+ DB_TXN *parent, **txnpp;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+{
+ DB_TXN *txn;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ if ((ret = __os_calloc(envp, 1, sizeof(DB_TXN), &txn)) != 0)
+ return (ret);
+ __dbcl_txn_setup(envp, txn, parent, replyp->txnidcl_id);
+ *txnpp = txn;
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_txn_commit_ret --
+ *	Post-RPC hook for txn commit: free the local transaction shadow
+ *	regardless of the server's outcome.
+ *
+ * PUBLIC: int __dbcl_txn_commit_ret
+ * PUBLIC: __P((DB_TXN *, u_int32_t, __txn_commit_reply *));
+ */
+int
+__dbcl_txn_commit_ret(txnp, flags, replyp)
+ DB_TXN *txnp;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+{
+ COMPQUIET(flags, 0);
+
+ __dbcl_txn_end(txnp);
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_txn_discard_ret --
+ *	Post-RPC hook for txn discard: free the local transaction shadow
+ *	regardless of the server's outcome.
+ *
+ * PUBLIC: int __dbcl_txn_discard_ret __P((DB_TXN *, u_int32_t,
+ * PUBLIC: __txn_discard_reply *));
+ */
+int
+__dbcl_txn_discard_ret(txnp, flags, replyp)
+ DB_TXN * txnp;
+ u_int32_t flags;
+ __txn_discard_reply *replyp;
+{
+ COMPQUIET(flags, 0);
+
+ __dbcl_txn_end(txnp);
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_txn_recover_ret --
+ *	Post-RPC hook for txn recover: rebuild local DB_TXN shadows for
+ *	each prepared-but-unresolved transaction returned by the server,
+ *	filling the caller's preplist in lockstep with the reply arrays.
+ *
+ * PUBLIC: int __dbcl_txn_recover_ret __P((DB_ENV *, DB_PREPLIST *, long,
+ * PUBLIC: long *, u_int32_t, __txn_recover_reply *));
+ */
+int
+__dbcl_txn_recover_ret(dbenv, preplist, count, retp, flags, replyp)
+ DB_ENV * dbenv;
+ DB_PREPLIST * preplist;
+ long count;
+ long * retp;
+ u_int32_t flags;
+ __txn_recover_reply *replyp;
+{
+ DB_PREPLIST *prep;
+ DB_TXN *txnarray, *txn;
+ u_int32_t i, *txnid;
+ int ret;
+ u_int8_t *gid;
+
+ COMPQUIET(flags, 0);
+ COMPQUIET(count, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ *retp = (long) replyp->retcount;
+
+ if (replyp->retcount == 0)
+ return (replyp->status);
+
+ /*
+  * NOTE(review): txnarray is one calloc'd block, but __dbcl_txn_end
+  * frees each DB_TXN individually -- freeing interior pointers of this
+  * array would be invalid.  Confirm how these shadows are released.
+  */
+ if ((ret = __os_calloc(dbenv, replyp->retcount, sizeof(DB_TXN),
+ &txnarray)) != 0)
+ return (ret);
+ /*
+  * We have a bunch of arrays that need to iterate in
+  * lockstep with each other.
+  */
+ i = 0;
+ txn = txnarray;
+ txnid = (u_int32_t *)replyp->txn.txn_val;
+ gid = (u_int8_t *)replyp->gid.gid_val;
+ prep = preplist;
+ while (i++ < replyp->retcount) {
+ __dbcl_txn_setup(dbenv, txn, NULL, *txnid);
+ prep->txn = txn;
+ memcpy(&prep->gid, gid, DB_XIDDATASIZE);
+ /*
+  * Now increment all our array pointers.
+  */
+ txn++;
+ gid += DB_XIDDATASIZE;
+ txnid++;
+ prep++;
+ }
+
+ return (0);
+}
+
+/*
+ * __dbcl_db_close_ret --
+ *	Post-RPC hook for DB->close: always release the local handle;
+ *	a server error takes precedence over a local cleanup error.
+ *
+ * PUBLIC: int __dbcl_db_close_ret __P((DB *, u_int32_t, __db_close_reply *));
+ */
+int
+__dbcl_db_close_ret(dbp, flags, replyp)
+ DB *dbp;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_dbclose_common(dbp);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ else
+ return (ret);
+}
+
+/*
+ * __dbcl_db_create_ret --
+ *	Post-RPC hook for db create: record the server-assigned db
+ *	client id on success.
+ *
+ * PUBLIC: int __dbcl_db_create_ret
+ * PUBLIC: __P((DB *, DB_ENV *, u_int32_t, __db_create_reply *));
+ */
+int
+__dbcl_db_create_ret(dbp, dbenv, flags, replyp)
+ DB * dbp;
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ __db_create_reply *replyp;
+{
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbp->cl_id = replyp->dbcl_id;
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_db_get_ret --
+ *	Post-RPC hook for DB->get: copy the returned key and data back
+ *	into the caller's DBTs via __dbcl_retcopy, reusing the db handle's
+ *	my_rkey/my_rdata scratch buffers.
+ *
+ * PUBLIC: int __dbcl_db_get_ret
+ * PUBLIC: __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_get_reply *));
+ */
+int
+__dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBT *key, *data;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldkey;
+
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbp->dbenv;
+
+ /* Remember the caller's original key buffer to detect allocation. */
+ oldkey = key->data;
+ ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
+ replyp->keydata.keydata_len, &dbp->my_rkey.data,
+ &dbp->my_rkey.ulen);
+ if (ret)
+ return (ret);
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbp->my_rdata.data,
+ &dbp->my_rdata.ulen);
+ /*
+  * If an error on copying 'data' and we allocated for 'key'
+  * free it before returning the error.
+  */
+ if (ret && oldkey != NULL)
+ __os_free(dbenv, key->data);
+ return (ret);
+}
+
+/*
+ * __dbcl_db_key_range_ret --
+ *	Post-RPC hook for DB->key_range: copy the three proportion values
+ *	out of the reply into the caller's DB_KEY_RANGE.
+ *
+ * PUBLIC: int __dbcl_db_key_range_ret __P((DB *, DB_TXN *,
+ * PUBLIC: DBT *, DB_KEY_RANGE *, u_int32_t, __db_key_range_reply *));
+ */
+int
+__dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBT *key;
+ DB_KEY_RANGE *range;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+{
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(key, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ range->less = replyp->less;
+ range->equal = replyp->equal;
+ range->greater = replyp->greater;
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_db_open_ret --
+ *	Post-RPC hook for DB->open: record the server id, database type,
+ *	byte order and flags on the local handle.
+ *
+ * PUBLIC: int __dbcl_db_open_ret __P((DB *, DB_TXN *, const char *,
+ * PUBLIC: const char *, DBTYPE, u_int32_t, int, __db_open_reply *));
+ */
+int
+__dbcl_db_open_ret(dbp, txn, name, subdb, type, flags, mode, replyp)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+ __db_open_reply *replyp;
+{
+ COMPQUIET(txn, NULL);
+ COMPQUIET(name, NULL);
+ COMPQUIET(subdb, NULL);
+ COMPQUIET(type, 0);
+ COMPQUIET(flags, 0);
+ COMPQUIET(mode, 0);
+
+ if (replyp->status == 0) {
+ dbp->cl_id = replyp->dbcl_id;
+ dbp->type = replyp->type;
+ /*
+  * We get back the database's byteorder on the server.
+  * Determine if our byteorder is the same or not by
+  * calling __db_set_lorder.
+  *
+  * XXX
+  * This MUST come before we set the flags because
+  * __db_set_lorder checks that it is called before
+  * the open flag is set.
+  */
+ (void)__db_set_lorder(dbp, replyp->lorder);
+
+ /*
+  * XXX
+  * This is only for Tcl which peeks at the dbp flags.
+  * When dbp->get_flags exists, this should go away.
+  */
+ dbp->flags = replyp->dbflags;
+ }
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_db_pget_ret --
+ *	Post-RPC hook for DB->pget: copy secondary key, primary key and
+ *	data back to the caller, freeing any buffers already allocated
+ *	for earlier DBTs if a later copy fails.
+ *
+ * PUBLIC: int __dbcl_db_pget_ret __P((DB *, DB_TXN *, DBT *, DBT *, DBT *,
+ * PUBLIC: u_int32_t, __db_pget_reply *));
+ */
+int
+__dbcl_db_pget_ret(dbp, txnp, skey, pkey, data, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+ __db_pget_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldskey, *oldpkey;
+
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbp->dbenv;
+
+ /* Track original buffers so error paths free only what we copied. */
+ oldskey = skey->data;
+ ret = __dbcl_retcopy(dbenv, skey, replyp->skeydata.skeydata_val,
+ replyp->skeydata.skeydata_len, &dbp->my_rskey.data,
+ &dbp->my_rskey.ulen);
+ if (ret)
+ return (ret);
+
+ oldpkey = pkey->data;
+ ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val,
+ replyp->pkeydata.pkeydata_len, &dbp->my_rkey.data,
+ &dbp->my_rkey.ulen);
+ if (ret && oldskey != NULL) {
+ __os_free(dbenv, skey->data);
+ return (ret);
+ }
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbp->my_rdata.data,
+ &dbp->my_rdata.ulen);
+ /*
+  * If an error on copying 'data' and we allocated for '*key'
+  * free it before returning the error.
+  */
+ if (ret) {
+ if (oldskey != NULL)
+ __os_free(dbenv, skey->data);
+ if (oldpkey != NULL)
+ __os_free(dbenv, pkey->data);
+ }
+ return (ret);
+}
+
+/*
+ * __dbcl_db_put_ret --
+ *	Post-RPC hook for DB->put: for DB_APPEND, write the record number
+ *	assigned by the server back into the caller's key DBT.
+ *
+ * PUBLIC: int __dbcl_db_put_ret
+ * PUBLIC: __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_put_reply *));
+ */
+int
+__dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBT *key, *data;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(data, NULL);
+
+ ret = replyp->status;
+ if (replyp->status == 0 && (flags == DB_APPEND))
+ *(db_recno_t *)key->data =
+ *(db_recno_t *)replyp->keydata.keydata_val;
+ return (ret);
+}
+
+/*
+ * __dbcl_db_remove_ret --
+ *	Post-RPC hook for DB->remove: the handle is dead either way, so
+ *	always release it; a server error takes precedence.
+ *
+ * PUBLIC: int __dbcl_db_remove_ret __P((DB *,
+ * PUBLIC: const char *, const char *, u_int32_t, __db_remove_reply *));
+ */
+int
+__dbcl_db_remove_ret(dbp, name, subdb, flags, replyp)
+ DB *dbp;
+ const char *name, *subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(name, 0);
+ COMPQUIET(subdb, 0);
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_dbclose_common(dbp);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ else
+ return (ret);
+}
+
+/*
+ * __dbcl_db_rename_ret --
+ *	Post-RPC hook for DB->rename: identical cleanup policy to
+ *	db_remove_ret -- always free the local handle, prefer the
+ *	server's error over a local one.
+ *
+ * PUBLIC: int __dbcl_db_rename_ret __P((DB *, const char *,
+ * PUBLIC: const char *, const char *, u_int32_t, __db_rename_reply *));
+ */
+int
+__dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp)
+ DB *dbp;
+ const char *name, *subdb, *newname;
+ u_int32_t flags;
+ __db_rename_reply *replyp;
+{
+ int ret;
+
+ COMPQUIET(name, 0);
+ COMPQUIET(subdb, 0);
+ COMPQUIET(newname, 0);
+ COMPQUIET(flags, 0);
+
+ ret = __dbcl_dbclose_common(dbp);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ else
+ return (ret);
+}
+
+/*
+ * __dbcl_db_stat_ret --
+ *	Post-RPC hook for DB->stat: the reply carries the stat struct as a
+ *	flat array of u_int32_t words; user-malloc a copy and hand it back
+ *	through sp.  Caller frees the returned buffer.
+ *
+ * PUBLIC: int __dbcl_db_stat_ret
+ * PUBLIC: __P((DB *, void *, u_int32_t, __db_stat_reply *));
+ */
+int
+__dbcl_db_stat_ret(dbp, sp, flags, replyp)
+ DB *dbp;
+ void *sp;
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+{
+ int len, ret;
+ u_int32_t i, *q, *p, *retsp;
+
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0 || sp == NULL)
+ return (replyp->status);
+
+ /* NOTE(review): len is int; assumes stats_len * 4 fits -- confirm. */
+ len = replyp->stats.stats_len * sizeof(u_int32_t);
+ if ((ret = __os_umalloc(dbp->dbenv, len, &retsp)) != 0)
+ return (ret);
+ for (i = 0, q = retsp, p = (u_int32_t *)replyp->stats.stats_val;
+ i < replyp->stats.stats_len; i++, q++, p++)
+ *q = *p;
+ *(u_int32_t **)sp = retsp;
+ return (0);
+}
+
+/*
+ * __dbcl_db_truncate_ret --
+ *	Post-RPC hook for DB->truncate: copy the discarded-record count
+ *	out of the reply.
+ *
+ * PUBLIC: int __dbcl_db_truncate_ret __P((DB *, DB_TXN *, u_int32_t *,
+ * PUBLIC: u_int32_t, __db_truncate_reply *));
+ */
+int
+__dbcl_db_truncate_ret(dbp, txnp, countp, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ u_int32_t *countp, flags;
+ __db_truncate_reply *replyp;
+{
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ *countp = replyp->count;
+
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_db_cursor_ret --
+ *	Post-RPC hook for DB->cursor: build the local cursor shadow for
+ *	the server-assigned cursor id.
+ *
+ * PUBLIC: int __dbcl_db_cursor_ret
+ * PUBLIC: __P((DB *, DB_TXN *, DBC **, u_int32_t, __db_cursor_reply *));
+ */
+int
+__dbcl_db_cursor_ret(dbp, txnp, dbcp, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ DBC **dbcp;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+{
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcp));
+}
+
+/*
+ * __dbcl_db_join_ret --
+ *	Post-RPC hook for DB->join: a join cursor is tracked locally as
+ *	an ordinary cursor.
+ *
+ * PUBLIC: int __dbcl_db_join_ret
+ * PUBLIC: __P((DB *, DBC **, DBC **, u_int32_t, __db_join_reply *));
+ */
+int
+__dbcl_db_join_ret(dbp, curs, dbcp, flags, replyp)
+ DB *dbp;
+ DBC **curs, **dbcp;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+{
+ COMPQUIET(curs, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ /*
+  * We set this up as a normal cursor. We do not need
+  * to treat a join cursor any differently than a normal
+  * cursor, even though DB itself must. We only need the
+  * client-side cursor/db relationship to know what cursors
+  * are open in the db, and to store their ID. Nothing else.
+  */
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcp));
+}
+
+/*
+ * __dbcl_dbc_close_ret --
+ *	Post-RPC hook for DBC->c_close: recycle the local cursor shadow
+ *	onto its db's free queue.
+ *
+ * PUBLIC: int __dbcl_dbc_close_ret __P((DBC *, __dbc_close_reply *));
+ */
+int
+__dbcl_dbc_close_ret(dbc, replyp)
+ DBC *dbc;
+ __dbc_close_reply *replyp;
+{
+ __dbcl_c_refresh(dbc);
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_dbc_count_ret --
+ *	Post-RPC hook for DBC->c_count: copy the duplicate count out of
+ *	the reply.
+ *
+ * PUBLIC: int __dbcl_dbc_count_ret
+ * PUBLIC: __P((DBC *, db_recno_t *, u_int32_t, __dbc_count_reply *));
+ */
+int
+__dbcl_dbc_count_ret(dbc, countp, flags, replyp)
+ DBC *dbc;
+ db_recno_t *countp;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+{
+ COMPQUIET(dbc, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ *countp = replyp->dupcount;
+
+ return (replyp->status);
+}
+
+/*
+ * __dbcl_dbc_dup_ret --
+ *	Post-RPC hook for DBC->c_dup: build a local shadow for the new
+ *	duplicated cursor id.
+ *
+ * PUBLIC: int __dbcl_dbc_dup_ret
+ * PUBLIC: __P((DBC *, DBC **, u_int32_t, __dbc_dup_reply *));
+ */
+int
+__dbcl_dbc_dup_ret(dbc, dbcp, flags, replyp)
+ DBC *dbc, **dbcp;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+{
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbc->dbp, dbcp));
+}
+
+/*
+ * __dbcl_dbc_get_ret --
+ *	Post-RPC hook for DBC->c_get: copy the returned key/data back to
+ *	the caller using the cursor's my_rkey/my_rdata scratch buffers.
+ *
+ * PUBLIC: int __dbcl_dbc_get_ret
+ * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_get_reply *));
+ */
+int
+__dbcl_dbc_get_ret(dbc, key, data, flags, replyp)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldkey;
+
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbc->dbp->dbenv;
+ /* Remember the caller's original key buffer to detect allocation. */
+ oldkey = key->data;
+ ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
+ replyp->keydata.keydata_len, &dbc->my_rkey.data,
+ &dbc->my_rkey.ulen);
+ if (ret)
+ return (ret);
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen);
+
+ /*
+  * If an error on copying 'data' and we allocated for 'key'
+  * free it before returning the error.
+  */
+ if (ret && oldkey != NULL)
+ __os_free(dbenv, key->data);
+ return (ret);
+}
+
+/*
+ * __dbcl_dbc_pget_ret --
+ *	Post-RPC hook for DBC->c_pget: copy secondary key, primary key
+ *	and data back to the caller, freeing buffers allocated for
+ *	earlier DBTs if a later copy fails.
+ *
+ * PUBLIC: int __dbcl_dbc_pget_ret __P((DBC *, DBT *, DBT *, DBT *, u_int32_t,
+ * PUBLIC: __dbc_pget_reply *));
+ */
+int
+__dbcl_dbc_pget_ret(dbc, skey, pkey, data, flags, replyp)
+ DBC * dbc;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+ __dbc_pget_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldskey, *oldpkey;
+
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbc->dbp->dbenv;
+
+ /* Track original buffers so error paths free only what we copied. */
+ oldskey = skey->data;
+ ret = __dbcl_retcopy(dbenv, skey, replyp->skeydata.skeydata_val,
+ replyp->skeydata.skeydata_len, &dbc->my_rskey.data,
+ &dbc->my_rskey.ulen);
+ if (ret)
+ return (ret);
+
+ oldpkey = pkey->data;
+ ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val,
+ replyp->pkeydata.pkeydata_len, &dbc->my_rkey.data,
+ &dbc->my_rkey.ulen);
+ if (ret && oldskey != NULL) {
+ __os_free(dbenv, skey->data);
+ return (ret);
+ }
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen);
+ /*
+  * If an error on copying 'data' and we allocated for '*key'
+  * free it before returning the error.
+  */
+ if (ret) {
+ if (oldskey != NULL)
+ __os_free(dbenv, skey->data);
+ if (oldpkey != NULL)
+ __os_free(dbenv, pkey->data);
+ }
+ return (ret);
+}
+
+/*
+ * __dbcl_dbc_put_ret --
+ *	Post-RPC hook for DBC->c_put: for recno DB_AFTER/DB_BEFORE puts,
+ *	write the server-assigned record number back into the key DBT.
+ *	(The second status check is redundant after the early return.)
+ *
+ * PUBLIC: int __dbcl_dbc_put_ret
+ * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_put_reply *));
+ */
+int
+__dbcl_dbc_put_ret(dbc, key, data, flags, replyp)
+ DBC *dbc;
+ DBT *key, *data;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+{
+ COMPQUIET(data, NULL);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ if (replyp->status == 0 && dbc->dbp->type == DB_RECNO &&
+ (flags == DB_AFTER || flags == DB_BEFORE))
+ *(db_recno_t *)key->data =
+ *(db_recno_t *)replyp->keydata.keydata_val;
+ return (replyp->status);
+}
+#endif /* HAVE_RPC */
diff --git a/storage/bdb/rpc_server/c/db_server_proc.c.in b/storage/bdb/rpc_server/c/db_server_proc.c.in
new file mode 100644
index 00000000000..d5d1f49508a
--- /dev/null
+++ b/storage/bdb/rpc_server/c/db_server_proc.c.in
@@ -0,0 +1,2500 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef lint
+static const char revid[] = "$Id: db_server_proc.c,v 1.92 2002/07/29 15:21:20 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+
+/* BEGIN __env_cachesize_proc */
+/*
+ * PUBLIC: void __env_cachesize_proc __P((long, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, __env_cachesize_reply *));
+ */
+void
+__env_cachesize_proc(dbenvcl_id, gbytes, bytes,
+ ncache, replyp)
+ long dbenvcl_id;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ u_int32_t ncache;
+ __env_cachesize_reply *replyp;
+/* END __env_cachesize_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_cachesize(dbenv, gbytes, bytes, ncache);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_close_proc */
+/*
+ * PUBLIC: void __env_close_proc __P((long, u_int32_t, __env_close_reply *));
+ */
+void
+__env_close_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+/* END __env_close_proc */
+{
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ replyp->status = __dbenv_close_int(dbenvcl_id, flags, 0);
+ return;
+}
+
+/* BEGIN __env_create_proc */
+/*
+ * PUBLIC: void __env_create_proc __P((u_int32_t, __env_create_reply *));
+ */
+void
+__env_create_proc(timeout, replyp)
+ u_int32_t timeout;
+ __env_create_reply *replyp;
+/* END __env_create_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *ctp;
+ int ret;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+ if ((ret = db_env_create(&dbenv, 0)) == 0) {
+ ctp->ct_envp = dbenv;
+ ctp->ct_type = CT_ENV;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = ctp;
+ __dbsrv_settimeout(ctp, timeout);
+ __dbsrv_active(ctp);
+ replyp->envcl_id = ctp->ct_id;
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbremove_proc */
+/*
+ * PUBLIC: void __env_dbremove_proc __P((long, long, char *, char *, u_int32_t,
+ * PUBLIC: __env_dbremove_reply *));
+ */
+void
+__env_dbremove_proc(dbenvcl_id, txnpcl_id, name,
+ subdb, flags, replyp)
+ long dbenvcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t flags;
+ __env_dbremove_reply *replyp;
+/* END __env_dbremove_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbremove(dbenv, txnp, name, subdb, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbrename_proc */
+/*
+ * PUBLIC: void __env_dbrename_proc __P((long, long, char *, char *, char *,
+ * PUBLIC: u_int32_t, __env_dbrename_reply *));
+ */
+void
+__env_dbrename_proc(dbenvcl_id, txnpcl_id, name,
+ subdb, newname, flags, replyp)
+ long dbenvcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int32_t flags;
+ __env_dbrename_reply *replyp;
+/* END __env_dbrename_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbrename(dbenv, txnp, name, subdb, newname, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_encrypt_proc */
+/*
+ * PUBLIC: void __env_encrypt_proc __P((long, char *, u_int32_t,
+ * PUBLIC: __env_encrypt_reply *));
+ */
+void
+__env_encrypt_proc(dbenvcl_id, passwd, flags, replyp)
+ long dbenvcl_id;
+ char *passwd;
+ u_int32_t flags;
+ __env_encrypt_reply *replyp;
+/* END __env_encrypt_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_encrypt(dbenv, passwd, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_flags_proc */
+/*
+ * PUBLIC: void __env_flags_proc __P((long, u_int32_t, u_int32_t,
+ * PUBLIC: __env_flags_reply *));
+ */
+void
+__env_flags_proc(dbenvcl_id, flags, onoff, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ u_int32_t onoff;
+ __env_flags_reply *replyp;
+/* END __env_flags_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_flags(dbenv, flags, onoff);
+ if (onoff)
+ dbenv_ctp->ct_envdp.onflags = flags;
+ else
+ dbenv_ctp->ct_envdp.offflags = flags;
+
+ replyp->status = ret;
+ return;
+}
+/* BEGIN __env_open_proc */
+/*
+ * PUBLIC: void __env_open_proc __P((long, char *, u_int32_t, u_int32_t,
+ * PUBLIC: __env_open_reply *));
+ */
+void
+__env_open_proc(dbenvcl_id, home, flags,
+ mode, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ u_int32_t mode;
+ __env_open_reply *replyp;
+/* END __env_open_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp, *new_ctp;
+ u_int32_t newflags, shareflags;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ ret = DB_NOSERVER_HOME;
+ goto out;
+ }
+
+ /*
+ * If they are using locking do deadlock detection for them,
+ * internally.
+ */
+ if ((flags & DB_INIT_LOCK) &&
+ (ret = dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT)) != 0)
+ goto out;
+
+ if (__dbsrv_verbose) {
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, fullhome->home);
+ }
+
+ /*
+ * Mask off flags we ignore
+ */
+ newflags = (flags & ~DB_SERVER_FLAGMASK);
+ shareflags = (newflags & DB_SERVER_ENVFLAGS);
+ /*
+ * Check now whether we can share a handle for this env.
+ */
+ replyp->envcl_id = dbenvcl_id;
+ if ((new_ctp = __dbsrv_shareenv(dbenv_ctp, fullhome, shareflags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing env ID %ld\n", new_ctp->ct_id);
+ replyp->envcl_id = new_ctp->ct_id;
+ ret = __dbenv_close_int(dbenvcl_id, 0, 0);
+ } else {
+ ret = dbenv->open(dbenv, fullhome->home, newflags, mode);
+ dbenv_ctp->ct_envdp.home = fullhome;
+ dbenv_ctp->ct_envdp.envflags = shareflags;
+ }
+out: replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_remove_proc */
+/*
+ * PUBLIC: void __env_remove_proc __P((long, char *, u_int32_t,
+ * PUBLIC: __env_remove_reply *));
+ */
+void
+__env_remove_proc(dbenvcl_id, home, flags, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+/* END __env_remove_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ replyp->status = DB_NOSERVER_HOME;
+ return;
+ }
+
+ ret = dbenv->remove(dbenv, fullhome->home, flags);
+ __dbdel_ctp(dbenv_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_abort_proc */
+/*
+ * PUBLIC: void __txn_abort_proc __P((long, __txn_abort_reply *));
+ */
+void
+__txn_abort_proc(txnpcl_id, replyp)
+ long txnpcl_id;
+ __txn_abort_reply *replyp;
+/* END __txn_abort_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->abort(txnp);
+ __dbdel_ctp(txnp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_begin_proc */
+/*
+ * PUBLIC: void __txn_begin_proc __P((long, long, u_int32_t,
+ * PUBLIC: __txn_begin_reply *));
+ */
+void
+__txn_begin_proc(dbenvcl_id, parentcl_id,
+ flags, replyp)
+ long dbenvcl_id;
+ long parentcl_id;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+/* END __txn_begin_proc */
+{
+ DB_ENV *dbenv;
+ DB_TXN *parent, *txnp;
+ ct_entry *ctp, *dbenv_ctp, *parent_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ parent_ctp = NULL;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ if (parentcl_id != 0) {
+ ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
+ parent = (DB_TXN *)parent_ctp->ct_anyp;
+ ctp->ct_activep = parent_ctp->ct_activep;
+ } else
+ parent = NULL;
+
+ ret = dbenv->txn_begin(dbenv, parent, &txnp, flags);
+ if (ret == 0) {
+ ctp->ct_txnp = txnp;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = parent_ctp;
+ ctp->ct_envparent = dbenv_ctp;
+ replyp->txnidcl_id = ctp->ct_id;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_commit_proc */
+/*
+ * PUBLIC: void __txn_commit_proc __P((long, u_int32_t,
+ * PUBLIC: __txn_commit_reply *));
+ */
+void
+__txn_commit_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+/* END __txn_commit_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->commit(txnp, flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_discard_proc */
+/*
+ * PUBLIC: void __txn_discard_proc __P((long, u_int32_t,
+ * PUBLIC: __txn_discard_reply *));
+ */
+void
+__txn_discard_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_discard_reply *replyp;
+/* END __txn_discard_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->discard(txnp, flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_prepare_proc */
+/*
+ * PUBLIC: void __txn_prepare_proc __P((long, u_int8_t *,
+ * PUBLIC: __txn_prepare_reply *));
+ */
+void
+__txn_prepare_proc(txnpcl_id, gid, replyp)
+ long txnpcl_id;
+ u_int8_t *gid;
+ __txn_prepare_reply *replyp;
+/* END __txn_prepare_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->prepare(txnp, gid);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_recover_proc */
+/*
+ * PUBLIC: void __txn_recover_proc __P((long, u_int32_t, u_int32_t,
+ * PUBLIC: __txn_recover_reply *, int *));
+ */
+void
+__txn_recover_proc(dbenvcl_id, count,
+ flags, replyp, freep)
+ long dbenvcl_id;
+ u_int32_t count;
+ u_int32_t flags;
+ __txn_recover_reply *replyp;
+ int * freep;
+/* END __txn_recover_proc */
+{
+ DB_ENV *dbenv;
+ DB_PREPLIST *dbprep, *p;
+ ct_entry *dbenv_ctp, *ctp;
+ long erri, i, retcount;
+ u_int32_t *txnidp;
+ int ret;
+ u_int8_t *gid;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ dbprep = NULL;
+ *freep = 0;
+
+ if ((ret =
+ __os_malloc(dbenv, count * sizeof(DB_PREPLIST), &dbprep)) != 0)
+ goto out;
+ if ((ret =
+ dbenv->txn_recover(dbenv, dbprep, count, &retcount, flags)) != 0)
+ goto out;
+ /*
+ * If there is nothing, success, but it's easy.
+ */
+ replyp->retcount = retcount;
+ if (retcount == 0) {
+ replyp->txn.txn_val = NULL;
+ replyp->txn.txn_len = 0;
+ replyp->gid.gid_val = NULL;
+ replyp->gid.gid_len = 0;
+ }
+
+ /*
+ * We have our txn list. Now we need to allocate the space for
+ * the txn ID array and the GID array and set them up.
+ */
+ if ((ret = __os_calloc(dbenv, retcount, sizeof(u_int32_t),
+ &replyp->txn.txn_val)) != 0)
+ goto out;
+ replyp->txn.txn_len = retcount * sizeof(u_int32_t);
+ if ((ret = __os_calloc(dbenv, retcount, DB_XIDDATASIZE,
+ &replyp->gid.gid_val)) != 0) {
+ __os_free(dbenv, replyp->txn.txn_val);
+ goto out;
+ }
+ replyp->gid.gid_len = retcount * DB_XIDDATASIZE;
+
+ /*
+ * Now walk through our results, creating parallel arrays
+ * to send back. For each entry we need to create a new
+ * txn ctp and then fill in the array info.
+ */
+ i = 0;
+ p = dbprep;
+ gid = replyp->gid.gid_val;
+ txnidp = replyp->txn.txn_val;
+ while (i++ < retcount) {
+ ctp = new_ct_ent(&ret);
+ if (ret != 0) {
+ i--;
+ goto out2;
+ }
+ ctp->ct_txnp = p->txn;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = dbenv_ctp;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+
+ *txnidp = ctp->ct_id;
+ memcpy(gid, p->gid, DB_XIDDATASIZE);
+
+ p++;
+ txnidp++;
+ gid += DB_XIDDATASIZE;
+ }
+ /*
+ * If we get here, we have success and we have to set freep
+ * so it'll get properly freed next time.
+ */
+ *freep = 1;
+out:
+ if (dbprep != NULL)
+ __os_free(dbenv, dbprep);
+ replyp->status = ret;
+ return;
+out2:
+ /*
+ * We had an error in the middle of creating our new txn
+ * ct entries. We have to unwind all that we have done. Ugh.
+ */
+ for (txnidp = replyp->txn.txn_val, erri = 0;
+ erri < i; erri++, txnidp++) {
+ ctp = get_tableent(*txnidp);
+ __dbclear_ctp(ctp);
+ }
+ __os_free(dbenv, replyp->txn.txn_val);
+ __os_free(dbenv, replyp->gid.gid_val);
+ __os_free(dbenv, dbprep);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_maxkey_proc */
+/*
+ * PUBLIC: void __db_bt_maxkey_proc __P((long, u_int32_t,
+ * PUBLIC: __db_bt_maxkey_reply *));
+ */
+void
+__db_bt_maxkey_proc(dbpcl_id, maxkey, replyp)
+ long dbpcl_id;
+ u_int32_t maxkey;
+ __db_bt_maxkey_reply *replyp;
+/* END __db_bt_maxkey_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_maxkey(dbp, maxkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_associate_proc */
+/*
+ * PUBLIC: void __db_associate_proc __P((long, long, long, u_int32_t,
+ * PUBLIC: __db_associate_reply *));
+ */
+void
+__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ long sdbpcl_id;
+ u_int32_t flags;
+ __db_associate_reply *replyp;
+/* END __db_associate_proc */
+{
+ DB *dbp, *sdbp;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *sdbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(sdbp_ctp, sdbpcl_id, CT_DB);
+ sdbp = (DB *)sdbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+
+ /*
+ * We do not support DB_CREATE for associate. Users
+ * can only access secondary indices on a read-only basis,
+ * so whatever they are looking for needs to be there already.
+ */
+ if (flags != 0)
+ ret = EINVAL;
+ else
+ ret = dbp->associate(dbp, txnp, sdbp, NULL, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_minkey_proc */
+/*
+ * PUBLIC: void __db_bt_minkey_proc __P((long, u_int32_t,
+ * PUBLIC: __db_bt_minkey_reply *));
+ */
+void
+__db_bt_minkey_proc(dbpcl_id, minkey, replyp)
+ long dbpcl_id;
+ u_int32_t minkey;
+ __db_bt_minkey_reply *replyp;
+/* END __db_bt_minkey_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_minkey(dbp, minkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_close_proc */
+/*
+ * PUBLIC: void __db_close_proc __P((long, u_int32_t, __db_close_reply *));
+ */
+void
+__db_close_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+/* END __db_close_proc */
+{
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ replyp->status = __db_close_int(dbpcl_id, flags);
+ return;
+}
+
+/* BEGIN __db_create_proc */
+/*
+ * PUBLIC: void __db_create_proc __P((long, u_int32_t, __db_create_reply *));
+ */
+void
+__db_create_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __db_create_reply *replyp;
+/* END __db_create_proc */
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp, *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ dbp_ctp = new_ct_ent(&replyp->status);
+ if (dbp_ctp == NULL)
+ return ;
+ /*
+ * We actually require env's for databases. The client should
+ * have caught it, but just in case.
+ */
+ DB_ASSERT(dbenv != NULL);
+ if ((ret = db_create(&dbp, dbenv, flags)) == 0) {
+ dbp_ctp->ct_dbp = dbp;
+ dbp_ctp->ct_type = CT_DB;
+ dbp_ctp->ct_parent = dbenv_ctp;
+ dbp_ctp->ct_envparent = dbenv_ctp;
+ replyp->dbcl_id = dbp_ctp->ct_id;
+ } else
+ __dbclear_ctp(dbp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_del_proc */
+/*
+ * PUBLIC: void __db_del_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __db_del_reply *));
+ */
+void
+__db_del_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_del_reply *replyp;
+/* END __db_del_proc */
+{
+ DB *dbp;
+ DBT key;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ memset(&key, 0, sizeof(key));
+
+ /* Set up key DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ key.flags = keyflags;
+ key.size = keysize;
+ key.data = keydata;
+
+ ret = dbp->del(dbp, txnp, &key, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_encrypt_proc */
+/*
+ * PUBLIC: void __db_encrypt_proc __P((long, char *, u_int32_t,
+ * PUBLIC: __db_encrypt_reply *));
+ */
+void
+__db_encrypt_proc(dbpcl_id, passwd, flags, replyp)
+ long dbpcl_id;
+ char *passwd;
+ u_int32_t flags;
+ __db_encrypt_reply *replyp;
+/* END __db_encrypt_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_encrypt(dbp, passwd, flags);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_extentsize_proc */
+/*
+ * PUBLIC: void __db_extentsize_proc __P((long, u_int32_t,
+ * PUBLIC: __db_extentsize_reply *));
+ */
+void
+__db_extentsize_proc(dbpcl_id, extentsize, replyp)
+ long dbpcl_id;
+ u_int32_t extentsize;
+ __db_extentsize_reply *replyp;
+/* END __db_extentsize_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_q_extentsize(dbp, extentsize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_flags_proc */
+/*
+ * PUBLIC: void __db_flags_proc __P((long, u_int32_t, __db_flags_reply *));
+ */
+void
+__db_flags_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_flags_reply *replyp;
+/* END __db_flags_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_flags(dbp, flags);
+ dbp_ctp->ct_dbdp.setflags |= flags;
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_get_proc */
+/*
+ * PUBLIC: void __db_get_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __db_get_reply *, int *));
+ */
+void
+__db_get_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, datadlen, datadoff, dataulen,
+ dataflags, datadata, datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+ int * freep;
+/* END __db_get_proc */
+{
+ DB *dbp;
+ DBT key, data;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, bulk_alloc, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ bulk_alloc = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.ulen = keyulen;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.doff = datadoff;
+ data.ulen = dataulen;
+ /*
+ * Ignore memory related flags on server.
+ */
+ data.size = datasize;
+ data.data = datadata;
+ if (flags & DB_MULTIPLE) {
+ if (data.data == 0) {
+ ret = __os_umalloc(dbp->dbenv,
+ data.ulen, &data.data);
+ if (ret != 0)
+ goto err;
+ bulk_alloc = 1;
+ }
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->get(dbp, txnp, &key, &data, flags);
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.data == keydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ key.size, &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, key.data);
+ __os_ufree(dbp->dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbp->dbenv,
+ data.size, &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, key.data);
+ __os_ufree(dbp->dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbp->dbenv,
+ replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbp->dbenv, data.data);
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_ffactor_proc */
+/*
+ * PUBLIC: void __db_h_ffactor_proc __P((long, u_int32_t,
+ * PUBLIC: __db_h_ffactor_reply *));
+ */
+void
+__db_h_ffactor_proc(dbpcl_id, ffactor, replyp)
+ long dbpcl_id;
+ u_int32_t ffactor;
+ __db_h_ffactor_reply *replyp;
+/* END __db_h_ffactor_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_ffactor(dbp, ffactor);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_nelem_proc */
+/*
+ * PUBLIC: void __db_h_nelem_proc __P((long, u_int32_t,
+ * PUBLIC: __db_h_nelem_reply *));
+ */
+void
+__db_h_nelem_proc(dbpcl_id, nelem, replyp)
+ long dbpcl_id;
+ u_int32_t nelem;
+ __db_h_nelem_reply *replyp;
+/* END __db_h_nelem_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_nelem(dbp, nelem);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_key_range_proc */
+/*
+ * PUBLIC: void __db_key_range_proc __P((long, long, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_key_range_reply *));
+ */
+void
+__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+/* END __db_key_range_proc */
+{
+ DB *dbp;
+ DBT key;
+ DB_KEY_RANGE range;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ memset(&key, 0, sizeof(key));
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ key.size = keysize;
+ key.data = keydata;
+ key.flags = keyflags;
+
+ ret = dbp->key_range(dbp, txnp, &key, &range, flags);
+
+ replyp->status = ret;
+ replyp->less = range.less;
+ replyp->equal = range.equal;
+ replyp->greater = range.greater;
+ return;
+}
+
+/* BEGIN __db_lorder_proc */
+/*
+ * PUBLIC: void __db_lorder_proc __P((long, u_int32_t, __db_lorder_reply *));
+ */
+void
+__db_lorder_proc(dbpcl_id, lorder, replyp)
+ long dbpcl_id;
+ u_int32_t lorder;
+ __db_lorder_reply *replyp;
+/* END __db_lorder_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_lorder(dbp, lorder);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_open_proc */
+/*
+ * PUBLIC: void __db_open_proc __P((long, long, char *, char *, u_int32_t,
+ * PUBLIC: u_int32_t, u_int32_t, __db_open_reply *));
+ */
+void
+__db_open_proc(dbpcl_id, txnpcl_id, name,
+ subdb, type, flags, mode, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t type;
+ u_int32_t flags;
+ u_int32_t mode;
+ __db_open_reply *replyp;
+/* END __db_open_proc */
+{
+ DB *dbp;
+ DB_TXN *txnp;
+ DBTYPE dbtype;
+ ct_entry *dbp_ctp, *new_ctp, *txnp_ctp;
+ int isswapped, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ replyp->dbcl_id = dbpcl_id;
+ if ((new_ctp = __dbsrv_sharedb(dbp_ctp, name, subdb, type, flags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing db ID %ld\n", new_ctp->ct_id);
+ replyp->dbcl_id = new_ctp->ct_id;
+ ret = __db_close_int(dbpcl_id, 0);
+ goto out;
+ }
+ ret = dbp->open(dbp, txnp, name, subdb, (DBTYPE)type, flags, mode);
+ if (ret == 0) {
+ (void)dbp->get_type(dbp, &dbtype);
+ replyp->type = dbtype;
+ /* XXX
+ * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send
+ * this dbp's flags back.
+ */
+ replyp->dbflags = (int) dbp->flags;
+ /*
+ * We need to determine the byte order of the database
+ * and send it back to the client. Determine it by
+ * the server's native order and the swapped value of
+ * the DB itself.
+ */
+ (void)dbp->get_byteswapped(dbp, &isswapped);
+ if (__db_byteorder(NULL, 1234) == 0) {
+ if (isswapped == 0)
+ replyp->lorder = 1234;
+ else
+ replyp->lorder = 4321;
+ } else {
+ if (isswapped == 0)
+ replyp->lorder = 4321;
+ else
+ replyp->lorder = 1234;
+ }
+ dbp_ctp->ct_dbdp.type = dbtype;
+ dbp_ctp->ct_dbdp.dbflags = LF_ISSET(DB_SERVER_DBFLAGS);
+ if (name == NULL)
+ dbp_ctp->ct_dbdp.db = NULL;
+ else if ((ret = __os_strdup(dbp->dbenv, name,
+ &dbp_ctp->ct_dbdp.db)) != 0)
+ goto out;
+ if (subdb == NULL)
+ dbp_ctp->ct_dbdp.subdb = NULL;
+ else if ((ret = __os_strdup(dbp->dbenv, subdb,
+ &dbp_ctp->ct_dbdp.subdb)) != 0)
+ goto out;
+ }
+out:
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pagesize_proc */
+/*
+ * PUBLIC: void __db_pagesize_proc __P((long, u_int32_t,
+ * PUBLIC: __db_pagesize_reply *));
+ */
+void
+__db_pagesize_proc(dbpcl_id, pagesize, replyp)
+ long dbpcl_id;
+ u_int32_t pagesize;
+ __db_pagesize_reply *replyp;
+/* END __db_pagesize_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_pagesize(dbp, pagesize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pget_proc */
+/*
+ * PUBLIC: void __db_pget_proc __P((long, long, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __db_pget_reply *, int *));
+ */
+void
+__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen,
+ skeydoff, skeyulen, skeyflags, skeydata,
+ skeysize, pkeydlen, pkeydoff, pkeyulen,
+ pkeyflags, pkeydata, pkeysize, datadlen,
+ datadoff, dataulen, dataflags, datadata,
+ datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t skeydlen;
+ u_int32_t skeydoff;
+ u_int32_t skeyulen;
+ u_int32_t skeyflags;
+ void *skeydata;
+ u_int32_t skeysize;
+ u_int32_t pkeydlen;
+ u_int32_t pkeydoff;
+ u_int32_t pkeyulen;
+ u_int32_t pkeyflags;
+ void *pkeydata;
+ u_int32_t pkeysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_pget_reply *replyp;
+ int * freep;
+/* END __db_pget_proc */
+{
+ DB *dbp;
+ DBT skey, pkey, data;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ memset(&skey, 0, sizeof(skey));
+ memset(&pkey, 0, sizeof(pkey));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data DBT */
+ skey.flags = DB_DBT_MALLOC;
+ skey.dlen = skeydlen;
+ skey.ulen = skeyulen;
+ skey.doff = skeydoff;
+ if (skeyflags & DB_DBT_PARTIAL)
+ skey.flags |= DB_DBT_PARTIAL;
+ skey.size = skeysize;
+ skey.data = skeydata;
+
+ pkey.flags = DB_DBT_MALLOC;
+ pkey.dlen = pkeydlen;
+ pkey.ulen = pkeyulen;
+ pkey.doff = pkeydoff;
+ if (pkeyflags & DB_DBT_PARTIAL)
+ pkey.flags |= DB_DBT_PARTIAL;
+ pkey.size = pkeysize;
+ pkey.data = pkeydata;
+
+ data.flags = DB_DBT_MALLOC;
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->pget(dbp, txnp, &skey, &pkey, &data, flags);
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.data == skeydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ skey.size, &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, skey.data);
+ __os_ufree(dbp->dbenv, pkey.data);
+ __os_ufree(dbp->dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.data,
+ skey.size);
+ } else
+ replyp->skeydata.skeydata_val = skey.data;
+
+ replyp->skeydata.skeydata_len = skey.size;
+
+ /*
+ * Primary key
+ */
+ if (pkey.data == pkeydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ pkey.size, &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, skey.data);
+ __os_ufree(dbp->dbenv, pkey.data);
+ __os_ufree(dbp->dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbp->dbenv,
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.data,
+ pkey.size);
+ } else
+ replyp->pkeydata.pkeydata_val = pkey.data;
+ replyp->pkeydata.pkeydata_len = pkey.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbp->dbenv,
+ data.size, &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, skey.data);
+ __os_ufree(dbp->dbenv, pkey.data);
+ __os_ufree(dbp->dbenv, data.data);
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbp->dbenv,
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbp->dbenv,
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_put_proc */
+/*
+ * PUBLIC: void __db_put_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __db_put_reply *, int *));
+ */
+/*
+ * __db_put_proc --
+ *	RPC server stub for DB->put.  Rebuilds the key/data DBTs from the
+ *	unmarshalled request fields, runs the put, and fills in the reply.
+ *	*freep is set to 1 only when the reply holds memory that the RPC
+ *	layer must xdr_free after the reply has been transmitted.
+ */
+void
+__db_put_proc(dbpcl_id, txnpcl_id, keydlen,
+	keydoff, keyulen, keyflags, keydata,
+	keysize, datadlen, datadoff, dataulen,
+	dataflags, datadata, datasize, flags, replyp, freep)
+	long dbpcl_id;
+	long txnpcl_id;
+	u_int32_t keydlen;
+	u_int32_t keydoff;
+	u_int32_t keyulen;
+	u_int32_t keyflags;
+	void *keydata;
+	u_int32_t keysize;
+	u_int32_t datadlen;
+	u_int32_t datadoff;
+	u_int32_t dataulen;
+	u_int32_t dataflags;
+	void *datadata;
+	u_int32_t datasize;
+	u_int32_t flags;
+	__db_put_reply *replyp;
+	int * freep;
+/* END __db_put_proc */
+{
+	DB *dbp;
+	DBT key, data;
+	DB_TXN *txnp;
+	ct_entry *dbp_ctp, *txnp_ctp;
+	int ret;
+
+	/* Map the client-side ids back to server-side handles. */
+	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)dbp_ctp->ct_anyp;
+	/* A zero txn id means the operation is non-transactional. */
+	if (txnpcl_id != 0) {
+		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+	} else
+		txnp = NULL;
+
+	*freep = 0;
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+
+	/* Set up key and data DBT */
+	key.dlen = keydlen;
+	key.ulen = keyulen;
+	key.doff = keydoff;
+	/*
+	 * Ignore memory related flags on server.
+	 */
+	key.flags = DB_DBT_MALLOC;
+	if (keyflags & DB_DBT_PARTIAL)
+		key.flags |= DB_DBT_PARTIAL;
+	key.size = keysize;
+	key.data = keydata;
+
+	data.dlen = datadlen;
+	data.ulen = dataulen;
+	data.doff = datadoff;
+	/* NOTE(review): data flags are passed through as sent, unlike the
+	 * key flags above -- confirm this asymmetry is intentional. */
+	data.flags = dataflags;
+	data.size = datasize;
+	data.data = datadata;
+
+	/* Got all our stuff, now do the put */
+	ret = dbp->put(dbp, txnp, &key, &data, flags);
+	/*
+	 * If the client did a DB_APPEND, set up key in reply.
+	 * Otherwise just status.
+	 */
+	if (ret == 0 && (flags == DB_APPEND)) {
+		/*
+		 * XXX
+		 * We need to xdr_free whatever we are returning, next time.
+		 * However, DB does not allocate a new key if one was given
+		 * and we'd be free'ing up space allocated in the request.
+		 * So, allocate a new key/data pointer if it is the same one
+		 * as in the request.
+		 */
+		*freep = 1;
+		/*
+		 * Key
+		 */
+		if (key.data == keydata) {
+			/* Copy out so xdr_free never frees request memory. */
+			ret = __os_umalloc(dbp->dbenv,
+			    key.size, &replyp->keydata.keydata_val);
+			if (ret != 0) {
+				__os_ufree(dbp->dbenv, key.data);
+				goto err;
+			}
+			memcpy(replyp->keydata.keydata_val, key.data, key.size);
+		} else
+			replyp->keydata.keydata_val = key.data;
+
+		replyp->keydata.keydata_len = key.size;
+	} else {
+		/* Failure, or no appended key: status-only reply.  Note the
+		 * err label lives inside this branch so the allocation
+		 * failure above falls into the same cleanup. */
+err:		replyp->keydata.keydata_val = NULL;
+		replyp->keydata.keydata_len = 0;
+		*freep = 0;
+	}
+	replyp->status = ret;
+	return;
+}
+
+/* BEGIN __db_re_delim_proc */
+/*
+ * PUBLIC: void __db_re_delim_proc __P((long, u_int32_t,
+ * PUBLIC: __db_re_delim_reply *));
+ */
+void
+__db_re_delim_proc(dbpcl_id, delim, replyp)
+	long dbpcl_id;
+	u_int32_t delim;
+	__db_re_delim_reply *replyp;
+/* END __db_re_delim_proc */
+{
+	DB *dbp;
+	ct_entry *ctp;
+
+	/*
+	 * Resolve the client's DB id to the server-side handle and hand
+	 * the record delimiter straight through; the reply carries only
+	 * the method's return status.
+	 */
+	ACTIVATE_CTP(ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)ctp->ct_anyp;
+
+	replyp->status = dbp->set_re_delim(dbp, delim);
+	return;
+}
+
+/* BEGIN __db_re_len_proc */
+/*
+ * PUBLIC: void __db_re_len_proc __P((long, u_int32_t, __db_re_len_reply *));
+ */
+void
+__db_re_len_proc(dbpcl_id, len, replyp)
+	long dbpcl_id;
+	u_int32_t len;
+	__db_re_len_reply *replyp;
+/* END __db_re_len_proc */
+{
+	DB *dbp;
+	ct_entry *ctp;
+
+	/*
+	 * Pass the fixed record length through to the underlying DB
+	 * handle; only the status goes back to the client.
+	 */
+	ACTIVATE_CTP(ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)ctp->ct_anyp;
+
+	replyp->status = dbp->set_re_len(dbp, len);
+	return;
+}
+
+/* BEGIN __db_re_pad_proc */
+/*
+ * PUBLIC: void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *));
+ */
+void
+__db_re_pad_proc(dbpcl_id, pad, replyp)
+	long dbpcl_id;
+	u_int32_t pad;
+	__db_re_pad_reply *replyp;
+/* END __db_re_pad_proc */
+{
+	DB *dbp;
+	ct_entry *ctp;
+
+	/*
+	 * Pass the record pad byte through to the underlying DB handle;
+	 * only the status goes back to the client.
+	 */
+	ACTIVATE_CTP(ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)ctp->ct_anyp;
+
+	replyp->status = dbp->set_re_pad(dbp, pad);
+	return;
+}
+
+/* BEGIN __db_remove_proc */
+/*
+ * PUBLIC: void __db_remove_proc __P((long, char *, char *, u_int32_t,
+ * PUBLIC: __db_remove_reply *));
+ */
+void
+__db_remove_proc(dbpcl_id, name, subdb,
+    flags, replyp)
+	long dbpcl_id;
+	char *name;
+	char *subdb;
+	u_int32_t flags;
+	__db_remove_reply *replyp;
+/* END __db_remove_proc */
+{
+	DB *dbp;
+	ct_entry *ctp;
+
+	/*
+	 * Resolve the handle and remove the database (or subdatabase).
+	 * The client's id is dead after this call regardless of the
+	 * outcome, so its table entry is always released.
+	 */
+	ACTIVATE_CTP(ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)ctp->ct_anyp;
+
+	replyp->status = dbp->remove(dbp, name, subdb, flags);
+	__dbdel_ctp(ctp);
+	return;
+}
+
+/* BEGIN __db_rename_proc */
+/*
+ * PUBLIC: void __db_rename_proc __P((long, char *, char *, char *, u_int32_t,
+ * PUBLIC: __db_rename_reply *));
+ */
+void
+__db_rename_proc(dbpcl_id, name, subdb,
+    newname, flags, replyp)
+	long dbpcl_id;
+	char *name;
+	char *subdb;
+	char *newname;
+	u_int32_t flags;
+	__db_rename_reply *replyp;
+/* END __db_rename_proc */
+{
+	DB *dbp;
+	ct_entry *ctp;
+
+	/*
+	 * Resolve the handle and rename the database (or subdatabase).
+	 * The client's id is dead after this call regardless of the
+	 * outcome, so its table entry is always released.
+	 */
+	ACTIVATE_CTP(ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)ctp->ct_anyp;
+
+	replyp->status = dbp->rename(dbp, name, subdb, newname, flags);
+	__dbdel_ctp(ctp);
+	return;
+}
+
+/* BEGIN __db_stat_proc */
+/*
+ * PUBLIC: void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *,
+ * PUBLIC: int *));
+ */
+/*
+ * __db_stat_proc --
+ *	RPC server stub for DB->stat.  The access-method stat structure is
+ *	returned to the client as a flat array of u_int32_t values.  *freep
+ *	is set to 1 when the reply owns memory the RPC layer must free
+ *	after the reply has been transmitted.
+ */
+void
+__db_stat_proc(dbpcl_id, flags, replyp, freep)
+	long dbpcl_id;
+	u_int32_t flags;
+	__db_stat_reply *replyp;
+	int * freep;
+/* END __db_stat_proc */
+{
+	DB *dbp;
+	DBTYPE type;
+	ct_entry *dbp_ctp;
+	u_int32_t *q, *p, *retsp;
+	int i, len, ret;
+	void *sp;
+
+	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)dbp_ctp->ct_anyp;
+
+	ret = dbp->stat(dbp, &sp, flags);
+	replyp->status = ret;
+	if (ret != 0)
+		return;
+	/*
+	 * We get here, we have success.  Allocate an array so that
+	 * we can use the list generator.  Generate the reply, free
+	 * up the space.
+	 */
+	/*
+	 * XXX This assumes that all elements of all stat structures
+	 * are u_int32_t fields.  They are, currently.
+	 */
+	(void)dbp->get_type(dbp, &type);
+	if (type == DB_HASH)
+		len = sizeof(DB_HASH_STAT);
+	else if (type == DB_QUEUE)
+		len = sizeof(DB_QUEUE_STAT);
+	else /* BTREE or RECNO are same stats */
+		len = sizeof(DB_BTREE_STAT);
+	replyp->stats.stats_len = len / sizeof(u_int32_t);
+
+	/*
+	 * Allocate exactly len bytes (stats_len words) and copy exactly
+	 * stats_len words.  The previous code allocated len * stats_len
+	 * bytes and copied len words, reading sizeof(u_int32_t) times too
+	 * much from the stat structure -- a buffer over-read of sp.
+	 */
+	if ((ret = __os_umalloc(dbp->dbenv, len, &retsp)) != 0)
+		goto out;
+	for (i = 0, q = retsp, p = sp; i < (int)replyp->stats.stats_len;
+	    i++, q++, p++)
+		*q = *p;
+	replyp->stats.stats_val = retsp;
+	__os_ufree(dbp->dbenv, sp);
+	/* ret is 0 here: the reply owns retsp, xdr_free must release it. */
+	*freep = 1;
+out:
+	replyp->status = ret;
+	return;
+}
+
+/* BEGIN __db_sync_proc */
+/*
+ * PUBLIC: void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *));
+ */
+void
+__db_sync_proc(dbpcl_id, flags, replyp)
+	long dbpcl_id;
+	u_int32_t flags;
+	__db_sync_reply *replyp;
+/* END __db_sync_proc */
+{
+	DB *dbp;
+	ct_entry *ctp;
+
+	/*
+	 * Resolve the client's DB id and flush the database; the reply
+	 * carries only the method's return status.
+	 */
+	ACTIVATE_CTP(ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)ctp->ct_anyp;
+
+	replyp->status = dbp->sync(dbp, flags);
+	return;
+}
+
+/* BEGIN __db_truncate_proc */
+/*
+ * PUBLIC: void __db_truncate_proc __P((long, long, u_int32_t,
+ * PUBLIC: __db_truncate_reply *));
+ */
+void
+__db_truncate_proc(dbpcl_id, txnpcl_id,
+    flags, replyp)
+	long dbpcl_id;
+	long txnpcl_id;
+	u_int32_t flags;
+	__db_truncate_reply *replyp;
+/* END __db_truncate_proc */
+{
+	DB *dbp;
+	DB_TXN *txnp;
+	ct_entry *ctp, *txn_ctp;
+	u_int32_t count;
+	int ret;
+
+	/* Resolve the DB handle and, when given, the transaction. */
+	ACTIVATE_CTP(ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)ctp->ct_anyp;
+	txnp = NULL;
+	if (txnpcl_id != 0) {
+		ACTIVATE_CTP(txn_ctp, txnpcl_id, CT_TXN);
+		txnp = (DB_TXN *)txn_ctp->ct_anyp;
+	}
+
+	/* Discard all records; the count is only valid on success. */
+	count = 0;
+	ret = dbp->truncate(dbp, txnp, &count, flags);
+	if (ret == 0)
+		replyp->count = count;
+	replyp->status = ret;
+	return;
+}
+
+/* BEGIN __db_cursor_proc */
+/*
+ * PUBLIC: void __db_cursor_proc __P((long, long, u_int32_t,
+ * PUBLIC: __db_cursor_reply *));
+ */
+/*
+ * __db_cursor_proc --
+ *	RPC server stub for DB->cursor.  Allocates a new ct table entry for
+ *	the cursor and returns its id to the client in replyp->dbcidcl_id.
+ */
+void
+__db_cursor_proc(dbpcl_id, txnpcl_id,
+	flags, replyp)
+	long dbpcl_id;
+	long txnpcl_id;
+	u_int32_t flags;
+	__db_cursor_reply *replyp;
+/* END __db_cursor_proc */
+{
+	DB *dbp;
+	DBC *dbc;
+	DB_TXN *txnp;
+	ct_entry *dbc_ctp, *env_ctp, *dbp_ctp, *txnp_ctp;
+	int ret;
+
+	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)dbp_ctp->ct_anyp;
+	/* Reserve a table slot first; new_ct_ent takes the reply status
+	 * pointer so any allocation error is already recorded. */
+	dbc_ctp = new_ct_ent(&replyp->status);
+	if (dbc_ctp == NULL)
+		return;
+
+	if (txnpcl_id != 0) {
+		ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+		txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+		/* Txn cursors share the transaction's activity timestamp. */
+		dbc_ctp->ct_activep = txnp_ctp->ct_activep;
+	} else
+		txnp = NULL;
+
+	if ((ret = dbp->cursor(dbp, txnp, &dbc, flags)) == 0) {
+		/* Wire the new cursor into the ct table hierarchy. */
+		dbc_ctp->ct_dbc = dbc;
+		dbc_ctp->ct_type = CT_CURSOR;
+		dbc_ctp->ct_parent = dbp_ctp;
+		env_ctp = dbp_ctp->ct_envparent;
+		dbc_ctp->ct_envparent = env_ctp;
+		/* The cursor inherits the environment's timeout. */
+		__dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout);
+		__dbsrv_active(dbc_ctp);
+		replyp->dbcidcl_id = dbc_ctp->ct_id;
+	} else
+		/* Creation failed: give the reserved slot back. */
+		__dbclear_ctp(dbc_ctp);
+
+	replyp->status = ret;
+	return;
+}
+
+/* BEGIN __db_join_proc */
+/*
+ * PUBLIC: void __db_join_proc __P((long, u_int32_t *, u_int32_t, u_int32_t,
+ * PUBLIC: __db_join_reply *));
+ */
+/*
+ * __db_join_proc --
+ *	RPC server stub for DB->join.  curs is the client's array of
+ *	curslen cursor ids; they are translated to DBC handles, joined,
+ *	and the join cursor's new id is returned in replyp->dbcidcl_id.
+ */
+void
+__db_join_proc(dbpcl_id, curs, curslen,
+    flags, replyp)
+	long dbpcl_id;
+	u_int32_t * curs;
+	u_int32_t curslen;
+	u_int32_t flags;
+	__db_join_reply *replyp;
+/* END __db_join_proc */
+{
+	DB *dbp;
+	DBC **jcurs, **c;
+	DBC *dbc;
+	ct_entry *dbc_ctp, *ctp, *dbp_ctp;
+	u_int32_t *cl, i;
+	int ret;
+
+	ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+	dbp = (DB *)dbp_ctp->ct_anyp;
+
+	dbc_ctp = new_ct_ent(&replyp->status);
+	if (dbc_ctp == NULL)
+		return;
+
+	/*
+	 * Build a NULL-terminated array of the underlying cursors for
+	 * DB->join.  (The old code also computed the byte size into an
+	 * unused local; __os_calloc takes count and element size itself.)
+	 */
+	if ((ret = __os_calloc(dbp->dbenv,
+	    curslen + 1, sizeof(DBC *), &jcurs)) != 0) {
+		replyp->status = ret;
+		__dbclear_ctp(dbc_ctp);
+		return;
+	}
+	/*
+	 * If our curslist has a parent txn, we need to use it too
+	 * for the activity timeout.  All cursors must be part of
+	 * the same transaction, so just check the first.
+	 */
+	ctp = get_tableent(*curs);
+	DB_ASSERT(ctp->ct_type == CT_CURSOR);
+	/*
+	 * If we are using a transaction, set the join activity timer
+	 * to point to the parent transaction.
+	 */
+	if (ctp->ct_activep != &ctp->ct_active)
+		dbc_ctp->ct_activep = ctp->ct_activep;
+	for (i = 0, cl = curs, c = jcurs; i < curslen; i++, cl++, c++) {
+		ctp = get_tableent(*cl);
+		if (ctp == NULL) {
+			/*
+			 * NOTE(review): cursors already switched to CT_JOIN
+			 * in earlier iterations are not restored on this
+			 * path, and dbc_ctp is not cleared -- confirm
+			 * whether that is acceptable for a bad-id request.
+			 */
+			replyp->status = DB_NOSERVER_ID;
+			goto out;
+		}
+		/*
+		 * If we are using a txn, the join cursor points to the
+		 * transaction timeout.  If we are not using a transaction,
+		 * then all the curslist cursors must point to the join
+		 * cursor's timeout so that we do not timeout any of the
+		 * curlist cursors while the join cursor is active.
+		 * Change the type of the curslist ctps to CT_JOIN so that
+		 * we know they are part of a join list and we can distinguish
+		 * them and later restore them when the join cursor is closed.
+		 */
+		DB_ASSERT(ctp->ct_type == CT_CURSOR);
+		ctp->ct_type |= CT_JOIN;
+		ctp->ct_origp = ctp->ct_activep;
+		/*
+		 * Setting this to the ct_active field of the dbc_ctp is
+		 * really just a way to distinguish which join dbc this
+		 * cursor is part of.  The ct_activep of this cursor is
+		 * not used at all during its lifetime as part of a join
+		 * cursor.
+		 */
+		ctp->ct_activep = &dbc_ctp->ct_active;
+		*c = ctp->ct_dbc;
+	}
+	*c = NULL;
+	if ((ret = dbp->join(dbp, jcurs, &dbc, flags)) == 0) {
+		dbc_ctp->ct_dbc = dbc;
+		dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR);
+		dbc_ctp->ct_parent = dbp_ctp;
+		dbc_ctp->ct_envparent = dbp_ctp->ct_envparent;
+		__dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout);
+		__dbsrv_active(dbc_ctp);
+		replyp->dbcidcl_id = dbc_ctp->ct_id;
+	} else {
+		__dbclear_ctp(dbc_ctp);
+		/*
+		 * If we get an error, undo what we did above to any cursors.
+		 */
+		for (cl = curs; *cl != 0; cl++) {
+			ctp = get_tableent(*cl);
+			ctp->ct_type = CT_CURSOR;
+			ctp->ct_activep = ctp->ct_origp;
+		}
+	}
+
+	replyp->status = ret;
+out:
+	__os_free(dbp->dbenv, jcurs);
+	return;
+}
+
+/* BEGIN __dbc_close_proc */
+/*
+ * PUBLIC: void __dbc_close_proc __P((long, __dbc_close_reply *));
+ */
+void
+__dbc_close_proc(dbccl_id, replyp)
+	long dbccl_id;
+	__dbc_close_reply *replyp;
+/* END __dbc_close_proc */
+{
+	ct_entry *ctp;
+
+	/*
+	 * Validate the cursor id, then delegate the actual close and
+	 * table-entry teardown to the shared internal routine.
+	 */
+	ACTIVATE_CTP(ctp, dbccl_id, CT_CURSOR);
+	replyp->status = __dbc_close_int(ctp);
+	return;
+}
+
+/* BEGIN __dbc_count_proc */
+/*
+ * PUBLIC: void __dbc_count_proc __P((long, u_int32_t, __dbc_count_reply *));
+ */
+void
+__dbc_count_proc(dbccl_id, flags, replyp)
+	long dbccl_id;
+	u_int32_t flags;
+	__dbc_count_reply *replyp;
+/* END __dbc_count_proc */
+{
+	DBC *dbc;
+	ct_entry *ctp;
+	db_recno_t count;
+	int ret;
+
+	ACTIVATE_CTP(ctp, dbccl_id, CT_CURSOR);
+	dbc = (DBC *)ctp->ct_anyp;
+
+	/* Duplicate count for the current key; only valid on success. */
+	ret = dbc->c_count(dbc, &count, flags);
+	if (ret == 0)
+		replyp->dupcount = count;
+	replyp->status = ret;
+	return;
+}
+
+/* BEGIN __dbc_del_proc */
+/*
+ * PUBLIC: void __dbc_del_proc __P((long, u_int32_t, __dbc_del_reply *));
+ */
+void
+__dbc_del_proc(dbccl_id, flags, replyp)
+	long dbccl_id;
+	u_int32_t flags;
+	__dbc_del_reply *replyp;
+/* END __dbc_del_proc */
+{
+	DBC *dbc;
+	ct_entry *ctp;
+
+	/*
+	 * Resolve the cursor id and delete the record the cursor
+	 * currently references; the reply is status-only.
+	 */
+	ACTIVATE_CTP(ctp, dbccl_id, CT_CURSOR);
+	dbc = (DBC *)ctp->ct_anyp;
+
+	replyp->status = dbc->c_del(dbc, flags);
+	return;
+}
+
+/* BEGIN __dbc_dup_proc */
+/*
+ * PUBLIC: void __dbc_dup_proc __P((long, u_int32_t, __dbc_dup_reply *));
+ */
+/*
+ * __dbc_dup_proc --
+ *	RPC server stub for DBC->c_dup.  Allocates a new ct table entry
+ *	for the duplicate cursor and returns its id in replyp->dbcidcl_id.
+ */
+void
+__dbc_dup_proc(dbccl_id, flags, replyp)
+	long dbccl_id;
+	u_int32_t flags;
+	__dbc_dup_reply *replyp;
+/* END __dbc_dup_proc */
+{
+	DBC *dbc, *newdbc;
+	ct_entry *dbc_ctp, *new_ctp;
+	int ret;
+
+	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+	dbc = (DBC *)dbc_ctp->ct_anyp;
+
+	/* Reserve a table slot first; new_ct_ent takes the reply status
+	 * pointer so any allocation error is already recorded. */
+	new_ctp = new_ct_ent(&replyp->status);
+	if (new_ctp == NULL)
+		return;
+
+	if ((ret = dbc->c_dup(dbc, &newdbc, flags)) == 0) {
+		/* The duplicate hangs off the same parents as the source. */
+		new_ctp->ct_dbc = newdbc;
+		new_ctp->ct_type = CT_CURSOR;
+		new_ctp->ct_parent = dbc_ctp->ct_parent;
+		new_ctp->ct_envparent = dbc_ctp->ct_envparent;
+		/*
+		 * If our cursor has a parent txn, we need to use it too.
+		 */
+		if (dbc_ctp->ct_activep != &dbc_ctp->ct_active)
+			new_ctp->ct_activep = dbc_ctp->ct_activep;
+		/* Inherit the source cursor's timeout as well. */
+		__dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout);
+		__dbsrv_active(new_ctp);
+		replyp->dbcidcl_id = new_ctp->ct_id;
+	} else
+		/* Duplication failed: give the reserved slot back. */
+		__dbclear_ctp(new_ctp);
+
+	replyp->status = ret;
+	return;
+}
+
+/* BEGIN __dbc_get_proc */
+/*
+ * PUBLIC: void __dbc_get_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __dbc_get_reply *, int *));
+ */
+/*
+ * __dbc_get_proc --
+ *	RPC server stub for DBC->c_get.  Rebuilds the key/data DBTs from the
+ *	unmarshalled request fields, runs the get, and copies the results
+ *	into the reply.  *freep is set to 1 only when the reply holds memory
+ *	that the RPC layer must xdr_free after the reply has been sent.
+ */
+void
+__dbc_get_proc(dbccl_id, keydlen, keydoff,
+	keyulen, keyflags, keydata, keysize,
+	datadlen, datadoff, dataulen, dataflags,
+	datadata, datasize, flags, replyp, freep)
+	long dbccl_id;
+	u_int32_t keydlen;
+	u_int32_t keydoff;
+	u_int32_t keyulen;
+	u_int32_t keyflags;
+	void *keydata;
+	u_int32_t keysize;
+	u_int32_t datadlen;
+	u_int32_t datadoff;
+	u_int32_t dataulen;
+	u_int32_t dataflags;
+	void *datadata;
+	u_int32_t datasize;
+	u_int32_t flags;
+	__dbc_get_reply *replyp;
+	int * freep;
+/* END __dbc_get_proc */
+{
+	DBC *dbc;
+	DBT key, data;
+	DB_ENV *dbenv;
+	ct_entry *dbc_ctp;
+	int key_alloc, bulk_alloc, ret;
+
+	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+	dbc = (DBC *)dbc_ctp->ct_anyp;
+	dbenv = dbc->dbp->dbenv;
+
+	*freep = 0;
+	bulk_alloc = 0;
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+
+	/* Set up key and data DBT */
+	key.dlen = keydlen;
+	key.ulen = keyulen;
+	key.doff = keydoff;
+	/*
+	 * Ignore memory related flags on server.
+	 */
+	key.flags = DB_DBT_MALLOC;
+	if (keyflags & DB_DBT_PARTIAL)
+		key.flags |= DB_DBT_PARTIAL;
+	key.size = keysize;
+	key.data = keydata;
+
+	data.dlen = datadlen;
+	data.ulen = dataulen;
+	data.doff = datadoff;
+	data.size = datasize;
+	data.data = datadata;
+	/*
+	 * Bulk gets require a user-supplied buffer of data.ulen bytes;
+	 * allocate one here if the client did not send data, and remember
+	 * that we own it (bulk_alloc) so the error path can release it.
+	 */
+	if (flags & DB_MULTIPLE || flags & DB_MULTIPLE_KEY) {
+		if (data.data == 0) {
+			ret = __os_umalloc(dbenv, data.ulen, &data.data);
+			if (ret != 0)
+				goto err;
+			bulk_alloc = 1;
+		}
+		data.flags |= DB_DBT_USERMEM;
+	} else
+		data.flags |= DB_DBT_MALLOC;
+	if (dataflags & DB_DBT_PARTIAL)
+		data.flags |= DB_DBT_PARTIAL;
+
+	/* Got all our stuff, now do the get */
+	ret = dbc->c_get(dbc, &key, &data, flags);
+
+	/*
+	 * Otherwise just status.
+	 */
+	if (ret == 0) {
+		/*
+		 * XXX
+		 * We need to xdr_free whatever we are returning, next time.
+		 * However, DB does not allocate a new key if one was given
+		 * and we'd be free'ing up space allocated in the request.
+		 * So, allocate a new key/data pointer if it is the same one
+		 * as in the request.
+		 */
+		*freep = 1;
+		/*
+		 * Key
+		 */
+		key_alloc = 0;
+		if (key.data == keydata) {
+			/* Copy out so xdr_free never frees request memory. */
+			ret = __os_umalloc(dbenv, key.size,
+			    &replyp->keydata.keydata_val);
+			if (ret != 0) {
+				__os_ufree(dbenv, key.data);
+				__os_ufree(dbenv, data.data);
+				goto err;
+			}
+			key_alloc = 1;
+			memcpy(replyp->keydata.keydata_val, key.data, key.size);
+		} else
+			replyp->keydata.keydata_val = key.data;
+
+		replyp->keydata.keydata_len = key.size;
+
+		/*
+		 * Data
+		 */
+		if (data.data == datadata) {
+			ret = __os_umalloc(dbenv, data.size,
+			    &replyp->datadata.datadata_val);
+			if (ret != 0) {
+				__os_ufree(dbenv, key.data);
+				__os_ufree(dbenv, data.data);
+				/* Also undo the key copy made above. */
+				if (key_alloc)
+					__os_ufree(dbenv, replyp->keydata.keydata_val);
+				goto err;
+			}
+			memcpy(replyp->datadata.datadata_val, data.data,
+			    data.size);
+		} else
+			replyp->datadata.datadata_val = data.data;
+		replyp->datadata.datadata_len = data.size;
+	} else {
+		/* Failure: status-only reply.  The err label lives inside
+		 * this branch so allocation failures above fall into the
+		 * same cleanup. */
+err:		replyp->keydata.keydata_val = NULL;
+		replyp->keydata.keydata_len = 0;
+		replyp->datadata.datadata_val = NULL;
+		replyp->datadata.datadata_len = 0;
+		*freep = 0;
+		if (bulk_alloc)
+			__os_ufree(dbenv, data.data);
+	}
+	replyp->status = ret;
+	return;
+}
+
+/* BEGIN __dbc_pget_proc */
+/*
+ * PUBLIC: void __dbc_pget_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t,
+ * PUBLIC: u_int32_t, __dbc_pget_reply *, int *));
+ */
+/*
+ * __dbc_pget_proc --
+ *	RPC server stub for DBC->c_pget (secondary-index get).  Rebuilds
+ *	the secondary key, primary key and data DBTs from the unmarshalled
+ *	request fields, runs the pget, and copies all three results into
+ *	the reply.  *freep is set to 1 only when the reply holds memory
+ *	that the RPC layer must xdr_free after the reply has been sent.
+ */
+void
+__dbc_pget_proc(dbccl_id, skeydlen, skeydoff,
+	skeyulen, skeyflags, skeydata, skeysize,
+	pkeydlen, pkeydoff, pkeyulen, pkeyflags,
+	pkeydata, pkeysize, datadlen, datadoff,
+	dataulen, dataflags, datadata, datasize,
+	flags, replyp, freep)
+	long dbccl_id;
+	u_int32_t skeydlen;
+	u_int32_t skeydoff;
+	u_int32_t skeyulen;
+	u_int32_t skeyflags;
+	void *skeydata;
+	u_int32_t skeysize;
+	u_int32_t pkeydlen;
+	u_int32_t pkeydoff;
+	u_int32_t pkeyulen;
+	u_int32_t pkeyflags;
+	void *pkeydata;
+	u_int32_t pkeysize;
+	u_int32_t datadlen;
+	u_int32_t datadoff;
+	u_int32_t dataulen;
+	u_int32_t dataflags;
+	void *datadata;
+	u_int32_t datasize;
+	u_int32_t flags;
+	__dbc_pget_reply *replyp;
+	int * freep;
+/* END __dbc_pget_proc */
+{
+	DBC *dbc;
+	DBT skey, pkey, data;
+	DB_ENV *dbenv;
+	ct_entry *dbc_ctp;
+	int key_alloc, ret;
+
+	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+	dbc = (DBC *)dbc_ctp->ct_anyp;
+	dbenv = dbc->dbp->dbenv;
+
+	*freep = 0;
+	memset(&skey, 0, sizeof(skey));
+	memset(&pkey, 0, sizeof(pkey));
+	memset(&data, 0, sizeof(data));
+
+	/*
+	 * Ignore memory related flags on server.
+	 */
+	/* Set up key and data DBT */
+	skey.flags = DB_DBT_MALLOC;
+	skey.dlen = skeydlen;
+	skey.ulen = skeyulen;
+	skey.doff = skeydoff;
+	if (skeyflags & DB_DBT_PARTIAL)
+		skey.flags |= DB_DBT_PARTIAL;
+	skey.size = skeysize;
+	skey.data = skeydata;
+
+	pkey.flags = DB_DBT_MALLOC;
+	pkey.dlen = pkeydlen;
+	pkey.ulen = pkeyulen;
+	pkey.doff = pkeydoff;
+	if (pkeyflags & DB_DBT_PARTIAL)
+		pkey.flags |= DB_DBT_PARTIAL;
+	pkey.size = pkeysize;
+	pkey.data = pkeydata;
+
+	data.flags = DB_DBT_MALLOC;
+	data.dlen = datadlen;
+	data.ulen = dataulen;
+	data.doff = datadoff;
+	if (dataflags & DB_DBT_PARTIAL)
+		data.flags |= DB_DBT_PARTIAL;
+	data.size = datasize;
+	data.data = datadata;
+
+	/* Got all our stuff, now do the get */
+	ret = dbc->c_pget(dbc, &skey, &pkey, &data, flags);
+	/*
+	 * Otherwise just status.
+	 */
+	if (ret == 0) {
+		/*
+		 * XXX
+		 * We need to xdr_free whatever we are returning, next time.
+		 * However, DB does not allocate a new key if one was given
+		 * and we'd be free'ing up space allocated in the request.
+		 * So, allocate a new key/data pointer if it is the same one
+		 * as in the request.
+		 */
+		*freep = 1;
+		/*
+		 * Key
+		 */
+		/* key_alloc counts how many reply keys we copied, so the
+		 * later error paths know what to unwind: 0 none, 1 skey
+		 * only, 2 both skey and pkey. */
+		key_alloc = 0;
+		if (skey.data == skeydata) {
+			ret = __os_umalloc(dbenv,
+			    skey.size, &replyp->skeydata.skeydata_val);
+			if (ret != 0) {
+				__os_ufree(dbenv, skey.data);
+				__os_ufree(dbenv, pkey.data);
+				__os_ufree(dbenv, data.data);
+				goto err;
+			}
+			key_alloc = 1;
+			memcpy(replyp->skeydata.skeydata_val, skey.data,
+			    skey.size);
+		} else
+			replyp->skeydata.skeydata_val = skey.data;
+		replyp->skeydata.skeydata_len = skey.size;
+
+		/*
+		 * Primary key
+		 */
+		if (pkey.data == pkeydata) {
+			ret = __os_umalloc(dbenv,
+			    pkey.size, &replyp->pkeydata.pkeydata_val);
+			if (ret != 0) {
+				__os_ufree(dbenv, skey.data);
+				__os_ufree(dbenv, pkey.data);
+				__os_ufree(dbenv, data.data);
+				if (key_alloc)
+					__os_ufree(dbenv,
+					    replyp->skeydata.skeydata_val);
+				goto err;
+			}
+			/*
+			 * We can set it to 2, because they cannot send the
+			 * pkey over without sending the skey over too.
+			 * So if they did send a pkey, they must have sent
+			 * the skey as well.
+			 */
+			key_alloc = 2;
+			memcpy(replyp->pkeydata.pkeydata_val, pkey.data,
+			    pkey.size);
+		} else
+			replyp->pkeydata.pkeydata_val = pkey.data;
+		replyp->pkeydata.pkeydata_len = pkey.size;
+
+		/*
+		 * Data
+		 */
+		if (data.data == datadata) {
+			ret = __os_umalloc(dbenv,
+			    data.size, &replyp->datadata.datadata_val);
+			if (ret != 0) {
+				__os_ufree(dbenv, skey.data);
+				__os_ufree(dbenv, pkey.data);
+				__os_ufree(dbenv, data.data);
+				/*
+				 * If key_alloc is 1, just skey needs to be
+				 * freed, if key_alloc is 2, both skey and pkey
+				 * need to be freed.
+				 */
+				/* The post-decrement makes the first test
+				 * fire for key_alloc >= 1 and the second
+				 * only for key_alloc == 2. */
+				if (key_alloc--)
+					__os_ufree(dbenv,
+					    replyp->skeydata.skeydata_val);
+				if (key_alloc)
+					__os_ufree(dbenv,
+					    replyp->pkeydata.pkeydata_val);
+				goto err;
+			}
+			memcpy(replyp->datadata.datadata_val, data.data,
+			    data.size);
+		} else
+			replyp->datadata.datadata_val = data.data;
+		replyp->datadata.datadata_len = data.size;
+	} else {
+		/* Failure: status-only reply.  The err label lives inside
+		 * this branch so allocation failures above fall into the
+		 * same cleanup. */
+err:		replyp->skeydata.skeydata_val = NULL;
+		replyp->skeydata.skeydata_len = 0;
+		replyp->pkeydata.pkeydata_val = NULL;
+		replyp->pkeydata.pkeydata_len = 0;
+		replyp->datadata.datadata_val = NULL;
+		replyp->datadata.datadata_len = 0;
+		*freep = 0;
+	}
+	replyp->status = ret;
+	return;
+}
+
+/* BEGIN __dbc_put_proc */
+/*
+ * PUBLIC: void __dbc_put_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __dbc_put_reply *, int *));
+ */
+/*
+ * __dbc_put_proc --
+ *	RPC server stub for DBC->c_put.  Rebuilds the key/data DBTs from
+ *	the unmarshalled request fields and runs the put.  For DB_AFTER /
+ *	DB_BEFORE puts on a DB_RECNO database, the record-number key is
+ *	returned in the reply and *freep tells the RPC layer to xdr_free it.
+ */
+void
+__dbc_put_proc(dbccl_id, keydlen, keydoff,
+	keyulen, keyflags, keydata, keysize,
+	datadlen, datadoff, dataulen, dataflags,
+	datadata, datasize, flags, replyp, freep)
+	long dbccl_id;
+	u_int32_t keydlen;
+	u_int32_t keydoff;
+	u_int32_t keyulen;
+	u_int32_t keyflags;
+	void *keydata;
+	u_int32_t keysize;
+	u_int32_t datadlen;
+	u_int32_t datadoff;
+	u_int32_t dataulen;
+	u_int32_t dataflags;
+	void *datadata;
+	u_int32_t datasize;
+	u_int32_t flags;
+	__dbc_put_reply *replyp;
+	int * freep;
+/* END __dbc_put_proc */
+{
+	DB *dbp;
+	DBC *dbc;
+	DBT key, data;
+	ct_entry *dbc_ctp;
+	int ret;
+
+	ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+	dbc = (DBC *)dbc_ctp->ct_anyp;
+	/* The cursor's parent ct entry holds the owning DB handle. */
+	dbp = (DB *)dbc_ctp->ct_parent->ct_anyp;
+
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+
+	/* Set up key and data DBT */
+	key.dlen = keydlen;
+	key.ulen = keyulen;
+	key.doff = keydoff;
+	/*
+	 * Ignore memory related flags on server.
+	 */
+	key.flags = 0;
+	if (keyflags & DB_DBT_PARTIAL)
+		key.flags |= DB_DBT_PARTIAL;
+	key.size = keysize;
+	key.data = keydata;
+
+	data.dlen = datadlen;
+	data.ulen = dataulen;
+	data.doff = datadoff;
+	/* NOTE(review): data flags are passed through as sent, unlike the
+	 * key flags above -- confirm this asymmetry is intentional. */
+	data.flags = dataflags;
+	data.size = datasize;
+	data.data = datadata;
+
+	/* Got all our stuff, now do the put */
+	ret = dbc->c_put(dbc, &key, &data, flags);
+
+	*freep = 0;
+	/* RECNO after/before puts assign a new record number in key. */
+	if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE) &&
+	    dbp->type == DB_RECNO) {
+		/*
+		 * We need to xdr_free whatever we are returning, next time.
+		 */
+		replyp->keydata.keydata_val = key.data;
+		replyp->keydata.keydata_len = key.size;
+	} else {
+		replyp->keydata.keydata_val = NULL;
+		replyp->keydata.keydata_len = 0;
+	}
+	replyp->status = ret;
+	return;
+}
+#endif /* HAVE_RPC */
diff --git a/storage/bdb/rpc_server/c/db_server_util.c b/storage/bdb/rpc_server/c/db_server_util.c
new file mode 100644
index 00000000000..2ea270c2d19
--- /dev/null
+++ b/storage/bdb/rpc_server/c/db_server_util.c
@@ -0,0 +1,815 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_server_util.c,v 1.59 2002/03/27 04:32:50 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <rpc/rpc.h>
+
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "dbinc_auto/clib_ext.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+extern int __dbsrv_main __P((void));
+static int add_home __P((char *));
+static int add_passwd __P((char *));
+static int env_recover __P((char *));
+static void __dbclear_child __P((ct_entry *));
+
+static LIST_HEAD(cthead, ct_entry) __dbsrv_head;
+static LIST_HEAD(homehead, home_entry) __dbsrv_home;
+static long __dbsrv_defto = DB_SERVER_TIMEOUT;
+static long __dbsrv_maxto = DB_SERVER_MAXTIMEOUT;
+static long __dbsrv_idleto = DB_SERVER_IDLETIMEOUT;
+static char *logfile = NULL;
+static char *prog;
+
+static void usage __P((char *));
+static void version_check __P((void));
+
+int __dbsrv_verbose = 0;
+
+/*
+ * main --
+ * Berkeley DB RPC server entry point: refuse to start if another
+ * server is already registered with the portmapper, parse options,
+ * run recovery on every -h home, then enter the generated RPC
+ * dispatch loop (which never returns).
+ */
+int
+main(argc, argv)
+ int argc;
+ char **argv;
+{
+ extern char *optarg;
+ CLIENT *cl;
+ int ch, ret;
+ char *passwd;
+
+ prog = argv[0];
+
+ version_check();
+
+ ret = 0;
+ /*
+ * Check whether another server is running or not. There
+ * is a race condition where two servers could be racing to
+ * register with the portmapper. The goal of this check is to
+ * forbid running additional servers (like those started from
+ * the test suite) if the user is already running one.
+ *
+ * XXX
+ * This does not solve nor prevent two servers from being
+ * started at the same time and running recovery at the same
+ * time on the same environments.
+ */
+ if ((cl = clnt_create("localhost",
+ DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) != NULL) {
+ fprintf(stderr,
+ "%s: Berkeley DB RPC server already running.\n", prog);
+ clnt_destroy(cl);
+ return (EXIT_FAILURE);
+ }
+
+ LIST_INIT(&__dbsrv_home);
+ while ((ch = getopt(argc, argv, "h:I:L:P:t:T:Vv")) != EOF)
+ switch (ch) {
+ case 'h':
+ /* add_home reports its own errors; keep parsing. */
+ (void)add_home(optarg);
+ break;
+ case 'I':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_idleto))
+ return (EXIT_FAILURE);
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'P':
+ /* Copy the password, then wipe it from argv. */
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ /*
+ * Fix: this failure comes from add_passwd (no
+ * preceding -h, or a second -P for the same
+ * home), not from strdup, so label the error
+ * with the operation that actually failed.
+ */
+ if ((ret = add_passwd(passwd)) != 0) {
+ fprintf(stderr, "%s: add_passwd: %s\n",
+ prog, strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 't':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_defto))
+ return (EXIT_FAILURE);
+ break;
+ case 'T':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_maxto))
+ return (EXIT_FAILURE);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ __dbsrv_verbose = 1;
+ break;
+ default:
+ usage(prog);
+ }
+ /*
+ * Check default timeout against maximum timeout
+ */
+ if (__dbsrv_defto > __dbsrv_maxto)
+ __dbsrv_defto = __dbsrv_maxto;
+
+ /*
+ * Check default timeout against idle timeout
+ * It would be bad to timeout environments sooner than txns.
+ */
+ if (__dbsrv_defto > __dbsrv_idleto)
+ fprintf(stderr,
+ "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
+ prog, __dbsrv_idleto, __dbsrv_defto);
+
+ LIST_INIT(&__dbsrv_head);
+
+ /*
+ * If a client crashes during an RPC, our reply to it
+ * generates a SIGPIPE. Ignore SIGPIPE so we don't exit unnecessarily.
+ */
+#ifdef SIGPIPE
+ signal(SIGPIPE, SIG_IGN);
+#endif
+
+ if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile))
+ return (EXIT_FAILURE);
+
+ /*
+ * Now that we are ready to start, run recovery on all the
+ * environments specified.
+ */
+ if (env_recover(prog) != 0)
+ return (EXIT_FAILURE);
+
+ /*
+ * We've done our setup, now call the generated server loop
+ */
+ if (__dbsrv_verbose)
+ printf("%s: Ready to receive requests\n", prog);
+ __dbsrv_main();
+
+ /* NOTREACHED */
+ abort();
+}
+
+static void
+usage(progname)
+ char *progname;
+{
+ /* Print the option synopsis on stderr and exit unsuccessfully. */
+ (void)fprintf(stderr,
+ "usage: %s [-Vv] [-h home] [-P passwd]\n\t[-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]\n",
+ progname);
+ exit(EXIT_FAILURE);
+}
+
+static void
+version_check()
+{
+ int major, minor, patch;
+
+ /*
+ * Verify that the DB library we are linked against matches the
+ * headers we were compiled with; exit on any mismatch.
+ */
+ (void)db_version(&major, &minor, &patch);
+ if (major == DB_VERSION_MAJOR &&
+ minor == DB_VERSION_MINOR && patch == DB_VERSION_PATCH)
+ return;
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ prog, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, major, minor, patch);
+ exit(EXIT_FAILURE);
+}
+
+/*
+ * PUBLIC: void __dbsrv_settimeout __P((ct_entry *, u_int32_t));
+ */
+/*
+ * Set a table entry's resource timeout: clamp the client's request to
+ * the server maximum, and substitute the server default when the
+ * client passed 0.
+ */
+void
+__dbsrv_settimeout(ctp, to)
+ ct_entry *ctp; /* entry whose timeout is being set */
+ u_int32_t to; /* requested timeout; 0 means "use default" */
+{
+ if (to > (u_int32_t)__dbsrv_maxto)
+ ctp->ct_timeout = __dbsrv_maxto;
+ /*
+ * Fix: "to" is unsigned, so the original "to <= 0" could only
+ * ever mean "to == 0" and draws a tautological-comparison
+ * warning; spell the intent out explicitly.
+ */
+ else if (to == 0)
+ ctp->ct_timeout = __dbsrv_defto;
+ else
+ ctp->ct_timeout = to;
+}
+
+/*
+ * PUBLIC: void __dbsrv_timeout __P((int));
+ */
+/*
+ * Sweep the handle table and time out expired resources: first the
+ * cursors and transactions (they hold DB resources), then the idle
+ * environment handles.  If "force" is set, all environment handles
+ * are closed regardless of idle time.
+ */
+void
+__dbsrv_timeout(force)
+ int force;
+{
+ /*
+ * to_hint caches the earliest future expiration seen on the last
+ * sweep, so we can return immediately when nothing can have
+ * expired yet; -1 means "no hint".
+ */
+ static long to_hint = -1;
+ time_t t;
+ long to;
+ ct_entry *ctp, *nextctp;
+
+ if ((t = time(NULL)) == -1)
+ return;
+
+ /*
+ * Check hint. If hint is further in the future
+ * than now, no work to do.
+ */
+ if (!force && to_hint > 0 && t < to_hint)
+ return;
+ to_hint = -1;
+ /*
+ * Timeout transactions or cursors holding DB resources.
+ * Do this before timing out envs to properly release resources.
+ *
+ * !!!
+ * We can just loop through this list looking for cursors and txns.
+ * We do not need to verify txn and cursor relationships at this
+ * point because we maintain the list in LIFO order *and* we
+ * maintain activity in the ultimate txn parent of any cursor
+ * so either everything in a txn is timing out, or nothing.
+ * So, since we are LIFO, we will correctly close/abort all the
+ * appropriate handles, in the correct order.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ switch (ctp->ct_type) {
+ case CT_TXN:
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out txn id %ld\n",
+ ctp->ct_id);
+ (void)((DB_TXN *)ctp->ct_anyp)->
+ abort((DB_TXN *)ctp->ct_anyp);
+ __dbdel_ctp(ctp);
+ /*
+ * If we timed out an txn, we may have closed
+ * all sorts of ctp's.
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ case CT_CURSOR:
+ case (CT_JOINCUR | CT_CURSOR):
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out cursor %ld\n",
+ ctp->ct_id);
+ (void)__dbc_close_int(ctp);
+ /*
+ * Start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * Timeout idle handles.
+ * If we are forcing a timeout, we'll close all env handles.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ to = *(ctp->ct_activep) + ctp->ct_idle;
+ /* TIMEOUT */
+ if (to < t || force) {
+ if (__dbsrv_verbose)
+ printf("Timing out env id %ld\n", ctp->ct_id);
+ (void)__dbenv_close_int(ctp->ct_id, 0, 1);
+ /*
+ * If we timed out an env, we may have closed
+ * all sorts of ctp's (maybe even all of them).
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ }
+ }
+}
+
+/*
+ * RECURSIVE FUNCTION. We need to clear/free any number of levels of nested
+ * layers.
+ */
+/*
+ * Remove and free every table entry whose parent is "parent",
+ * recursing first so grandchildren go before children.
+ */
+static void
+__dbclear_child(parent)
+ ct_entry *parent;
+{
+ ct_entry *ctp, *nextctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ /* Skip entries with no type set (presumably already cleared). */
+ if (ctp->ct_type == 0)
+ continue;
+ if (ctp->ct_parent == parent) {
+ __dbclear_child(ctp);
+ /*
+ * Need to do this here because le_next may
+ * have changed with the recursive call and we
+ * don't want to point to a removed entry.
+ */
+ nextctp = LIST_NEXT(ctp, entries);
+ __dbclear_ctp(ctp);
+ }
+ }
+}
+
+/*
+ * PUBLIC: void __dbclear_ctp __P((ct_entry *));
+ */
+/*
+ * Unlink a table entry from the global list and free it.  The
+ * LIST_REMOVE must precede the free: the macro reads the entry's
+ * own link fields.
+ */
+void
+__dbclear_ctp(ctp)
+ ct_entry *ctp;
+{
+ LIST_REMOVE(ctp, entries);
+ __os_free(NULL, ctp);
+}
+
+/*
+ * PUBLIC: void __dbdel_ctp __P((ct_entry *));
+ */
+/*
+ * Delete a table entry and everything nested under it: children are
+ * cleared first (recursively), then the entry itself.
+ */
+void
+__dbdel_ctp(parent)
+ ct_entry *parent;
+{
+ __dbclear_child(parent);
+ __dbclear_ctp(parent);
+}
+
+/*
+ * PUBLIC: ct_entry *new_ct_ent __P((int *));
+ */
+/*
+ * Allocate, initialize and list-insert a new table entry.  Returns
+ * the entry, or NULL with *errp set to the failing error code.
+ */
+ct_entry *
+new_ct_ent(errp)
+ int *errp;
+{
+ time_t t;
+ ct_entry *ctp, *octp;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(ct_entry), &ctp)) != 0) {
+ *errp = ret;
+ return (NULL);
+ }
+ memset(ctp, 0, sizeof(ct_entry));
+ /*
+ * Get the time as ID. We may service more than one request per
+ * second however. If we are, then increment id value until we
+ * find an unused one. We insert entries in LRU fashion at the
+ * head of the list. So, if the first entry doesn't match, then
+ * we know for certain that we can use our entry.
+ */
+ if ((t = time(NULL)) == -1) {
+ *errp = __os_get_errno();
+ __os_free(NULL, ctp);
+ return (NULL);
+ }
+ /*
+ * Ids are monotonically increasing: bump past the newest entry's
+ * id if the clock hasn't moved since it was created.
+ */
+ octp = LIST_FIRST(&__dbsrv_head);
+ if (octp != NULL && octp->ct_id >= t)
+ t = octp->ct_id + 1;
+ ctp->ct_id = t;
+ ctp->ct_idle = __dbsrv_idleto;
+ ctp->ct_activep = &ctp->ct_active;
+ ctp->ct_origp = NULL;
+ ctp->ct_refcount = 1;
+
+ LIST_INSERT_HEAD(&__dbsrv_head, ctp, entries);
+ return (ctp);
+}
+
+/*
+ * PUBLIC: ct_entry *get_tableent __P((long));
+ */
+/*
+ * Look up a table entry by id; return it, or NULL when the id is
+ * unknown.
+ */
+ct_entry *
+get_tableent(id)
+ long id;
+{
+ ct_entry *entp;
+
+ entp = LIST_FIRST(&__dbsrv_head);
+ while (entp != NULL) {
+ if (entp->ct_id == id)
+ return (entp);
+ entp = LIST_NEXT(entp, entries);
+ }
+ return (NULL);
+}
+
+/*
+ * PUBLIC: ct_entry *__dbsrv_sharedb __P((ct_entry *, const char *,
+ * PUBLIC: const char *, DBTYPE, u_int32_t));
+ */
+/*
+ * Find an existing DB handle this open can share; bump its refcount
+ * and return it, or return NULL when no sharable handle exists.
+ */
+ct_entry *
+__dbsrv_sharedb(db_ctp, name, subdb, type, flags)
+ ct_entry *db_ctp;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share a db handle. Criteria for sharing are:
+ * If any of the non-sharable flags are set, we cannot share.
+ * Must be a db ctp, obviously.
+ * Must share the same env parent.
+ * Must be the same type, or current one DB_UNKNOWN.
+ * Must be same byteorder, or current one must not care.
+ * All flags must match.
+ * Must be same name, but don't share in-memory databases.
+ * Must be same subdb name.
+ *
+ * NOTE(review): the byteorder criterion above has no explicit
+ * check in the code below -- presumably covered by the setflags
+ * comparison; confirm against the handler that fills ct_dbdp.
+ */
+ if (flags & DB_SERVER_DBNOSHARE)
+ return (NULL);
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == db_ctp)
+ continue;
+ if (ctp->ct_type != CT_DB)
+ continue;
+ if (ctp->ct_envparent != db_ctp->ct_envparent)
+ continue;
+ if (type != DB_UNKNOWN && ctp->ct_dbdp.type != type)
+ continue;
+ /* LF_ISSET tests the "flags" argument (BDB macro convention). */
+ if (ctp->ct_dbdp.dbflags != LF_ISSET(DB_SERVER_DBFLAGS))
+ continue;
+ if (db_ctp->ct_dbdp.setflags != 0 &&
+ ctp->ct_dbdp.setflags != db_ctp->ct_dbdp.setflags)
+ continue;
+ /* A NULL name is an in-memory database: never shared. */
+ if (name == NULL || ctp->ct_dbdp.db == NULL ||
+ strcmp(name, ctp->ct_dbdp.db) != 0)
+ continue;
+ if (subdb != ctp->ct_dbdp.subdb &&
+ (subdb == NULL || ctp->ct_dbdp.subdb == NULL ||
+ strcmp(subdb, ctp->ct_dbdp.subdb) != 0))
+ continue;
+ /*
+ * If we get here, then we match.
+ */
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+/*
+ * PUBLIC: ct_entry *__dbsrv_shareenv __P((ct_entry *, home_entry *, u_int32_t));
+ */
+/*
+ * Find an existing environment handle this open can share; bump its
+ * refcount and return it, or return NULL if none matches.
+ */
+ct_entry *
+__dbsrv_shareenv(env_ctp, home, flags)
+ ct_entry *env_ctp;
+ home_entry *home;
+ u_int32_t flags;
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share an env. Criteria for sharing are:
+ * Must be an env ctp, obviously.
+ * Must share the same home env.
+ * All flags must match.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == env_ctp)
+ continue;
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ if (ctp->ct_envdp.home != home)
+ continue;
+ if (ctp->ct_envdp.envflags != flags)
+ continue;
+ if (ctp->ct_envdp.onflags != env_ctp->ct_envdp.onflags)
+ continue;
+ if (ctp->ct_envdp.offflags != env_ctp->ct_envdp.offflags)
+ continue;
+ /*
+ * If we get here, then we match. The only thing left to
+ * check is the timeout. Since the server timeout set by
+ * the client is a hint, for sharing we'll give them the
+ * benefit of the doubt and grant them the longer timeout.
+ */
+ if (ctp->ct_timeout < env_ctp->ct_timeout)
+ ctp->ct_timeout = env_ctp->ct_timeout;
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+/*
+ * PUBLIC: void __dbsrv_active __P((ct_entry *));
+ */
+/*
+ * Stamp an entry -- and its enclosing environment entry, if any --
+ * with the current time so the timeout sweep sees recent activity.
+ */
+void
+__dbsrv_active(ctp)
+ ct_entry *ctp;
+{
+ time_t now;
+
+ if (ctp == NULL || (now = time(NULL)) == -1)
+ return;
+ *(ctp->ct_activep) = now;
+ if (ctp->ct_envparent != NULL)
+ *(ctp->ct_envparent->ct_activep) = now;
+}
+
+/*
+ * PUBLIC: int __db_close_int __P((long, u_int32_t));
+ */
+/*
+ * Close the DB handle with the given table id.  Shared handles are
+ * only really closed when the reference count drops to zero.
+ * Returns DB_NOSERVER_ID for an unknown id, else DB->close's result.
+ */
+int
+__db_close_int(id, flags)
+ long id;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_DB);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing dbp id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ /* Other clients still share this handle: just drop our ref. */
+ if (--ctp->ct_refcount != 0)
+ return (ret);
+ dbp = ctp->ct_dbp;
+ if (__dbsrv_verbose)
+ printf("Closing dbp id %ld\n", id);
+
+ ret = dbp->close(dbp, flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbc_close_int __P((ct_entry *));
+ */
+/*
+ * Close a cursor's underlying DBC, detach any cursors that were
+ * joined to it, and free its table entry.  Returns c_close's result.
+ */
+int
+__dbc_close_int(dbc_ctp)
+ ct_entry *dbc_ctp;
+{
+ DBC *dbc;
+ int ret;
+ ct_entry *ctp;
+
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ ret = dbc->c_close(dbc);
+ /*
+ * If this cursor is a join cursor then we need to fix up the
+ * cursors that it was joined from so that they are independent again.
+ */
+ if (dbc_ctp->ct_type & CT_JOINCUR)
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Test if it is a join cursor, and if it is part
+ * of this one.  Membership is recognized by the
+ * member's activity pointer aiming at our entry.
+ */
+ if ((ctp->ct_type & CT_JOIN) &&
+ ctp->ct_activep == &dbc_ctp->ct_active) {
+ ctp->ct_type &= ~CT_JOIN;
+ /* Restore the member's own activity slot. */
+ ctp->ct_activep = ctp->ct_origp;
+ __dbsrv_active(ctp);
+ }
+ }
+ __dbclear_ctp(dbc_ctp);
+ return (ret);
+
+}
+
+/*
+ * PUBLIC: int __dbenv_close_int __P((long, u_int32_t, int));
+ */
+/*
+ * Close the environment handle with the given table id.  "force" is
+ * set by the timeout code to close regardless of the refcount.
+ * Returns DB_NOSERVER_ID for an unknown id, else DB_ENV->close's
+ * result.
+ */
+int
+__dbenv_close_int(id, flags, force)
+ long id;
+ u_int32_t flags;
+ int force;
+{
+ DB_ENV *dbenv;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_ENV);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing env id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ /*
+ * If we are timing out, we need to force the close, no matter
+ * what the refcount.
+ */
+ if (--ctp->ct_refcount != 0 && !force)
+ return (ret);
+ dbenv = ctp->ct_envp;
+ if (__dbsrv_verbose)
+ printf("Closing env id %ld\n", id);
+
+ ret = dbenv->close(dbenv, flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+/*
+ * add_home --
+ * Register a -h home directory: record its path, derive its name
+ * (last path component), and reject duplicates.  Returns 0, an
+ * __os_malloc error, or -1 for a duplicate name.
+ */
+static int
+add_home(home)
+ char *home;
+{
+ home_entry *hp, *homep;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(home_entry), &hp)) != 0)
+ return (ret);
+ if ((ret = __os_malloc(NULL, strlen(home)+1, &hp->home)) != 0) {
+ /* Fix: the original leaked hp on this error path. */
+ __os_free(NULL, hp);
+ return (ret);
+ }
+ memcpy(hp->home, home, strlen(home)+1);
+ hp->dir = home;
+ hp->passwd = NULL;
+ /*
+ * This loop is to remove any trailing path separators,
+ * to assure hp->name points to the last component.
+ * NOTE(review): assumes the path contains at least one
+ * non-separator component -- confirm callers never pass "/".
+ */
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ while (*(hp->name) == '\0') {
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ }
+ /*
+ * Now we have successfully added it. Make sure there are no
+ * identical names.
+ */
+ for (homep = LIST_FIRST(&__dbsrv_home); homep != NULL;
+ homep = LIST_NEXT(homep, entries))
+ if (strcmp(homep->name, hp->name) == 0) {
+ printf("Already added home name %s, at directory %s\n",
+ hp->name, homep->dir);
+ __os_free(NULL, hp->home);
+ __os_free(NULL, hp);
+ return (-1);
+ }
+ LIST_INSERT_HEAD(&__dbsrv_home, hp, entries);
+ if (__dbsrv_verbose)
+ printf("Added home %s in dir %s\n", hp->name, hp->dir);
+ return (0);
+}
+
+/*
+ * add_passwd --
+ * Attach a password to the most recently added home directory.
+ * It is a usage error (EINVAL) if no home has been given yet or
+ * if that home already carries a password.
+ */
+static int
+add_passwd(passwd)
+ char *passwd;
+{
+ home_entry *hp;
+
+ hp = LIST_FIRST(&__dbsrv_home);
+ if (hp == NULL)
+ return (EINVAL);
+ if (hp->passwd != NULL)
+ return (EINVAL);
+ /* The caller hands us an already-strdup'ed string: just keep it. */
+ hp->passwd = passwd;
+ return (0);
+}
+
+/*
+ * PUBLIC: home_entry *get_home __P((char *));
+ */
+/*
+ * Look up a registered home directory by name; NULL when not found.
+ */
+home_entry *
+get_home(name)
+ char *name;
+{
+ home_entry *entry;
+
+ entry = LIST_FIRST(&__dbsrv_home);
+ while (entry != NULL && strcmp(name, entry->name) != 0)
+ entry = LIST_NEXT(entry, entries);
+ return (entry);
+}
+
+/*
+ * env_recover --
+ * Run recovery on every registered home directory by opening each
+ * environment with DB_RECOVER, then closing it again.  Returns 0
+ * on success, non-zero as soon as any home fails.
+ */
+static int
+env_recover(progname)
+ char *progname;
+{
+ DB_ENV *dbenv;
+ home_entry *hp;
+ u_int32_t flags;
+ int exitval, ret;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries)) {
+ exitval = 0;
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ exit(EXIT_FAILURE);
+ }
+ if (__dbsrv_verbose == 1) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (hp->passwd != NULL)
+ (void)dbenv->set_encrypt(dbenv, hp->passwd,
+ DB_ENCRYPT_AES);
+
+ /*
+ * Initialize the env with DB_RECOVER. That is all we
+ * have to do to run recovery.
+ */
+ if (__dbsrv_verbose)
+ printf("Running recovery on %s\n", hp->home);
+ flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ DB_INIT_TXN | DB_USE_ENVIRON | DB_RECOVER;
+ if ((ret = dbenv->open(dbenv, hp->home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ goto error;
+ }
+
+ /* The error label is reachable only via the goto above. */
+ if (0) {
+error: exitval = 1;
+ }
+ /* Close the env on both the success and error paths. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr, "%s: dbenv->close: %s\n",
+ progname, db_strerror(ret));
+ }
+ if (exitval)
+ return (exitval);
+ }
+ return (0);
+}
diff --git a/storage/bdb/rpc_server/clsrv.html b/storage/bdb/rpc_server/clsrv.html
new file mode 100644
index 00000000000..599ad56f557
--- /dev/null
+++ b/storage/bdb/rpc_server/clsrv.html
@@ -0,0 +1,453 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.76 [en] (X11; U; FreeBSD 4.3-RELEASE i386) [Netscape]">
+</head>
+<body>
+
+<center>
+<h1>
+&nbsp;Client/Server Interface for Berkeley DB</h1></center>
+
+<center><i>Susan LoVerso</i>
+<br><i>sue@sleepycat.com</i>
+<br><i>Rev 1.3</i>
+<br><i>1999 Nov 29</i></center>
+
+<p>We provide an interface allowing client/server access to Berkeley DB.&nbsp;&nbsp;
+Our goal is to provide a client and server library to allow users to separate
+the functionality of their applications yet still have access to the full
+benefits of Berkeley DB.&nbsp; The goal is to provide a totally seamless
+interface with minimal modification to existing applications as well.
+<p>The client/server interface for Berkeley DB can be broken up into several
+layers.&nbsp; At the lowest level there is the transport mechanism to send
+out the messages over the network.&nbsp; Above that layer is the messaging
+layer to interpret what comes over the wire, and bundle/unbundle message
+contents.&nbsp; The next layer is Berkeley DB itself.
+<p>The transport layer uses ONC RPC (RFC 1831) and XDR (RFC 1832).&nbsp;
+We declare our message types and operations supported by our program and
+the RPC library and utilities pretty much take care of the rest.&nbsp;
+The
+<i>rpcgen</i> program generates all of the low level code needed.&nbsp;
+We need to define both sides of the RPC.
+<br>&nbsp;
+<h2>
+<a NAME="DB Modifications"></a>DB Modifications</h2>
+To achieve the goal of a seamless interface, it is necessary to impose
+a constraint on the application. That constraint is simply that all database
+access must be done through an open environment.&nbsp; I.e. this model
+does not support standalone databases.&nbsp; The reason for this constraint
+is so that we have an environment structure internally to store our connection
+to the server.&nbsp; Imposing this constraint means that we can provide
+the seamless interface just by adding a single environment method: <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>.
+<p>The planned interface for this method is:
+<pre>DBENV->set_rpc_server(dbenv,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* DB_ENV structure */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; hostname&nbsp;&nbsp;&nbsp; /* Host of server */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; cl_timeout, /* Client timeout (sec) */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; srv_timeout,/* Server timeout (sec) */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; flags);&nbsp;&nbsp;&nbsp;&nbsp; /* Flags: unused */</pre>
+This new method takes the hostname of the server, establishes our connection
+and an environment on the server.&nbsp; If a server timeout is specified,
+then we send that to the server as well (and the server may or may not
+choose to use that value).&nbsp; This timeout is how long the server will
+allow the environment to remain idle before declaring it dead and releasing
+resources on the server.&nbsp; The pointer to the connection is stored
+on the client in the DBENV structure and is used by all other methods to
+figure out with whom to communicate.&nbsp; If a client timeout is specified,
+it indicates how long the client is willing to wait for a reply from the
+server.&nbsp; If the values are 0, then defaults are used.&nbsp; Flags
+is currently unused, but exists because we always need to have a placeholder
+for flags and it would be used for specifying authentication desired (were
+we to provide an authentication scheme at some point) or other uses not
+thought of yet!
+<p>This client code is part of the monolithic DB library.&nbsp; The user
+accesses the client functions via a new flag to <a href="../docs/api_c/db_env_create.html">db_env_create()</a>.&nbsp;
+That flag is DB_CLIENT.&nbsp; By using this flag the user indicates they
+want to have the client methods rather than the standard methods for the
+environment.&nbsp; Also by issuing this flag, the user needs to connect
+to the server via the <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>
+method.
+<p>We need two new fields in the <i>DB_ENV </i>structure.&nbsp; One is
+the socket descriptor to communicate to the server, the other field is
+the client identifier the server gives to us.&nbsp; The <i>DB, </i>and<i>
+DBC </i>only need one additional field, the client identifier.&nbsp; The
+<i>DB_TXN</i>
+structure does not need modification, we are overloading the <i>txn_id
+</i>field.
+<h2>
+Issues</h2>
+We need to figure out what to do in case of client and server crashes.&nbsp;
+Both the client library and the server program are stateful.&nbsp; They
+both consume local resources during the lifetime of the connection.&nbsp;
+Should one end drop that connection, the other side needs to release those
+resources.
+<p>If the server crashes, then the client will get an error back.&nbsp;
+I have chosen to implement time-outs on the client side, using a default
+or allowing the application to specify one through the <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>
+method.&nbsp; Either the current operation will time-out waiting for the
+reply or the next operation called will time out (or get back some other
+kind of error regarding the server's non-existence).&nbsp; In any case,
+if the client application gets back such an error, it should abort any
+open transactions locally, close any databases, and close its environment.&nbsp;
+It may then decide to retry to connect to the server periodically or whenever
+it comes back.&nbsp; If the last operation a client did was a transaction
+commit that did not return or timed out from the server, the client cannot
+determine if the transaction was committed or not but must release the
+local transaction resources. Once the server is back up, recovery must
+be run on the server.&nbsp;&nbsp; If the transaction commit completed on
+the server before the crash, then the operation is redone, if the transaction
+commit did not get to the server, the pieces of the transaction are undone
+on recover.&nbsp; The client can then re-establish its connection and begin
+again.&nbsp; This is effectively like beginning over.&nbsp; The client
+cannot use ID's from its previous connection to the server.&nbsp; However,
+if recovery is run, then consistency is assured.
+<p>If the client crashes, the server needs to somehow figure this out.&nbsp;
+The server is just sitting there waiting for a request to come in.&nbsp;
+A server must be able to time-out a client.&nbsp; Similar to ftpd, if a
+connection is idle for N seconds, then the server decides the client is
+dead and releases that client's resources, aborting any open transactions,
+closing any open databases and environments.&nbsp;&nbsp; The server timing
+out a client is not a trivial issue however.&nbsp; The generated function
+for the server just calls <i>svc_run()</i>.&nbsp; The server code I write
+contains procedures to do specific things.&nbsp; We do not have access
+to the code calling <i>select()</i>.&nbsp; Timing out the select is not
+good enough even if we could do so.&nbsp; We want to time-out idle environments,
+not simply cause a time-out if the server is idle a while.&nbsp; See the
+discussion of the <a href="#The Server Program">server program</a> for
+a description of how we accomplish this.
+<p>Since rpcgen generates the main() function of the server, I do not yet
+know how we are going to have the server multi-threaded or multi-process
+without changing the generated code.&nbsp; The RPC book indicates that
+the only way to accomplish this is through modifying the generated code
+in the server.&nbsp; <b>For the moment we will ignore this issue while
+we get the core server working, as it is only a performance issue.</b>
+<p>We do not do any security or authentication.&nbsp; Someone could get
+the code and modify it to spoof messages, trick the server, etc.&nbsp;
+RPC has some amount of authentication built into it.&nbsp; I haven't yet
+looked into it much to know if we want to use it or just point a user at
+it.&nbsp; The changes to the client code are fairly minor, the changes
+to our server procs are fairly minor.&nbsp; We would have to add code to
+a <i>sed</i> script or <i>awk</i> script to change the generated server
+code (yet again) in the dispatch routine to perform authentication.
+<p>We will need to get an official program number from Sun.&nbsp; We can
+get this by sending mail to <i>rpc@sun.com</i> and presumably at some point
+they will send us back a program number that we will encode into our XDR
+description file.&nbsp; Until we release this we can use a program number
+in the "user defined" number space.
+<br>&nbsp;
+<h2>
+<a NAME="The Server Program"></a>The Server Program</h2>
+The server is a standalone program that the user builds and runs, probably
+as a daemon like process.&nbsp; This program is linked against the Berkeley
+DB library and the RPC library (which is part of the C library on my FreeBSD
+machine, others may have/need <i>-lrpclib</i>).&nbsp; The server basically
+is a slave to the client process.&nbsp; All messages from the client are
+synchronous and two-way.&nbsp; The server handles messages one at a time,
+and sends a reply back before getting another message.&nbsp; There are
+no asynchronous messages generated by the server to the client.
+<p>We have made a choice to modify the generated code for the server.&nbsp;
+The changes will be minimal, generally calling functions we write, that
+are in other source files.&nbsp; The first change is adding a call to our
+time-out function as described below.&nbsp; The second change is changing
+the name of the generated <i>main()</i> function to <i>__dbsrv_main()</i>,
+and adding our own <i>main()</i> function so that we can parse options,
+and set up other initialization we require.&nbsp; I have a <i>sed</i> script
+that is run from the distribution scripts that massages the generated code
+to make these minor changes.
+<p>Primarily the code needed for the server is the collection of the specified
+RPC functions.&nbsp; Each function receives the structure indicated, and
+our code takes out what it needs and passes the information into DB itself.&nbsp;
+The server needs to maintain a translation table for identifiers that we
+pass back to the client for the environment, transaction and database handles.
+<p>The table that the server maintains, assuming one client per server
+process/thread, should contain the handle to the environment, database
+or transaction, a link to maintain parent/child relationships between transactions,
+or databases and cursors, this handle's identifier, a type so that we can
+error if the client passes us a bad id for this call, and a link to this
+handle's environment entry (for time out/activity purposes).&nbsp; The
+table contains, in entries used by environments, a time-out value and an
+activity time stamp.&nbsp; Its use is described below for timing out idle
+clients.
+<p>Here is how we time out clients in the server.&nbsp; We have to modify
+the generated server code, but only to add one line during the dispatch
+function to run the time-out function.&nbsp; The call is made right before
+the return of the dispatch function, after the reply is sent to the client,
+so that clients aren't kept waiting for server bookkeeping activities.&nbsp;
+This time-out function then runs every time the server processes a request.&nbsp;
+In the time-out function we maintain a time-out hint that is the youngest
+environment to time-out.&nbsp; If the current time is less than the hint
+we know we do not need to run through the list of open handles.&nbsp; If
+the hint is expired, then we go through the list of open environment handles,
+and if they are past their expiration, then we close them and clean up.&nbsp;
+If they are not, we set up the hint for the next time.
+<p>Each entry in the open handle table has a pointer back to its environment's
+entry.&nbsp; Every operation within this environment can then update the
+single environment activity record.&nbsp; Every environment can have a
+different time-out.&nbsp; The <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server
+</a>call
+takes a server time-out value.&nbsp; If this value is 0 then a default
+(currently 5 minutes) is used.&nbsp; This time-out value is only a hint
+to the server.&nbsp; It may choose to disregard this value or set the time-out
+based on its own implementation.
+<p>For completeness, the flaws of this time-out implementation should be
+pointed out.&nbsp; First, it is possible that a client could crash with
+open handles, and no other requests come in to the server.&nbsp; Therefore
+the time-out function never gets run and those resources are not released
+(until a request does come in).&nbsp; Similarly, this time-out is not exact.&nbsp;
+The time-out function uses its hint and if it computes a hint on one run,
+an earlier time-out might be created before that time-out expires.&nbsp;
+This issue simply yields a handle that doesn't get released until that
+original hint expires.&nbsp; To illustrate, consider that at the time that
+the time-out function is run, the youngest time-out is 5 minutes in the
+future.&nbsp; Soon after, a new environment is opened that has a time-out
+of 1 minute.&nbsp; If this environment becomes idle (and other operations
+are going on), the time-out function will not release that environment
+until the original 5 minute hint expires.&nbsp; This is not a problem since
+the resources will eventually be released.
+<p>On a similar note, if a client crashes during an RPC, our reply generates
+a SIGPIPE, and our server crashes unless we catch it.&nbsp; Using <i>signal(SIGPIPE,
+SIG_IGN) </i>we can ignore it, and the server will go on.&nbsp; This is
+a call&nbsp; in our <i>main()</i> function that we write.&nbsp; Eventually
+this client's handles would be timed out as described above.&nbsp; We need
+this only for the unfortunate window of a client crashing during the RPC.
+<p>The options below are primarily for control of the program itself.&nbsp;
+Details relating to databases and environments should be passed from the
+client to the server, since the server can serve many clients, many environments
+and many databases.&nbsp; Therefore it makes more sense for the client
+to set the cache size of its own environment, rather than setting a default
+cachesize on the server that applies as a blanket to any environment it
+may be called upon to open.&nbsp; Options are:
+<ul>
+<li>
+<b>-t&nbsp;</b> to set the default time-out given to an environment.</li>
+
+<li>
+<b>-T</b> to set the maximum time-out allowed for the server.</li>
+
+<li>
+<b>-L</b> to log the execution of the server process to a specified file.</li>
+
+<li>
+<b>-v</b> to run in verbose mode.</li>
+
+<li>
+<b>-M</b>&nbsp; to specify the maximum number of outstanding child server
+processes/threads we can have at any given time.&nbsp; The default is 10.
+<b>[We
+are not yet doing multiple threads/processes.]</b></li>
+</ul>
+
+<h2>
+The Client Code</h2>
+The client code contains all of the supported functions and methods used
+in this model.&nbsp; There are several methods in the <i>__db_env
+</i>and
+<i>__db</i>
+structures that currently do not apply, such as the callbacks.&nbsp; Those
+fields that are not applicable to the client model point to NULL to notify
+the user of their error.&nbsp; Some method functions remain unchanged,
+as well such as the error calls.
+<p>The client code contains each method function that goes along with the
+<a href="#Remote Procedure Calls">RPC
+calls</a> described elsewhere.&nbsp; The client library also contains its
+own version of <a href="../docs/api_c/env_create.html">db_env_create()</a>,
+which does not result in any messages going over to the server (since we
+do not yet know what server we are talking to).&nbsp; This function sets
+up the pointers to the correct client functions.
+<p>All of the method functions that handle the messaging have a basic flow
+similar to this:
+<ul>
+<li>
+Local arg parsing that may be needed</li>
+
+<li>
+Marshalling the message header and the arguments we need to send to the
+server</li>
+
+<li>
+Sending the message</li>
+
+<li>
+Receiving a reply</li>
+
+<li>
+Unmarshalling the reply</li>
+
+<li>
+Local results processing that may be needed</li>
+</ul>
+
+<h2>
+Generated Code</h2>
+Almost all of the code is generated from a source file describing the interface
+and an <i>awk</i> script.&nbsp;&nbsp; This awk script generates six (6)
+files for us.&nbsp; It also modifies one.&nbsp; The files are:
+<ol>
+<li>
+Client file - The C source file created containing the client code.</li>
+
+<li>
+Client template file - The C template source file created containing interfaces
+for handling client-local issues such as resource allocation, but with
+a consistent interface with the client code generated.</li>
+
+<li>
+Server file - The C source file created containing the server code.</li>
+
+<li>
+Server template file - The C template source file created containing interfaces
+for handling server-local issues such as resource allocation, calling into
+the DB library but with a consistent interface with the server code generated.</li>
+
+<li>
+XDR file - The XDR message description file created.</li>
+
+<li>
+Server sed file - A sed script that contains commands to apply to the server
+procedure file (i.e. the real source file that the server template file
+becomes) so that minor interface changes can be consistently and easily
+applied to the real code.</li>
+
+<li>
+Server procedure file - This is the file that is modified by the sed script
+generated.&nbsp; It originated from the server template file.</li>
+</ol>
+The awk script reads a source file, <i>db_server/rpc.src </i>that describes
+each operation and what sorts of arguments it takes and what it returns
+from the server.&nbsp; The syntax of the source file describes the interface
+to that operation.&nbsp; There are four (4) parts to the syntax:
+<ol>
+<li>
+<b>BEGIN</b> <b><i>function version# codetype</i></b> - begins a new functional
+interface for the given <b><i>function</i></b>.&nbsp; Each function has
+a <b><i>version number</i></b>, currently all of them are at version number
+one (1).&nbsp; The <b><i>code type</i></b> indicates to the awk script
+what kind of code to generate.&nbsp; The choices are:</li>
+
+<ul>
+<li>
+<b>CODE </b>- Generate all code, and return a status value.&nbsp; If specified,
+the client code will simply return the status to the user upon completion
+of the RPC call.</li>
+
+<li>
+<b>RETCODE </b>- Generate all code and call a return function in the client
+template file to deal with client issues or with other returned items.&nbsp;
+If specified, the client code generated will call a function of the form
+<i>__dbcl_&lt;name>_ret()
+</i>where
+&lt;name> is replaced with the function name given here.&nbsp; This function
+is placed in the template file because this indicates that something special
+must occur on return.&nbsp; The arguments to this function are the same
+as those for the client function, with the addition of the reply message
+structure.</li>
+
+<li>
+<b>NOCLNTCODE - </b>Generate XDR and server code, but no corresponding
+client code. (This is used for functions that are not named the same thing
+on both sides.&nbsp; The only use of this at the moment is db_env_create
+and db_create.&nbsp; The environment create call to the server is actually
+called from the <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>
+method.&nbsp; The db_create code exists elsewhere in the library and we
+modify that code for the client call.)</li>
+</ul>
+
+<li>
+<b>ARG <i>RPC-type C-type varname [list-type]</i></b>- each line of this
+describes an argument to the function.&nbsp; The argument is called <b><i>varname</i></b>.&nbsp;
+The <b><i>C-type</i></b> given is what it should look like in the C code
+generated, such as <b>DB *, u_int32_t, const char *</b>.&nbsp; The
+<b><i>RPC-type</i></b>
+is an indication about how the RPC request message should be constructed.&nbsp;
+The RPC-types allowed are described below.</li>
+
+<li>
+<b>RET <i>RPC-type C-type varname [list-type]</i></b>- each line of this
+describes what the server should return from this procedure call (in addition
+to a status, which is always returned and should not be specified).&nbsp;
+The argument is called <b><i>varname</i></b>.&nbsp; The <b><i>C-type</i></b>
+given is what it should look like in the C code generated, such as <b>DB
+*, u_int32_t, const char *</b>.&nbsp; The <b><i>RPC-type</i></b> is an
+indication about how the RPC reply message should be constructed.&nbsp;
+The RPC-types are described below.</li>
+
+<li>
+<b>END </b>- End the description of this function.&nbsp; The result is
+that when the awk script encounters the <b>END</b> tag, it now has all
+the information it needs to construct the generated code for this function.</li>
+</ol>
+The <b><i>RPC-type</i></b> must be one of the following:
+<ul>
+<li>
+<b>IGNORE </b>- This argument is not passed to the server and should be
+ignored when constructing the XDR code.&nbsp; <b>Only allowed for an ARG
+specification.</b></li>
+
+<li>
+<b>STRING</b> - This argument is a string.</li>
+
+<li>
+<b>INT </b>- This argument is an integer of some sort.</li>
+
+<li>
+<b>DBT </b>- This argument is a DBT, resulting in its decomposition into
+the request message.</li>
+
+<li>
+<b>LIST</b> - This argument is an opaque list passed to the server (NULL-terminated).&nbsp;
+If an argument of this type is given, it must have a <b><i>list-type</i></b>
+specified that is one of:</li>
+
+<ul>
+<li>
+<b>STRING</b></li>
+
+<li>
+<b>INT</b></li>
+
+<li>
+<b>ID</b>.</li>
+</ul>
+
+<li>
+<b>ID</b> - This argument is an identifier.</li>
+</ul>
+So, for example, the source for the DB->join RPC call looks like:
+<pre>BEGIN&nbsp;&nbsp; dbjoin&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; RETCODE
+ARG&nbsp;&nbsp;&nbsp;&nbsp; ID&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; DB *&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dbp&nbsp;
+ARG&nbsp;&nbsp;&nbsp;&nbsp; LIST&nbsp;&nbsp;&nbsp; DBC **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; curs&nbsp;&nbsp;&nbsp; ID
+ARG&nbsp;&nbsp;&nbsp;&nbsp; IGNORE&nbsp; DBC **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dbcpp&nbsp;
+ARG&nbsp;&nbsp;&nbsp;&nbsp; INT&nbsp;&nbsp;&nbsp;&nbsp; u_int32_t&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; flags
+RET&nbsp;&nbsp;&nbsp;&nbsp; ID&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; long&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dbcid
+END</pre>
+Our first line tells us we are writing the dbjoin function.&nbsp; It requires
+special code on the client so we indicate that with the RETCODE.&nbsp;
+This method takes four arguments.&nbsp; For the RPC request we need the
+database ID from the dbp, we construct a NULL-terminated list of IDs for
+the cursor list, we ignore the argument to return the cursor handle to
+the user, and we pass along the flags.&nbsp; On the return, the reply contains
+a status, by default, and additionally, it contains the ID of the newly
+created cursor.
+<h2>
+Building and Installing</h2>
+I need to verify with Don Anderson, but I believe we should just build
+the server program, just like we do for db_stat, db_checkpoint, etc.&nbsp;
+Basically it can be treated as a utility program from the building and
+installation perspective.
+<p>As mentioned early on, in the section on <a href="#DB Modifications">DB
+Modifications</a>, we have a single library, but allowing the user to access
+the client portion by sending a flag to <a href="../docs/api_c/env_create.html">db_env_create()</a>.&nbsp;
+The Makefile is modified to include the new files.
+<p>Testing is performed in two ways.&nbsp; First I have a new example program,
+that should become part of the example directory.&nbsp; It is basically
+a merging of ex_access.c and ex_env.c.&nbsp; This example is adequate to
+test basic functionality, as it just does database put/get calls and
+appropriate open and close calls.&nbsp; However, in order to test the full
+set of functions a more generalized scheme is required.&nbsp; For the moment,
+I am going to modify the Tcl interface to accept the server information.&nbsp;
+Nothing else should need to change in Tcl.&nbsp; Then we can either write
+our own test modules or use a subset of the existing ones to test functionality
+on a regular basis.
+</body>
+</html>
diff --git a/storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp b/storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp
new file mode 100644
index 00000000000..25278273555
--- /dev/null
+++ b/storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp
@@ -0,0 +1,2200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef lint
+static const char revid[] = "$Id: db_server_cxxproc.cpp,v 1.12 2002/08/09 01:56:08 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "db_cxx.h"
+
+extern "C" {
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+}
+
+/* BEGIN __env_cachesize_proc */
+extern "C" void
+__env_cachesize_proc(
+ long dbenvcl_id,
+ u_int32_t gbytes,
+ u_int32_t bytes,
+ u_int32_t ncache,
+ __env_cachesize_reply *replyp)
+/* END __env_cachesize_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_cachesize(gbytes, bytes, ncache);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_close_proc */
+extern "C" void
+__env_close_proc(
+ long dbenvcl_id,
+ u_int32_t flags,
+ __env_close_reply *replyp)
+/* END __env_close_proc */
+{
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ replyp->status = __dbenv_close_int(dbenvcl_id, flags, 0);
+ return;
+}
+
+/* BEGIN __env_create_proc */
+extern "C" void
+__env_create_proc(
+ u_int32_t timeout,
+ __env_create_reply *replyp)
+/* END __env_create_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *ctp;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ ctp->ct_envp = dbenv;
+ ctp->ct_type = CT_ENV;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = ctp;
+ __dbsrv_settimeout(ctp, timeout);
+ __dbsrv_active(ctp);
+ replyp->envcl_id = ctp->ct_id;
+
+ replyp->status = 0;
+ return;
+}
+
+/* BEGIN __env_dbremove_proc */
+extern "C" void
+__env_dbremove_proc(
+ long dbenvcl_id,
+ long txnpcl_id,
+ char *name,
+ char *subdb,
+ u_int32_t flags,
+ __env_dbremove_reply *replyp)
+/* END __env_dbremove_proc */
+{
+ int ret;
+ DbEnv *dbenv;
+ DbTxn *txnp;
+ ct_entry *dbenv_ctp, *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbremove(txnp, name, subdb, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbrename_proc */
+void
+__env_dbrename_proc(
+ long dbenvcl_id,
+ long txnpcl_id,
+ char *name,
+ char *subdb,
+ char *newname,
+ u_int32_t flags,
+ __env_dbrename_reply *replyp)
+/* END __env_dbrename_proc */
+{
+ int ret;
+ DbEnv *dbenv;
+ DbTxn *txnp;
+ ct_entry *dbenv_ctp, *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbrename(txnp, name, subdb, newname, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_encrypt_proc */
+extern "C" void
+__env_encrypt_proc(
+ long dbenvcl_id,
+ char *passwd,
+ u_int32_t flags,
+ __env_encrypt_reply *replyp)
+/* END __env_encrypt_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_encrypt(passwd, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_flags_proc */
+extern "C" void
+__env_flags_proc(
+ long dbenvcl_id,
+ u_int32_t flags,
+ u_int32_t onoff,
+ __env_flags_reply *replyp)
+/* END __env_flags_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_flags(flags, onoff);
+ if (onoff)
+ dbenv_ctp->ct_envdp.onflags = flags;
+ else
+ dbenv_ctp->ct_envdp.offflags = flags;
+
+ replyp->status = ret;
+ return;
+}
+/* BEGIN __env_open_proc */
+extern "C" void
+__env_open_proc(
+ long dbenvcl_id,
+ char *home,
+ u_int32_t flags,
+ u_int32_t mode,
+ __env_open_reply *replyp)
+/* END __env_open_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp, *new_ctp;
+ u_int32_t newflags, shareflags;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ ret = DB_NOSERVER_HOME;
+ goto out;
+ }
+
+ /*
+ * If they are using locking do deadlock detection for them,
+ * internally.
+ */
+ if ((flags & DB_INIT_LOCK) &&
+ (ret = dbenv->set_lk_detect(DB_LOCK_DEFAULT)) != 0)
+ goto out;
+
+ if (__dbsrv_verbose) {
+ dbenv->set_errfile(stderr);
+ dbenv->set_errpfx(fullhome->home);
+ }
+
+ /*
+ * Mask off flags we ignore
+ */
+ newflags = (flags & ~DB_SERVER_FLAGMASK);
+ shareflags = (newflags & DB_SERVER_ENVFLAGS);
+ /*
+ * Check now whether we can share a handle for this env.
+ */
+ replyp->envcl_id = dbenvcl_id;
+ if ((new_ctp = __dbsrv_shareenv(dbenv_ctp, fullhome, shareflags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing env ID %ld\n", new_ctp->ct_id);
+ replyp->envcl_id = new_ctp->ct_id;
+ ret = __dbenv_close_int(dbenvcl_id, 0, 0);
+ } else {
+ ret = dbenv->open(fullhome->home, newflags, mode);
+ dbenv_ctp->ct_envdp.home = fullhome;
+ dbenv_ctp->ct_envdp.envflags = shareflags;
+ }
+out: replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_remove_proc */
+extern "C" void
+__env_remove_proc(
+ long dbenvcl_id,
+ char *home,
+ u_int32_t flags,
+ __env_remove_reply *replyp)
+/* END __env_remove_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ replyp->status = DB_NOSERVER_HOME;
+ return;
+ }
+
+ ret = dbenv->remove(fullhome->home, flags);
+ __dbdel_ctp(dbenv_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_abort_proc */
+extern "C" void
+__txn_abort_proc(
+ long txnpcl_id,
+ __txn_abort_reply *replyp)
+/* END __txn_abort_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->abort();
+ __dbdel_ctp(txnp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_begin_proc */
+extern "C" void
+__txn_begin_proc(
+ long dbenvcl_id,
+ long parentcl_id,
+ u_int32_t flags,
+ __txn_begin_reply *replyp)
+/* END __txn_begin_proc */
+{
+ DbEnv *dbenv;
+ DbTxn *parent, *txnp;
+ ct_entry *ctp, *dbenv_ctp, *parent_ctp;
+ int ret;
+
+ /*
+  * RPC stub: begin a (possibly nested) transaction in the given
+  * environment and hand a new ct entry ID back to the client.
+  */
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ parent_ctp = NULL;
+
+ /* new_ct_ent fills in replyp->status itself on failure. */
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ if (parentcl_id != 0) {
+ ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
+ parent = (DbTxn *)parent_ctp->ct_anyp;
+ /* Child shares the parent's activity-timeout marker. */
+ ctp->ct_activep = parent_ctp->ct_activep;
+ } else
+ parent = NULL;
+
+ ret = dbenv->txn_begin(parent, &txnp, flags);
+ if (ret == 0) {
+ ctp->ct_txnp = txnp;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = parent_ctp;
+ ctp->ct_envparent = dbenv_ctp;
+ replyp->txnidcl_id = ctp->ct_id;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+ } else
+ /* Begin failed: release the unused ct entry. */
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_commit_proc */
+extern "C" void
+__txn_commit_proc(
+ long txnpcl_id,
+ u_int32_t flags,
+ __txn_commit_reply *replyp)
+/* END __txn_commit_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: commit the transaction and delete its ct entry
+  * (commit invalidates the DbTxn handle whether or not it fails).
+  */
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->commit(flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_discard_proc */
+extern "C" void
+__txn_discard_proc(
+ long txnpcl_id,
+ u_int32_t flags,
+ __txn_discard_reply *replyp)
+/* END __txn_discard_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: discard a recovered (prepared-but-unresolved)
+  * transaction handle; the ct entry is deleted with it.
+  */
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->discard(flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_prepare_proc */
+extern "C" void
+__txn_prepare_proc(
+ long txnpcl_id,
+ u_int8_t *gid,
+ __txn_prepare_reply *replyp)
+/* END __txn_prepare_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: two-phase-commit prepare with the caller-supplied
+  * global transaction ID.  The handle stays live (the client must
+  * still commit, abort or discard), so the ct entry is kept.
+  */
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->prepare(gid);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_recover_proc */
+extern "C" void
+__txn_recover_proc(
+ long dbenvcl_id,
+ u_int32_t count,
+ u_int32_t flags,
+ __txn_recover_reply *replyp,
+ int * freep)
+/* END __txn_recover_proc */
+{
+ DbEnv *dbenv;
+ DbPreplist *dbprep, *p;
+ ct_entry *dbenv_ctp, *ctp;
+ long erri, i, retcount;
+ u_int32_t *txnidp;
+ int ret;
+ char *gid;
+
+ /*
+  * RPC stub: list up to "count" prepared-but-unresolved txns,
+  * creating a server-side ct entry for each and returning the
+  * parallel txn-ID / GID arrays to the client.  *freep tells the
+  * RPC layer whether the reply arrays must be xdr_free'd later.
+  */
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ *freep = 0;
+ /*
+  * Initialize so the cleanup at "out" does not test an
+  * uninitialized pointer when the allocation below fails.
+  */
+ dbprep = NULL;
+
+ if ((ret =
+ __os_malloc(dbenv->get_DB_ENV(), count * sizeof(DbPreplist), &dbprep)) != 0)
+ goto out;
+ if ((ret =
+ dbenv->txn_recover(dbprep, count, &retcount, flags)) != 0)
+ goto out;
+ /*
+  * If there is nothing, success, but it's easy.
+  */
+ replyp->retcount = retcount; // TODO: fix C++ txn_recover
+ if (retcount == 0) {
+ replyp->txn.txn_val = NULL;
+ replyp->txn.txn_len = 0;
+ replyp->gid.gid_val = NULL;
+ replyp->gid.gid_len = 0;
+ /* Nothing to marshal; skip the array setup below. */
+ goto out;
+ }
+
+ /*
+  * We have our txn list. Now we need to allocate the space for
+  * the txn ID array and the GID array and set them up.
+  */
+ if ((ret = __os_calloc(dbenv->get_DB_ENV(), retcount, sizeof(u_int32_t),
+ &replyp->txn.txn_val)) != 0)
+ goto out;
+ replyp->txn.txn_len = retcount * sizeof(u_int32_t);
+ if ((ret = __os_calloc(dbenv->get_DB_ENV(), retcount, DB_XIDDATASIZE,
+ &replyp->gid.gid_val)) != 0) {
+ __os_free(dbenv->get_DB_ENV(), replyp->txn.txn_val);
+ goto out;
+ }
+ replyp->gid.gid_len = retcount * DB_XIDDATASIZE;
+
+ /*
+  * Now walk through our results, creating parallel arrays
+  * to send back. For each entry we need to create a new
+  * txn ctp and then fill in the array info.
+  */
+ i = 0;
+ p = dbprep;
+ gid = replyp->gid.gid_val;
+ txnidp = replyp->txn.txn_val;
+ while (i++ < retcount) {
+ ctp = new_ct_ent(&ret);
+ if (ret != 0) {
+ /* i was already advanced past this failed slot. */
+ i--;
+ goto out2;
+ }
+ ctp->ct_txnp = p->txn;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = dbenv_ctp;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+
+ *txnidp = ctp->ct_id;
+ memcpy(gid, p->gid, DB_XIDDATASIZE);
+
+ p++;
+ txnidp++;
+ gid += DB_XIDDATASIZE;
+ }
+ /*
+  * If we get here, we have success and we have to set freep
+  * so it'll get properly freed next time.
+  */
+ *freep = 1;
+out:
+ if (dbprep != NULL)
+ __os_free(dbenv->get_DB_ENV(), dbprep);
+ replyp->status = ret;
+ return;
+out2:
+ /*
+  * We had an error in the middle of creating our new txn
+  * ct entries. We have to unwind all that we have done. Ugh.
+  */
+ for (txnidp = replyp->txn.txn_val, erri = 0;
+ erri < i; erri++, txnidp++) {
+ ctp = get_tableent(*txnidp);
+ __dbclear_ctp(ctp);
+ }
+ __os_free(dbenv->get_DB_ENV(), replyp->txn.txn_val);
+ __os_free(dbenv->get_DB_ENV(), replyp->gid.gid_val);
+ __os_free(dbenv->get_DB_ENV(), dbprep);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_maxkey_proc */
+extern "C" void
+__db_bt_maxkey_proc(
+ long dbpcl_id,
+ u_int32_t maxkey,
+ __db_bt_maxkey_reply *replyp)
+/* END __db_bt_maxkey_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_bt_maxkey to the server-side Db. */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_maxkey(maxkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_associate_proc */
+extern "C" void
+__db_associate_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ long sdbpcl_id,
+ u_int32_t flags,
+ __db_associate_reply *replyp)
+/* END __db_associate_proc */
+{
+ Db *dbp, *sdbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *sdbp_ctp, *txnp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: associate a secondary database with a primary.
+  * No callback can cross the wire, so the callback argument to
+  * Db::associate is always NULL (read-only secondaries).
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(sdbp_ctp, sdbpcl_id, CT_DB);
+ sdbp = (Db *)sdbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /*
+  * We do not support DB_CREATE for associate. Users
+  * can only access secondary indices on a read-only basis,
+  * so whatever they are looking for needs to be there already.
+  */
+ if (flags != 0)
+ ret = EINVAL;
+ else
+ ret = dbp->associate(txnp, sdbp, NULL, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_minkey_proc */
+extern "C" void
+__db_bt_minkey_proc(
+ long dbpcl_id,
+ u_int32_t minkey,
+ __db_bt_minkey_reply *replyp)
+/* END __db_bt_minkey_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_bt_minkey to the server-side Db. */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_minkey(minkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_close_proc */
+extern "C" void
+__db_close_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_close_reply *replyp)
+/* END __db_close_proc */
+{
+ ct_entry *dbp_ctp;
+
+ /*
+  * RPC stub: close the database.  All real work (reference
+  * counting for shared handles, ct cleanup) is in __db_close_int.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ replyp->status = __db_close_int(dbpcl_id, flags);
+ return;
+}
+
+/* BEGIN __db_create_proc */
+extern "C" void
+__db_create_proc(
+ long dbenvcl_id,
+ u_int32_t flags,
+ __db_create_reply *replyp)
+/* END __db_create_proc */
+{
+ Db *dbp;
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp, *dbp_ctp;
+
+ /*
+  * RPC stub: create a new Db handle inside an existing
+  * environment and return its ct entry ID to the client.
+  */
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ /* new_ct_ent fills in replyp->status itself on failure. */
+ dbp_ctp = new_ct_ent(&replyp->status);
+ if (dbp_ctp == NULL)
+ return ;
+ /*
+  * We actually require env's for databases. The client should
+  * have caught it, but just in case.
+  */
+ DB_ASSERT(dbenv != NULL);
+ dbp = new Db(dbenv, flags);
+ dbp_ctp->ct_dbp = dbp;
+ dbp_ctp->ct_type = CT_DB;
+ dbp_ctp->ct_parent = dbenv_ctp;
+ dbp_ctp->ct_envparent = dbenv_ctp;
+ replyp->dbcl_id = dbp_ctp->ct_id;
+ replyp->status = 0;
+ return;
+}
+
+/* BEGIN __db_del_proc */
+extern "C" void
+__db_del_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t flags,
+ __db_del_reply *replyp)
+/* END __db_del_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: rebuild the client's key Dbt from its marshalled
+  * fields and forward Db::del.  txnpcl_id == 0 means no txn.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /* Set up key */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(keyflags);
+
+ ret = dbp->del(txnp, &key, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_encrypt_proc */
+extern "C" void
+__db_encrypt_proc(
+ long dbpcl_id,
+ char *passwd,
+ u_int32_t flags,
+ __db_encrypt_reply *replyp)
+/* END __db_encrypt_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_encrypt (password + cipher flags). */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_encrypt(passwd, flags);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_extentsize_proc */
+extern "C" void
+__db_extentsize_proc(
+ long dbpcl_id,
+ u_int32_t extentsize,
+ __db_extentsize_reply *replyp)
+/* END __db_extentsize_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_q_extentsize (queue databases). */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_q_extentsize(extentsize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_flags_proc */
+extern "C" void
+__db_flags_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_flags_reply *replyp)
+/* END __db_flags_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: forward Db::set_flags; the flag set is also
+  * remembered in the ct entry so it can be compared when
+  * deciding whether a later open can share this handle.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_flags(flags);
+ dbp_ctp->ct_dbdp.setflags = flags;
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_get_proc */
+extern "C" void
+__db_get_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __db_get_reply *replyp,
+ int * freep)
+/* END __db_get_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, bulk_alloc, ret;
+ void *tmpdata;
+
+ /*
+  * RPC stub: Db::get.  Rebuilds the key/data Dbts from their
+  * marshalled fields, lets DB allocate result memory on the
+  * server, and returns the key/data bytes in the reply.  *freep
+  * tells the RPC layer to xdr_free the reply arrays afterwards.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ bulk_alloc = 0;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ /*
+ * Ignore memory related flags on server.
+ */
+ dataflags &= DB_DBT_PARTIAL;
+ if (flags & DB_MULTIPLE) {
+ /*
+  * Bulk get needs a caller-supplied user-memory buffer of
+  * dataulen bytes; allocate one here if the client sent none.
+  */
+ if (data.get_data() == 0) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ dataulen, &tmpdata);
+ if (ret != 0)
+ goto err;
+ data.set_data(tmpdata);
+ bulk_alloc = 1;
+ }
+ dataflags |= DB_DBT_USERMEM;
+ } else
+ dataflags |= DB_DBT_MALLOC;
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->get(txnp, &key, &data, flags);
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.get_data() == keydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ key.get_size(), &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, key.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size());
+ } else
+ replyp->keydata.keydata_val = (char *)key.get_data();
+
+ replyp->keydata.keydata_len = key.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ data.get_size(), &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, key.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ /* Also drop the server-side key copy made above. */
+ if (key_alloc)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+ /* Shared failure path: return an empty reply. */
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_ffactor_proc */
+extern "C" void
+__db_h_ffactor_proc(
+ long dbpcl_id,
+ u_int32_t ffactor,
+ __db_h_ffactor_reply *replyp)
+/* END __db_h_ffactor_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_h_ffactor (hash fill factor). */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_ffactor(ffactor);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_nelem_proc */
+extern "C" void
+__db_h_nelem_proc(
+ long dbpcl_id,
+ u_int32_t nelem,
+ __db_h_nelem_reply *replyp)
+/* END __db_h_nelem_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_h_nelem (hash table size hint). */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_nelem(nelem);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_key_range_proc */
+extern "C" void
+__db_key_range_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t flags,
+ __db_key_range_reply *replyp)
+/* END __db_key_range_proc */
+{
+ Db *dbp;
+ DB_KEY_RANGE range;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: Db::key_range -- estimate the fraction of keys
+  * less than / equal to / greater than the given key.
+  * NOTE(review): range is copied into the reply even when
+  * key_range fails, so those fields are then uninitialized;
+  * the client presumably checks status first.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /* Set up key */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(keyflags);
+
+ ret = dbp->key_range(txnp, &key, &range, flags);
+
+ replyp->status = ret;
+ replyp->less = range.less;
+ replyp->equal = range.equal;
+ replyp->greater = range.greater;
+ return;
+}
+
+/* BEGIN __db_lorder_proc */
+extern "C" void
+__db_lorder_proc(
+ long dbpcl_id,
+ u_int32_t lorder,
+ __db_lorder_reply *replyp)
+/* END __db_lorder_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_lorder (byte order, 1234/4321). */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_lorder(lorder);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_open_proc */
+extern "C" void
+__db_open_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ char *name,
+ char *subdb,
+ u_int32_t type,
+ u_int32_t flags,
+ u_int32_t mode,
+ __db_open_reply *replyp)
+/* END __db_open_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ DBTYPE dbtype;
+ ct_entry *dbp_ctp, *new_ctp, *txnp_ctp;
+ int isswapped, ret;
+
+ /*
+  * RPC stub: open a database.  First try to share an already
+  * open, compatible server handle; otherwise really open this
+  * one and record its identity in the ct entry for future
+  * sharing.  The reply carries the (possibly new) handle ID,
+  * actual DBTYPE, internal flags and byte order.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ replyp->dbcl_id = dbpcl_id;
+ if ((new_ctp = __dbsrv_sharedb(dbp_ctp, name, subdb, (DBTYPE)type, flags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing db ID %ld\n", new_ctp->ct_id);
+ replyp->dbcl_id = new_ctp->ct_id;
+ ret = __db_close_int(dbpcl_id, 0);
+ goto out;
+ }
+ ret = dbp->open(txnp, name, subdb, (DBTYPE)type, flags, mode);
+ if (ret == 0) {
+ (void)dbp->get_type(&dbtype);
+ replyp->type = dbtype;
+ /* XXX
+ * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send
+ * this dbp's flags back.
+ */
+ replyp->dbflags = (int) dbp->get_DB()->flags;
+ /*
+ * We need to determine the byte order of the database
+ * and send it back to the client. Determine it by
+ * the server's native order and the swapped value of
+ * the DB itself.
+ */
+ (void)dbp->get_byteswapped(&isswapped);
+ if (__db_byteorder(NULL, 1234) == 0) {
+ /* Server is little-endian. */
+ if (isswapped == 0)
+ replyp->lorder = 1234;
+ else
+ replyp->lorder = 4321;
+ } else {
+ if (isswapped == 0)
+ replyp->lorder = 4321;
+ else
+ replyp->lorder = 1234;
+ }
+ /* Remember identity for future __dbsrv_sharedb matches. */
+ dbp_ctp->ct_dbdp.type = dbtype;
+ dbp_ctp->ct_dbdp.dbflags = LF_ISSET(DB_SERVER_DBFLAGS);
+ if (name == NULL)
+ dbp_ctp->ct_dbdp.db = NULL;
+ else if ((ret = __os_strdup(dbp->get_DB()->dbenv, name,
+ &dbp_ctp->ct_dbdp.db)) != 0)
+ goto out;
+ if (subdb == NULL)
+ dbp_ctp->ct_dbdp.subdb = NULL;
+ else if ((ret = __os_strdup(dbp->get_DB()->dbenv, subdb,
+ &dbp_ctp->ct_dbdp.subdb)) != 0)
+ goto out;
+ }
+out:
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pagesize_proc */
+extern "C" void
+__db_pagesize_proc(
+ long dbpcl_id,
+ u_int32_t pagesize,
+ __db_pagesize_reply *replyp)
+/* END __db_pagesize_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_pagesize to the server-side Db. */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_pagesize(pagesize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pget_proc */
+extern "C" void
+__db_pget_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t skeydlen,
+ u_int32_t skeydoff,
+ u_int32_t skeyulen,
+ u_int32_t skeyflags,
+ void *skeydata,
+ u_int32_t skeysize,
+ u_int32_t pkeydlen,
+ u_int32_t pkeydoff,
+ u_int32_t pkeyulen,
+ u_int32_t pkeyflags,
+ void *pkeydata,
+ u_int32_t pkeysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __db_pget_reply *replyp,
+ int * freep)
+/* END __db_pget_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, ret;
+
+ /*
+  * RPC stub: Db::pget on a secondary -- returns secondary key,
+  * primary key and data.  key_alloc tracks how many reply keys
+  * were copied into fresh server allocations (0, 1 = skey only,
+  * 2 = skey and pkey) so error paths free exactly those.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data */
+ Dbt skey(skeydata, skeysize);
+ skey.set_dlen(skeydlen);
+ skey.set_ulen(skeyulen);
+ skey.set_doff(skeydoff);
+ skey.set_flags(DB_DBT_MALLOC | (skeyflags & DB_DBT_PARTIAL));
+
+ Dbt pkey(pkeydata, pkeysize);
+ pkey.set_dlen(pkeydlen);
+ pkey.set_ulen(pkeyulen);
+ pkey.set_doff(pkeydoff);
+ pkey.set_flags(DB_DBT_MALLOC | (pkeyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(DB_DBT_MALLOC | (dataflags & DB_DBT_PARTIAL));
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->pget(txnp, &skey, &pkey, &data, flags);
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.get_data() == skeydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ skey.get_size(), &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, skey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, pkey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.get_data(),
+ skey.get_size());
+ } else
+ replyp->skeydata.skeydata_val = (char *)skey.get_data();
+
+ replyp->skeydata.skeydata_len = skey.get_size();
+
+ /*
+ * Primary key
+ */
+ if (pkey.get_data() == pkeydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ pkey.get_size(), &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, skey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, pkey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ if (key_alloc)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.get_data(),
+ pkey.get_size());
+ } else
+ replyp->pkeydata.pkeydata_val = (char *)pkey.get_data();
+ replyp->pkeydata.pkeydata_len = pkey.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ data.get_size(), &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, skey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, pkey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+ /* Shared failure path: return an empty reply. */
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_put_proc */
+extern "C" void
+__db_put_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __db_put_reply *replyp,
+ int * freep)
+/* END __db_put_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: Db::put.  Only a DB_APPEND put returns data (the
+  * record number DB assigned, sent back as the reply key).
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the put */
+ ret = dbp->put(txnp, &key, &data, flags);
+ /*
+ * If the client did a DB_APPEND, set up key in reply.
+ * Otherwise just status.
+ */
+ if (ret == 0 && (flags == DB_APPEND)) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ if (key.get_data() == keydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ key.get_size(), &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, key.get_data());
+ goto err;
+ }
+ memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size());
+ } else
+ replyp->keydata.keydata_val = (char *)key.get_data();
+
+ replyp->keydata.keydata_len = key.get_size();
+ } else {
+ /* Non-append or failed put: no key to return. */
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_delim_proc */
+extern "C" void
+__db_re_delim_proc(
+ long dbpcl_id,
+ u_int32_t delim,
+ __db_re_delim_reply *replyp)
+/* END __db_re_delim_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_re_delim (recno delimiter byte). */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_delim(delim);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_len_proc */
+extern "C" void
+__db_re_len_proc(
+ long dbpcl_id,
+ u_int32_t len,
+ __db_re_len_reply *replyp)
+/* END __db_re_len_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_re_len (fixed record length). */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_len(len);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_pad_proc */
+extern "C" void
+__db_re_pad_proc(
+ long dbpcl_id,
+ u_int32_t pad,
+ __db_re_pad_reply *replyp)
+/* END __db_re_pad_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: forward Db::set_re_pad (fixed-record pad byte). */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_pad(pad);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_remove_proc */
+extern "C" void
+__db_remove_proc(
+ long dbpcl_id,
+ char *name,
+ char *subdb,
+ u_int32_t flags,
+ __db_remove_reply *replyp)
+/* END __db_remove_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: remove a database (or sub-database).  Db::remove
+  * consumes the handle, so the ct entry is deleted with it.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->remove(name, subdb, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_rename_proc */
+extern "C" void
+__db_rename_proc(
+ long dbpcl_id,
+ char *name,
+ char *subdb,
+ char *newname,
+ u_int32_t flags,
+ __db_rename_reply *replyp)
+/* END __db_rename_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: rename a database (or sub-database).  Db::rename
+  * consumes the handle, so the ct entry is deleted with it.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->rename(name, subdb, newname, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_stat_proc */
+extern "C" void
+__db_stat_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_stat_reply *replyp,
+ int * freep)
+/* END __db_stat_proc */
+{
+ Db *dbp;
+ DBTYPE type;
+ ct_entry *dbp_ctp;
+ u_int32_t *q, *p, *retsp;
+ int i, len, ret;
+ void *sp;
+
+ /*
+  * RPC stub: Db::stat.  Copy the access-method stat structure
+  * into a u_int32_t array for the list generator in the reply.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ /* Make sure freep is defined on every exit path. */
+ *freep = 0;
+
+ ret = dbp->stat(&sp, flags);
+ replyp->status = ret;
+ if (ret != 0)
+ return;
+ /*
+ * We get here, we have success. Allocate an array so that
+ * we can use the list generator. Generate the reply, free
+ * up the space.
+ */
+ /*
+ * XXX This assumes that all elements of all stat structures
+ * are u_int32_t fields. They are, currently.
+ */
+ (void)dbp->get_type(&type);
+ if (type == DB_HASH)
+ len = sizeof(DB_HASH_STAT);
+ else if (type == DB_QUEUE)
+ len = sizeof(DB_QUEUE_STAT);
+ else /* BTREE or RECNO are same stats */
+ len = sizeof(DB_BTREE_STAT);
+ replyp->stats.stats_len = len / sizeof(u_int32_t);
+
+ /*
+  * Allocate exactly len bytes and copy exactly stats_len words.
+  * The previous code allocated len * stats_len bytes and copied
+  * len words, over-reading the stat structure by a factor of
+  * sizeof(u_int32_t).
+  */
+ if ((ret = __os_umalloc(dbp->get_DB()->dbenv,
+ len, &retsp)) != 0)
+ goto out;
+ for (i = 0, q = retsp, p = (u_int32_t *)sp;
+ i < (int)replyp->stats.stats_len; i++, q++, p++)
+ *q = *p;
+ replyp->stats.stats_val = retsp;
+ __os_ufree(dbp->get_DB()->dbenv, sp);
+ if (ret == 0)
+ *freep = 1;
+out:
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_sync_proc */
+extern "C" void
+__db_sync_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_sync_reply *replyp)
+/* END __db_sync_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ /* RPC stub: flush cached database pages via Db::sync. */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->sync(flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_truncate_proc */
+extern "C" void
+__db_truncate_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t flags,
+ __db_truncate_reply *replyp)
+/* END __db_truncate_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ u_int32_t count;
+ int ret;
+
+ /*
+  * RPC stub: Db::truncate -- discard all records, returning
+  * the number discarded in the reply on success.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbp->truncate(txnp, &count, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->count = count;
+ return;
+}
+
+/* BEGIN __db_cursor_proc */
+extern "C" void
+__db_cursor_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t flags,
+ __db_cursor_reply *replyp)
+/* END __db_cursor_proc */
+{
+ Db *dbp;
+ Dbc *dbc;
+ DbTxn *txnp;
+ ct_entry *dbc_ctp, *env_ctp, *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ /*
+  * RPC stub: open a cursor on the database and return a new
+  * ct entry ID for it.  A transactional cursor shares the
+  * transaction's activity-timeout marker.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ dbc_ctp->ct_activep = txnp_ctp->ct_activep;
+ } else
+ txnp = NULL;
+
+ if ((ret = dbp->cursor(txnp, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = CT_CURSOR;
+ dbc_ctp->ct_parent = dbp_ctp;
+ env_ctp = dbp_ctp->ct_envparent;
+ dbc_ctp->ct_envparent = env_ctp;
+ __dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else
+ /* Cursor open failed: release the unused ct entry. */
+ __dbclear_ctp(dbc_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_join_proc */
+extern "C" void
+__db_join_proc(
+ long dbpcl_id,
+ u_int32_t *curs,
+ u_int32_t curslen,
+ u_int32_t flags,
+ __db_join_reply *replyp)
+/* END __db_join_proc */
+{
+ Db *dbp;
+ Dbc **jcurs, **c;
+ Dbc *dbc;
+ ct_entry *dbc_ctp, *ctp, *dbp_ctp;
+ u_int32_t *cl, i;
+ int ret;
+
+ /*
+  * RPC stub: build a join cursor over the client's list of
+  * cursor IDs.  The member cursors are retyped CT_JOIN and
+  * re-pointed at the join cursor's activity marker so they
+  * cannot time out while the join cursor is live.
+  */
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ /* NULL-terminated cursor array for Db::join. */
+ if ((ret = __os_calloc(dbp->get_DB()->dbenv,
+ curslen + 1, sizeof(Dbc *), &jcurs)) != 0) {
+ replyp->status = ret;
+ __dbclear_ctp(dbc_ctp);
+ return;
+ }
+ /*
+ * If our curslist has a parent txn, we need to use it too
+ * for the activity timeout. All cursors must be part of
+ * the same transaction, so just check the first.
+ */
+ ctp = get_tableent(*curs);
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ /*
+ * If we are using a transaction, set the join activity timer
+ * to point to the parent transaction.
+ */
+ if (ctp->ct_activep != &ctp->ct_active)
+ dbc_ctp->ct_activep = ctp->ct_activep;
+ for (i = 0, cl = curs, c = jcurs; i < curslen; i++, cl++, c++) {
+ ctp = get_tableent(*cl);
+ if (ctp == NULL) {
+ replyp->status = DB_NOSERVER_ID;
+ /*
+  * Restore the cursors already flagged as join
+  * members and release the unused join ct entry.
+  * (The original code leaked both; the unwind loop
+  * at the bottom cannot be used here because later
+  * cursors never had ct_origp saved.)
+  */
+ for (cl = curs; cl < curs + i; cl++) {
+ ctp = get_tableent(*cl);
+ ctp->ct_type = CT_CURSOR;
+ ctp->ct_activep = ctp->ct_origp;
+ }
+ __dbclear_ctp(dbc_ctp);
+ goto out;
+ }
+ /*
+ * If we are using a txn, the join cursor points to the
+ * transaction timeout. If we are not using a transaction,
+ * then all the curslist cursors must point to the join
+ * cursor's timeout so that we do not timeout any of the
+ * curlist cursors while the join cursor is active.
+ * Change the type of the curslist ctps to CT_JOIN so that
+ * we know they are part of a join list and we can distinguish
+ * them and later restore them when the join cursor is closed.
+ */
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ ctp->ct_type |= CT_JOIN;
+ ctp->ct_origp = ctp->ct_activep;
+ /*
+ * Setting this to the ct_active field of the dbc_ctp is
+ * really just a way to distinguish which join dbc this
+ * cursor is part of. The ct_activep of this cursor is
+ * not used at all during its lifetime as part of a join
+ * cursor.
+ */
+ ctp->ct_activep = &dbc_ctp->ct_active;
+ *c = ctp->ct_dbc;
+ }
+ *c = NULL;
+ if ((ret = dbp->join(jcurs, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR);
+ dbc_ctp->ct_parent = dbp_ctp;
+ dbc_ctp->ct_envparent = dbp_ctp->ct_envparent;
+ __dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else {
+ __dbclear_ctp(dbc_ctp);
+ /*
+ * If we get an error, undo what we did above to any cursors.
+ */
+ for (cl = curs; *cl != 0; cl++) {
+ ctp = get_tableent(*cl);
+ ctp->ct_type = CT_CURSOR;
+ ctp->ct_activep = ctp->ct_origp;
+ }
+ }
+
+ replyp->status = ret;
+out:
+ __os_free(dbp->get_DB()->dbenv, jcurs);
+ return;
+}
+
+/* BEGIN __dbc_close_proc */
+extern "C" void
+__dbc_close_proc(
+ long dbccl_id,
+ __dbc_close_reply *replyp)
+/* END __dbc_close_proc */
+{
+ ct_entry *dbc_ctp;
+
+ /*
+  * RPC stub: close a cursor.  All real work (restoring any
+  * CT_JOIN members, ct cleanup) lives in __dbc_close_int.
+  */
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ replyp->status = __dbc_close_int(dbc_ctp);
+ return;
+}
+
+/*
+ * __dbc_count_proc --
+ * RPC stub: return the duplicate count for the cursor's current key
+ * via Dbc::count.  replyp->dupcount is only valid when status == 0.
+ */
+/* BEGIN __dbc_count_proc */
+extern "C" void
+__dbc_count_proc(
+ long dbccl_id,
+ u_int32_t flags,
+ __dbc_count_reply *replyp)
+/* END __dbc_count_proc */
+{
+ Dbc *dbc;
+ ct_entry *dbc_ctp;
+ db_recno_t num;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ ret = dbc->count(&num, flags);
+ replyp->status = ret;
+ /* Only report the count on success; num is undefined otherwise. */
+ if (ret == 0)
+ replyp->dupcount = num;
+ return;
+}
+
+/*
+ * __dbc_del_proc --
+ * RPC stub: delete the key/data pair at the cursor's current
+ * position via Dbc::del, passing the client's flags through.
+ */
+/* BEGIN __dbc_del_proc */
+extern "C" void
+__dbc_del_proc(
+ long dbccl_id,
+ u_int32_t flags,
+ __dbc_del_reply *replyp)
+/* END __dbc_del_proc */
+{
+ Dbc *dbc;
+ ct_entry *dbc_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ ret = dbc->del(flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/*
+ * __dbc_dup_proc --
+ * RPC stub: duplicate a cursor via Dbc::dup.  On success a new
+ * handle-table entry is created for the duplicate, inheriting the
+ * parent db entry, environment, timeout, and (if present) the
+ * transaction's activity pointer; its id is returned to the client.
+ */
+/* BEGIN __dbc_dup_proc */
+extern "C" void
+__dbc_dup_proc(
+ long dbccl_id,
+ u_int32_t flags,
+ __dbc_dup_reply *replyp)
+/* END __dbc_dup_proc */
+{
+ Dbc *dbc, *newdbc;
+ ct_entry *dbc_ctp, *new_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ /* Allocate the table entry first; on failure status is already set. */
+ new_ctp = new_ct_ent(&replyp->status);
+ if (new_ctp == NULL)
+ return;
+
+ if ((ret = dbc->dup(&newdbc, flags)) == 0) {
+ new_ctp->ct_dbc = newdbc;
+ new_ctp->ct_type = CT_CURSOR;
+ new_ctp->ct_parent = dbc_ctp->ct_parent;
+ new_ctp->ct_envparent = dbc_ctp->ct_envparent;
+ /*
+ * If our cursor has a parent txn, we need to use it too.
+ */
+ if (dbc_ctp->ct_activep != &dbc_ctp->ct_active)
+ new_ctp->ct_activep = dbc_ctp->ct_activep;
+ __dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout)
+ __dbsrv_active(new_ctp);
+ replyp->dbcidcl_id = new_ctp->ct_id;
+ } else
+ /* dup failed: release the speculatively created entry. */
+ __dbclear_ctp(new_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/*
+ * __dbc_get_proc --
+ * RPC stub: Dbc::get.  Rebuilds the key/data Dbts from the wire
+ * arguments, performs the get, and copies the results into the
+ * reply.  *freep tells the RPC layer whether the reply buffers
+ * must be freed (xdr_free'd) after the reply is sent.
+ */
+/* BEGIN __dbc_get_proc */
+extern "C" void
+__dbc_get_proc(
+ long dbccl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __dbc_get_reply *replyp,
+ int * freep)
+/* END __dbc_get_proc */
+{
+ Dbc *dbc;
+ DbEnv *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, bulk_alloc, ret;
+ void *tmpdata;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+ dbenv = DbEnv::get_DbEnv(((DBC *)dbc)->dbp->dbenv);
+
+ *freep = 0;
+ bulk_alloc = 0;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ /* Server controls memory: force MALLOC, keep only PARTIAL from client. */
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ dataflags &= DB_DBT_PARTIAL;
+ /*
+ * Bulk retrieval requires a user-supplied buffer (DB_DBT_USERMEM);
+ * allocate one of the requested ulen if the client sent none.
+ */
+ if (flags & DB_MULTIPLE || flags & DB_MULTIPLE_KEY) {
+ if (data.get_data() == NULL) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ data.get_ulen(), &tmpdata);
+ if (ret != 0)
+ goto err;
+ data.set_data(tmpdata);
+ bulk_alloc = 1;
+ }
+ dataflags |= DB_DBT_USERMEM;
+ } else
+ dataflags |= DB_DBT_MALLOC;
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->get(&key, &data, flags);
+
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.get_data() == keydata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(), key.get_size(),
+ &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), key.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size());
+ } else
+ replyp->keydata.keydata_val = (char *)key.get_data();
+
+ replyp->keydata.keydata_len = key.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(), data.get_size(),
+ &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), key.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ if (key_alloc)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+ /* Error path: empty reply, free the bulk buffer if we made one. */
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ }
+ replyp->status = ret;
+ return;
+}
+
+/*
+ * __dbc_pget_proc --
+ * RPC stub: Dbc::pget on a secondary-index cursor.  Like
+ * __dbc_get_proc but with three Dbts: secondary key, primary key,
+ * and data.  *freep tells the RPC layer whether the reply buffers
+ * must be freed after the reply is sent.
+ */
+/* BEGIN __dbc_pget_proc */
+extern "C" void
+__dbc_pget_proc(
+ long dbccl_id,
+ u_int32_t skeydlen,
+ u_int32_t skeydoff,
+ u_int32_t skeyulen,
+ u_int32_t skeyflags,
+ void *skeydata,
+ u_int32_t skeysize,
+ u_int32_t pkeydlen,
+ u_int32_t pkeydoff,
+ u_int32_t pkeyulen,
+ u_int32_t pkeyflags,
+ void *pkeydata,
+ u_int32_t pkeysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __dbc_pget_reply *replyp,
+ int * freep)
+/* END __dbc_pget_proc */
+{
+ Dbc *dbc;
+ DbEnv *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+ dbenv = DbEnv::get_DbEnv(((DBC *)dbc)->dbp->dbenv);
+
+ *freep = 0;
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data */
+ Dbt skey(skeydata, skeysize);
+ skey.set_dlen(skeydlen);
+ skey.set_ulen(skeyulen);
+ skey.set_doff(skeydoff);
+ skey.set_flags(DB_DBT_MALLOC | (skeyflags & DB_DBT_PARTIAL));
+
+ Dbt pkey(pkeydata, pkeysize);
+ pkey.set_dlen(pkeydlen);
+ pkey.set_ulen(pkeyulen);
+ pkey.set_doff(pkeydoff);
+ pkey.set_flags(DB_DBT_MALLOC | (pkeyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(DB_DBT_MALLOC | (dataflags & DB_DBT_PARTIAL));
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->pget(&skey, &pkey, &data, flags);
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.get_data() == skeydata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ skey.get_size(), &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), skey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), pkey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.get_data(),
+ skey.get_size());
+ } else
+ replyp->skeydata.skeydata_val = (char *)skey.get_data();
+ replyp->skeydata.skeydata_len = skey.get_size();
+
+ /*
+ * Primary key
+ */
+ if (pkey.get_data() == pkeydata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ pkey.get_size(), &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), skey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), pkey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ if (key_alloc)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.get_data(),
+ pkey.get_size());
+ } else
+ replyp->pkeydata.pkeydata_val = (char *)pkey.get_data();
+ replyp->pkeydata.pkeydata_len = pkey.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ data.get_size(), &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), skey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), pkey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+ /* Error: return an empty reply with the failure status. */
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/*
+ * __dbc_put_proc --
+ * RPC stub: Dbc::put.  For DB_AFTER/DB_BEFORE puts on a DB_RECNO
+ * database the newly assigned record number is returned in the
+ * reply key; otherwise only the status is returned.
+ */
+/* BEGIN __dbc_put_proc */
+extern "C" void
+__dbc_put_proc(
+ long dbccl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __dbc_put_reply *replyp,
+ int * freep)
+/* END __dbc_put_proc */
+{
+ Db *dbp;
+ Dbc *dbc;
+ ct_entry *dbc_ctp;
+ int ret;
+ DBTYPE dbtype;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+ dbp = (Db *)dbc_ctp->ct_parent->ct_anyp;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the put */
+ ret = dbc->put(&key, &data, flags);
+
+ *freep = 0;
+ replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE)) {
+ ret = dbp->get_type(&dbtype);
+ if (ret == 0 && dbtype == DB_RECNO) {
+ /*
+ * We need to xdr_free whatever we are returning, next time.
+ */
+ replyp->keydata.keydata_val = (char *)key.get_data();
+ replyp->keydata.keydata_len = key.get_size();
+ }
+ }
+ replyp->status = ret;
+ return;
+}
+#endif /* HAVE_RPC */
diff --git a/storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp b/storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp
new file mode 100644
index 00000000000..60865264c00
--- /dev/null
+++ b/storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp
@@ -0,0 +1,746 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_server_cxxutil.cpp,v 1.8 2002/05/23 07:49:34 mjc Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <rpc/rpc.h>
+
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "db_cxx.h"
+#include "dbinc_auto/clib_ext.h"
+
+extern "C" {
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+extern int __dbsrv_main __P((void));
+}
+
+static int add_home __P((char *));
+static int add_passwd __P((char *));
+static int env_recover __P((char *));
+static void __dbclear_child __P((ct_entry *));
+
+static LIST_HEAD(cthead, ct_entry) __dbsrv_head;
+static LIST_HEAD(homehead, home_entry) __dbsrv_home;
+static long __dbsrv_defto = DB_SERVER_TIMEOUT;
+static long __dbsrv_maxto = DB_SERVER_MAXTIMEOUT;
+static long __dbsrv_idleto = DB_SERVER_IDLETIMEOUT;
+static char *logfile = NULL;
+static char *prog;
+
+static void usage __P((char *));
+static void version_check __P((void));
+
+int __dbsrv_verbose = 0;
+
+/*
+ * main --
+ * Entry point of the Berkeley DB RPC server.  Refuses to start if
+ * another server is registered with the portmapper, parses options
+ * (-h home, -I idle timeout, -L logfile, -P password, -t default
+ * timeout, -T max timeout, -V version, -v verbose), runs recovery on
+ * every configured environment, then enters the generated RPC loop.
+ */
+int
+main(
+ int argc,
+ char **argv)
+{
+ extern char *optarg;
+ CLIENT *cl;
+ int ch, ret;
+ char *passwd;
+
+ prog = argv[0];
+
+ version_check();
+
+ /*
+ * Check whether another server is running or not. There
+ * is a race condition where two servers could be racing to
+ * register with the portmapper. The goal of this check is to
+ * forbid running additional servers (like those started from
+ * the test suite) if the user is already running one.
+ *
+ * XXX
+ * This does not solve nor prevent two servers from being
+ * started at the same time and running recovery at the same
+ * time on the same environments.
+ */
+ if ((cl = clnt_create("localhost",
+ DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) != NULL) {
+ fprintf(stderr,
+ "%s: Berkeley DB RPC server already running.\n", prog);
+ clnt_destroy(cl);
+ return (EXIT_FAILURE);
+ }
+
+ LIST_INIT(&__dbsrv_home);
+ while ((ch = getopt(argc, argv, "h:I:L:P:t:T:Vv")) != EOF)
+ switch (ch) {
+ case 'h':
+ (void)add_home(optarg);
+ break;
+ case 'I':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_idleto))
+ return (EXIT_FAILURE);
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'P':
+ /*
+ * Copy the password, then scrub it from argv so it
+ * does not show up in process listings.
+ */
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ /*
+ * NOTE(review): this message says "strdup" but the
+ * failure actually comes from add_passwd.
+ */
+ if ((ret = add_passwd(passwd)) != 0) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 't':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_defto))
+ return (EXIT_FAILURE);
+ break;
+ case 'T':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_maxto))
+ return (EXIT_FAILURE);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ __dbsrv_verbose = 1;
+ break;
+ default:
+ usage(prog);
+ }
+ /*
+ * Check default timeout against maximum timeout
+ */
+ if (__dbsrv_defto > __dbsrv_maxto)
+ __dbsrv_defto = __dbsrv_maxto;
+
+ /*
+ * Check default timeout against idle timeout
+ * It would be bad to timeout environments sooner than txns.
+ */
+ if (__dbsrv_defto > __dbsrv_idleto)
+ fprintf(stderr,
+ "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
+ prog, __dbsrv_idleto, __dbsrv_defto);
+
+ LIST_INIT(&__dbsrv_head);
+
+ /*
+ * If a client crashes during an RPC, our reply to it
+ * generates a SIGPIPE. Ignore SIGPIPE so we don't exit unnecessarily.
+ */
+#ifdef SIGPIPE
+ signal(SIGPIPE, SIG_IGN);
+#endif
+
+ if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile))
+ return (EXIT_FAILURE);
+
+ /*
+ * Now that we are ready to start, run recovery on all the
+ * environments specified.
+ */
+ if (env_recover(prog) != 0)
+ return (EXIT_FAILURE);
+
+ /*
+ * We've done our setup, now call the generated server loop
+ */
+ if (__dbsrv_verbose)
+ printf("%s: Ready to receive requests\n", prog);
+ __dbsrv_main();
+
+ /* NOTREACHED */
+ abort();
+}
+
+/*
+ * usage --
+ * Print the command-line synopsis and exit with failure.
+ */
+static void
+usage(char *prog)
+{
+ fprintf(stderr, "usage: %s %s\n\t%s\n", prog,
+ "[-Vv] [-h home] [-P passwd]",
+ "[-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
+ exit(EXIT_FAILURE);
+}
+
+/*
+ * version_check --
+ * Verify the DB library we are linked against matches the headers
+ * we were compiled with; exit with failure on any mismatch.
+ */
+static void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ prog, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit(EXIT_FAILURE);
+ }
+}
+
+/*
+ * __dbsrv_settimeout --
+ * Set a table entry's timeout, clamped to the server maximum; a
+ * zero timeout selects the server default.
+ */
+extern "C" void
+__dbsrv_settimeout(
+ ct_entry *ctp,
+ u_int32_t to)
+{
+ if (to > (u_int32_t)__dbsrv_maxto)
+ ctp->ct_timeout = __dbsrv_maxto;
+ /*
+ * NOTE(review): 'to' is unsigned, so 'to <= 0' can only be true
+ * for to == 0 -- presumably intentional, but '== 0' would be
+ * clearer; confirm no caller passes a negative cast to unsigned.
+ */
+ else if (to <= 0)
+ ctp->ct_timeout = __dbsrv_defto;
+ else
+ ctp->ct_timeout = to;
+}
+
+/*
+ * __dbsrv_timeout --
+ * Sweep the handle table and abort/close any txns and cursors whose
+ * activity timestamp plus timeout has passed, then time out idle
+ * environment handles.  If 'force' is set, all env handles are
+ * closed regardless of idle time.  'to_hint' caches the earliest
+ * upcoming expiry so repeated calls can return immediately.
+ */
+extern "C" void
+__dbsrv_timeout(int force)
+{
+ static long to_hint = -1;
+ time_t t;
+ long to;
+ ct_entry *ctp, *nextctp;
+
+ if ((t = time(NULL)) == -1)
+ return;
+
+ /*
+ * Check hint. If hint is further in the future
+ * than now, no work to do.
+ */
+ if (!force && to_hint > 0 && t < to_hint)
+ return;
+ to_hint = -1;
+ /*
+ * Timeout transactions or cursors holding DB resources.
+ * Do this before timing out envs to properly release resources.
+ *
+ * !!!
+ * We can just loop through this list looking for cursors and txns.
+ * We do not need to verify txn and cursor relationships at this
+ * point because we maintain the list in LIFO order *and* we
+ * maintain activity in the ultimate txn parent of any cursor
+ * so either everything in a txn is timing out, or nothing.
+ * So, since we are LIFO, we will correctly close/abort all the
+ * appropriate handles, in the correct order.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ switch (ctp->ct_type) {
+ case CT_TXN:
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out txn id %ld\n",
+ ctp->ct_id);
+ (void)((DbTxn *)ctp->ct_anyp)->abort();
+ __dbdel_ctp(ctp);
+ /*
+ * If we timed out an txn, we may have closed
+ * all sorts of ctp's.
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ case CT_CURSOR:
+ case (CT_JOINCUR | CT_CURSOR):
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out cursor %ld\n",
+ ctp->ct_id);
+ (void)__dbc_close_int(ctp);
+ /*
+ * Start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * Timeout idle handles.
+ * If we are forcing a timeout, we'll close all env handles.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ to = *(ctp->ct_activep) + ctp->ct_idle;
+ /* TIMEOUT */
+ if (to < t || force) {
+ if (__dbsrv_verbose)
+ printf("Timing out env id %ld\n", ctp->ct_id);
+ (void)__dbenv_close_int(ctp->ct_id, 0, 1);
+ /*
+ * If we timed out an env, we may have closed
+ * all sorts of ctp's (maybe even all of them.
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ }
+ }
+}
+
+/*
+ * RECURSIVE FUNCTION. We need to clear/free any number of levels of nested
+ * layers.
+ */
+static void
+__dbclear_child(ct_entry *parent)
+{
+ ct_entry *ctp, *nextctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ /* NOTE(review): ct_type 0 presumably marks an unused slot. */
+ if (ctp->ct_type == 0)
+ continue;
+ if (ctp->ct_parent == parent) {
+ __dbclear_child(ctp);
+ /*
+ * Need to do this here because le_next may
+ * have changed with the recursive call and we
+ * don't want to point to a removed entry.
+ */
+ nextctp = LIST_NEXT(ctp, entries);
+ __dbclear_ctp(ctp);
+ }
+ }
+}
+
+/*
+ * __dbclear_ctp --
+ * Unlink a table entry from the global list and free it.
+ */
+extern "C" void
+__dbclear_ctp(ct_entry *ctp)
+{
+ LIST_REMOVE(ctp, entries);
+ __os_free(NULL, ctp);
+}
+
+/*
+ * __dbdel_ctp --
+ * Free a table entry and, recursively, all entries parented by it.
+ */
+extern "C" void
+__dbdel_ctp(ct_entry *parent)
+{
+ __dbclear_child(parent);
+ __dbclear_ctp(parent);
+}
+
+/*
+ * new_ct_ent --
+ * Allocate, zero, and initialize a new handle-table entry and insert
+ * it at the head of the global list.  The entry id is derived from
+ * the current time, bumped past the newest existing id to stay
+ * unique.  Returns NULL and sets *errp on failure.
+ */
+extern "C" ct_entry *
+new_ct_ent(int *errp)
+{
+ time_t t;
+ ct_entry *ctp, *octp;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(ct_entry), &ctp)) != 0) {
+ *errp = ret;
+ return (NULL);
+ }
+ memset(ctp, 0, sizeof(ct_entry));
+ /*
+ * Get the time as ID. We may service more than one request per
+ * second however. If we are, then increment id value until we
+ * find an unused one. We insert entries in LRU fashion at the
+ * head of the list. So, if the first entry doesn't match, then
+ * we know for certain that we can use our entry.
+ */
+ if ((t = time(NULL)) == -1) {
+ *errp = __os_get_errno();
+ __os_free(NULL, ctp);
+ return (NULL);
+ }
+ octp = LIST_FIRST(&__dbsrv_head);
+ if (octp != NULL && octp->ct_id >= t)
+ t = octp->ct_id + 1;
+ ctp->ct_id = t;
+ ctp->ct_idle = __dbsrv_idleto;
+ /* By default an entry tracks its own activity time. */
+ ctp->ct_activep = &ctp->ct_active;
+ ctp->ct_origp = NULL;
+ ctp->ct_refcount = 1;
+
+ LIST_INSERT_HEAD(&__dbsrv_head, ctp, entries);
+ return (ctp);
+}
+
+/*
+ * get_tableent --
+ * Linear search of the handle table for the entry with the given
+ * client id; returns NULL if not found.
+ */
+extern "C" ct_entry *
+get_tableent(long id)
+{
+ ct_entry *ctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries))
+ if (ctp->ct_id == id)
+ return (ctp);
+ return (NULL);
+}
+
+/*
+ * __dbsrv_sharedb --
+ * Find an already-open db handle this open request can share.
+ * On a match the shared entry's refcount is bumped and the entry is
+ * returned; otherwise NULL.
+ */
+extern "C" ct_entry *
+__dbsrv_sharedb(ct_entry *db_ctp, const char *name, const char *subdb, DBTYPE type, u_int32_t flags)
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share a db handle. Criteria for sharing are:
+ * If any of the non-sharable flags are set, we cannot share.
+ * Must be a db ctp, obviously.
+ * Must share the same env parent.
+ * Must be the same type, or current one DB_UNKNOWN.
+ * Must be same byteorder, or current one must not care.
+ * All flags must match.
+ * Must be same name, but don't share in-memory databases.
+ * Must be same subdb name.
+ */
+ if (flags & DB_SERVER_DBNOSHARE)
+ return (NULL);
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == db_ctp)
+ continue;
+ if (ctp->ct_type != CT_DB)
+ continue;
+ if (ctp->ct_envparent != db_ctp->ct_envparent)
+ continue;
+ if (type != DB_UNKNOWN && ctp->ct_dbdp.type != type)
+ continue;
+ if (ctp->ct_dbdp.dbflags != LF_ISSET(DB_SERVER_DBFLAGS))
+ continue;
+ if (db_ctp->ct_dbdp.setflags != 0 &&
+ ctp->ct_dbdp.setflags != db_ctp->ct_dbdp.setflags)
+ continue;
+ /* name == NULL (in-memory db) never matches, per the comment. */
+ if (name == NULL || ctp->ct_dbdp.db == NULL ||
+ strcmp(name, ctp->ct_dbdp.db) != 0)
+ continue;
+ if (subdb != ctp->ct_dbdp.subdb &&
+ (subdb == NULL || ctp->ct_dbdp.subdb == NULL ||
+ strcmp(subdb, ctp->ct_dbdp.subdb) != 0))
+ continue;
+ /*
+ * If we get here, then we match.
+ */
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+/*
+ * __dbsrv_shareenv --
+ * Find an already-open environment handle this open request can
+ * share.  On a match the shared entry's refcount is bumped, it is
+ * granted the longer of the two timeouts, and it is returned;
+ * otherwise NULL.
+ */
+extern "C" ct_entry *
+__dbsrv_shareenv(ct_entry *env_ctp, home_entry *home, u_int32_t flags)
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share an env. Criteria for sharing are:
+ * Must be an env ctp, obviously.
+ * Must share the same home env.
+ * All flags must match.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == env_ctp)
+ continue;
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ if (ctp->ct_envdp.home != home)
+ continue;
+ if (ctp->ct_envdp.envflags != flags)
+ continue;
+ if (ctp->ct_envdp.onflags != env_ctp->ct_envdp.onflags)
+ continue;
+ if (ctp->ct_envdp.offflags != env_ctp->ct_envdp.offflags)
+ continue;
+ /*
+ * If we get here, then we match. The only thing left to
+ * check is the timeout. Since the server timeout set by
+ * the client is a hint, for sharing we'll give them the
+ * benefit of the doubt and grant them the longer timeout.
+ */
+ if (ctp->ct_timeout < env_ctp->ct_timeout)
+ ctp->ct_timeout = env_ctp->ct_timeout;
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+/*
+ * __dbsrv_active --
+ * Refresh the activity timestamp of an entry (via its ct_activep,
+ * which may point at a parent txn's timer) and of its parent
+ * environment, so neither is timed out while in use.
+ */
+extern "C" void
+__dbsrv_active(ct_entry *ctp)
+{
+ time_t t;
+ ct_entry *envctp;
+
+ if (ctp == NULL)
+ return;
+ if ((t = time(NULL)) == -1)
+ return;
+ *(ctp->ct_activep) = t;
+ if ((envctp = ctp->ct_envparent) == NULL)
+ return;
+ *(envctp->ct_activep) = t;
+ return;
+}
+
+/*
+ * __db_close_int --
+ * Drop one reference on a (possibly shared) db handle; when the
+ * refcount reaches zero, close the underlying Db and free the table
+ * entry and its children.  Returns DB_NOSERVER_ID for an unknown id.
+ */
+extern "C" int
+__db_close_int(long id, u_int32_t flags)
+{
+ Db *dbp;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_DB);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing dbp id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ /* Shared handle still in use elsewhere: just deref and return. */
+ if (--ctp->ct_refcount != 0)
+ return (ret);
+ dbp = ctp->ct_dbp;
+ if (__dbsrv_verbose)
+ printf("Closing dbp id %ld\n", id);
+
+ ret = dbp->close(flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+/*
+ * __dbc_close_int --
+ * Close a cursor and free its table entry.  If it was a join
+ * cursor, restore each member cursor of the join (identified by
+ * ct_activep pointing at this entry's ct_active) to an independent
+ * CT_CURSOR with its original activity pointer.
+ */
+extern "C" int
+__dbc_close_int(ct_entry *dbc_ctp)
+{
+ Dbc *dbc;
+ int ret;
+ ct_entry *ctp;
+
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ ret = dbc->close();
+ /*
+ * If this cursor is a join cursor then we need to fix up the
+ * cursors that it was joined from so that they are independent again.
+ */
+ if (dbc_ctp->ct_type & CT_JOINCUR)
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Test if it is a join cursor, and if it is part
+ * of this one.
+ */
+ if ((ctp->ct_type & CT_JOIN) &&
+ ctp->ct_activep == &dbc_ctp->ct_active) {
+ ctp->ct_type &= ~CT_JOIN;
+ ctp->ct_activep = ctp->ct_origp;
+ __dbsrv_active(ctp);
+ }
+ }
+ __dbclear_ctp(dbc_ctp);
+ return (ret);
+
+}
+
+/*
+ * __dbenv_close_int --
+ * Drop one reference on a (possibly shared) environment handle;
+ * close the DbEnv and free the entry (and its children) when the
+ * refcount hits zero or when 'force' is set (used by the timeout
+ * sweep).  Returns DB_NOSERVER_ID for an unknown id.
+ */
+extern "C" int
+__dbenv_close_int(long id, u_int32_t flags, int force)
+{
+ DbEnv *dbenv;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_ENV);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing env id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ /*
+ * If we are timing out, we need to force the close, no matter
+ * what the refcount.
+ */
+ if (--ctp->ct_refcount != 0 && !force)
+ return (ret);
+ dbenv = ctp->ct_envp;
+ if (__dbsrv_verbose)
+ printf("Closing env id %ld\n", id);
+
+ ret = dbenv->close(flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+/*
+ * add_home --
+ * Register an environment home directory (-h option).  Stores a
+ * copy of the full path in hp->home, keeps hp->dir pointing at the
+ * caller's string, and derives hp->name as the last path component.
+ * NOTE(review): the separator-stripping loop writes NULs into the
+ * caller's buffer (argv), and hp/hp->home leak on the error paths
+ * below -- acceptable at startup, but worth confirming.
+ */
+static int
+add_home(char *home)
+{
+ home_entry *hp, *homep;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(home_entry), &hp)) != 0)
+ return (ret);
+ if ((ret = __os_malloc(NULL, strlen(home)+1, &hp->home)) != 0)
+ return (ret);
+ memcpy(hp->home, home, strlen(home)+1);
+ hp->dir = home;
+ hp->passwd = NULL;
+ /*
+ * This loop is to remove any trailing path separators,
+ * to assure hp->name points to the last component.
+ */
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ while (*(hp->name) == '\0') {
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ }
+ /*
+ * Now we have successfully added it. Make sure there are no
+ * identical names.
+ */
+ for (homep = LIST_FIRST(&__dbsrv_home); homep != NULL;
+ homep = LIST_NEXT(homep, entries))
+ if (strcmp(homep->name, hp->name) == 0) {
+ printf("Already added home name %s, at directory %s\n",
+ hp->name, homep->dir);
+ return (-1);
+ }
+ LIST_INSERT_HEAD(&__dbsrv_home, hp, entries);
+ if (__dbsrv_verbose)
+ printf("Added home %s in dir %s\n", hp->name, hp->dir);
+ return (0);
+}
+
+/*
+ * add_passwd --
+ * Attach a password (-P option) to the most recently added home.
+ * EINVAL if no home precedes the password or it already has one.
+ */
+static int
+add_passwd(char *passwd)
+{
+ home_entry *hp;
+
+ /*
+ * We add the passwd to the last given home dir. If there
+ * isn't a home dir, or the most recent one already has a
+ * passwd, then there is a user error.
+ */
+ hp = LIST_FIRST(&__dbsrv_home);
+ if (hp == NULL || hp->passwd != NULL)
+ return (EINVAL);
+ /*
+ * We've already strdup'ed the passwd above, so we don't need
+ * to malloc new space, just point to it.
+ */
+ hp->passwd = passwd;
+ return (0);
+}
+
+/*
+ * get_home --
+ * Look up a registered home by its (last-component) name; returns
+ * NULL if no home with that name was configured.
+ */
+extern "C" home_entry *
+get_home(char *name)
+{
+ home_entry *hp;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries))
+ if (strcmp(name, hp->name) == 0)
+ return (hp);
+ return (NULL);
+}
+
+/*
+ * env_recover --
+ * Open each configured home with DB_RECOVER to run recovery, then
+ * close it again.  Returns non-zero on the first home that fails
+ * to open or close.
+ */
+static int
+env_recover(char *progname)
+{
+ DbEnv *dbenv;
+ home_entry *hp;
+ u_int32_t flags;
+ int exitval, ret;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries)) {
+ exitval = 0;
+ dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ if (__dbsrv_verbose == 1) {
+ (void)dbenv->set_verbose(DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(DB_VERB_CHKPOINT, 1);
+ }
+ dbenv->set_errfile(stderr);
+ dbenv->set_errpfx(progname);
+ if (hp->passwd != NULL)
+ (void)dbenv->set_encrypt(hp->passwd, DB_ENCRYPT_AES);
+
+ /*
+ * Initialize the env with DB_RECOVER. That is all we
+ * have to do to run recovery.
+ */
+ if (__dbsrv_verbose)
+ printf("Running recovery on %s\n", hp->home);
+ flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ DB_INIT_TXN | DB_USE_ENVIRON | DB_RECOVER;
+ if ((ret = dbenv->open(hp->home, flags, 0)) != 0) {
+ dbenv->err(ret, "DbEnv->open");
+ goto error;
+ }
+
+ /*
+ * 'if (0)' idiom: the error label is only reachable via the
+ * goto above; fall-through from a successful open skips it.
+ */
+ if (0) {
+error: exitval = 1;
+ }
+ /* Close the env in both the success and error cases. */
+ if ((ret = dbenv->close(0)) != 0) {
+ exitval = 1;
+ fprintf(stderr, "%s: dbenv->close: %s\n",
+ progname, db_strerror(ret));
+ }
+ if (exitval)
+ return (exitval);
+ }
+ return (0);
+}
diff --git a/storage/bdb/rpc_server/java/DbDispatcher.java b/storage/bdb/rpc_server/java/DbDispatcher.java
new file mode 100644
index 00000000000..5c5e63fc2ad
--- /dev/null
+++ b/storage/bdb/rpc_server/java/DbDispatcher.java
@@ -0,0 +1,590 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbDispatcher.java,v 1.5 2002/08/09 01:56:08 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import org.acplt.oncrpc.OncRpcException;
+
+/**
+ * Dispatcher for RPC messages for the Java RPC server.
+ * These are hooks that translate between RPC msg/reply structures and
+ * DB calls, which keeps the real implementation code in Rpc* classes cleaner.
+ */
+public abstract class DbDispatcher extends DbServerStub
+{
+ abstract int addEnv(RpcDbEnv rdbenv);
+ abstract int addDb(RpcDb rdb);
+ abstract int addTxn(RpcDbTxn rtxn);
+ abstract int addCursor(RpcDbc rdbc);
+ abstract void delEnv(RpcDbEnv rdbenv);
+ abstract void delDb(RpcDb rdb);
+ abstract void delTxn(RpcDbTxn rtxn);
+ abstract void delCursor(RpcDbc rdbc);
+ abstract RpcDbEnv getEnv(int envid);
+ abstract RpcDb getDb(int dbid);
+ abstract RpcDbTxn getTxn(int txnbid);
+ abstract RpcDbc getCursor(int dbcid);
+
+ public DbDispatcher() throws IOException, OncRpcException
+ {
+ super();
+ }
+
+ //// Db methods
+
+ public __db_associate_reply __DB_db_associate_4001(__db_associate_msg args)
+ {
+ __db_associate_reply reply = new __db_associate_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.associate(this, args, reply);
+ return reply;
+ }
+
+ public __db_bt_maxkey_reply __DB_db_bt_maxkey_4001(__db_bt_maxkey_msg args)
+ {
+ __db_bt_maxkey_reply reply = new __db_bt_maxkey_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_bt_maxkey(this, args, reply);
+ return reply;
+ }
+
+ public __db_bt_minkey_reply __DB_db_bt_minkey_4001(__db_bt_minkey_msg args)
+ {
+ __db_bt_minkey_reply reply = new __db_bt_minkey_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_bt_minkey(this, args, reply);
+ return reply;
+ }
+
+ public __db_close_reply __DB_db_close_4001(__db_close_msg args)
+ {
+ __db_close_reply reply = new __db_close_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.close(this, args, reply);
+ return reply;
+ }
+
+ public __db_create_reply __DB_db_create_4001(__db_create_msg args)
+ {
+ __db_create_reply reply = new __db_create_reply();
+ RpcDb rdb = new RpcDb(getEnv(args.dbenvcl_id));
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.create(this, args, reply);
+ return reply;
+ }
+
+ public __db_cursor_reply __DB_db_cursor_4001(__db_cursor_msg args)
+ {
+ __db_cursor_reply reply = new __db_cursor_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.cursor(this, args, reply);
+ return reply;
+ }
+
+ public __db_del_reply __DB_db_del_4001(__db_del_msg args)
+ {
+ __db_del_reply reply = new __db_del_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.del(this, args, reply);
+ return reply;
+ }
+
+ public __db_encrypt_reply __DB_db_encrypt_4001(__db_encrypt_msg args)
+ {
+ __db_encrypt_reply reply = new __db_encrypt_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_encrypt(this, args, reply);
+ return reply;
+ }
+
+ public __db_extentsize_reply __DB_db_extentsize_4001(__db_extentsize_msg args)
+ {
+ __db_extentsize_reply reply = new __db_extentsize_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_q_extentsize(this, args, reply);
+ return reply;
+ }
+
+ public __db_flags_reply __DB_db_flags_4001(__db_flags_msg args)
+ {
+ __db_flags_reply reply = new __db_flags_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_flags(this, args, reply);
+ return reply;
+ }
+
+ public __db_get_reply __DB_db_get_4001(__db_get_msg args)
+ {
+ __db_get_reply reply = new __db_get_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.get(this, args, reply);
+ return reply;
+ }
+
+ public __db_h_ffactor_reply __DB_db_h_ffactor_4001(__db_h_ffactor_msg args)
+ {
+ __db_h_ffactor_reply reply = new __db_h_ffactor_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_h_ffactor(this, args, reply);
+ return reply;
+ }
+
+ public __db_h_nelem_reply __DB_db_h_nelem_4001(__db_h_nelem_msg args)
+ {
+ __db_h_nelem_reply reply = new __db_h_nelem_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_h_nelem(this, args, reply);
+ return reply;
+ }
+
+ public __db_join_reply __DB_db_join_4001(__db_join_msg args)
+ {
+ __db_join_reply reply = new __db_join_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.join(this, args, reply);
+ return reply;
+ }
+
+ public __db_key_range_reply __DB_db_key_range_4001(__db_key_range_msg args)
+ {
+ __db_key_range_reply reply = new __db_key_range_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.key_range(this, args, reply);
+ return reply;
+ }
+
+ public __db_lorder_reply __DB_db_lorder_4001(__db_lorder_msg args)
+ {
+ __db_lorder_reply reply = new __db_lorder_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_lorder(this, args, reply);
+ return reply;
+ }
+
+ public __db_open_reply __DB_db_open_4001(__db_open_msg args)
+ {
+ __db_open_reply reply = new __db_open_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.open(this, args, reply);
+ return reply;
+ }
+
+ public __db_pagesize_reply __DB_db_pagesize_4001(__db_pagesize_msg args)
+ {
+ __db_pagesize_reply reply = new __db_pagesize_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_pagesize(this, args, reply);
+ return reply;
+ }
+
+ public __db_pget_reply __DB_db_pget_4001(__db_pget_msg args)
+ {
+ __db_pget_reply reply = new __db_pget_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.pget(this, args, reply);
+ return reply;
+ }
+
+ public __db_put_reply __DB_db_put_4001(__db_put_msg args)
+ {
+ __db_put_reply reply = new __db_put_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.put(this, args, reply);
+ return reply;
+ }
+
+ public __db_remove_reply __DB_db_remove_4001(__db_remove_msg args)
+ {
+ __db_remove_reply reply = new __db_remove_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.remove(this, args, reply);
+ return reply;
+ }
+
+ public __db_rename_reply __DB_db_rename_4001(__db_rename_msg args)
+ {
+ __db_rename_reply reply = new __db_rename_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.rename(this, args, reply);
+ return reply;
+ }
+
+ public __db_re_delim_reply __DB_db_re_delim_4001(__db_re_delim_msg args)
+ {
+ __db_re_delim_reply reply = new __db_re_delim_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_re_delim(this, args, reply);
+ return reply;
+ }
+
+ public __db_re_len_reply __DB_db_re_len_4001(__db_re_len_msg args)
+ {
+ __db_re_len_reply reply = new __db_re_len_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_re_len(this, args, reply);
+ return reply;
+ }
+
+ public __db_re_pad_reply __DB_db_re_pad_4001(__db_re_pad_msg args)
+ {
+ __db_re_pad_reply reply = new __db_re_pad_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_re_pad(this, args, reply);
+ return reply;
+ }
+
+ public __db_stat_reply __DB_db_stat_4001(__db_stat_msg args)
+ {
+ __db_stat_reply reply = new __db_stat_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.stat(this, args, reply);
+ return reply;
+ }
+
+ public __db_sync_reply __DB_db_sync_4001(__db_sync_msg args)
+ {
+ __db_sync_reply reply = new __db_sync_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.sync(this, args, reply);
+ return reply;
+ }
+
+ public __db_truncate_reply __DB_db_truncate_4001(__db_truncate_msg args)
+ {
+ __db_truncate_reply reply = new __db_truncate_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.truncate(this, args, reply);
+ return reply;
+ }
+
+ //// Cursor methods
+
+ public __dbc_close_reply __DB_dbc_close_4001(__dbc_close_msg args)
+ {
+ __dbc_close_reply reply = new __dbc_close_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.close(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_count_reply __DB_dbc_count_4001(__dbc_count_msg args)
+ {
+ __dbc_count_reply reply = new __dbc_count_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.count(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_del_reply __DB_dbc_del_4001(__dbc_del_msg args)
+ {
+ __dbc_del_reply reply = new __dbc_del_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.del(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_dup_reply __DB_dbc_dup_4001(__dbc_dup_msg args)
+ {
+ __dbc_dup_reply reply = new __dbc_dup_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.dup(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_get_reply __DB_dbc_get_4001(__dbc_get_msg args)
+ {
+ __dbc_get_reply reply = new __dbc_get_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.get(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_pget_reply __DB_dbc_pget_4001(__dbc_pget_msg args) {
+ __dbc_pget_reply reply = new __dbc_pget_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.pget(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_put_reply __DB_dbc_put_4001(__dbc_put_msg args) {
+ __dbc_put_reply reply = new __dbc_put_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.put(this, args, reply);
+ return reply;
+ }
+
+ //// Environment methods
+
+ public __env_cachesize_reply __DB_env_cachesize_4001(__env_cachesize_msg args)
+ {
+ __env_cachesize_reply reply = new __env_cachesize_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.set_cachesize(this, args, reply);
+ return reply;
+ }
+
+ public __env_close_reply __DB_env_close_4001(__env_close_msg args)
+ {
+ __env_close_reply reply = new __env_close_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.close(this, args, reply);
+ return reply;
+ }
+
+ public __env_create_reply __DB_env_create_4001(__env_create_msg args)
+ {
+ __env_create_reply reply = new __env_create_reply();
+ RpcDbEnv rdbenv = new RpcDbEnv();
+ rdbenv.create(this, args, reply);
+ return reply;
+ }
+
+ public __env_dbremove_reply __DB_env_dbremove_4001(__env_dbremove_msg args)
+ {
+ __env_dbremove_reply reply = new __env_dbremove_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.dbremove(this, args, reply);
+ return reply;
+ }
+
+ public __env_dbrename_reply __DB_env_dbrename_4001(__env_dbrename_msg args)
+ {
+ __env_dbrename_reply reply = new __env_dbrename_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.dbrename(this, args, reply);
+ return reply;
+ }
+
+ public __env_encrypt_reply __DB_env_encrypt_4001(__env_encrypt_msg args)
+ {
+ __env_encrypt_reply reply = new __env_encrypt_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.set_encrypt(this, args, reply);
+ return reply;
+ }
+
+ public __env_flags_reply __DB_env_flags_4001(__env_flags_msg args)
+ {
+ __env_flags_reply reply = new __env_flags_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.set_flags(this, args, reply);
+ return reply;
+ }
+
+ public __env_open_reply __DB_env_open_4001(__env_open_msg args)
+ {
+ __env_open_reply reply = new __env_open_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.open(this, args, reply);
+ return reply;
+ }
+
+ public __env_remove_reply __DB_env_remove_4001(__env_remove_msg args)
+ {
+ __env_remove_reply reply = new __env_remove_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.remove(this, args, reply);
+ return reply;
+ }
+
+ //// Transaction methods
+
+ public __txn_abort_reply __DB_txn_abort_4001(__txn_abort_msg args)
+ {
+ __txn_abort_reply reply = new __txn_abort_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.abort(this, args, reply);
+ return reply;
+ }
+
+ public __txn_begin_reply __DB_txn_begin_4001(__txn_begin_msg args)
+ {
+ __txn_begin_reply reply = new __txn_begin_reply();
+ RpcDbTxn rdbtxn = new RpcDbTxn(getEnv(args.dbenvcl_id), null);
+ rdbtxn.begin(this, args, reply);
+ return reply;
+ }
+
+ public __txn_commit_reply __DB_txn_commit_4001(__txn_commit_msg args)
+ {
+ __txn_commit_reply reply = new __txn_commit_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.commit(this, args, reply);
+ return reply;
+ }
+
+ public __txn_discard_reply __DB_txn_discard_4001(__txn_discard_msg args)
+ {
+ __txn_discard_reply reply = new __txn_discard_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.discard(this, args, reply);
+ return reply;
+ }
+
+ public __txn_prepare_reply __DB_txn_prepare_4001(__txn_prepare_msg args)
+ {
+ __txn_prepare_reply reply = new __txn_prepare_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.prepare(this, args, reply);
+ return reply;
+ }
+
+ public __txn_recover_reply __DB_txn_recover_4001(__txn_recover_msg args)
+ {
+ __txn_recover_reply reply = new __txn_recover_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.txn_recover(this, args, reply);
+ return reply;
+ }
+}
diff --git a/storage/bdb/rpc_server/java/DbServer.java b/storage/bdb/rpc_server/java/DbServer.java
new file mode 100644
index 00000000000..9b20becbcdc
--- /dev/null
+++ b/storage/bdb/rpc_server/java/DbServer.java
@@ -0,0 +1,301 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbServer.java,v 1.5 2002/08/09 01:56:09 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.*;
+import java.util.*;
+import org.acplt.oncrpc.OncRpcException;
+import org.acplt.oncrpc.server.OncRpcCallInformation;
+
+/**
+ * Main entry point for the Java version of the Berkeley DB RPC server
+ */
+public class DbServer extends DbDispatcher
+{
+	public static long idleto = 10 * 60 * 1000;	// 10 minutes
+ public static long defto = 5 * 60 * 1000; // 5 minutes
+ public static long maxto = 60 * 60 * 1000; // 1 hour
+ public static String passwd = null;
+ public static PrintWriter err;
+
+ long now, hint; // updated each operation
+ FreeList env_list = new FreeList();
+ FreeList db_list = new FreeList();
+ FreeList txn_list = new FreeList();
+ FreeList cursor_list = new FreeList();
+
+ public DbServer() throws IOException, OncRpcException
+ {
+ super();
+ init_lists();
+ }
+
+ public void dispatchOncRpcCall(OncRpcCallInformation call, int program,
+ int version, int procedure) throws OncRpcException, IOException
+ {
+ long newnow = System.currentTimeMillis();
+ // DbServer.err.println("Dispatching RPC call " + procedure + " after delay of " + (newnow - now));
+ now = newnow;
+ // DbServer.err.flush();
+ super.dispatchOncRpcCall(call, program, version, procedure);
+
+ try {
+ doTimeouts();
+ } catch(Throwable t) {
+ System.err.println("Caught " + t + " during doTimeouts()");
+ t.printStackTrace(System.err);
+ }
+ }
+
+ // Internal methods to track context
+ private void init_lists()
+ {
+ // We do this so that getEnv/Db/etc(0) == null
+ env_list.add(null);
+ db_list.add(null);
+ txn_list.add(null);
+ cursor_list.add(null);
+ }
+
+ int addEnv(RpcDbEnv rdbenv)
+ {
+ rdbenv.timer.last_access = now;
+ int id = env_list.add(rdbenv);
+ return id;
+ }
+
+ int addDb(RpcDb rdb)
+ {
+ int id = db_list.add(rdb);
+ return id;
+ }
+
+ int addTxn(RpcDbTxn rtxn)
+ {
+ rtxn.timer.last_access = now;
+ int id = txn_list.add(rtxn);
+ return id;
+ }
+
+ int addCursor(RpcDbc rdbc)
+ {
+ rdbc.timer.last_access = now;
+ int id = cursor_list.add(rdbc);
+ return id;
+ }
+
+ void delEnv(RpcDbEnv rdbenv)
+ {
+ // cursors and transactions will already have been cleaned up
+ for(LocalIterator i = db_list.iterator(); i.hasNext(); ) {
+ RpcDb rdb = (RpcDb)i.next();
+ if (rdb != null && rdb.rdbenv == rdbenv)
+ delDb(rdb);
+ }
+
+ env_list.del(rdbenv);
+ rdbenv.dispose();
+ }
+
+ void delDb(RpcDb rdb)
+ {
+ db_list.del(rdb);
+ rdb.dispose();
+
+ for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ if (rdbc != null && rdbc.timer == rdb)
+ i.remove();
+ }
+ }
+
+ void delTxn(RpcDbTxn rtxn)
+ {
+ txn_list.del(rtxn);
+ rtxn.dispose();
+
+ for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ if (rdbc != null && rdbc.timer == rtxn)
+ i.remove();
+ }
+
+ for(LocalIterator i = txn_list.iterator(); i.hasNext(); ) {
+ RpcDbTxn rtxn_child = (RpcDbTxn)i.next();
+ if (rtxn_child != null && rtxn_child.timer == rtxn)
+ i.remove();
+ }
+ }
+
+ void delCursor(RpcDbc rdbc)
+ {
+ cursor_list.del(rdbc);
+ rdbc.dispose();
+ }
+
+ RpcDbEnv getEnv(int envid)
+ {
+ RpcDbEnv rdbenv = (RpcDbEnv)env_list.get(envid);
+ if (rdbenv != null)
+ rdbenv.timer.last_access = now;
+ return rdbenv;
+ }
+
+ RpcDb getDb(int dbid)
+ {
+ RpcDb rdb = (RpcDb)db_list.get(dbid);
+ if (rdb != null)
+ rdb.rdbenv.timer.last_access = now;
+ return rdb;
+ }
+
+ RpcDbTxn getTxn(int txnid)
+ {
+ RpcDbTxn rtxn = (RpcDbTxn)txn_list.get(txnid);
+ if (rtxn != null)
+ rtxn.timer.last_access = rtxn.rdbenv.timer.last_access = now;
+ return rtxn;
+ }
+
+ RpcDbc getCursor(int dbcid)
+ {
+ RpcDbc rdbc = (RpcDbc)cursor_list.get(dbcid);
+ if (rdbc != null)
+ rdbc.last_access = rdbc.timer.last_access = rdbc.rdbenv.timer.last_access = now;
+ return rdbc;
+ }
+
+ void doTimeouts()
+ {
+ if (now < hint) {
+ // DbServer.err.println("Skipping cleaner sweep - now = " + now + ", hint = " + hint);
+ return;
+ }
+
+ // DbServer.err.println("Starting a cleaner sweep");
+ hint = now + DbServer.maxto;
+
+ for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ if (rdbc == null)
+ continue;
+
+ long end_time = rdbc.timer.last_access + rdbc.rdbenv.timeout;
+ // DbServer.err.println("Examining " + rdbc + ", time left = " + (end_time - now));
+ if (end_time < now) {
+ DbServer.err.println("Cleaning up " + rdbc);
+ delCursor(rdbc);
+ } else if (end_time < hint)
+ hint = end_time;
+ }
+
+ for(LocalIterator i = txn_list.iterator(); i.hasNext(); ) {
+ RpcDbTxn rtxn = (RpcDbTxn)i.next();
+ if (rtxn == null)
+ continue;
+
+ long end_time = rtxn.timer.last_access + rtxn.rdbenv.timeout;
+ // DbServer.err.println("Examining " + rtxn + ", time left = " + (end_time - now));
+ if (end_time < now) {
+ DbServer.err.println("Cleaning up " + rtxn);
+ delTxn(rtxn);
+ } else if (end_time < hint)
+ hint = end_time;
+ }
+
+ for(LocalIterator i = env_list.iterator(); i.hasNext(); ) {
+ RpcDbEnv rdbenv = (RpcDbEnv)i.next();
+ if (rdbenv == null)
+ continue;
+
+ long end_time = rdbenv.timer.last_access + rdbenv.idletime;
+ // DbServer.err.println("Examining " + rdbenv + ", time left = " + (end_time - now));
+ if (end_time < now) {
+ DbServer.err.println("Cleaning up " + rdbenv);
+ delEnv(rdbenv);
+ }
+ }
+
+ // if we didn't find anything, reset the hint
+ if (hint == now + DbServer.maxto)
+ hint = 0;
+
+ // DbServer.err.println("Finishing a cleaner sweep");
+ }
+
+ // Some constants that aren't available elsewhere
+ static final int DB_SERVER_FLAGMASK = Db.DB_LOCKDOWN |
+ Db.DB_PRIVATE | Db.DB_RECOVER | Db.DB_RECOVER_FATAL |
+ Db.DB_SYSTEM_MEM | Db.DB_USE_ENVIRON |
+ Db.DB_USE_ENVIRON_ROOT;
+ static final int DB_SERVER_ENVFLAGS = Db.DB_INIT_CDB |
+ Db.DB_INIT_LOCK | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL |
+ Db.DB_INIT_TXN | Db.DB_JOINENV;
+ static final int DB_SERVER_DBFLAGS = Db.DB_DIRTY_READ |
+ Db.DB_NOMMAP | Db.DB_RDONLY;
+ static final int DB_SERVER_DBNOSHARE = Db.DB_EXCL | Db.DB_TRUNCATE;
+
+ public static void main(String[] args)
+ {
+ System.out.println("Starting DbServer...");
+ for (int i = 0; i < args.length; i++) {
+ if (args[i].charAt(0) != '-')
+ usage();
+
+ switch (args[i].charAt(1)) {
+ case 'h':
+ ++i; // add_home(args[++i]);
+ break;
+ case 'I':
+ idleto = Long.parseLong(args[++i]) * 1000L;
+ break;
+ case 'P':
+ passwd = args[++i];
+ break;
+ case 't':
+ defto = Long.parseLong(args[++i]) * 1000L;
+ break;
+ case 'T':
+ maxto = Long.parseLong(args[++i]) * 1000L;
+ break;
+ case 'V':
+ // version;
+ break;
+ case 'v':
+ // verbose
+ break;
+ default:
+ usage();
+ }
+ }
+
+ try {
+ DbServer.err = new PrintWriter(new FileOutputStream("JavaRPCServer.trace", true));
+ DbServer server = new DbServer();
+ server.run();
+ } catch (Throwable e) {
+ System.out.println("DbServer exception:");
+ e.printStackTrace(DbServer.err);
+ } finally {
+ if (DbServer.err != null)
+ DbServer.err.close();
+ }
+
+ System.out.println("DbServer stopped.");
+ }
+
+ static void usage()
+ {
+ System.err.println("usage: java com.sleepycat.db.rpcserver.DbServer \\");
+ System.err.println("[-Vv] [-h home] [-P passwd] [-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
+ System.exit(1);
+ }
+}
diff --git a/storage/bdb/rpc_server/java/FreeList.java b/storage/bdb/rpc_server/java/FreeList.java
new file mode 100644
index 00000000000..e831c466137
--- /dev/null
+++ b/storage/bdb/rpc_server/java/FreeList.java
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: FreeList.java,v 1.3 2002/08/09 01:56:09 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import java.util.*;
+
+/**
+ * Keep track of a list of objects by id with a free list.
+ * Intentionally package-protected exposure.
+ */
+class FreeList
+{
+ class FreeIndex {
+ int index;
+ FreeIndex(int index) { this.index = index; }
+ int getIndex() { return index; }
+ }
+
+ Vector items = new Vector();
+ FreeIndex free_head = null;
+
+ public synchronized int add(Object obj) {
+ int pos;
+ if (free_head == null) {
+ pos = items.size();
+ items.addElement(obj);
+ if (pos % 1000 == 0)
+ DbServer.err.println(this + " grew to size " + pos);
+ } else {
+ pos = free_head.getIndex();
+ free_head = (FreeIndex)items.elementAt(pos);
+ items.setElementAt(obj, pos);
+ }
+ return pos;
+ }
+
+ public synchronized void del(int pos) {
+ Object obj = items.elementAt(pos);
+ if (obj != null && obj instanceof FreeIndex)
+ throw new NoSuchElementException("index " + pos + " has already been freed");
+ items.setElementAt(free_head, pos);
+ free_head = new FreeIndex(pos);
+ }
+
+ public void del(Object obj) {
+ del(items.indexOf(obj));
+ }
+
+ public Object get(int pos) {
+ Object obj = items.elementAt(pos);
+ if (obj instanceof FreeIndex)
+ obj = null;
+ return obj;
+ }
+
+ public LocalIterator iterator() {
+ return new FreeListIterator();
+ }
+
+ /**
+ * Iterator for a FreeList. Note that this class doesn't implement
+ * java.util.Iterator to maintain compatibility with Java 1.1
+ * Intentionally package-protected exposure.
+ */
+ class FreeListIterator implements LocalIterator {
+ int current;
+
+ FreeListIterator() { current = findNext(-1); }
+
+ private int findNext(int start) {
+ int next = start;
+ while (++next < items.size()) {
+ Object obj = items.elementAt(next);
+ if (obj == null || !(obj instanceof FreeIndex))
+ break;
+ }
+ return next;
+ }
+
+ public boolean hasNext() {
+ return (findNext(current) < items.size());
+ }
+
+ public Object next() {
+ current = findNext(current);
+ if (current == items.size())
+ throw new NoSuchElementException("enumerated past end of FreeList");
+ return items.elementAt(current);
+ }
+
+ public void remove() {
+ del(current);
+ }
+ }
+}
diff --git a/storage/bdb/rpc_server/java/LocalIterator.java b/storage/bdb/rpc_server/java/LocalIterator.java
new file mode 100644
index 00000000000..eecb0b5e78d
--- /dev/null
+++ b/storage/bdb/rpc_server/java/LocalIterator.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: LocalIterator.java,v 1.2 2002/08/09 01:56:09 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import java.util.*;
+
+/**
+ * Iterator interface. Note that this matches java.util.Iterator
+ * but maintains compatibility with Java 1.1
+ * Intentionally package-protected exposure.
+ */
+interface LocalIterator {
+ boolean hasNext();
+ Object next();
+ void remove();
+}
diff --git a/storage/bdb/rpc_server/java/README b/storage/bdb/rpc_server/java/README
new file mode 100644
index 00000000000..c2d8f3abd57
--- /dev/null
+++ b/storage/bdb/rpc_server/java/README
@@ -0,0 +1,24 @@
+Berkeley DB Java RPC server, copyright (C) 2002 Sleepycat Software
+
+The Java implementation of the Berkeley DB RPC server is intended
+primarily for testing purposes. It provides the same interface
+as the C and C++ RPC servers, but is implemented via the Java API
+rather than the C or C++ APIs. This allows the existing Tcl test
+suite to exercise the Java API without modification.
+
+The Java RPC server relies on a Java version of rpcgen to
+automatically generate appropriate Java classes from the RPC
+interface specification (../db_server.x). We use jrpcgen, which
+is part of the Remote Tea for Java project:
+ acplt.plt.rwth-aachen.de/ks/english/remotetea.html
+
+To rebuild the Java stubs from db_server.x, you will need to
+download the full Remote Tea package, but if you just want to
+compile the Java sources and run the Java RPC server, the runtime
+component of Remote Tea is included in oncrpc.jar. Building
+the Java RPC server is automatic when Berkeley DB is configured
+with both --enable-rpc and --enable-java.
+
+All of the Remote Tea project is licensed under the GNU Library
+General Public License, and we have made no modifications to their
+released code.
diff --git a/storage/bdb/rpc_server/java/RpcDb.java b/storage/bdb/rpc_server/java/RpcDb.java
new file mode 100644
index 00000000000..59da9be67dc
--- /dev/null
+++ b/storage/bdb/rpc_server/java/RpcDb.java
@@ -0,0 +1,694 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: RpcDb.java,v 1.8 2002/08/09 01:56:09 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a db object for the Java RPC server.
+ */
+public class RpcDb extends Timer
+{
+ static final byte[] empty = new byte[0];
+ Db db;
+ RpcDbEnv rdbenv;
+ int refcount = 1;
+ String dbname, subdbname;
+ int type, setflags, openflags;
+
+ public RpcDb(RpcDbEnv rdbenv)
+ {
+ this.rdbenv = rdbenv;
+ }
+
+ void dispose()
+ {
+ if (db != null) {
+ try {
+ db.close(0);
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ db = null;
+ }
+ }
+
+ public void associate(DbDispatcher server,
+ __db_associate_msg args, __db_associate_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ /*
+ * We do not support DB_CREATE for associate. Users
+ * can only access secondary indices on a read-only basis,
+ * so whatever they are looking for needs to be there already.
+ */
+ db.associate(txn, server.getDb(args.sdbpcl_id).db, null, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void close(DbDispatcher server,
+ __db_close_msg args, __db_close_reply reply)
+ {
+ if (--refcount != 0) {
+ reply.status = 0;
+ return;
+ }
+
+ try {
+ db.close(args.flags);
+ db = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delDb(this);
+ }
+ }
+
+ public void create(DbDispatcher server,
+ __db_create_msg args, __db_create_reply reply)
+ {
+ try {
+ db = new Db(server.getEnv(args.dbenvcl_id).dbenv, args.flags);
+ reply.dbcl_id = server.addDb(this);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void cursor(DbDispatcher server,
+ __db_cursor_msg args, __db_cursor_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbc dbc = db.cursor(txn, args.flags);
+ RpcDbc rdbc = new RpcDbc(this, dbc, false);
+ rdbc.timer = (rtxn != null) ? rtxn.timer : this;
+ reply.dbcidcl_id = server.addCursor(rdbc);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void del(DbDispatcher server,
+ __db_del_msg args, __db_del_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(args.keyflags);
+
+ db.del(txn, key, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void get(DbDispatcher server,
+ __db_get_msg args, __db_get_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(Db.DB_DBT_MALLOC |
+ (args.keyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ if ((args.flags & Db.DB_MULTIPLE) != 0) {
+ if (data.get_data().length == 0)
+ data.set_data(new byte[data.get_ulen()]);
+ data.set_flags(Db.DB_DBT_USERMEM |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+ } else
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ reply.status = db.get(txn, key, data, args.flags);
+
+ if (key.get_data() == args.keydata ||
+ key.get_data().length != key.get_size()) {
+ reply.keydata = new byte[key.get_size()];
+ System.arraycopy(key.get_data(), 0, reply.keydata, 0, key.get_size());
+ } else
+ reply.keydata = key.get_data();
+
+ if (data.get_data() == args.datadata ||
+ data.get_data().length != data.get_size()) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.keydata = reply.datadata = empty;
+ }
+ }
+
+ public void join(DbDispatcher server,
+ __db_join_msg args, __db_join_reply reply)
+ {
+ try {
+ Dbc[] cursors = new Dbc[args.curs.length + 1];
+ for(int i = 0; i < args.curs.length; i++) {
+ RpcDbc rdbc = server.getCursor(args.curs[i]);
+ if (rdbc == null) {
+ reply.status = Db.DB_NOSERVER_ID;
+ return;
+ }
+ cursors[i] = rdbc.dbc;
+ }
+ cursors[args.curs.length] = null;
+
+ Dbc jdbc = db.join(cursors, args.flags);
+
+ RpcDbc rjdbc = new RpcDbc(this, jdbc, true);
+ /*
+ * If our curslist has a parent txn, we need to use it too
+ * for the activity timeout. All cursors must be part of
+ * the same transaction, so just check the first.
+ */
+ RpcDbc rdbc0 = server.getCursor(args.curs[0]);
+ if (rdbc0.timer != rdbc0)
+ rjdbc.timer = rdbc0.timer;
+
+ /*
+ * All of the curslist cursors must point to the join
+ * cursor's timeout so that we do not timeout any of the
+ * curslist cursors while the join cursor is active.
+ */
+ for(int i = 0; i < args.curs.length; i++) {
+ RpcDbc rdbc = server.getCursor(args.curs[i]);
+ rdbc.orig_timer = rdbc.timer;
+ rdbc.timer = rjdbc;
+ }
+ reply.dbcidcl_id = server.addCursor(rjdbc);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void key_range(DbDispatcher server,
+ __db_key_range_msg args, __db_key_range_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(args.keyflags);
+
+ DbKeyRange range = new DbKeyRange();
+
+ db.key_range(txn, key, range, args.flags);
+ reply.status = 0;
+ reply.less = range.less;
+ reply.equal = range.equal;
+ reply.greater = range.greater;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ private boolean findSharedDb(DbDispatcher server, __db_open_reply reply)
+ throws DbException
+ {
+ RpcDb rdb = null;
+ boolean matchFound = false;
+ LocalIterator i = ((DbServer)server).db_list.iterator();
+
+ while (!matchFound && i.hasNext()) {
+ rdb = (RpcDb)i.next();
+ if (rdb != null && rdb != this && rdb.rdbenv == rdbenv &&
+ (type == Db.DB_UNKNOWN || rdb.type == type) &&
+ openflags == rdb.openflags &&
+ setflags == rdb.setflags &&
+ dbname != null && rdb.dbname != null &&
+ dbname.equals(rdb.dbname) &&
+ (subdbname == rdb.subdbname ||
+ (subdbname != null && rdb.subdbname != null &&
+ subdbname.equals(rdb.subdbname))))
+ matchFound = true;
+ }
+
+ if (matchFound) {
+ ++rdb.refcount;
+ reply.dbcl_id = ((FreeList.FreeListIterator)i).current;
+ reply.type = rdb.db.get_type();
+ reply.dbflags = rdb.db.get_flags_raw();
+ // FIXME: not possible to work out byteorder from Java?
+ reply.lorder = rdb.db.get_byteswapped() ? 4321 : 1234;
+ reply.status = 0;
+
+ DbServer.err.println("Sharing Db: " + reply.dbcl_id);
+ }
+
+ return matchFound;
+ }
+
+ public void open(DbDispatcher server,
+ __db_open_msg args, __db_open_reply reply)
+ {
+ try {
+ dbname = (args.name.length() > 0) ? args.name : null;
+ subdbname = (args.subdb.length() > 0) ? args.subdb : null;
+ type = args.type;
+ openflags = args.flags & DbServer.DB_SERVER_DBFLAGS;
+
+ if (findSharedDb(server, reply)) {
+ db.close(0);
+ db = null;
+ server.delDb(this);
+ } else {
+ DbServer.err.println("Calling db.open(" + null + ", " + dbname + ", " + subdbname + ", " + args.type + ", " + Integer.toHexString(args.flags) + ", " + args.mode + ")");
+ db.open(null, dbname, subdbname, args.type, args.flags, args.mode);
+
+ reply.dbcl_id = args.dbpcl_id;
+ reply.type = this.type = db.get_type();
+ reply.dbflags = db.get_flags_raw();
+ // FIXME: not possible to work out byteorder from Java?
+ reply.lorder = db.get_byteswapped() ? 4321 : 1234;
+ reply.status = 0;
+ }
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = Db.DB_NOTFOUND;
+ }
+
+ // System.err.println("Db.open: reply.status = " + reply.status + ", reply.dbcl_id = " + reply.dbcl_id);
+ }
+
+ public void pget(DbDispatcher server,
+ __db_pget_msg args, __db_pget_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt skey = new Dbt(args.skeydata);
+ skey.set_dlen(args.skeydlen);
+ skey.set_doff(args.skeydoff);
+ skey.set_ulen(args.skeyulen);
+ skey.set_flags(Db.DB_DBT_MALLOC |
+ (args.skeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt pkey = new Dbt(args.pkeydata);
+ pkey.set_dlen(args.pkeydlen);
+ pkey.set_doff(args.pkeydoff);
+ pkey.set_ulen(args.pkeyulen);
+ pkey.set_flags(Db.DB_DBT_MALLOC |
+ (args.pkeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ db.pget(txn, skey, pkey, data, args.flags);
+
+ if (skey.get_data() == args.skeydata ||
+ skey.get_data().length != skey.get_size()) {
+ reply.skeydata = new byte[skey.get_size()];
+ System.arraycopy(skey.get_data(), 0, reply.skeydata, 0, skey.get_size());
+ } else
+ reply.skeydata = skey.get_data();
+
+ if (pkey.get_data() == args.pkeydata ||
+ pkey.get_data().length != pkey.get_size()) {
+ reply.pkeydata = new byte[pkey.get_size()];
+ System.arraycopy(pkey.get_data(), 0, reply.pkeydata, 0, pkey.get_size());
+ } else
+ reply.pkeydata = pkey.get_data();
+
+ if (data.get_data() == args.datadata ||
+ data.get_data().length != data.get_size()) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.skeydata = reply.pkeydata = reply.datadata = empty;
+ }
+ }
+
+ public void put(DbDispatcher server,
+ __db_put_msg args, __db_put_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(Db.DB_DBT_MALLOC |
+ (args.keyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ data.set_flags(args.dataflags);
+
+ reply.status = db.put(txn, key, data, args.flags);
+
+ /*
+ * If the client did a DB_APPEND, set up key in reply.
+ * Otherwise just status.
+ */
+ if ((args.flags & Db.DB_APPEND) != 0) {
+ if (key.get_data() == args.keydata ||
+ key.get_data().length != key.get_size()) {
+ reply.keydata = new byte[key.get_size()];
+ System.arraycopy(key.get_data(), 0, reply.keydata, 0, key.get_size());
+ } else
+ reply.keydata = key.get_data();
+ } else
+ reply.keydata = empty;
+ } catch(DbException e) {
+ reply.keydata = empty;
+ reply.status = e.get_errno();
+ DbServer.err.println("Exception, setting status to " + reply.status);
+ e.printStackTrace(DbServer.err);
+ }
+ }
+
+ public void remove(DbDispatcher server,
+ __db_remove_msg args, __db_remove_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+ db.remove(args.name, args.subdb, args.flags);
+ db = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = Db.DB_NOTFOUND;
+ } finally {
+ server.delDb(this);
+ }
+ }
+
+ public void rename(DbDispatcher server,
+ __db_rename_msg args, __db_rename_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+ args.newname = (args.newname.length() > 0) ? args.newname : null;
+ db.rename(args.name, args.subdb, args.newname, args.flags);
+ db = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = Db.DB_NOTFOUND;
+ } finally {
+ server.delDb(this);
+ }
+ }
+
+ public void set_bt_maxkey(DbDispatcher server,
+ __db_bt_maxkey_msg args, __db_bt_maxkey_reply reply)
+ {
+ try {
+ db.set_bt_maxkey(args.maxkey);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_bt_minkey(DbDispatcher server,
+ __db_bt_minkey_msg args, __db_bt_minkey_reply reply)
+ {
+ try {
+ db.set_bt_minkey(args.minkey);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_encrypt(DbDispatcher server,
+ __db_encrypt_msg args, __db_encrypt_reply reply)
+ {
+ try {
+ db.set_encrypt(args.passwd, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_flags(DbDispatcher server,
+ __db_flags_msg args, __db_flags_reply reply)
+ {
+ try {
+ // DbServer.err.println("Calling db.setflags(" + Integer.toHexString(args.flags) + ")");
+ db.set_flags(args.flags);
+ setflags |= args.flags;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_h_ffactor(DbDispatcher server,
+ __db_h_ffactor_msg args, __db_h_ffactor_reply reply)
+ {
+ try {
+ db.set_h_ffactor(args.ffactor);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_h_nelem(DbDispatcher server,
+ __db_h_nelem_msg args, __db_h_nelem_reply reply)
+ {
+ try {
+ db.set_h_nelem(args.nelem);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_lorder(DbDispatcher server,
+ __db_lorder_msg args, __db_lorder_reply reply)
+ {
+ try {
+ db.set_lorder(args.lorder);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_pagesize(DbDispatcher server,
+ __db_pagesize_msg args, __db_pagesize_reply reply)
+ {
+ try {
+ db.set_pagesize(args.pagesize);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_q_extentsize(DbDispatcher server,
+ __db_extentsize_msg args, __db_extentsize_reply reply)
+ {
+ try {
+ db.set_q_extentsize(args.extentsize);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_re_delim(DbDispatcher server,
+ __db_re_delim_msg args, __db_re_delim_reply reply)
+ {
+ try {
+ db.set_re_delim(args.delim);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_re_len(DbDispatcher server,
+ __db_re_len_msg args, __db_re_len_reply reply)
+ {
+ try {
+ db.set_re_len(args.len);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_re_pad(DbDispatcher server,
+ __db_re_pad_msg args, __db_re_pad_reply reply)
+ {
+ try {
+ db.set_re_pad(args.pad);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void stat(DbDispatcher server,
+ __db_stat_msg args, __db_stat_reply reply)
+ {
+ try {
+ Object raw_stat = db.stat(args.flags);
+
+ if (raw_stat instanceof DbHashStat) {
+ DbHashStat hs = (DbHashStat)raw_stat;
+ int[] raw_stats = {
+ hs.hash_magic, hs.hash_version,
+ hs.hash_metaflags, hs.hash_nkeys,
+ hs.hash_ndata, hs.hash_pagesize,
+ hs.hash_ffactor, hs.hash_buckets,
+ hs.hash_free, hs.hash_bfree,
+ hs.hash_bigpages, hs.hash_big_bfree,
+ hs.hash_overflows, hs.hash_ovfl_free,
+ hs.hash_dup, hs.hash_dup_free
+ };
+ reply.stats = raw_stats;
+ } else if (raw_stat instanceof DbQueueStat) {
+ DbQueueStat qs = (DbQueueStat)raw_stat;
+ int[] raw_stats = {
+ qs.qs_magic, qs.qs_version,
+ qs.qs_metaflags, qs.qs_nkeys,
+ qs.qs_ndata, qs.qs_pagesize,
+ qs.qs_extentsize, qs.qs_pages,
+ qs.qs_re_len, qs.qs_re_pad,
+ qs.qs_pgfree, qs.qs_first_recno,
+ qs.qs_cur_recno
+ };
+ reply.stats = raw_stats;
+ } else if (raw_stat instanceof DbBtreeStat) {
+ DbBtreeStat bs = (DbBtreeStat)raw_stat;
+ int[] raw_stats = {
+ bs.bt_magic, bs.bt_version,
+ bs.bt_metaflags, bs.bt_nkeys,
+ bs.bt_ndata, bs.bt_pagesize,
+ bs.bt_maxkey, bs.bt_minkey,
+ bs.bt_re_len, bs.bt_re_pad,
+ bs.bt_levels, bs.bt_int_pg,
+ bs.bt_leaf_pg, bs.bt_dup_pg,
+ bs.bt_over_pg, bs.bt_free,
+ bs.bt_int_pgfree, bs.bt_leaf_pgfree,
+ bs.bt_dup_pgfree, bs.bt_over_pgfree
+ };
+ reply.stats = raw_stats;
+ } else
+ throw new DbException("Invalid return type from db.stat()", Db.DB_NOTFOUND);
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.stats = new int[0];
+ }
+ }
+
+ public void sync(DbDispatcher server,
+ __db_sync_msg args, __db_sync_reply reply)
+ {
+ try {
+ db.sync(args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void truncate(DbDispatcher server,
+ __db_truncate_msg args, __db_truncate_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ reply.count = db.truncate(txn, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+}
diff --git a/storage/bdb/rpc_server/java/RpcDbEnv.java b/storage/bdb/rpc_server/java/RpcDbEnv.java
new file mode 100644
index 00000000000..9d9f1ba4324
--- /dev/null
+++ b/storage/bdb/rpc_server/java/RpcDbEnv.java
@@ -0,0 +1,269 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: RpcDbEnv.java,v 1.6 2002/08/23 08:45:59 mjc Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a dbenv for the Java RPC server.
+ */
+public class RpcDbEnv extends Timer
+{
+ DbEnv dbenv;
+ String home;
+ long idletime, timeout;
+ int openflags, onflags, offflags;
+ int refcount = 1;
+
+ void dispose()
+ {
+ if (dbenv != null) {
+ try {
+ dbenv.close(0);
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ dbenv = null;
+ }
+ }
+
+ public void close(DbDispatcher server,
+ __env_close_msg args, __env_close_reply reply)
+ {
+ if (--refcount != 0) {
+ reply.status = 0;
+ return;
+ }
+
+ try {
+ dbenv.close(args.flags);
+ dbenv = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delEnv(this);
+ }
+ }
+
+ public void create(DbDispatcher server,
+ __env_create_msg args, __env_create_reply reply)
+ {
+ this.idletime = (args.timeout != 0) ? args.timeout : DbServer.idleto;
+ this.timeout = DbServer.defto;
+ try {
+ dbenv = new DbEnv(0);
+ reply.envcl_id = server.addEnv(this);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void dbremove(DbDispatcher server,
+ __env_dbremove_msg args, __env_dbremove_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ dbenv.dbremove(txn, args.name, args.subdb, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void dbrename(DbDispatcher server,
+ __env_dbrename_msg args, __env_dbrename_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+ args.newname = (args.newname.length() > 0) ? args.newname : null;
+
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ dbenv.dbrename(txn, args.name, args.subdb, args.newname, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ private boolean findSharedDbEnv(DbDispatcher server, __env_open_reply reply)
+ throws DbException
+ {
+ RpcDbEnv rdbenv = null;
+ boolean matchFound = false;
+ LocalIterator i = ((DbServer)server).env_list.iterator();
+
+ while (!matchFound && i.hasNext()) {
+ rdbenv = (RpcDbEnv)i.next();
+ if (rdbenv != null && rdbenv != this &&
+ (home == rdbenv.home ||
+ (home != null && home.equals(rdbenv.home))) &&
+ openflags == rdbenv.openflags &&
+ onflags == rdbenv.onflags &&
+ offflags == rdbenv.offflags)
+ matchFound = true;
+ }
+
+ if (matchFound) {
+ /*
+ * The only thing left to check is the timeout.
+ * Since the server timeout set by the client is a hint, for sharing
+ * we'll give them the benefit of the doubt and grant them the
+ * longer timeout.
+ */
+ if (rdbenv.timeout < timeout)
+ rdbenv.timeout = timeout;
+
+ ++rdbenv.refcount;
+ reply.envcl_id = ((FreeList.FreeListIterator)i).current;
+ reply.status = 0;
+
+ DbServer.err.println("Sharing DbEnv: " + reply.envcl_id);
+ }
+
+ return matchFound;
+ }
+
+ public void open(DbDispatcher server,
+ __env_open_msg args, __env_open_reply reply)
+ {
+ try {
+ home = (args.home.length() > 0) ? args.home : null;
+
+ /*
+ * If they are using locking do deadlock detection for them,
+ * internally.
+ */
+ if ((args.flags & Db.DB_INIT_LOCK) != 0)
+ dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);
+
+ // adjust flags for RPC
+ int newflags = (args.flags & ~DbServer.DB_SERVER_FLAGMASK);
+ openflags = (newflags & DbServer.DB_SERVER_ENVFLAGS);
+
+ if (findSharedDbEnv(server, reply)) {
+ dbenv.close(0);
+ dbenv = null;
+ server.delEnv(this);
+ } else {
+ // TODO: check home?
+ dbenv.open(home, newflags, args.mode);
+ reply.status = 0;
+ reply.envcl_id = args.dbenvcl_id;
+ }
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ reply.status = Db.DB_NOTFOUND;
+ }
+
+ // System.err.println("DbEnv.open: reply.status = " + reply.status + ", reply.envcl_id = " + reply.envcl_id);
+ }
+
+ public void remove(DbDispatcher server,
+ __env_remove_msg args, __env_remove_reply reply)
+ {
+ try {
+ args.home = (args.home.length() > 0) ? args.home : null;
+ // TODO: check home?
+
+ dbenv.remove(args.home, args.flags);
+ dbenv = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ reply.status = Db.DB_NOTFOUND;
+ } finally {
+ server.delEnv(this);
+ }
+ }
+
+ public void set_cachesize(DbDispatcher server,
+ __env_cachesize_msg args, __env_cachesize_reply reply)
+ {
+ try {
+ dbenv.set_cachesize(args.gbytes, args.bytes, args.ncache);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_encrypt(DbDispatcher server,
+ __env_encrypt_msg args, __env_encrypt_reply reply)
+ {
+ try {
+ dbenv.set_encrypt(args.passwd, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_flags(DbDispatcher server,
+ __env_flags_msg args, __env_flags_reply reply)
+ {
+ try {
+ dbenv.set_flags(args.flags, args.onoff != 0);
+ if (args.onoff != 0)
+ onflags |= args.flags;
+ else
+ offflags |= args.flags;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ // txn_recover implementation
+ public void txn_recover(DbDispatcher server,
+ __txn_recover_msg args, __txn_recover_reply reply)
+ {
+ try {
+ DbPreplist[] prep_list = dbenv.txn_recover(args.count, args.flags);
+ if (prep_list != null && prep_list.length > 0) {
+ int count = prep_list.length;
+ reply.retcount = count;
+ reply.txn = new int[count];
+ reply.gid = new byte[count * Db.DB_XIDDATASIZE];
+
+ for(int i = 0; i < count; i++) {
+ reply.txn[i] = server.addTxn(new RpcDbTxn(this, prep_list[i].txn));
+ System.arraycopy(prep_list[i].gid, 0, reply.gid, i * Db.DB_XIDDATASIZE, Db.DB_XIDDATASIZE);
+ }
+ }
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+}
diff --git a/storage/bdb/rpc_server/java/RpcDbTxn.java b/storage/bdb/rpc_server/java/RpcDbTxn.java
new file mode 100644
index 00000000000..a3207b5e35d
--- /dev/null
+++ b/storage/bdb/rpc_server/java/RpcDbTxn.java
@@ -0,0 +1,123 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: RpcDbTxn.java,v 1.2 2002/08/09 01:56:10 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a txn object for the Java RPC server.
+ */
+public class RpcDbTxn extends Timer
+{
+ RpcDbEnv rdbenv;
+ DbTxn txn;
+
+ public RpcDbTxn(RpcDbEnv rdbenv, DbTxn txn)
+ {
+ this.rdbenv = rdbenv;
+ this.txn = txn;
+ }
+
+ void dispose()
+ {
+ if (txn != null) {
+ try {
+ txn.abort();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ txn = null;
+ }
+ }
+
+ public void abort(DbDispatcher server,
+ __txn_abort_msg args, __txn_abort_reply reply)
+ {
+ try {
+ txn.abort();
+ txn = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delTxn(this);
+ }
+ }
+
+ public void begin(DbDispatcher server,
+ __txn_begin_msg args, __txn_begin_reply reply)
+ {
+ try {
+ if (rdbenv == null) {
+ reply.status = Db.DB_NOSERVER_ID;
+ return;
+ }
+ DbEnv dbenv = rdbenv.dbenv;
+ RpcDbTxn rparent = server.getTxn(args.parentcl_id);
+ DbTxn parent = (rparent != null) ? rparent.txn : null;
+
+ txn = dbenv.txn_begin(parent, args.flags);
+
+ if (rparent != null)
+ timer = rparent.timer;
+ reply.txnidcl_id = server.addTxn(this);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void commit(DbDispatcher server,
+ __txn_commit_msg args, __txn_commit_reply reply)
+ {
+ try {
+ txn.commit(args.flags);
+ txn = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delTxn(this);
+ }
+ }
+
+ public void discard(DbDispatcher server,
+ __txn_discard_msg args, __txn_discard_reply reply)
+ {
+ try {
+ txn.discard(args.flags);
+ txn = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delTxn(this);
+ }
+ }
+
+ public void prepare(DbDispatcher server,
+ __txn_prepare_msg args, __txn_prepare_reply reply)
+ {
+ try {
+ txn.prepare(args.gid);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+}
diff --git a/storage/bdb/rpc_server/java/RpcDbc.java b/storage/bdb/rpc_server/java/RpcDbc.java
new file mode 100644
index 00000000000..a37b4ee4896
--- /dev/null
+++ b/storage/bdb/rpc_server/java/RpcDbc.java
@@ -0,0 +1,238 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: RpcDbc.java,v 1.3 2002/08/09 01:56:10 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a dbc object for the Java RPC server.
+ */
+public class RpcDbc extends Timer
+{
+ static final byte[] empty = new byte[0];
+ RpcDbEnv rdbenv;
+ RpcDb rdb;
+ Dbc dbc;
+ Timer orig_timer;
+ boolean isJoin;
+
+ public RpcDbc(RpcDb rdb, Dbc dbc, boolean isJoin)
+ {
+ this.rdb = rdb;
+ this.rdbenv = rdb.rdbenv;
+ this.dbc = dbc;
+ this.isJoin = isJoin;
+ }
+
+ void dispose()
+ {
+ if (dbc != null) {
+ try {
+ dbc.close();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ dbc = null;
+ }
+ }
+
+ public void close(DbDispatcher server,
+ __dbc_close_msg args, __dbc_close_reply reply)
+ {
+ try {
+ dbc.close();
+ dbc = null;
+
+ if (isJoin)
+ for(LocalIterator i = ((DbServer)server).cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ // Unjoin cursors that were joined to create this
+ if (rdbc != null && rdbc.timer == this)
+ rdbc.timer = rdbc.orig_timer;
+ }
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delCursor(this);
+ }
+ }
+
+ public void count(DbDispatcher server,
+ __dbc_count_msg args, __dbc_count_reply reply)
+ {
+ try {
+ reply.dupcount = dbc.count(args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void del(DbDispatcher server,
+ __dbc_del_msg args, __dbc_del_reply reply)
+ {
+ try {
+ reply.status = dbc.del(args.flags);
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void dup(DbDispatcher server,
+ __dbc_dup_msg args, __dbc_dup_reply reply)
+ {
+ try {
+ Dbc newdbc = dbc.dup(args.flags);
+ RpcDbc rdbc = new RpcDbc(rdb, newdbc, false);
+ /* If this cursor has a parent txn, we need to use it too. */
+ if (timer != this)
+ rdbc.timer = timer;
+ reply.dbcidcl_id = server.addCursor(rdbc);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void get(DbDispatcher server,
+ __dbc_get_msg args, __dbc_get_reply reply)
+ {
+ try {
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_ulen(args.keyulen);
+ key.set_doff(args.keydoff);
+ key.set_flags(Db.DB_DBT_MALLOC |
+ (args.keyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_ulen(args.dataulen);
+ data.set_doff(args.datadoff);
+ if ((args.flags & Db.DB_MULTIPLE) != 0 ||
+ (args.flags & Db.DB_MULTIPLE_KEY) != 0) {
+ if (data.get_data().length == 0)
+ data.set_data(new byte[data.get_ulen()]);
+ data.set_flags(Db.DB_DBT_USERMEM |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+ } else
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ reply.status = dbc.get(key, data, args.flags);
+
+ if (key.get_data() == args.keydata) {
+ reply.keydata = new byte[key.get_size()];
+ System.arraycopy(key.get_data(), 0, reply.keydata, 0, key.get_size());
+ } else
+ reply.keydata = key.get_data();
+
+ if (data.get_data() == args.datadata) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.keydata = reply.datadata = empty;
+ }
+ }
+
+ public void pget(DbDispatcher server,
+ __dbc_pget_msg args, __dbc_pget_reply reply)
+ {
+ try {
+ Dbt skey = new Dbt(args.skeydata);
+ skey.set_dlen(args.skeydlen);
+ skey.set_doff(args.skeydoff);
+ skey.set_ulen(args.skeyulen);
+ skey.set_flags(Db.DB_DBT_MALLOC |
+ (args.skeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt pkey = new Dbt(args.pkeydata);
+ pkey.set_dlen(args.pkeydlen);
+ pkey.set_doff(args.pkeydoff);
+ pkey.set_ulen(args.pkeyulen);
+ pkey.set_flags(Db.DB_DBT_MALLOC |
+ (args.pkeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ reply.status = dbc.pget(skey, pkey, data, args.flags);
+
+ if (skey.get_data() == args.skeydata) {
+ reply.skeydata = new byte[skey.get_size()];
+ System.arraycopy(skey.get_data(), 0, reply.skeydata, 0, skey.get_size());
+ } else
+ reply.skeydata = skey.get_data();
+
+ if (pkey.get_data() == args.pkeydata) {
+ reply.pkeydata = new byte[pkey.get_size()];
+ System.arraycopy(pkey.get_data(), 0, reply.pkeydata, 0, pkey.get_size());
+ } else
+ reply.pkeydata = pkey.get_data();
+
+ if (data.get_data() == args.datadata) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void put(DbDispatcher server,
+ __dbc_put_msg args, __dbc_put_reply reply)
+ {
+ try {
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_ulen(args.keyulen);
+ key.set_doff(args.keydoff);
+ key.set_flags(args.keyflags & Db.DB_DBT_PARTIAL);
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_ulen(args.dataulen);
+ data.set_doff(args.datadoff);
+ data.set_flags(args.dataflags);
+
+ reply.status = dbc.put(key, data, args.flags);
+
+ if (reply.status == 0 &&
+ (args.flags == Db.DB_AFTER || args.flags == Db.DB_BEFORE) &&
+ rdb.db.get_type() == Db.DB_RECNO)
+ reply.keydata = key.get_data();
+ else
+ reply.keydata = empty;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.keydata = empty;
+ }
+ }
+}
diff --git a/storage/bdb/rpc_server/java/Timer.java b/storage/bdb/rpc_server/java/Timer.java
new file mode 100644
index 00000000000..e16f3084f95
--- /dev/null
+++ b/storage/bdb/rpc_server/java/Timer.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: Timer.java,v 1.1 2002/01/03 02:59:39 mjc Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+/**
+ * Class to keep track of access times. This is slightly devious by having
+ * both the access_time and a reference to another Timer that can be
+ * used to group/share access times. This is done to keep the Java code
+ * close to the canonical C implementation of the RPC server.
+ */
+public class Timer
+{
+ Timer timer = this;
+ long last_access;
+}
diff --git a/storage/bdb/rpc_server/java/gen/DbServerStub.java b/storage/bdb/rpc_server/java/gen/DbServerStub.java
new file mode 100644
index 00000000000..90fc13a6d9c
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/DbServerStub.java
@@ -0,0 +1,495 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+import org.acplt.oncrpc.server.*;
+
+/**
+ */
+public abstract class DbServerStub extends OncRpcServerStub implements OncRpcDispatchable {
+
+ public DbServerStub()
+ throws OncRpcException, IOException {
+ this(0);
+ }
+
+ public DbServerStub(int port)
+ throws OncRpcException, IOException {
+ info = new OncRpcServerTransportRegistrationInfo [] {
+ new OncRpcServerTransportRegistrationInfo(db_server.DB_RPC_SERVERPROG, 4001),
+ };
+ transports = new OncRpcServerTransport [] {
+ new OncRpcUdpServerTransport(this, port, info, 32768),
+ new OncRpcTcpServerTransport(this, port, info, 32768)
+ };
+ }
+
+ public void dispatchOncRpcCall(OncRpcCallInformation call, int program, int version, int procedure)
+ throws OncRpcException, IOException {
+ if ( version == 4001 ) {
+ switch ( procedure ) {
+ case 1: {
+ __env_cachesize_msg args$ = new __env_cachesize_msg();
+ call.retrieveCall(args$);
+ __env_cachesize_reply result$ = __DB_env_cachesize_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 2: {
+ __env_close_msg args$ = new __env_close_msg();
+ call.retrieveCall(args$);
+ __env_close_reply result$ = __DB_env_close_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 3: {
+ __env_create_msg args$ = new __env_create_msg();
+ call.retrieveCall(args$);
+ __env_create_reply result$ = __DB_env_create_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 4: {
+ __env_dbremove_msg args$ = new __env_dbremove_msg();
+ call.retrieveCall(args$);
+ __env_dbremove_reply result$ = __DB_env_dbremove_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 5: {
+ __env_dbrename_msg args$ = new __env_dbrename_msg();
+ call.retrieveCall(args$);
+ __env_dbrename_reply result$ = __DB_env_dbrename_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 6: {
+ __env_encrypt_msg args$ = new __env_encrypt_msg();
+ call.retrieveCall(args$);
+ __env_encrypt_reply result$ = __DB_env_encrypt_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 7: {
+ __env_flags_msg args$ = new __env_flags_msg();
+ call.retrieveCall(args$);
+ __env_flags_reply result$ = __DB_env_flags_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 8: {
+ __env_open_msg args$ = new __env_open_msg();
+ call.retrieveCall(args$);
+ __env_open_reply result$ = __DB_env_open_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 9: {
+ __env_remove_msg args$ = new __env_remove_msg();
+ call.retrieveCall(args$);
+ __env_remove_reply result$ = __DB_env_remove_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 10: {
+ __txn_abort_msg args$ = new __txn_abort_msg();
+ call.retrieveCall(args$);
+ __txn_abort_reply result$ = __DB_txn_abort_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 11: {
+ __txn_begin_msg args$ = new __txn_begin_msg();
+ call.retrieveCall(args$);
+ __txn_begin_reply result$ = __DB_txn_begin_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 12: {
+ __txn_commit_msg args$ = new __txn_commit_msg();
+ call.retrieveCall(args$);
+ __txn_commit_reply result$ = __DB_txn_commit_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 13: {
+ __txn_discard_msg args$ = new __txn_discard_msg();
+ call.retrieveCall(args$);
+ __txn_discard_reply result$ = __DB_txn_discard_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 14: {
+ __txn_prepare_msg args$ = new __txn_prepare_msg();
+ call.retrieveCall(args$);
+ __txn_prepare_reply result$ = __DB_txn_prepare_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 15: {
+ __txn_recover_msg args$ = new __txn_recover_msg();
+ call.retrieveCall(args$);
+ __txn_recover_reply result$ = __DB_txn_recover_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 16: {
+ __db_associate_msg args$ = new __db_associate_msg();
+ call.retrieveCall(args$);
+ __db_associate_reply result$ = __DB_db_associate_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 17: {
+ __db_bt_maxkey_msg args$ = new __db_bt_maxkey_msg();
+ call.retrieveCall(args$);
+ __db_bt_maxkey_reply result$ = __DB_db_bt_maxkey_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 18: {
+ __db_bt_minkey_msg args$ = new __db_bt_minkey_msg();
+ call.retrieveCall(args$);
+ __db_bt_minkey_reply result$ = __DB_db_bt_minkey_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 19: {
+ __db_close_msg args$ = new __db_close_msg();
+ call.retrieveCall(args$);
+ __db_close_reply result$ = __DB_db_close_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 20: {
+ __db_create_msg args$ = new __db_create_msg();
+ call.retrieveCall(args$);
+ __db_create_reply result$ = __DB_db_create_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 21: {
+ __db_del_msg args$ = new __db_del_msg();
+ call.retrieveCall(args$);
+ __db_del_reply result$ = __DB_db_del_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 22: {
+ __db_encrypt_msg args$ = new __db_encrypt_msg();
+ call.retrieveCall(args$);
+ __db_encrypt_reply result$ = __DB_db_encrypt_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 23: {
+ __db_extentsize_msg args$ = new __db_extentsize_msg();
+ call.retrieveCall(args$);
+ __db_extentsize_reply result$ = __DB_db_extentsize_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 24: {
+ __db_flags_msg args$ = new __db_flags_msg();
+ call.retrieveCall(args$);
+ __db_flags_reply result$ = __DB_db_flags_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 25: {
+ __db_get_msg args$ = new __db_get_msg();
+ call.retrieveCall(args$);
+ __db_get_reply result$ = __DB_db_get_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 26: {
+ __db_h_ffactor_msg args$ = new __db_h_ffactor_msg();
+ call.retrieveCall(args$);
+ __db_h_ffactor_reply result$ = __DB_db_h_ffactor_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 27: {
+ __db_h_nelem_msg args$ = new __db_h_nelem_msg();
+ call.retrieveCall(args$);
+ __db_h_nelem_reply result$ = __DB_db_h_nelem_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 28: {
+ __db_key_range_msg args$ = new __db_key_range_msg();
+ call.retrieveCall(args$);
+ __db_key_range_reply result$ = __DB_db_key_range_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 29: {
+ __db_lorder_msg args$ = new __db_lorder_msg();
+ call.retrieveCall(args$);
+ __db_lorder_reply result$ = __DB_db_lorder_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 30: {
+ __db_open_msg args$ = new __db_open_msg();
+ call.retrieveCall(args$);
+ __db_open_reply result$ = __DB_db_open_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 31: {
+ __db_pagesize_msg args$ = new __db_pagesize_msg();
+ call.retrieveCall(args$);
+ __db_pagesize_reply result$ = __DB_db_pagesize_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 32: {
+ __db_pget_msg args$ = new __db_pget_msg();
+ call.retrieveCall(args$);
+ __db_pget_reply result$ = __DB_db_pget_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 33: {
+ __db_put_msg args$ = new __db_put_msg();
+ call.retrieveCall(args$);
+ __db_put_reply result$ = __DB_db_put_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 34: {
+ __db_re_delim_msg args$ = new __db_re_delim_msg();
+ call.retrieveCall(args$);
+ __db_re_delim_reply result$ = __DB_db_re_delim_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 35: {
+ __db_re_len_msg args$ = new __db_re_len_msg();
+ call.retrieveCall(args$);
+ __db_re_len_reply result$ = __DB_db_re_len_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 36: {
+ __db_re_pad_msg args$ = new __db_re_pad_msg();
+ call.retrieveCall(args$);
+ __db_re_pad_reply result$ = __DB_db_re_pad_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 37: {
+ __db_remove_msg args$ = new __db_remove_msg();
+ call.retrieveCall(args$);
+ __db_remove_reply result$ = __DB_db_remove_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 38: {
+ __db_rename_msg args$ = new __db_rename_msg();
+ call.retrieveCall(args$);
+ __db_rename_reply result$ = __DB_db_rename_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 39: {
+ __db_stat_msg args$ = new __db_stat_msg();
+ call.retrieveCall(args$);
+ __db_stat_reply result$ = __DB_db_stat_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 40: {
+ __db_sync_msg args$ = new __db_sync_msg();
+ call.retrieveCall(args$);
+ __db_sync_reply result$ = __DB_db_sync_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 41: {
+ __db_truncate_msg args$ = new __db_truncate_msg();
+ call.retrieveCall(args$);
+ __db_truncate_reply result$ = __DB_db_truncate_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 42: {
+ __db_cursor_msg args$ = new __db_cursor_msg();
+ call.retrieveCall(args$);
+ __db_cursor_reply result$ = __DB_db_cursor_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 43: {
+ __db_join_msg args$ = new __db_join_msg();
+ call.retrieveCall(args$);
+ __db_join_reply result$ = __DB_db_join_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 44: {
+ __dbc_close_msg args$ = new __dbc_close_msg();
+ call.retrieveCall(args$);
+ __dbc_close_reply result$ = __DB_dbc_close_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 45: {
+ __dbc_count_msg args$ = new __dbc_count_msg();
+ call.retrieveCall(args$);
+ __dbc_count_reply result$ = __DB_dbc_count_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 46: {
+ __dbc_del_msg args$ = new __dbc_del_msg();
+ call.retrieveCall(args$);
+ __dbc_del_reply result$ = __DB_dbc_del_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 47: {
+ __dbc_dup_msg args$ = new __dbc_dup_msg();
+ call.retrieveCall(args$);
+ __dbc_dup_reply result$ = __DB_dbc_dup_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 48: {
+ __dbc_get_msg args$ = new __dbc_get_msg();
+ call.retrieveCall(args$);
+ __dbc_get_reply result$ = __DB_dbc_get_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 49: {
+ __dbc_pget_msg args$ = new __dbc_pget_msg();
+ call.retrieveCall(args$);
+ __dbc_pget_reply result$ = __DB_dbc_pget_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 50: {
+ __dbc_put_msg args$ = new __dbc_put_msg();
+ call.retrieveCall(args$);
+ __dbc_put_reply result$ = __DB_dbc_put_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ default:
+ call.failProcedureUnavailable();
+ }
+ } else {
+ call.failProcedureUnavailable();
+ }
+ }
+
+ public abstract __env_cachesize_reply __DB_env_cachesize_4001(__env_cachesize_msg arg1);
+
+ public abstract __env_close_reply __DB_env_close_4001(__env_close_msg arg1);
+
+ public abstract __env_create_reply __DB_env_create_4001(__env_create_msg arg1);
+
+ public abstract __env_dbremove_reply __DB_env_dbremove_4001(__env_dbremove_msg arg1);
+
+ public abstract __env_dbrename_reply __DB_env_dbrename_4001(__env_dbrename_msg arg1);
+
+ public abstract __env_encrypt_reply __DB_env_encrypt_4001(__env_encrypt_msg arg1);
+
+ public abstract __env_flags_reply __DB_env_flags_4001(__env_flags_msg arg1);
+
+ public abstract __env_open_reply __DB_env_open_4001(__env_open_msg arg1);
+
+ public abstract __env_remove_reply __DB_env_remove_4001(__env_remove_msg arg1);
+
+ public abstract __txn_abort_reply __DB_txn_abort_4001(__txn_abort_msg arg1);
+
+ public abstract __txn_begin_reply __DB_txn_begin_4001(__txn_begin_msg arg1);
+
+ public abstract __txn_commit_reply __DB_txn_commit_4001(__txn_commit_msg arg1);
+
+ public abstract __txn_discard_reply __DB_txn_discard_4001(__txn_discard_msg arg1);
+
+ public abstract __txn_prepare_reply __DB_txn_prepare_4001(__txn_prepare_msg arg1);
+
+ public abstract __txn_recover_reply __DB_txn_recover_4001(__txn_recover_msg arg1);
+
+ public abstract __db_associate_reply __DB_db_associate_4001(__db_associate_msg arg1);
+
+ public abstract __db_bt_maxkey_reply __DB_db_bt_maxkey_4001(__db_bt_maxkey_msg arg1);
+
+ public abstract __db_bt_minkey_reply __DB_db_bt_minkey_4001(__db_bt_minkey_msg arg1);
+
+ public abstract __db_close_reply __DB_db_close_4001(__db_close_msg arg1);
+
+ public abstract __db_create_reply __DB_db_create_4001(__db_create_msg arg1);
+
+ public abstract __db_del_reply __DB_db_del_4001(__db_del_msg arg1);
+
+ public abstract __db_encrypt_reply __DB_db_encrypt_4001(__db_encrypt_msg arg1);
+
+ public abstract __db_extentsize_reply __DB_db_extentsize_4001(__db_extentsize_msg arg1);
+
+ public abstract __db_flags_reply __DB_db_flags_4001(__db_flags_msg arg1);
+
+ public abstract __db_get_reply __DB_db_get_4001(__db_get_msg arg1);
+
+ public abstract __db_h_ffactor_reply __DB_db_h_ffactor_4001(__db_h_ffactor_msg arg1);
+
+ public abstract __db_h_nelem_reply __DB_db_h_nelem_4001(__db_h_nelem_msg arg1);
+
+ public abstract __db_key_range_reply __DB_db_key_range_4001(__db_key_range_msg arg1);
+
+ public abstract __db_lorder_reply __DB_db_lorder_4001(__db_lorder_msg arg1);
+
+ public abstract __db_open_reply __DB_db_open_4001(__db_open_msg arg1);
+
+ public abstract __db_pagesize_reply __DB_db_pagesize_4001(__db_pagesize_msg arg1);
+
+ public abstract __db_pget_reply __DB_db_pget_4001(__db_pget_msg arg1);
+
+ public abstract __db_put_reply __DB_db_put_4001(__db_put_msg arg1);
+
+ public abstract __db_re_delim_reply __DB_db_re_delim_4001(__db_re_delim_msg arg1);
+
+ public abstract __db_re_len_reply __DB_db_re_len_4001(__db_re_len_msg arg1);
+
+ public abstract __db_re_pad_reply __DB_db_re_pad_4001(__db_re_pad_msg arg1);
+
+ public abstract __db_remove_reply __DB_db_remove_4001(__db_remove_msg arg1);
+
+ public abstract __db_rename_reply __DB_db_rename_4001(__db_rename_msg arg1);
+
+ public abstract __db_stat_reply __DB_db_stat_4001(__db_stat_msg arg1);
+
+ public abstract __db_sync_reply __DB_db_sync_4001(__db_sync_msg arg1);
+
+ public abstract __db_truncate_reply __DB_db_truncate_4001(__db_truncate_msg arg1);
+
+ public abstract __db_cursor_reply __DB_db_cursor_4001(__db_cursor_msg arg1);
+
+ public abstract __db_join_reply __DB_db_join_4001(__db_join_msg arg1);
+
+ public abstract __dbc_close_reply __DB_dbc_close_4001(__dbc_close_msg arg1);
+
+ public abstract __dbc_count_reply __DB_dbc_count_4001(__dbc_count_msg arg1);
+
+ public abstract __dbc_del_reply __DB_dbc_del_4001(__dbc_del_msg arg1);
+
+ public abstract __dbc_dup_reply __DB_dbc_dup_4001(__dbc_dup_msg arg1);
+
+ public abstract __dbc_get_reply __DB_dbc_get_4001(__dbc_get_msg arg1);
+
+ public abstract __dbc_pget_reply __DB_dbc_pget_4001(__dbc_pget_msg arg1);
+
+ public abstract __dbc_put_reply __DB_dbc_put_4001(__dbc_put_msg arg1);
+
+}
+// End of DbServerStub.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_associate_msg.java b/storage/bdb/rpc_server/java/gen/__db_associate_msg.java
new file mode 100644
index 00000000000..8977303b99a
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_associate_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 4/25/02 11:01 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_associate_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int sdbpcl_id;
+ public int flags;
+
+ public __db_associate_msg() {
+ }
+
+ public __db_associate_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(sdbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ sdbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_associate_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_associate_reply.java b/storage/bdb/rpc_server/java/gen/__db_associate_reply.java
new file mode 100644
index 00000000000..476d0868b33
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_associate_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_associate_reply implements XdrAble {
+ public int status;
+
+ public __db_associate_reply() {
+ }
+
+ public __db_associate_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_associate_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
new file mode 100644
index 00000000000..007ce16a974
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_maxkey_msg implements XdrAble {
+ public int dbpcl_id;
+ public int maxkey;
+
+ public __db_bt_maxkey_msg() {
+ }
+
+ public __db_bt_maxkey_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(maxkey);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ maxkey = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_maxkey_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
new file mode 100644
index 00000000000..855573271b3
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_maxkey_reply implements XdrAble {
+ public int status;
+
+ public __db_bt_maxkey_reply() {
+ }
+
+ public __db_bt_maxkey_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_maxkey_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java
new file mode 100644
index 00000000000..c86ec382456
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_minkey_msg implements XdrAble {
+ public int dbpcl_id;
+ public int minkey;
+
+ public __db_bt_minkey_msg() {
+ }
+
+ public __db_bt_minkey_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(minkey);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ minkey = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_minkey_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java
new file mode 100644
index 00000000000..4d944b6bf33
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_minkey_reply implements XdrAble {
+ public int status;
+
+ public __db_bt_minkey_reply() {
+ }
+
+ public __db_bt_minkey_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_minkey_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_close_msg.java b/storage/bdb/rpc_server/java/gen/__db_close_msg.java
new file mode 100644
index 00000000000..ce8d213701b
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_close_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_close_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_close_msg() {
+ }
+
+ public __db_close_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_close_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_close_reply.java b/storage/bdb/rpc_server/java/gen/__db_close_reply.java
new file mode 100644
index 00000000000..a9380e9c053
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_close_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_close_reply implements XdrAble {
+ public int status;
+
+ public __db_close_reply() {
+ }
+
+ public __db_close_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_close_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_create_msg.java b/storage/bdb/rpc_server/java/gen/__db_create_msg.java
new file mode 100644
index 00000000000..d21ca50f807
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_create_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_create_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int flags;
+
+ public __db_create_msg() {
+ }
+
+ public __db_create_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_create_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_create_reply.java b/storage/bdb/rpc_server/java/gen/__db_create_reply.java
new file mode 100644
index 00000000000..e3dcbbab14e
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_create_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_create_reply implements XdrAble {
+ public int status;
+ public int dbcl_id;
+
+ public __db_create_reply() {
+ }
+
+ public __db_create_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_create_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_cursor_msg.java b/storage/bdb/rpc_server/java/gen/__db_cursor_msg.java
new file mode 100644
index 00000000000..60e09db6ebb
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_cursor_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_cursor_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int flags;
+
+ public __db_cursor_msg() {
+ }
+
+ public __db_cursor_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_cursor_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_cursor_reply.java b/storage/bdb/rpc_server/java/gen/__db_cursor_reply.java
new file mode 100644
index 00000000000..bafd2817c67
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_cursor_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_cursor_reply implements XdrAble {
+ public int status;
+ public int dbcidcl_id;
+
+ public __db_cursor_reply() {
+ }
+
+ public __db_cursor_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_cursor_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_del_msg.java b/storage/bdb/rpc_server/java/gen/__db_del_msg.java
new file mode 100644
index 00000000000..fdf47907dd6
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_del_msg.java
@@ -0,0 +1,53 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_del_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int flags;
+
+ public __db_del_msg() {
+ }
+
+ public __db_del_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_del_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_del_reply.java b/storage/bdb/rpc_server/java/gen/__db_del_reply.java
new file mode 100644
index 00000000000..8a55445944f
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_del_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_del_reply implements XdrAble {
+ public int status;
+
+ public __db_del_reply() {
+ }
+
+ public __db_del_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_del_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java b/storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java
new file mode 100644
index 00000000000..46d9f8ee7e8
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_encrypt_msg implements XdrAble {
+ public int dbpcl_id;
+ public String passwd;
+ public int flags;
+
+ public __db_encrypt_msg() {
+ }
+
+ public __db_encrypt_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeString(passwd);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ passwd = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_encrypt_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java b/storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java
new file mode 100644
index 00000000000..a97cc98c90b
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_encrypt_reply implements XdrAble {
+ public int status;
+
+ public __db_encrypt_reply() {
+ }
+
+ public __db_encrypt_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_encrypt_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java b/storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java
new file mode 100644
index 00000000000..41a51cff9c4
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_extentsize_msg implements XdrAble {
+ public int dbpcl_id;
+ public int extentsize;
+
+ public __db_extentsize_msg() {
+ }
+
+ public __db_extentsize_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(extentsize);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ extentsize = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_extentsize_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java b/storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java
new file mode 100644
index 00000000000..409625486c7
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_extentsize_reply implements XdrAble {
+ public int status;
+
+ public __db_extentsize_reply() {
+ }
+
+ public __db_extentsize_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_extentsize_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_flags_msg.java b/storage/bdb/rpc_server/java/gen/__db_flags_msg.java
new file mode 100644
index 00000000000..d8752e2e4dd
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_flags_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_flags_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_flags_msg() {
+ }
+
+ public __db_flags_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_flags_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_flags_reply.java b/storage/bdb/rpc_server/java/gen/__db_flags_reply.java
new file mode 100644
index 00000000000..c4ec253db83
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_flags_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_flags_reply implements XdrAble {
+ public int status;
+
+ public __db_flags_reply() {
+ }
+
+ public __db_flags_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_flags_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_get_msg.java b/storage/bdb/rpc_server/java/gen/__db_get_msg.java
new file mode 100644
index 00000000000..3dfe8e9d86e
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_get_msg.java
@@ -0,0 +1,68 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_get_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __db_get_msg() {
+ }
+
+ public __db_get_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_get_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_get_reply.java b/storage/bdb/rpc_server/java/gen/__db_get_reply.java
new file mode 100644
index 00000000000..64ce525728a
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_get_reply.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_get_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+ public byte [] datadata;
+
+ public __db_get_reply() {
+ }
+
+ public __db_get_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __db_get_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java
new file mode 100644
index 00000000000..8d2ed1b1c0b
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_ffactor_msg implements XdrAble {
+ public int dbpcl_id;
+ public int ffactor;
+
+ public __db_h_ffactor_msg() {
+ }
+
+ public __db_h_ffactor_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(ffactor);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ ffactor = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_ffactor_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java
new file mode 100644
index 00000000000..1885ec50240
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_ffactor_reply implements XdrAble {
+ public int status;
+
+ public __db_h_ffactor_reply() {
+ }
+
+ public __db_h_ffactor_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_ffactor_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java b/storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java
new file mode 100644
index 00000000000..7d084351755
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_nelem_msg implements XdrAble {
+ public int dbpcl_id;
+ public int nelem;
+
+ public __db_h_nelem_msg() {
+ }
+
+ public __db_h_nelem_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(nelem);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ nelem = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_nelem_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java b/storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java
new file mode 100644
index 00000000000..20c5c774e69
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_nelem_reply implements XdrAble {
+ public int status;
+
+ public __db_h_nelem_reply() {
+ }
+
+ public __db_h_nelem_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_nelem_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_join_msg.java b/storage/bdb/rpc_server/java/gen/__db_join_msg.java
new file mode 100644
index 00000000000..88c72dbd6ba
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_join_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_join_msg implements XdrAble {
+ public int dbpcl_id;
+ public int [] curs;
+ public int flags;
+
+ public __db_join_msg() {
+ }
+
+ public __db_join_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeIntVector(curs);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ curs = xdr.xdrDecodeIntVector();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_join_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_join_reply.java b/storage/bdb/rpc_server/java/gen/__db_join_reply.java
new file mode 100644
index 00000000000..80980e23d6c
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_join_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_join_reply implements XdrAble {
+ public int status;
+ public int dbcidcl_id;
+
+ public __db_join_reply() {
+ }
+
+ public __db_join_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_join_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_key_range_msg.java b/storage/bdb/rpc_server/java/gen/__db_key_range_msg.java
new file mode 100644
index 00000000000..233077e0964
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_key_range_msg.java
@@ -0,0 +1,53 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_key_range_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int flags;
+
+ public __db_key_range_msg() {
+ }
+
+ public __db_key_range_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_key_range_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_key_range_reply.java b/storage/bdb/rpc_server/java/gen/__db_key_range_reply.java
new file mode 100644
index 00000000000..09244c13d1d
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_key_range_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_key_range_reply implements XdrAble {
+ public int status;
+ public double less;
+ public double equal;
+ public double greater;
+
+ public __db_key_range_reply() {
+ }
+
+ public __db_key_range_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDouble(less);
+ xdr.xdrEncodeDouble(equal);
+ xdr.xdrEncodeDouble(greater);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ less = xdr.xdrDecodeDouble();
+ equal = xdr.xdrDecodeDouble();
+ greater = xdr.xdrDecodeDouble();
+ }
+
+}
+// End of __db_key_range_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_lorder_msg.java b/storage/bdb/rpc_server/java/gen/__db_lorder_msg.java
new file mode 100644
index 00000000000..3399ad8daf0
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_lorder_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_lorder_msg implements XdrAble {
+ public int dbpcl_id;
+ public int lorder;
+
+ public __db_lorder_msg() {
+ }
+
+ public __db_lorder_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(lorder);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ lorder = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_lorder_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_lorder_reply.java b/storage/bdb/rpc_server/java/gen/__db_lorder_reply.java
new file mode 100644
index 00000000000..cdcda4d4f43
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_lorder_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_lorder_reply implements XdrAble {
+ public int status;
+
+ public __db_lorder_reply() {
+ }
+
+ public __db_lorder_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_lorder_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_open_msg.java b/storage/bdb/rpc_server/java/gen/__db_open_msg.java
new file mode 100644
index 00000000000..14dbd9e3b0c
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_open_msg.java
@@ -0,0 +1,50 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_open_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public String name;
+ public String subdb;
+ public int type;
+ public int flags;
+ public int mode;
+
+ public __db_open_msg() {
+ }
+
+ public __db_open_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeInt(type);
+ xdr.xdrEncodeInt(flags);
+ xdr.xdrEncodeInt(mode);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ type = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ mode = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_open_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_open_reply.java b/storage/bdb/rpc_server/java/gen/__db_open_reply.java
new file mode 100644
index 00000000000..d90c3754c2f
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_open_reply.java
@@ -0,0 +1,44 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_open_reply implements XdrAble {
+ public int status;
+ public int dbcl_id;
+ public int type;
+ public int dbflags;
+ public int lorder;
+
+ public __db_open_reply() {
+ }
+
+ public __db_open_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcl_id);
+ xdr.xdrEncodeInt(type);
+ xdr.xdrEncodeInt(dbflags);
+ xdr.xdrEncodeInt(lorder);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcl_id = xdr.xdrDecodeInt();
+ type = xdr.xdrDecodeInt();
+ dbflags = xdr.xdrDecodeInt();
+ lorder = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_open_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java b/storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java
new file mode 100644
index 00000000000..a452ea4e381
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pagesize_msg implements XdrAble {
+ public int dbpcl_id;
+ public int pagesize;
+
+ public __db_pagesize_msg() {
+ }
+
+ public __db_pagesize_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(pagesize);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ pagesize = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_pagesize_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java b/storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java
new file mode 100644
index 00000000000..830b2078b34
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pagesize_reply implements XdrAble {
+ public int status;
+
+ public __db_pagesize_reply() {
+ }
+
+ public __db_pagesize_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_pagesize_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_pget_msg.java b/storage/bdb/rpc_server/java/gen/__db_pget_msg.java
new file mode 100644
index 00000000000..11d27ca9e46
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_pget_msg.java
@@ -0,0 +1,83 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pget_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int skeydlen;
+ public int skeydoff;
+ public int skeyulen;
+ public int skeyflags;
+ public byte [] skeydata;
+ public int pkeydlen;
+ public int pkeydoff;
+ public int pkeyulen;
+ public int pkeyflags;
+ public byte [] pkeydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __db_pget_msg() {
+ }
+
+ public __db_pget_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(skeydlen);
+ xdr.xdrEncodeInt(skeydoff);
+ xdr.xdrEncodeInt(skeyulen);
+ xdr.xdrEncodeInt(skeyflags);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeInt(pkeydlen);
+ xdr.xdrEncodeInt(pkeydoff);
+ xdr.xdrEncodeInt(pkeyulen);
+ xdr.xdrEncodeInt(pkeyflags);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ skeydlen = xdr.xdrDecodeInt();
+ skeydoff = xdr.xdrDecodeInt();
+ skeyulen = xdr.xdrDecodeInt();
+ skeyflags = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydlen = xdr.xdrDecodeInt();
+ pkeydoff = xdr.xdrDecodeInt();
+ pkeyulen = xdr.xdrDecodeInt();
+ pkeyflags = xdr.xdrDecodeInt();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_pget_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_pget_reply.java b/storage/bdb/rpc_server/java/gen/__db_pget_reply.java
new file mode 100644
index 00000000000..86c9c2111b9
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_pget_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pget_reply implements XdrAble {
+ public int status;
+ public byte [] skeydata;
+ public byte [] pkeydata;
+ public byte [] datadata;
+
+ public __db_pget_reply() {
+ }
+
+ public __db_pget_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __db_pget_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_put_msg.java b/storage/bdb/rpc_server/java/gen/__db_put_msg.java
new file mode 100644
index 00000000000..b6159cff3a8
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_put_msg.java
@@ -0,0 +1,68 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_put_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __db_put_msg() {
+ }
+
+ public __db_put_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_put_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_put_reply.java b/storage/bdb/rpc_server/java/gen/__db_put_reply.java
new file mode 100644
index 00000000000..fc89ae1c3bd
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_put_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_put_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+
+ public __db_put_reply() {
+ }
+
+ public __db_put_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __db_put_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java b/storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java
new file mode 100644
index 00000000000..c386bddd256
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_delim_msg implements XdrAble {
+ public int dbpcl_id;
+ public int delim;
+
+ public __db_re_delim_msg() {
+ }
+
+ public __db_re_delim_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(delim);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ delim = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_delim_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java b/storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java
new file mode 100644
index 00000000000..aa8a797f53d
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_delim_reply implements XdrAble {
+ public int status;
+
+ public __db_re_delim_reply() {
+ }
+
+ public __db_re_delim_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_delim_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_re_len_msg.java b/storage/bdb/rpc_server/java/gen/__db_re_len_msg.java
new file mode 100644
index 00000000000..664de5c899c
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_re_len_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_len_msg implements XdrAble {
+ public int dbpcl_id;
+ public int len;
+
+ public __db_re_len_msg() {
+ }
+
+ public __db_re_len_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(len);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ len = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_len_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_re_len_reply.java b/storage/bdb/rpc_server/java/gen/__db_re_len_reply.java
new file mode 100644
index 00000000000..dda27c8c123
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_re_len_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_len_reply implements XdrAble {
+ public int status;
+
+ public __db_re_len_reply() {
+ }
+
+ public __db_re_len_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_len_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java b/storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java
new file mode 100644
index 00000000000..2c1290b6e74
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_pad_msg implements XdrAble {
+ public int dbpcl_id;
+ public int pad;
+
+ public __db_re_pad_msg() {
+ }
+
+ public __db_re_pad_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(pad);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ pad = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_pad_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java b/storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java
new file mode 100644
index 00000000000..f0aaa9a3a70
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_pad_reply implements XdrAble {
+ public int status;
+
+ public __db_re_pad_reply() {
+ }
+
+ public __db_re_pad_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_pad_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_remove_msg.java b/storage/bdb/rpc_server/java/gen/__db_remove_msg.java
new file mode 100644
index 00000000000..dfa9066a7ec
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_remove_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_remove_msg implements XdrAble {
+ public int dbpcl_id;
+ public String name;
+ public String subdb;
+ public int flags;
+
+ public __db_remove_msg() {
+ }
+
+ public __db_remove_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_remove_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_remove_reply.java b/storage/bdb/rpc_server/java/gen/__db_remove_reply.java
new file mode 100644
index 00000000000..a2b86c04985
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_remove_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_remove_reply implements XdrAble {
+ public int status;
+
+ public __db_remove_reply() {
+ }
+
+ public __db_remove_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_remove_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_rename_msg.java b/storage/bdb/rpc_server/java/gen/__db_rename_msg.java
new file mode 100644
index 00000000000..12b434e3375
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_rename_msg.java
@@ -0,0 +1,44 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_rename_msg implements XdrAble {
+ public int dbpcl_id;
+ public String name;
+ public String subdb;
+ public String newname;
+ public int flags;
+
+ public __db_rename_msg() {
+ }
+
+ public __db_rename_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeString(newname);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ newname = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_rename_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_rename_reply.java b/storage/bdb/rpc_server/java/gen/__db_rename_reply.java
new file mode 100644
index 00000000000..4e4a22be570
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_rename_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_rename_reply implements XdrAble {
+ public int status;
+
+ public __db_rename_reply() {
+ }
+
+ public __db_rename_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_rename_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_stat_msg.java b/storage/bdb/rpc_server/java/gen/__db_stat_msg.java
new file mode 100644
index 00000000000..af536b5f707
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_stat_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_stat_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_stat_msg() {
+ }
+
+ public __db_stat_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_stat_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_stat_reply.java b/storage/bdb/rpc_server/java/gen/__db_stat_reply.java
new file mode 100644
index 00000000000..8df1460149a
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_stat_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_stat_reply implements XdrAble {
+ public int status;
+ public int [] stats;
+
+ public __db_stat_reply() {
+ }
+
+ public __db_stat_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeIntVector(stats);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ stats = xdr.xdrDecodeIntVector();
+ }
+
+}
+// End of __db_stat_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_sync_msg.java b/storage/bdb/rpc_server/java/gen/__db_sync_msg.java
new file mode 100644
index 00000000000..c6594670fc6
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_sync_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_sync_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_sync_msg() {
+ }
+
+ public __db_sync_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_sync_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_sync_reply.java b/storage/bdb/rpc_server/java/gen/__db_sync_reply.java
new file mode 100644
index 00000000000..d0a8bc8b196
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_sync_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_sync_reply implements XdrAble {
+ public int status;
+
+ public __db_sync_reply() {
+ }
+
+ public __db_sync_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_sync_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_truncate_msg.java b/storage/bdb/rpc_server/java/gen/__db_truncate_msg.java
new file mode 100644
index 00000000000..38810d65660
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_truncate_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_truncate_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int flags;
+
+ public __db_truncate_msg() {
+ }
+
+ public __db_truncate_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_truncate_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__db_truncate_reply.java b/storage/bdb/rpc_server/java/gen/__db_truncate_reply.java
new file mode 100644
index 00000000000..c4f68869007
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__db_truncate_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_truncate_reply implements XdrAble {
+ public int status;
+ public int count;
+
+ public __db_truncate_reply() {
+ }
+
+ public __db_truncate_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(count);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ count = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_truncate_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_close_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_close_msg.java
new file mode 100644
index 00000000000..eb1ca7f7e17
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_close_msg.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_close_msg implements XdrAble {
+ public int dbccl_id;
+
+ public __dbc_close_msg() {
+ }
+
+ public __dbc_close_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_close_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_close_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_close_reply.java
new file mode 100644
index 00000000000..47459aace36
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_close_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_close_reply implements XdrAble {
+ public int status;
+
+ public __dbc_close_reply() {
+ }
+
+ public __dbc_close_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_close_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_count_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_count_msg.java
new file mode 100644
index 00000000000..5f554e18a1b
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_count_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_count_msg implements XdrAble {
+ public int dbccl_id;
+ public int flags;
+
+ public __dbc_count_msg() {
+ }
+
+ public __dbc_count_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_count_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_count_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_count_reply.java
new file mode 100644
index 00000000000..4daecdd2296
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_count_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_count_reply implements XdrAble {
+ public int status;
+ public int dupcount;
+
+ public __dbc_count_reply() {
+ }
+
+ public __dbc_count_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dupcount);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dupcount = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_count_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_del_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_del_msg.java
new file mode 100644
index 00000000000..bc4bd05f573
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_del_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_del_msg implements XdrAble {
+ public int dbccl_id;
+ public int flags;
+
+ public __dbc_del_msg() {
+ }
+
+ public __dbc_del_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_del_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_del_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_del_reply.java
new file mode 100644
index 00000000000..e55ac9ffaf6
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_del_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_del_reply implements XdrAble {
+ public int status;
+
+ public __dbc_del_reply() {
+ }
+
+ public __dbc_del_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_del_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java
new file mode 100644
index 00000000000..9a3894e6158
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_dup_msg implements XdrAble {
+ public int dbccl_id;
+ public int flags;
+
+ public __dbc_dup_msg() {
+ }
+
+ public __dbc_dup_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_dup_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java
new file mode 100644
index 00000000000..6b942f1a61a
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_dup_reply implements XdrAble {
+ public int status;
+ public int dbcidcl_id;
+
+ public __dbc_dup_reply() {
+ }
+
+ public __dbc_dup_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_dup_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_get_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_get_msg.java
new file mode 100644
index 00000000000..672ace43fdd
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_get_msg.java
@@ -0,0 +1,65 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_get_msg implements XdrAble {
+ public int dbccl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __dbc_get_msg() {
+ }
+
+ public __dbc_get_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_get_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_get_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_get_reply.java
new file mode 100644
index 00000000000..8671fec6335
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_get_reply.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_get_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+ public byte [] datadata;
+
+ public __dbc_get_reply() {
+ }
+
+ public __dbc_get_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __dbc_get_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java
new file mode 100644
index 00000000000..8ca3c6171a1
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java
@@ -0,0 +1,80 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_pget_msg implements XdrAble {
+ public int dbccl_id;
+ public int skeydlen;
+ public int skeydoff;
+ public int skeyulen;
+ public int skeyflags;
+ public byte [] skeydata;
+ public int pkeydlen;
+ public int pkeydoff;
+ public int pkeyulen;
+ public int pkeyflags;
+ public byte [] pkeydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __dbc_pget_msg() {
+ }
+
+ public __dbc_pget_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(skeydlen);
+ xdr.xdrEncodeInt(skeydoff);
+ xdr.xdrEncodeInt(skeyulen);
+ xdr.xdrEncodeInt(skeyflags);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeInt(pkeydlen);
+ xdr.xdrEncodeInt(pkeydoff);
+ xdr.xdrEncodeInt(pkeyulen);
+ xdr.xdrEncodeInt(pkeyflags);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ skeydlen = xdr.xdrDecodeInt();
+ skeydoff = xdr.xdrDecodeInt();
+ skeyulen = xdr.xdrDecodeInt();
+ skeyflags = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydlen = xdr.xdrDecodeInt();
+ pkeydoff = xdr.xdrDecodeInt();
+ pkeyulen = xdr.xdrDecodeInt();
+ pkeyflags = xdr.xdrDecodeInt();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_pget_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java
new file mode 100644
index 00000000000..16cc795878d
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_pget_reply implements XdrAble {
+ public int status;
+ public byte [] skeydata;
+ public byte [] pkeydata;
+ public byte [] datadata;
+
+ public __dbc_pget_reply() {
+ }
+
+ public __dbc_pget_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __dbc_pget_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_put_msg.java b/storage/bdb/rpc_server/java/gen/__dbc_put_msg.java
new file mode 100644
index 00000000000..98d12423dc5
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_put_msg.java
@@ -0,0 +1,65 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_put_msg implements XdrAble {
+ public int dbccl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __dbc_put_msg() {
+ }
+
+ public __dbc_put_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_put_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__dbc_put_reply.java b/storage/bdb/rpc_server/java/gen/__dbc_put_reply.java
new file mode 100644
index 00000000000..385f9f783fb
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__dbc_put_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_put_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+
+ public __dbc_put_reply() {
+ }
+
+ public __dbc_put_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __dbc_put_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java b/storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java
new file mode 100644
index 00000000000..d1fce1ffa35
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_cachesize_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int gbytes;
+ public int bytes;
+ public int ncache;
+
+ public __env_cachesize_msg() {
+ }
+
+ public __env_cachesize_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(gbytes);
+ xdr.xdrEncodeInt(bytes);
+ xdr.xdrEncodeInt(ncache);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ gbytes = xdr.xdrDecodeInt();
+ bytes = xdr.xdrDecodeInt();
+ ncache = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_cachesize_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java b/storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java
new file mode 100644
index 00000000000..193f8355d71
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_cachesize_reply implements XdrAble {
+ public int status;
+
+ public __env_cachesize_reply() {
+ }
+
+ public __env_cachesize_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_cachesize_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_close_msg.java b/storage/bdb/rpc_server/java/gen/__env_close_msg.java
new file mode 100644
index 00000000000..5e657bacfa5
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_close_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_close_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int flags;
+
+ public __env_close_msg() {
+ }
+
+ public __env_close_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_close_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_close_reply.java b/storage/bdb/rpc_server/java/gen/__env_close_reply.java
new file mode 100644
index 00000000000..11e61f7c8c3
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_close_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_close_reply implements XdrAble {
+ public int status;
+
+ public __env_close_reply() {
+ }
+
+ public __env_close_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_close_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_create_msg.java b/storage/bdb/rpc_server/java/gen/__env_create_msg.java
new file mode 100644
index 00000000000..dbe546ae23a
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_create_msg.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_create_msg implements XdrAble {
+ public int timeout;
+
+ public __env_create_msg() {
+ }
+
+ public __env_create_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(timeout);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ timeout = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_create_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_create_reply.java b/storage/bdb/rpc_server/java/gen/__env_create_reply.java
new file mode 100644
index 00000000000..5427fc4bc1e
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_create_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_create_reply implements XdrAble {
+ public int status;
+ public int envcl_id;
+
+ public __env_create_reply() {
+ }
+
+ public __env_create_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(envcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ envcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_create_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java b/storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java
new file mode 100644
index 00000000000..9730a92c590
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java
@@ -0,0 +1,44 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbremove_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int txnpcl_id;
+ public String name;
+ public String subdb;
+ public int flags;
+
+ public __env_dbremove_msg() {
+ }
+
+ public __env_dbremove_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbremove_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java b/storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java
new file mode 100644
index 00000000000..75cc5a940cc
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbremove_reply implements XdrAble {
+ public int status;
+
+ public __env_dbremove_reply() {
+ }
+
+ public __env_dbremove_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbremove_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java b/storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java
new file mode 100644
index 00000000000..0bbda262b64
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java
@@ -0,0 +1,47 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbrename_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int txnpcl_id;
+ public String name;
+ public String subdb;
+ public String newname;
+ public int flags;
+
+ public __env_dbrename_msg() {
+ }
+
+ public __env_dbrename_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeString(newname);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ newname = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbrename_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java b/storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java
new file mode 100644
index 00000000000..0cc8882305d
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbrename_reply implements XdrAble {
+ public int status;
+
+ public __env_dbrename_reply() {
+ }
+
+ public __env_dbrename_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbrename_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java b/storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java
new file mode 100644
index 00000000000..84e9a36d372
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_encrypt_msg implements XdrAble {
+ public int dbenvcl_id;
+ public String passwd;
+ public int flags;
+
+ public __env_encrypt_msg() {
+ }
+
+ public __env_encrypt_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeString(passwd);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ passwd = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_encrypt_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java b/storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java
new file mode 100644
index 00000000000..e202a3089d0
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_encrypt_reply implements XdrAble {
+ public int status;
+
+ public __env_encrypt_reply() {
+ }
+
+ public __env_encrypt_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_encrypt_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_flags_msg.java b/storage/bdb/rpc_server/java/gen/__env_flags_msg.java
new file mode 100644
index 00000000000..25cd5f85f6d
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_flags_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_flags_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int flags;
+ public int onoff;
+
+ public __env_flags_msg() {
+ }
+
+ public __env_flags_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(flags);
+ xdr.xdrEncodeInt(onoff);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ onoff = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_flags_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_flags_reply.java b/storage/bdb/rpc_server/java/gen/__env_flags_reply.java
new file mode 100644
index 00000000000..d348a9224ea
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_flags_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_flags_reply implements XdrAble {
+ public int status;
+
+ public __env_flags_reply() {
+ }
+
+ public __env_flags_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_flags_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_open_msg.java b/storage/bdb/rpc_server/java/gen/__env_open_msg.java
new file mode 100644
index 00000000000..e4649b41f9e
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_open_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_open_msg implements XdrAble {
+ public int dbenvcl_id;
+ public String home;
+ public int flags;
+ public int mode;
+
+ public __env_open_msg() {
+ }
+
+ public __env_open_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeString(home);
+ xdr.xdrEncodeInt(flags);
+ xdr.xdrEncodeInt(mode);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ home = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ mode = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_open_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_open_reply.java b/storage/bdb/rpc_server/java/gen/__env_open_reply.java
new file mode 100644
index 00000000000..1994afb4cf2
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_open_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_open_reply implements XdrAble {
+ public int status;
+ public int envcl_id;
+
+ public __env_open_reply() {
+ }
+
+ public __env_open_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(envcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ envcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_open_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_remove_msg.java b/storage/bdb/rpc_server/java/gen/__env_remove_msg.java
new file mode 100644
index 00000000000..b32d758f0f5
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_remove_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_remove_msg implements XdrAble {
+ public int dbenvcl_id;
+ public String home;
+ public int flags;
+
+ public __env_remove_msg() {
+ }
+
+ public __env_remove_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeString(home);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ home = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_remove_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__env_remove_reply.java b/storage/bdb/rpc_server/java/gen/__env_remove_reply.java
new file mode 100644
index 00000000000..19e4d52f662
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__env_remove_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_remove_reply implements XdrAble {
+ public int status;
+
+ public __env_remove_reply() {
+ }
+
+ public __env_remove_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_remove_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_abort_msg.java b/storage/bdb/rpc_server/java/gen/__txn_abort_msg.java
new file mode 100644
index 00000000000..ff44c534e46
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_abort_msg.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_abort_msg implements XdrAble {
+ public int txnpcl_id;
+
+ public __txn_abort_msg() {
+ }
+
+ public __txn_abort_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_abort_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_abort_reply.java b/storage/bdb/rpc_server/java/gen/__txn_abort_reply.java
new file mode 100644
index 00000000000..58f275c1a8f
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_abort_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_abort_reply implements XdrAble {
+ public int status;
+
+ public __txn_abort_reply() {
+ }
+
+ public __txn_abort_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_abort_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_begin_msg.java b/storage/bdb/rpc_server/java/gen/__txn_begin_msg.java
new file mode 100644
index 00000000000..877031e8d3a
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_begin_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_begin_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int parentcl_id;
+ public int flags;
+
+ public __txn_begin_msg() {
+ }
+
+ public __txn_begin_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(parentcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ parentcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_begin_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_begin_reply.java b/storage/bdb/rpc_server/java/gen/__txn_begin_reply.java
new file mode 100644
index 00000000000..65a0c4016c2
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_begin_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_begin_reply implements XdrAble {
+ public int status;
+ public int txnidcl_id;
+
+ public __txn_begin_reply() {
+ }
+
+ public __txn_begin_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(txnidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ txnidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_begin_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_commit_msg.java b/storage/bdb/rpc_server/java/gen/__txn_commit_msg.java
new file mode 100644
index 00000000000..4b988d0c282
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_commit_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_commit_msg implements XdrAble {
+ public int txnpcl_id;
+ public int flags;
+
+ public __txn_commit_msg() {
+ }
+
+ public __txn_commit_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_commit_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_commit_reply.java b/storage/bdb/rpc_server/java/gen/__txn_commit_reply.java
new file mode 100644
index 00000000000..b26937b82dd
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_commit_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_commit_reply implements XdrAble {
+ public int status;
+
+ public __txn_commit_reply() {
+ }
+
+ public __txn_commit_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_commit_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_discard_msg.java b/storage/bdb/rpc_server/java/gen/__txn_discard_msg.java
new file mode 100644
index 00000000000..87f5d4f77a7
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_discard_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_discard_msg implements XdrAble {
+ public int txnpcl_id;
+ public int flags;
+
+ public __txn_discard_msg() {
+ }
+
+ public __txn_discard_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_discard_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_discard_reply.java b/storage/bdb/rpc_server/java/gen/__txn_discard_reply.java
new file mode 100644
index 00000000000..9792211afcc
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_discard_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_discard_reply implements XdrAble {
+ public int status;
+
+ public __txn_discard_reply() {
+ }
+
+ public __txn_discard_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_discard_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java b/storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java
new file mode 100644
index 00000000000..6e09f2c7771
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_prepare_msg implements XdrAble {
+ public int txnpcl_id;
+ public byte [] gid;
+
+ public __txn_prepare_msg() {
+ }
+
+ public __txn_prepare_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeOpaque(gid, 128);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ gid = xdr.xdrDecodeOpaque(128);
+ }
+
+}
+// End of __txn_prepare_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java b/storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java
new file mode 100644
index 00000000000..d7590117952
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_prepare_reply implements XdrAble {
+ public int status;
+
+ public __txn_prepare_reply() {
+ }
+
+ public __txn_prepare_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_prepare_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_recover_msg.java b/storage/bdb/rpc_server/java/gen/__txn_recover_msg.java
new file mode 100644
index 00000000000..65153334403
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_recover_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_recover_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int count;
+ public int flags;
+
+ public __txn_recover_msg() {
+ }
+
+ public __txn_recover_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(count);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ count = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_recover_msg.java
diff --git a/storage/bdb/rpc_server/java/gen/__txn_recover_reply.java b/storage/bdb/rpc_server/java/gen/__txn_recover_reply.java
new file mode 100644
index 00000000000..0161ec949da
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/__txn_recover_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_recover_reply implements XdrAble {
+ public int status;
+ public int [] txn;
+ public byte [] gid;
+ public int retcount;
+
+ public __txn_recover_reply() {
+ }
+
+ public __txn_recover_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeIntVector(txn);
+ xdr.xdrEncodeDynamicOpaque(gid);
+ xdr.xdrEncodeInt(retcount);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ txn = xdr.xdrDecodeIntVector();
+ gid = xdr.xdrDecodeDynamicOpaque();
+ retcount = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_recover_reply.java
diff --git a/storage/bdb/rpc_server/java/gen/db_server.java b/storage/bdb/rpc_server/java/gen/db_server.java
new file mode 100644
index 00000000000..a14a77028a2
--- /dev/null
+++ b/storage/bdb/rpc_server/java/gen/db_server.java
@@ -0,0 +1,67 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+/**
+ * A collection of constants used by the "db_server" ONC/RPC program.
+ */
+public interface db_server {
+ public static final int __DB_db_close_4001 = 19;
+ public static final int __DB_db_flags_4001 = 24;
+ public static final int __DB_dbc_dup_4001 = 47;
+ public static final int __DB_db_encrypt_4001 = 22;
+ public static final int __DB_env_dbrename_4001 = 5;
+ public static final int __DB_env_remove_4001 = 9;
+ public static final int __DB_dbc_pget_4001 = 49;
+ public static final int __DB_env_cachesize_4001 = 1;
+ public static final int __DB_db_lorder_4001 = 29;
+ public static final int __DB_db_key_range_4001 = 28;
+ public static final int __DB_db_bt_minkey_4001 = 18;
+ public static final int __DB_db_sync_4001 = 40;
+ public static final int __DB_dbc_close_4001 = 44;
+ public static final int __DB_db_join_4001 = 43;
+ public static final int __DB_db_pagesize_4001 = 31;
+ public static final int DB_RPC_SERVERVERS = 4001;
+ public static final int __DB_db_open_4001 = 30;
+ public static final int __DB_dbc_get_4001 = 48;
+ public static final int __DB_db_cursor_4001 = 42;
+ public static final int __DB_txn_commit_4001 = 12;
+ public static final int __DB_dbc_del_4001 = 46;
+ public static final int __DB_env_create_4001 = 3;
+ public static final int __DB_env_open_4001 = 8;
+ public static final int __DB_txn_prepare_4001 = 14;
+ public static final int __DB_db_pget_4001 = 32;
+ public static final int __DB_db_stat_4001 = 39;
+ public static final int __DB_db_h_nelem_4001 = 27;
+ public static final int __DB_db_remove_4001 = 37;
+ public static final int __DB_db_re_delim_4001 = 34;
+ public static final int __DB_db_re_pad_4001 = 36;
+ public static final int __DB_txn_abort_4001 = 10;
+ public static final int __DB_txn_recover_4001 = 15;
+ public static final int __DB_db_get_4001 = 25;
+ public static final int __DB_db_extentsize_4001 = 23;
+ public static final int DB_RPC_SERVERPROG = 351457;
+ public static final int __DB_dbc_put_4001 = 50;
+ public static final int __DB_db_truncate_4001 = 41;
+ public static final int __DB_db_del_4001 = 21;
+ public static final int __DB_db_bt_maxkey_4001 = 17;
+ public static final int __DB_env_dbremove_4001 = 4;
+ public static final int __DB_txn_discard_4001 = 13;
+ public static final int __DB_db_re_len_4001 = 35;
+ public static final int __DB_env_close_4001 = 2;
+ public static final int __DB_env_flags_4001 = 7;
+ public static final int __DB_db_rename_4001 = 38;
+ public static final int __DB_db_associate_4001 = 16;
+ public static final int __DB_txn_begin_4001 = 11;
+ public static final int __DB_env_encrypt_4001 = 6;
+ public static final int __DB_db_h_ffactor_4001 = 26;
+ public static final int __DB_db_put_4001 = 33;
+ public static final int __DB_db_create_4001 = 20;
+ public static final int __DB_dbc_count_4001 = 45;
+}
+// End of db_server.java
diff --git a/storage/bdb/rpc_server/java/jrpcgen.jar b/storage/bdb/rpc_server/java/jrpcgen.jar
new file mode 100644
index 00000000000..338825b848d
--- /dev/null
+++ b/storage/bdb/rpc_server/java/jrpcgen.jar
Binary files differ
diff --git a/storage/bdb/rpc_server/java/oncrpc.jar b/storage/bdb/rpc_server/java/oncrpc.jar
new file mode 100644
index 00000000000..e0f5cfa6966
--- /dev/null
+++ b/storage/bdb/rpc_server/java/oncrpc.jar
Binary files differ
diff --git a/storage/bdb/rpc_server/java/s_jrpcgen b/storage/bdb/rpc_server/java/s_jrpcgen
new file mode 100644
index 00000000000..fed8cbf56bb
--- /dev/null
+++ b/storage/bdb/rpc_server/java/s_jrpcgen
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+java -jar jrpcgen.jar -d gen -noclient -nobackup -p com.sleepycat.db.rpcserver -s DbServerStub ../db_server.x
diff --git a/storage/bdb/rpc_server/rpc.src b/storage/bdb/rpc_server/rpc.src
new file mode 100644
index 00000000000..7afee49b066
--- /dev/null
+++ b/storage/bdb/rpc_server/rpc.src
@@ -0,0 +1,718 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc.src,v 1.75 2002/07/18 02:57:19 margo Exp $
+#
+# Syntax:
+# BEGIN function_name {CODE | RETCODE | NOFUNC}
+# CODE: generate XDR and client code, return status
+# Used for functions that just return a status and nothing else.
+# RETCODE:generate XDR and client code, call return function
+# (generate template return function)
+# Used for functions that returns data.
+# NOFUNC: generate a client "unsupported function" with right args
+# Used for unsupported functions.
+#
+# ARG {IGNORE | STRING | INT | DBT | LIST | ID | CONST} C-type varname
+# IGNORE: not passed to server
+# STRING: string passed to server
+# DBT: DBT arg passed to server
+# LIST: list passed to server (NULL-terminated list of something)
+# INT: integer passed to server
+# ID: cl_id from arg passed to server
+# GID: global id passed to server
+# CONST: do not generate COMPQUIET (for NOFUNC only)
+# FUNCPROT prototype
+# FUNCARG functiontype
+# These two *MUST* go together and FUNCPROT *MUST* be first. These
+# are for the tricky user-supplied functions to some methods. They
+# are not supported in RPC, so will be ignored, but the complicated
+# syntax of their argument requires we have a special flag for them
+# that contains the verbatim text to use in the prototype and the
+# c-type, respectively. The FUNCARG must include the function, and
+# must call it 'funcN', where N is the count of functions. Almost
+# always it must be func0. A *very* few methods have more than one
+# user-supplied functions, in those cases, it must be func0, func1, etc.
+#
+# All messages automatically return "status" and return that from
+# the call to the function. RET's are additional things the server
+# may return. RET is like ARG but does not need the IGNORE option.
+# RET {STRING | INT | DBT | LIST | ID} varname [GID | INT | ID]
+# STRING: string from server
+# DBT: DBT arg from server
+# LIST: list from server (NULL-terminated list)
+# Must have list type of GID, ID or INT specified
+# INT: integer from server
+# ID: id from server stored in cl_id
+# END function end.
+
+#
+# Environment functions
+#
+BEGIN env_alloc NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+FUNCPROT void *(*)(void *, size_t)
+FUNCARG void *(*func1) __P((void *, size_t))
+FUNCPROT void (*)(void *)
+FUNCARG void (*func2) __P((void *))
+END
+BEGIN set_app_dispatch NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)
+FUNCARG int (*func0) __P((DB_ENV *, DBT *, DB_LSN *, db_recops))
+END
+BEGIN env_cachesize CODE
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t gbytes
+ARG INT u_int32_t bytes
+ARG INT int ncache
+END
+BEGIN env_close RETCODE
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+END
+BEGIN env_create RETCODE
+ARG IGNORE DB_ENV * dbenv
+ARG INT long timeout
+RET ID long env
+END
+BEGIN set_data_dir NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
+END
+BEGIN env_dbremove CODE
+ARG ID DB_ENV * dbenv
+ARG ID DB_TXN * txnp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG INT u_int32_t flags
+END
+BEGIN env_dbrename CODE
+ARG ID DB_ENV * dbenv
+ARG ID DB_TXN * txnp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG STRING const char * newname
+ARG INT u_int32_t flags
+END
+BEGIN env_encrypt CODE
+ARG ID DB_ENV * dbenv
+ARG STRING const char * passwd
+ARG INT u_int32_t flags
+END
+BEGIN env_set_feedback NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT void (*)(DB_ENV *, int, int)
+FUNCARG void (*func0) __P((DB_ENV *, int, int))
+END
+BEGIN env_flags CODE
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+ARG INT int onoff
+END
+BEGIN set_lg_bsize NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t bsize
+END
+BEGIN set_lg_dir NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
+END
+BEGIN set_lg_max NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lg_regionmax NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_conflict NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int8_t * conflicts
+ARG INT int modes
+END
+BEGIN set_lk_detect NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t detect
+END
+BEGIN set_lk_max NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_max_locks NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_max_lockers NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_max_objects NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_mp_mmapsize NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT size_t mmapsize
+END
+BEGIN env_open RETCODE
+ARG ID DB_ENV * dbenv
+ARG STRING const char * home
+ARG INT u_int32_t flags
+ARG INT int mode
+RET ID long env
+END
+BEGIN env_paniccall NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT void (*)(DB_ENV *, int)
+FUNCARG void (*func0) __P((DB_ENV *, int))
+END
+BEGIN env_remove RETCODE
+ARG ID DB_ENV * dbenv
+ARG STRING const char * home
+ARG INT u_int32_t flags
+END
+BEGIN set_shm_key NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT long shm_key
+END
+BEGIN set_tas_spins NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t tas_spins
+END
+BEGIN set_timeout NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t timeout
+ARG INT u_int32_t flags
+END
+BEGIN set_tmp_dir NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
+END
+BEGIN set_tx_max NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_tx_timestamp NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT time_t * max
+END
+BEGIN set_verbose NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t which
+ARG INT int onoff
+END
+#
+# Transaction functions
+#
+BEGIN txn_abort RETCODE
+ARG ID DB_TXN * txnp
+END
+BEGIN txn_begin RETCODE
+ARG ID DB_ENV * dbenv
+ARG ID DB_TXN * parent
+ARG IGNORE DB_TXN ** txnpp
+ARG INT u_int32_t flags
+RET ID long txnid
+END
+BEGIN txn_checkpoint NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t kbyte
+ARG INT u_int32_t min
+ARG INT u_int32_t flags
+END
+BEGIN txn_commit RETCODE
+ARG ID DB_TXN * txnp
+ARG INT u_int32_t flags
+END
+BEGIN txn_discard RETCODE
+ARG ID DB_TXN * txnp
+ARG INT u_int32_t flags
+END
+BEGIN txn_prepare CODE
+ARG ID DB_TXN * txnp
+ARG GID u_int8_t * gid
+END
+BEGIN txn_recover RETCODE
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_PREPLIST * preplist
+ARG INT long count
+ARG IGNORE long * retp
+ARG INT u_int32_t flags
+RET LIST DB_TXN * txn ID
+RET LIST u_int8_t * gid GID
+RET INT long retcount
+END
+BEGIN txn_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_TXN_STAT ** statp
+ARG INT u_int32_t flags
+END
+BEGIN txn_timeout NOFUNC
+ARG ID DB_TXN * txnp
+ARG INT u_int32_t timeout
+ARG INT u_int32_t flags
+END
+#
+# Replication functions
+#
+BEGIN rep_elect NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int nsites
+ARG INT int pri
+ARG INT u_int32_t timeout
+ARG IGNORE int * idp
+END
+BEGIN rep_flush NOFUNC
+ARG ID DB_ENV * dbenv
+END
+BEGIN rep_process_message NOFUNC
+ARG ID DB_ENV * dbenv
+ARG DBT DBT * rec
+ARG DBT DBT * control
+ARG IGNORE int * idp
+END
+BEGIN rep_set_limit NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t mbytes
+ARG INT u_int32_t bytes
+END
+BEGIN rep_set_request NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t min
+ARG INT u_int32_t max
+END
+BEGIN rep_set_rep_transport NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int id
+FUNCPROT int (*)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t)
+FUNCARG int (*func0) __P((DB_ENV *, const DBT *, const DBT *, int, u_int32_t))
+END
+BEGIN rep_start NOFUNC
+ARG ID DB_ENV * dbenv
+ARG DBT DBT * cdata
+ARG INT u_int32_t flags
+END
+BEGIN rep_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_REP_STAT ** statp
+ARG INT u_int32_t flags
+END
+
+#
+# Database functions
+#
+BEGIN db_alloc NOFUNC
+ARG ID DB * dbp
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+FUNCPROT void *(*)(void *, size_t)
+FUNCARG void *(*func1) __P((void *, size_t))
+FUNCPROT void (*)(void *)
+FUNCARG void (*func2) __P((void *))
+END
+BEGIN db_associate CODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG ID DB * sdbp
+FUNCPROT int (*)(DB *, const DBT *, const DBT *, DBT *)
+FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *, DBT *))
+ARG INT u_int32_t flags
+END
+BEGIN db_bt_compare NOFUNC
+ARG ID DB * dbp
+FUNCPROT int (*)(DB *, const DBT *, const DBT *)
+FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *))
+END
+BEGIN db_bt_maxkey CODE
+ARG ID DB * dbp
+ARG INT u_int32_t maxkey
+END
+BEGIN db_bt_minkey CODE
+ARG ID DB * dbp
+ARG INT u_int32_t minkey
+END
+BEGIN db_bt_prefix NOFUNC
+ARG ID DB * dbp
+FUNCPROT size_t(*)(DB *, const DBT *, const DBT *)
+FUNCARG size_t (*func0) __P((DB *, const DBT *, const DBT *))
+END
+BEGIN db_set_append_recno NOFUNC
+ARG ID DB * dbp
+FUNCPROT int (*)(DB *, DBT *, db_recno_t)
+FUNCARG int (*func0) __P((DB *, DBT *, db_recno_t))
+END
+BEGIN db_cache_priority NOFUNC
+ARG ID DB * dbp
+ARG INT DB_CACHE_PRIORITY priority
+END
+BEGIN db_cachesize NOFUNC
+ARG ID DB * dbp
+ARG INT u_int32_t gbytes
+ARG INT u_int32_t bytes
+ARG INT int ncache
+END
+BEGIN db_close RETCODE
+ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_create RETCODE
+ARG IGNORE DB * dbp
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+RET ID long db
+END
+BEGIN db_del CODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG INT u_int32_t flags
+END
+BEGIN db_dup_compare NOFUNC
+ARG ID DB * dbp
+FUNCPROT int (*)(DB *, const DBT *, const DBT *)
+FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *))
+END
+BEGIN db_encrypt CODE
+ARG ID DB * dbp
+ARG STRING const char * passwd
+ARG INT u_int32_t flags
+END
+BEGIN db_extentsize CODE
+ARG ID DB * dbp
+ARG INT u_int32_t extentsize
+END
+BEGIN db_fd NOFUNC
+ARG ID DB * dbp
+ARG IGNORE int * fdp
+END
+BEGIN db_feedback NOFUNC
+ARG ID DB * dbp
+FUNCPROT void (*)(DB *, int, int)
+FUNCARG void (*func0) __P((DB *, int, int))
+END
+BEGIN db_flags CODE
+ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_get RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+RET DBT DBT * data
+END
+BEGIN db_h_ffactor CODE
+ARG ID DB * dbp
+ARG INT u_int32_t ffactor
+END
+BEGIN db_h_hash NOFUNC
+ARG ID DB * dbp
+FUNCPROT u_int32_t(*)(DB *, const void *, u_int32_t)
+FUNCARG u_int32_t (*func0) __P((DB *, const void *, u_int32_t))
+END
+BEGIN db_h_nelem CODE
+ARG ID DB * dbp
+ARG INT u_int32_t nelem
+END
+BEGIN db_key_range RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG IGNORE DB_KEY_RANGE * range
+ARG INT u_int32_t flags
+RET DBL double less
+RET DBL double equal
+RET DBL double greater
+END
+BEGIN db_lorder CODE
+ARG ID DB * dbp
+ARG INT int lorder
+END
+# XXX
+# The line:
+# RET INT u_int32_t dbflags
+# should go away when a get_flags method exists. It is
+# needed now because Tcl looks at dbp->flags.
+#
+BEGIN db_open RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG INT DBTYPE type
+ARG INT u_int32_t flags
+ARG INT int mode
+RET ID long db
+RET INT DBTYPE type
+RET INT u_int32_t dbflags
+RET INT int lorder
+END
+BEGIN db_pagesize CODE
+ARG ID DB * dbp
+ARG INT u_int32_t pagesize
+END
+BEGIN db_panic NOFUNC
+ARG ID DB * dbp
+FUNCPROT void (*)(DB_ENV *, int)
+FUNCARG void (*func0) __P((DB_ENV *, int))
+END
+BEGIN db_pget RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * skey
+ARG DBT DBT * pkey
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * skey
+RET DBT DBT * pkey
+RET DBT DBT * data
+END
+BEGIN db_put RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+END
+BEGIN db_re_delim CODE
+ARG ID DB * dbp
+ARG INT int delim
+END
+BEGIN db_re_len CODE
+ARG ID DB * dbp
+ARG INT u_int32_t len
+END
+BEGIN db_re_pad CODE
+ARG ID DB * dbp
+ARG INT int pad
+END
+BEGIN db_re_source NOFUNC
+ARG ID DB * dbp
+ARG STRING const char * re_source
+END
+BEGIN db_remove RETCODE
+ARG ID DB * dbp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG INT u_int32_t flags
+END
+BEGIN db_rename RETCODE
+ARG ID DB * dbp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG STRING const char * newname
+ARG INT u_int32_t flags
+END
+BEGIN db_stat RETCODE
+ARG ID DB * dbp
+ARG IGNORE void * sp
+ARG INT u_int32_t flags
+RET LIST u_int32_t * stats INT
+END
+BEGIN db_sync CODE
+ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_truncate RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG IGNORE u_int32_t * countp
+ARG INT u_int32_t flags
+RET INT u_int32_t count
+END
+BEGIN db_upgrade NOFUNC
+ARG ID DB * dbp
+ARG STRING const char * fname
+ARG INT u_int32_t flags
+END
+BEGIN db_verify NOFUNC
+ARG ID DB * dbp
+ARG STRING const char * fname
+ARG STRING const char * subdb
+ARG IGNORE FILE * outfile
+ARG INT u_int32_t flags
+END
+#
+# Cursor functions
+#
+BEGIN db_cursor RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG IGNORE DBC ** dbcpp
+ARG INT u_int32_t flags
+RET ID long dbcid
+END
+BEGIN db_join RETCODE
+ARG ID DB * dbp
+ARG LIST DBC ** curs ID
+ARG IGNORE DBC ** dbcp
+ARG INT u_int32_t flags
+RET ID long dbcid
+END
+BEGIN dbc_close RETCODE
+ARG ID DBC * dbc
+END
+BEGIN dbc_count RETCODE
+ARG ID DBC * dbc
+ARG IGNORE db_recno_t * countp
+ARG INT u_int32_t flags
+RET INT db_recno_t dupcount
+END
+BEGIN dbc_del CODE
+ARG ID DBC * dbc
+ARG INT u_int32_t flags
+END
+BEGIN dbc_dup RETCODE
+ARG ID DBC * dbc
+ARG IGNORE DBC ** dbcp
+ARG INT u_int32_t flags
+RET ID long dbcid
+END
+BEGIN dbc_get RETCODE
+ARG ID DBC * dbc
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+RET DBT DBT * data
+END
+BEGIN dbc_pget RETCODE
+ARG ID DBC * dbc
+ARG DBT DBT * skey
+ARG DBT DBT * pkey
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * skey
+RET DBT DBT * pkey
+RET DBT DBT * data
+END
+BEGIN dbc_put RETCODE
+ARG ID DBC * dbc
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
+END
+
+#
+# Unsupported environment subsystems
+#
+#
+# Locking subsystem
+#
+BEGIN lock_detect NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t flags
+ARG INT u_int32_t atype
+ARG IGNORE int * aborted
+END
+BEGIN lock_get NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t locker
+ARG INT u_int32_t flags
+ARG CONST const DBT * obj
+ARG INT db_lockmode_t mode
+ARG IGNORE DB_LOCK * lock
+END
+BEGIN lock_id NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t * idp
+END
+BEGIN lock_id_free NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t id
+END
+BEGIN lock_put NOFUNC
+ARG ID DB_ENV * dbenv
+ARG ID DB_LOCK * lock
+END
+BEGIN lock_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LOCK_STAT ** statp
+ARG INT u_int32_t flags
+END
+BEGIN lock_vec NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t locker
+ARG INT u_int32_t flags
+ARG IGNORE DB_LOCKREQ * list
+ARG INT int nlist
+ARG IGNORE DB_LOCKREQ ** elistp
+END
+#
+# Logging subsystem
+#
+BEGIN log_archive NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE char *** listp
+ARG INT u_int32_t flags
+END
+BEGIN log_cursor NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LOGC ** logcp
+ARG INT u_int32_t flags
+END
+#
+# Don't do log_compare. It doesn't have an env we can get at,
+# and it doesn't manipulate DB internal information.
+#
+BEGIN log_file NOFUNC
+ARG ID DB_ENV * dbenv
+ARG CONST const DB_LSN * lsn
+ARG STRING char * namep
+ARG INT size_t len
+END
+BEGIN log_flush NOFUNC
+ARG ID DB_ENV * dbenv
+ARG CONST const DB_LSN * lsn
+END
+BEGIN log_put NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LSN * lsn
+ARG DBT const DBT * data
+ARG INT u_int32_t flags
+END
+BEGIN log_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LOG_STAT ** statp
+ARG INT u_int32_t flags
+END
+#
+# Mpool Subsystem
+#
+BEGIN memp_fcreate NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_MPOOLFILE ** mpf
+ARG IGNORE u_int32_t flags
+END
+BEGIN memp_register NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int ftype
+FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *)
+FUNCARG int (*func0) __P((DB_ENV *, db_pgno_t, void *, DBT *))
+FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *)
+FUNCARG int (*func1) __P((DB_ENV *, db_pgno_t, void *, DBT *))
+END
+BEGIN memp_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_MPOOL_STAT ** gstatp
+ARG IGNORE DB_MPOOL_FSTAT *** fstatp
+ARG INT u_int32_t flags
+END
+BEGIN memp_sync NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LSN * lsn
+END
+BEGIN memp_trickle NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int pct
+ARG IGNORE int * nwrotep
+END
diff --git a/storage/bdb/tcl/docs/db.html b/storage/bdb/tcl/docs/db.html
new file mode 100644
index 00000000000..4f04c2c4f96
--- /dev/null
+++ b/storage/bdb/tcl/docs/db.html
@@ -0,0 +1,263 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Database Commands"></A>Database Commands</H2>
+The database commands provide a fairly straightforward mapping to the
+DB method functions.
+
+<P>
+<B>> berkdb open</B>
+<dl>
+
+<dt><B>[-btcompare <I>proc</I>]</B><dd>
+Sets the Btree comparison function to the Tcl procedure named
+<I>proc</I> using the
+<A HREF="../../docs/api_c/db_set_bt_compare.html">DB->set_bt_compare</A>
+method.
+
+<dt><B>[-btree|-hash|-recno|-queue|-unknown]</B><dd>
+
+Select the database type:<br>
+DB_BTREE, DB_HASH, DB_RECNO, DB_QUEUE or DB_UNKNOWN.
+
+
+<dt><B>[-cachesize {<I>gbytes bytes ncaches</I>}]</B><dd>
+Sets the size of the database cache to the size specified by
+<I>gbytes</I> and <I>bytes</I>, broken up into <I>ncaches</I> number of
+caches using the
+<A HREF="../../docs/api_c/db_set_cachesize.html">DB->set_cachesize</A>
+method.
+
+<dt><B>[-create]</B><dd>
+Selects the DB_CREATE flag to create underlying files.
+
+<dt><B>[-delim <I>delim</I>]</B><dd>
+Sets the delimiting byte for variable length records to <I>delim</I>
+using the
+<A HREF="../../docs/api_c/db_set_re_delim.html">DB->set_re_delim</A>
+method.
+
+<dt><B>[-dup]</B><dd>
+Selects the DB_DUP flag to permit duplicates in the database.
+
+<dt><B>[-dupcompare <I>proc</I>]</B><dd>
+Sets the duplicate data comparison function to the Tcl procedure named
+<I>proc</I> using the
+<A HREF="../../docs/api_c/db_set_dup_compare.html">DB->set_dup_compare</A>
+method.
+
+<dt><B>[-dupsort]</B><dd>
+Selects the DB_DUPSORT flag to support sorted duplicates.
+
+<dt><B>[-env <I>env</I>]</B><dd>
+The database environment.
+
+<dt><B>[-errfile <I>filename</I>]</B><dd>
+Specifies the error file to use for this environment to <I>filename</I>
+by calling
+<A HREF="../../docs/api_c/db_set_errfile.html">DB->set_errfile</A>.
+If the file already exists then we will append to the end of the file.
+
+<dt><B>[-excl]</B><dd>
+Selects the DB_EXCL flag to exclusively create underlying files.
+
+<dt><B>[-extent <I>size</I>]</B><dd>
+Sets the size of a Queue database extent to the given <I>size</I> using
+the
+<A HREF="../../docs/api_c/db_set_q_extentsize.html">DB->set_q_extentsize</A>
+method.
+
+<dt><B>[-ffactor <I>density</I>]</B><dd>
+Sets the hash table key density to the given <I>density</I> using the
+<A HREF="../../docs/api_c/db_set_h_ffactor.html">DB->set_h_ffactor</A>
+method.
+
+<dt><B>[-hashproc <I>proc</I>]</B><dd>
+Sets a user-defined hash function to the Tcl procedure named <I>proc</I>
+using the
+<A HREF="../../docs/api_c/db_set_h_hash.html">DB->set_h_hash</A> method.
+
+<dt><B>[-len <I>len</I>]</B><dd>
+Sets the length of fixed-length records to <I>len</I> using the
+<A HREF="../../docs/api_c/db_set_re_len.html">DB->set_re_len</A>
+method.
+
+<dt><B>[-lorder <I>order</I>]</B><dd>
+Sets the byte order for integers stored in the database meta-data to
+the given <I>order</I> using the
+<A HREF="../../docs/api_c/db_set_lorder.html">DB->set_lorder</A>
+method.
+
+<dt><B>[-minkey <I>minkey</I>]</B><dd>
+Sets the minimum number of keys per Btree page to <I>minkey</I> using
+the
+<A HREF="../../docs/api_c/db_set_bt_minkey.html">DB->set_bt_minkey</A>
+method.
+
+<dt><B>[-mode <I>mode</I>]</B><dd>
+Specifies the mode for created files.
+
+<dt><B>[-nelem <I>size</I>]</B><dd>
+Sets the hash table size estimate to the given <I>size</I> using the
+<A HREF="../../docs/api_c/db_set_h_nelem.html">DB->set_h_nelem</A>
+method.
+
+<dt><B>[-nommap]</B><dd>
+Selects the DB_NOMMAP flag to forbid mmaping of files.
+
+<dt><B>[-pad <I>pad</I>]</B><dd>
+Sets the pad character used for fixed length records to <I>pad</I> using
+the
+<A HREF="../../docs/api_c/db_set_re_pad.html">DB->set_re_pad</A> method.
+
+<dt><B>[-pagesize <I>pagesize</I>]</B><dd>
+Sets the size of the database page to <I>pagesize</I> using the
+<A HREF="../../docs/api_c/db_set_pagesize.html">DB->set_pagesize</A>
+method.
+
+<dt><B>[-rdonly]</B><dd>
+Selects the DB_RDONLY flag for opening in read-only mode.
+
+<dt><B>[-recnum]</B><dd>
+Selects the DB_RECNUM flag to support record numbers in Btrees.
+
+<dt><B>[-renumber]</B><dd>
+Selects the DB_RENUMBER flag to support mutable record numbers.
+
+<dt><B>[-revsplitoff]</B><dd>
+Selects the DB_REVSPLITOFF flag to suppress reverse splitting of pages
+on deletion.
+
+<dt><B>[-snapshot]</B><dd>
+Selects the DB_SNAPSHOT flag to support database snapshots.
+
+<dt><B>[-source <I>file</I>]</B><dd>
+Sets the backing source file name to <I>file</I> using the
+<A HREF="../../docs/api_c/db_set_re_source.html">DB->set_re_source</A>
+method.
+
+<dt><B>[-truncate]</B><dd>
+Selects the DB_TRUNCATE flag to truncate the database.
+
+<dt><B>[--]</B><dd>
+Terminate the list of options and use remaining arguments as the file
+or subdb names (thus allowing the use of filenames beginning with a dash
+'-').
+
+<dt><B>[<I>filename </I>[<I>subdbname</I>]]</B><dd>
+The names of the database and sub-database.
+</dl>
+
+<HR WIDTH="100%">
+<B>> berkdb upgrade [-dupsort] [-env <I>env</I>] [--] [<I>filename</I>]</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_upgrade.html">DB->upgrade</A>
+function.&nbsp; If the command is given the <B>-env</B> option, then we
+will accordingly upgrade the database filename within the context of that
+environment. The <B>-dupsort</B> option selects the DB_DUPSORT flag for
+upgrading. The use of --<B> </B>terminates the list of options, thus allowing
+filenames beginning with a dash.
+<P>
+
+<HR WIDTH="100%">
+<B>> berkdb verify [-env <I>env</I>] [--] [<I>filename</I>]</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_verify.html">DB->verify</A>
+function.&nbsp; If the command is given the <B>-env</B> option, then we
+will accordingly verify the database filename within the context of that
+environment.&nbsp; The use of --<B> </B>terminates the list of options,
+thus allowing filenames beginning with a dash.
+<P>
+
+<HR WIDTH="100%"><B>> <I>db</I> del</B>
+<P>There are no undocumented options.
+
+<HR WIDTH="100%">
+<B>> <I>db</I> join [-nosort] <I>db0.c0 db1.c0</I> ...</B>
+<P>This command will invoke the <A HREF="../../docs/api_c/db_join.html">db_join</A>
+function.&nbsp; After it successfully joins a database, we bind it to a
+new Tcl command of the form <B><I>dbN.cX, </I></B>where X is an integer
+starting at 0 (e.g. <B>db2.c0, db3.c0, </B>etc).&nbsp; We use the <I>Tcl_CreateObjCommand()&nbsp;</I>
+to create the top level database function.&nbsp; It is through this cursor
+handle that the user can access the joined data items.
+<P>The options are:
+<UL>
+<LI>
+<B>-nosort -</B> This flag causes DB not to sort the cursors based on the
+number of data items they reference.&nbsp; It results in the DB_JOIN_NOSORT
+flag being set.</LI>
+</UL>
+
+<P>
+This command will invoke the
+<A HREF="../../docs/api_c/db_create.html">db_create</A> function. If
+the command is given the <B>-env</B> option, then we will accordingly
+create the database within the context of that environment. After it
+successfully gets a handle to a database, we bind it to a new Tcl
+command of the form <B><I>dbX, </I></B>where X is an integer starting
+at 0 (e.g. <B>db0, db1, </B>etc).
+
+<p>
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level
+database function. It is through this handle that the user can access
+all of the commands described in the <A HREF="#Database Commands">
+Database Commands</A> section. Internally, the database handle
+is sent as the <I>ClientData</I> portion of the new command set so that
+all future database calls access the appropriate handle.
+
+<P>
+After parsing all of the optional arguments affecting the setup of the
+database and making the appropriate calls to DB to manipulate those
+values, we open the database for the user. It translates to the
+<A HREF="../../docs/api_c/db_open.html">DB->open</A> method call after
+parsing all of the various optional arguments. We automatically set the
+DB_THREAD flag. The arguments are:
+
+<HR WIDTH="100%">
+<B>> <I>db</I> get_join [-nosort] {db key} {db key} ...</B>
+<P>This command performs a join operation on the keys specified and returns
+a list of the joined {key data} pairs.
+<P>The options are:
+<UL>
+<LI>
+<B>-nosort</B> This flag causes DB not to sort the cursors based on the
+number of data items they reference.&nbsp; It results in the DB_JOIN_NOSORT
+flag being set.</LI>
+</UL>
+
+<HR WIDTH="100%">
+<B>> <I>db</I> keyrange [-txn <I>id</I>] key</B>
+<P>This command returns the range for the given <B>key</B>.&nbsp; It returns
+a list of 3 double elements of the form {<B><I>less equal greater</I></B>}
+where <B><I>less</I></B> is the percentage of keys less than the given
+key, <B><I>equal</I></B> is the percentage equal to the given key and <B><I>greater</I></B>
+is the percentage greater than the given key.&nbsp; If the -txn option
+is specified it performs this operation under transaction protection.
+
+<HR WIDTH="100%"><B>> <I>db</I> put</B>
+<P>The <B>undocumented</B> options are:
+<dl>
+<dt><B>-nodupdata</B><dd>
+This flag causes DB not to insert the key/data pair if it already
+exists, that is, both the key and data items are already in the
+database. The -nodupdata flag may only be specified if the underlying
+database has been configured to support sorted duplicates.
+</dl>
+
+<HR WIDTH="100%"><B>> <I>dbc</I> put</B>
+<P>The <B>undocumented</B> options are:
+<dl>
+<dt><B>-nodupdata</B><dd>
+This flag causes DB not to insert the key/data pair if it already
+exists, that is, both the key and data items are already in the
+database. The -nodupdata flag may only be specified if the underlying
+database has been configured to support sorted duplicates.
+</dl>
+
+</BODY>
+</HTML>
diff --git a/storage/bdb/tcl/docs/env.html b/storage/bdb/tcl/docs/env.html
new file mode 100644
index 00000000000..79c349841ac
--- /dev/null
+++ b/storage/bdb/tcl/docs/env.html
@@ -0,0 +1,354 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.75 [en] (X11; U; Linux 2.2.16-22 i686) [Netscape]">
+</head>
+<body>
+
+<h2>
+Environment Commands</h2>
+Environments provide a structure for creating a consistent environment
+for processes using one or more of the features of Berkeley DB.&nbsp; Unlike
+some of the database commands, the environment commands are very low level.
+<br>
+<hr WIDTH="100%">
+<p>The user may create and open a new DB environment&nbsp; by invoking:
+<p><b>> berkdb env</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-cdb] [-cdb_alldb] [-lock] [-log] [-txn [nosync]]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-create] [-home<i> directory</i>] [-mode <i>mode</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-data_dir <i>directory</i>] [-log_dir <i>directory</i>]
+[-tmp_dir <i>directory</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-nommap] [-private] [-recover] [-recover_fatal]
+[-system_mem] [-errfile <i>filename</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-use_environ] [-use_environ_root] [-verbose
+{<i>which </i>on|off}]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-region_init]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-cachesize {<i>gbytes bytes ncaches</i>}]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-mmapsize<i> size</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-log_max <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-log_buffer <i>size</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_conflict {<i>nmodes </i>{<i>matrix</i>}}]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_detect default|oldest|random|youngest]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max_locks <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max_lockers <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max_objects <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_timeout <i>timeout</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-overwrite]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-txn_max <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-txn_timeout <i>timeout</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-client_timeout <i>seconds</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-server_timeout <i>seconds</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-server <i>hostname</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-rep_master] [-rep_client]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-rep_transport <i>{ machineid sendproc }</i>]</b>
+<br>&nbsp;
+<p>This command opens up an environment.&nbsp;&nbsp; We automatically set
+the DB_THREAD and the DB_INIT_MPOOL flags.&nbsp; The arguments are:
+<ul>
+<li>
+<b>-cdb</b> selects the DB_INIT_CDB flag for Concurrent Data Store</li>
+
+<li>
+<b>-cdb_alldb</b> selects the DB_CDB_ALLDB flag for Concurrent Data Store</li>
+
+<li>
+<b>-lock</b> selects the DB_INIT_LOCK flag for the locking subsystem</li>
+
+<li>
+<b>-log</b> selects the DB_INIT_LOG flag for the logging subsystem</li>
+
+<li>
+<b>-txn</b> selects the DB_INIT_TXN, DB_INIT_LOCK and DB_INIT_LOG flags
+for the transaction subsystem.&nbsp; If <b>nosync</b> is specified, then
+it will also select DB_TXN_NOSYNC to indicate no flushes of log on commits</li>
+
+<li>
+<b>-create </b>selects the DB_CREATE flag to create underlying files</li>
+
+<li>
+<b>-home <i>directory </i></b>selects the home directory of the environment</li>
+
+<li>
+<b>-data_dir <i>directory </i></b>selects the data file directory of the
+environment by calling <a href="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</a>.</li>
+
+<li>
+<b>-log_dir <i>directory </i></b>selects the log file directory of the
+environment&nbsp; by calling <a href="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</a>.</li>
+
+<li>
+<b>-tmp_dir <i>directory </i></b>selects the temporary file directory of
+the environment&nbsp; by calling <a href="../../docs/api_c/env_set_tmp_dir.html">DBENV->set_tmp_dir</a>.</li>
+
+<li>
+<b>-mode <i>mode </i></b>sets the permissions of created files to <b><i>mode</i></b></li>
+
+<li>
+<b>-nommap</b> selects the DB_NOMMAP flag to disallow using mmap'ed files</li>
+
+<li>
+<b>-private</b> selects the DB_PRIVATE flag for a private environment</li>
+
+<li>
+<b>-recover</b> selects the DB_RECOVER flag for recovery</li>
+
+<li>
+<b>-recover_fatal</b> selects the DB_RECOVER_FATAL flag for catastrophic
+recovery</li>
+
+<li>
+<b>-system_mem</b> selects the DB_SYSTEM_MEM flag to use system memory</li>
+
+<li>
+<b>-errfile </b>specifies the error file to use for this environment to
+<b><i>filename</i></b>
+by calling <a href="../../docs/api_c/env_set_errfile.html">DBENV->set_errfile</a><b><i>.
+</i></b>If
+the file already exists then we will append to the end of the file</li>
+
+<li>
+<b>-use_environ</b> selects the DB_USE_ENVIRON flag to affect file naming</li>
+
+<li>
+<b>-use_environ_root</b> selects the DB_USE_ENVIRON_ROOT flag to have the
+root environment affect file naming</li>
+
+<li>
+<b>-verbose</b> produces verbose error output for the given which subsystem,
+using the <a href="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</a>
+method.&nbsp;&nbsp; See the description of <a href="#> <env> verbose which on|off">verbose</a>
+below for valid <b><i>which </i></b>values</li>
+
+<li>
+<b>-region_init </b>specifies that the user wants to page fault the region
+in on startup using the <a href="../../docs/api_c/env_set_region_init.html">DBENV->set_region_init</a>
+method call</li>
+
+<li>
+<b>-cachesize </b>sets the size of the database cache to the size&nbsp;
+specified by <b><i>gbytes </i></b>and <b><i>bytes, </i></b>broken up into
+<b><i>ncaches</i></b>
+number of caches using the <a href="../../docs/api_c/env_set_cachesize.html">DBENV->set_cachesize</a>
+method</li>
+
+<li>
+<b>-mmapsize </b>sets the size of the database page to <b><i>size </i></b>using
+the <a href="../../docs/api_c/env_set_mp_mmapsize.html">DBENV->set_mp_mmapsize</a>
+method</li>
+
+<li>
+<b>-log_max </b>sets the maximum size of the log file to <b><i>max</i></b>
+using the <a href="../../docs/api_c/env_set_lg_max.html">DBENV->set_lg_max</a>
+call</li>
+
+<li>
+<b>-log_regionmax </b>sets the size of the log region to <b><i>max</i></b>
+using the <a href="../../docs/api_c/env_set_lg_regionmax.html">DBENV->set_lg_regionmax</a>
+call</li>
+
+<li>
+<b>-log_buffer </b>sets the size of the log file in bytes to <b><i>size</i></b>
+using the <a href="../../docs/api_c/env_set_lg_bsize.html">DBENV->set_lg_bsize</a>
+call</li>
+
+<li>
+<b>-lock_conflict </b>sets the number of lock modes to <b><i>nmodes</i></b>
+and sets the locking policy for those modes to the <b><i>conflict_matrix</i></b>
+given using the <a href="../../docs/api_c/env_set_lk_conflict.html">DBENV->set_lk_conflict</a>
+method call</li>
+
+<li>
+<b>-lock_detect </b>sets the deadlock detection policy to the given policy
+using the <a href="../../docs/api_c/env_set_lk_detect.html">DBENV->set_lk_detect</a>
+method call.&nbsp; The policy choices are:</li>
+
+<ul>
+<li>
+<b>default</b> selects the DB_LOCK_DEFAULT policy for default detection</li>
+
+<li>
+<b>oldest </b>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</li>
+
+<li>
+<b>random</b> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</li>
+
+<li>
+<b>youngest</b> selects DB_LOCK_YOUNGEST to abort the youngest locker on
+a deadlock</li>
+</ul>
+
+<li>
+<b>-lock_max </b>sets the maximum size of the lock table to <b><i>max </i></b>using
+the <a href="../../docs/api_c/env_set_lk_max.html">DBENV->set_lk_max</a>
+method call</li>
+
+<li>
+<b>-lock_max_locks </b>sets the maximum number of locks to <b><i>max </i></b>using
+the <a href="../../docs/api_c/env_set_lk_max_locks.html">DBENV->set_lk_max_locks</a>
+method call</li>
+
+<li>
+<b>-lock_max_lockers </b>sets the maximum number of locking entities to
+<b><i>max
+</i></b>using the <a href="../../docs/api_c/env_set_lk_max_lockers.html">DBENV->set_lk_max_lockers</a>
+method call</li>
+
+<li>
+<b>-lock_max_objects </b>sets the maximum number of simultaneously locked
+objects to <b><i>max </i></b>using the <a href="../../docs/api_c/env_set_lk_max_objects.html">DBENV->set_lk_max_objects</a>
+method call</li>
+
+<li>
+<b>-lock_timeout </b>sets the timeout for locks in the environment</li>
+
+<li>
+<b>-overwrite </b>sets DB_OVERWRITE flag</li>
+
+<li>
+<b>-txn_max </b>sets the maximum size of the transaction table to <b><i>max</i></b>
+using the <a href="../../docs/api_c/env_set_txn_max.html">DBENV->set_txn_max</a>
+method call</li>
+
+<li>
+<b>-txn_timeout </b>sets the timeout for transactions in the environment</li>
+
+<li>
+<b>-client_timeout</b> sets the timeout value for the client waiting for
+a reply from the server for RPC operations to <b><i>seconds</i></b>.</li>
+
+<li>
+<b>-server_timeout</b> sets the timeout value for the server to determine
+an idle client is gone to <b><i>seconds</i></b>.</li>
+
+<li>
+<b>-server </b>specifies the <b><i>hostname</i></b> of the server
+to connect to in the <a href="../../docs/api_c/env_set_server.html">DBENV->set_server</a>
+call.</li>
+
+<li>
+<b>-rep_client </b>sets the newly created environment to be a
+replication client, using the <a href="../../docs/api_c/rep_client.html">
+DBENV->rep_client</a> call.</li>
+
+<li>
+<b>-rep_master </b>sets the newly created environment to be a
+replication master, using the <a href="../../docs/api_c/rep_master.html">
+DBENV->rep_master</a> call.</li>
+
+<li>
+<b>-rep_transport </b>specifies the replication transport function,
+using the
+<a href="../../docs/api_c/rep_transport.html">DBENV->set_rep_transport</a>
+call. This site's machine ID is set to <b><i>machineid</i></b> and
+the send function, a Tcl proc, is set to <b><i>sendproc</i></b>.</li>
+
+</ul>
+
+This command will invoke the <a href="../../docs/api_c/env_create.html">db_env_create</a>
+function.&nbsp; After it successfully gets a handle to an environment,
+we bind it to a new Tcl command of the form <b><i>envX</i></b>, where X
+is an integer starting at&nbsp; 0 (e.g. <b>env0, env1, </b>etc).&nbsp;
+We use the <i>Tcl_CreateObjCommand()</i> to create the top level environment
+command function.&nbsp; It is through this handle that the user can access
+all the commands described in the <a href="#Environment Commands">Environment
+Commands</a> section.&nbsp; Internally, the handle we get back from DB
+will be stored as the <i>ClientData</i> portion of the new command set
+so that all future environment calls will have that handle readily available.&nbsp;
+Then we call the <a href="../../docs/api_c/env_open.html">DBENV->open</a>
+method call and possibly some number of setup calls as described above.
+<p>
+<hr WIDTH="100%">
+<br><a NAME="> <env> verbose which on|off"></a><b>> &lt;env> verbose <i>which</i>
+on|off</b>
+<p>This command controls the use of debugging output for the environment.&nbsp;
+This command directly translates to a call to the <a href="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</a>
+method call.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.&nbsp; The user specifies
+<b><i>which</i></b>
+subsystem to control, and indicates whether debug messages should be turned
+<b>on</b>
+or <b>off</b> for that subsystem.&nbsp; The value of <b><i>which</i></b>
+must be one of the following:
+<ul>
+<li>
+<b>chkpt</b> - Chooses the checkpointing code by using the DB_VERB_CHKPOINT
+value</li>
+
+<li>
+<b>deadlock </b>- Chooses the deadlocking code by using the DB_VERB_DEADLOCK
+value</li>
+
+<li>
+<b>recovery </b>- Chooses the recovery code by using the DB_VERB_RECOVERY
+value</li>
+
+<li>
+<b>wait </b>- Chooses the waitsfor code by using the DB_VERB_WAITSFOR value</li>
+</ul>
+
+<hr WIDTH="100%">
+<p><a NAME="> <env> close"></a><b>> &lt;env> close</b>
+<p>This command closes an environment and deletes the handle.&nbsp; This
+command directly translates to a call to the <a href="../../docs/api_c/env_close.html">DBENV->close</a>
+method call.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<p>Additionally, since the handle is no longer valid, we will call <i>Tcl_DeleteCommand()
+</i>so
+that further uses of the handle will be dealt with properly by Tcl itself.
+<p>Also, the close command will automatically abort any <a href="txn.html">transactions</a>
+and close any <a href="mpool.html">mpool</a> memory files.&nbsp; As such
+we must maintain a list of open transaction and mpool handles so that we
+can call <i>Tcl_DeleteCommand</i> on those as well.
+<p>
+<hr WIDTH="100%">
+
+<b>> berkdb envremove<br>
+[-data_dir <i>directory</i>]<br>
+[-force]<br>
+[-home <i>directory</i>]<br>
+[-log_dir <i>directory</i>]<br>
+[-overwrite]<br>
+[-tmp_dir <i>directory</i>]<br>
+[-use_environ]<br>
+[-use_environ_root]</b>
+
+<p>This command removes the environment if it is not in use and deletes
+the handle.&nbsp; This command directly translates to a call to the <a href="../../docs/api_c/env_remove.html">DBENV->remove</a>
+method call.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.&nbsp; The arguments are:
+<ul>
+<li>
+<b>-force</b> selects the DB_FORCE flag to remove even if other processes
+have the environment open</li>
+
+<li>
+<b>-home <i>directory</i> </b>specifies the home directory of the environment</li>
+
+<li>
+<b>-data_dir <i>directory </i></b>selects the data file directory of the
+environment by calling <a href="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</a>.</li>
+
+<li>
+<b>-log_dir <i>directory </i></b>selects the log file directory of the
+environment&nbsp; by calling <a href="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</a>.</li>
+
+<li>
+<b>-overwrite </b>sets DB_OVERWRITE flag</li>
+
+<li>
+<b>-tmp_dir <i>directory </i></b>selects the temporary file directory of
+the environment&nbsp; by calling <a href="../../docs/api_c/env_set_tmp_dir.html">DBENV->set_tmp_dir</a>.</li>
+
+<li>
+<b>-use_environ </b>selects the DB_USE_ENVIRON flag to affect file naming</li>
+
+<li>
+<b>-use_environ_root</b> selects the DB_USE_ENVIRON_ROOT flag to affect
+file naming</li>
+</ul>
+
+</body>
+</html>
diff --git a/storage/bdb/tcl/docs/historic.html b/storage/bdb/tcl/docs/historic.html
new file mode 100644
index 00000000000..85f474fbc0f
--- /dev/null
+++ b/storage/bdb/tcl/docs/historic.html
@@ -0,0 +1,169 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Compatibility Commands"></A>Compatibility Commands</H2>
+The compatibility commands for old Dbm and Ndbm are described in the <A HREF="../../docs/api_c/dbm.html">dbm</A>
+manpage.
+<P><B>> berkdb dbminit <I>filename</I></B>
+<P>This command will invoke the dbminit function.&nbsp;&nbsp; <B><I>Filename</I></B>
+is used as the name of the database.
+<P>
+<HR WIDTH="100%"><B>> berkdb dbmclose</B>
+<P>This command will invoke the dbmclose function.
+<P>
+<HR WIDTH="100%"><B>> berkdb fetch <I>key</I></B>
+<P>This command will invoke the fetch function.&nbsp;&nbsp; It will return
+the data associated with the given <B><I>key </I></B>or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb store <I>key data</I></B>
+<P>This command will invoke the store function.&nbsp;&nbsp; It will store
+the <B><I>key/data</I></B> pair.&nbsp; It will return a 0 on success or
+throw a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb delete <I>key</I></B>
+<P>This command will invoke the delete function.&nbsp;&nbsp; It will delete
+the <B><I>key</I></B> from the database.&nbsp; It will return a 0 on success
+or throw a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb firstkey</B>
+<P>This command will invoke the firstkey function.&nbsp;&nbsp; It will
+return the first key in the database or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb nextkey <I>key</I></B>
+<P>This command will invoke the nextkey function.&nbsp;&nbsp; It will return
+the next key after the given <B><I>key</I></B> or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb hcreate <I>nelem</I></B>
+<P>This command will invoke the hcreate function with <B><I>nelem</I></B>
+elements.&nbsp; It will return a 0 on success or a Tcl error.
+<P>
+<HR WIDTH="100%"><B>> berkdb hsearch <I>key data action</I></B>
+<P>This command will invoke the hsearch function with <B><I>key</I></B>
+and <B><I>data</I></B>.&nbsp; The <B><I>action</I></B> must be either <B>find</B>
+or <B>enter</B>.&nbsp; If it is <B>find</B>, it will return the resultant
+data.&nbsp; If it is <B>enter</B>, it will return a 0 on success or a Tcl
+error.
+<P>
+<HR WIDTH="100%"><B>> berkdb hdestroy</B>
+<P>This command will invoke the hdestroy function.&nbsp; It will return
+a 0.
+<HR WIDTH="100%"><B>> berkdb ndbm_open [-create] [-rdonly] [-truncate]
+[-mode
+<I>mode</I>] [--] <I>filename</I></B>
+<P>This command will invoke the dbm_open function.&nbsp;&nbsp;&nbsp; After
+it successfully gets a handle to a database, we bind it to a new Tcl command
+of the form <B><I>ndbmX, </I></B>where X is an integer starting at 0 (e.g.
+<B>ndbm0,
+ndbm1, </B>etc).&nbsp; We use the <I>Tcl_CreateObjCommand()&nbsp;</I> to
+create the top level database function.&nbsp; It is through this handle
+that the user can access all of the commands described below.&nbsp; Internally,
+the database handle is sent as the <I>ClientData</I> portion of the new
+command set so that all future database calls access the appropriate handle.
+<P>The arguments are:
+<UL>
+<LI>
+<B>-- </B>- Terminate the list of options and use remaining arguments as
+the file or subdb names (thus allowing the use of filenames beginning with
+a dash '-')</LI>
+
+<LI>
+<B>-create</B> selects the O_CREAT flag&nbsp; to create underlying files</LI>
+
+<LI>
+<B>-rdonly</B> selects the O_RDONLY flag for opening in read-only mode</LI>
+
+<LI>
+<B>-truncate</B> selects the O_TRUNC flag to truncate the database</LI>
+
+<LI>
+<B>-mode<I> mode</I></B> specifies the mode for created files</LI>
+
+<LI>
+<B><I>filename</I></B> indicates the name of the database</LI>
+</UL>
+
+<P><BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> close</B>
+<P>This command closes the database and renders the handle invalid.&nbsp;&nbsp;
+This command directly translates to the dbm_close function call.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<P>Additionally, since the handle is no longer valid, we will call <I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> clearerr</B>
+<P>This command clears errors in the database.&nbsp;&nbsp; This command
+directly translates to the dbm_clearerr function call.&nbsp; It returns
+either a 0 (for success),&nbsp; or it throws a Tcl error with a system
+message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> delete <I>key</I></B>
+<P>This command deletes the <B><I>key</I></B> from the database.&nbsp;&nbsp;
+This command directly translates to the dbm_delete function call.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> dirfno</B>
+<P>This command directly translates to the dbm_dirfno function call.&nbsp;
+It returns either results,&nbsp; or it throws a Tcl error with a system
+message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> error</B>
+<P>This command returns the last error.&nbsp;&nbsp; This command directly
+translates to the dbm_error function call.&nbsp; It returns an error string.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> fetch <I>key</I></B>
+<P>This command gets the given <B><I>key</I></B> from the database.&nbsp;&nbsp;
+This command directly translates to the dbm_fetch function call.&nbsp;
+It returns either the data,&nbsp; or it throws a Tcl error with a system
+message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> firstkey</B>
+<P>This command returns the first key in the database.&nbsp;&nbsp; This
+command directly translates to the dbm_firstkey function call.&nbsp; It
+returns either the key,&nbsp; or it throws a Tcl error with a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> nextkey</B>
+<P>This command returns the next key in the database.&nbsp;&nbsp; This
+command directly translates to the dbm_nextkey function call.&nbsp; It
+returns either the key,&nbsp; or it throws a Tcl error with a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> pagfno</B>
+<P>This command directly translates to the dbm_pagfno function call.&nbsp;
+It returns either results,&nbsp; or it throws a Tcl error with a system
+message.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> rdonly</B>
+<P>This command changes the database to readonly.&nbsp;&nbsp; This command
+directly translates to the dbm_rdonly function call.&nbsp; It returns either
+a 0 (for success),&nbsp; or it throws a Tcl error with a system message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;ndbm> store <I>key data </I>insert|replace</B>
+<P>This command puts the given <B><I>key</I></B> and <B><I>data</I></B>
+pair into the database.&nbsp;&nbsp; This command directly translates to
+the dbm_store function call.&nbsp; It will either <B>insert</B> or <B>replace</B>
+the data based on the action given in the third argument.&nbsp; It returns
+either a 0 (for success),&nbsp; or it throws a Tcl error with a system
+message.
+<BR>
+<HR WIDTH="100%">
+</BODY>
+</HTML>
diff --git a/storage/bdb/tcl/docs/index.html b/storage/bdb/tcl/docs/index.html
new file mode 100644
index 00000000000..845b6ca81e2
--- /dev/null
+++ b/storage/bdb/tcl/docs/index.html
@@ -0,0 +1,51 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<CENTER>
+<H1>
+Complete Tcl Interface for Berkeley DB</H1></CENTER>
+
+<UL type=disc>
+<LI>
+<A HREF="../../docs/api_tcl/tcl_index.html">General use Berkeley DB commands</A></LI>
+</UL>
+
+<UL type=disc>
+<LI>
+<A HREF="./env.html">Environment commands</A></LI>
+
+<LI>
+<A HREF="./lock.html">Locking commands</A></LI>
+
+<LI>
+<A HREF="./log.html">Logging commands</A></LI>
+
+<LI>
+<A HREF="./mpool.html">Memory Pool commands</A></LI>
+
+<LI>
+<A HREF="./rep.html">Replication commands</A></LI>
+
+<LI>
+<A HREF="./txn.html">Transaction commands</A></LI>
+</UL>
+
+<UL>
+<LI>
+<A HREF="./db.html">Access Method commands</A></LI>
+
+<LI>
+<A HREF="./test.html">Debugging and Testing</A></LI>
+
+<LI>
+<A HREF="./historic.html">Compatibility commands</A></LI>
+
+<LI>
+<A HREF="./library.html">Convenience commands</A></LI>
+</UL>
diff --git a/storage/bdb/tcl/docs/library.html b/storage/bdb/tcl/docs/library.html
new file mode 100644
index 00000000000..bfb1588c3f2
--- /dev/null
+++ b/storage/bdb/tcl/docs/library.html
@@ -0,0 +1,27 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+<HR WIDTH="100%">
+<H2>
+<A NAME="Convenience Commands"></A>Convenience Commands</H2>
+The convenience commands are provided for ease of use with the DB test
+suite.
+<P><B>> berkdb rand</B>
+<P>This command will invoke the rand function and return the random number.
+<P>
+<HR WIDTH="100%"><B>> berkdb random_int <I>low high</I></B>
+<P>This command will invoke the rand function and return a number between
+<B><I>low</I></B>
+and <B><I>high</I></B>.
+<P>
+<HR WIDTH="100%">
+<P><B>> berkdb srand <I>seed</I></B>
+<P>This command will invoke the srand function with the given <B><I>seed</I></B>
+and return 0.
+<P>
+<HR WIDTH="100%">
diff --git a/storage/bdb/tcl/docs/lock.html b/storage/bdb/tcl/docs/lock.html
new file mode 100644
index 00000000000..d65142b798b
--- /dev/null
+++ b/storage/bdb/tcl/docs/lock.html
@@ -0,0 +1,207 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.75 [en] (X11; U; Linux 2.2.16-22 i686) [Netscape]">
+</head>
+<body>
+
+<h2>
+<a NAME="Locking Commands"></a>Locking Commands</h2>
+Most locking commands work with the environment handle.&nbsp; However,
+when a user gets a lock we create a new lock handle that they then use
+with in a similar manner to all the other handles to release the lock.&nbsp;
+We present the general locking functions first, and then those that manipulate
+locks.
+<p><b>> &lt;env> lock_detect [default|oldest|youngest|random]</b>
+<p>This command runs the deadlock detector.&nbsp; It directly translates
+to the <a href="../../docs/api_c/lock_detect.html">lock_detect</a> DB call.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The first argument sets the policy
+for deadlock as follows:
+<ul>
+<li>
+<b>default</b> selects the DB_LOCK_DEFAULT policy for default detection
+(default if not specified)</li>
+
+<li>
+<b>oldest </b>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</li>
+
+<li>
+<b>random</b> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</li>
+
+<li>
+<b>youngest</b> selects DB_LOCK_YOUNGEST to abort the youngest locker on
+a deadlock</li>
+</ul>
+
+<hr WIDTH="100%">
+<br><b>> &lt;env> lock_stat</b>
+<p>This command returns a list of name/value pairs where the names correspond
+to the C-structure field names of DB_LOCK_STAT and the values are the data
+returned.&nbsp; This command is a direct translation of the <a href="../../docs/api_c/lock_stat.html">lock_stat</a>
+DB call.
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_id"></a><b>> &lt;env> lock_id</b>
+<p>This command returns a unique locker ID value.&nbsp; It directly translates
+to the <a href="../../docs/api_c/lock_id.html">lock_id</a> DB call.
+<br>
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_id"></a><b>> &lt;env> lock_id_free&nbsp; </b><i>locker</i>
+<p>This command frees the locker allocated by the lock_id call. It directly
+translates to the&nbsp; <a href="../../docs/api_c/lock_id.html">lock_id_free
+</a>DB
+call.
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_id"></a><b>> &lt;env> lock_id_set&nbsp; </b><i>current
+max</i>
+<p>This&nbsp; is a diagnostic command to set the locker id that will get
+allocated next and the maximum id that
+<br>will trigger the id reclaim algorithm.
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_get"></a><b>> &lt;env> lock_get [-nowait]<i>lockmode
+locker obj</i></b>
+<p>This command gets a lock. It will invoke the <a href="../../docs/api_c/lock_get.html">lock_get</a>
+function.&nbsp; After it successfully gets a handle to a lock, we bind
+it to a new Tcl command of the form <b><i>$env.lockX</i></b>, where X is
+an integer starting at&nbsp; 0 (e.g. <b>$env.lock0, $env.lock1, </b>etc).&nbsp;
+We use the <i>Tcl_CreateObjCommand()</i> to create the top level locking
+command function.&nbsp; It is through this handle that the user can release
+the lock.&nbsp; Internally, the handle we get back from DB will be stored
+as the <i>ClientData</i> portion of the new command set so that future
+locking calls will have that handle readily available.
+<p>The arguments are:
+<ul>
+<li>
+<b><i>locker</i></b> specifies the locker ID returned from the <a href="#> <env> lock_id">lock_id</a>
+command</li>
+
+<li>
+<b><i>obj</i></b> specifies an object to lock</li>
+
+<li>
+the <b><i>lock mode</i></b> is specified as one of the following:</li>
+
+<ul>
+<li>
+<b>ng </b>specifies DB_LOCK_NG for not granted (always 0)</li>
+
+<li>
+<b>read</b> specifies DB_LOCK_READ for a read (shared) lock</li>
+
+<li>
+<b>write</b> specifies DB_LOCK_WRITE for an exclusive write lock</li>
+
+<li>
+<b>iwrite </b>specifies DB_LOCK_IWRITE for intent for exclusive write lock</li>
+
+<li>
+<b>iread </b>specifies DB_LOCK_IREAD for intent for shared read lock</li>
+
+<li>
+<b>iwr </b>specifies DB_LOCK_IWR for intent for read and write lock</li>
+</ul>
+
+<li>
+<b>-nowait</b> selects the DB_LOCK_NOWAIT to indicate that we do not want
+to wait on the lock</li>
+</ul>
+
+<hr WIDTH="100%">
+<br><b>> &lt;lock> put</b>
+<p>This command releases the lock referenced by the command.&nbsp; It is
+a direct translation of the <a href="../../docs/api_c/lock_put.html">lock_put</a>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.&nbsp; Additionally, since
+the handle is no longer valid, we will call
+<i>Tcl_DeleteCommand()
+</i>so
+that further uses of the handle will be dealt with properly by Tcl itself.
+<br>
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_vec"></a><b>> &lt;env> lock_vec [-nowait] <i>locker
+</i>{get|put|put_all|put_obj
+[<i>obj</i>] [<i>lockmode</i>] [<i>lock</i>]} ...</b>
+<p>This command performs a series of lock calls.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/lock_vec.html">lock_vec</a> function.&nbsp;
+This command will return a list of the return values from each operation
+specified in the argument list.&nbsp; For the 'put' operations the entry
+in the return value list is either a 0 (for success) or an error.&nbsp;
+For the 'get' operation, the entry is the lock widget handle, <b>$env.lockN</b>
+(as described above in <a href="#> <env> lock_get">&lt;env> lock_get</a>)
+or an error.&nbsp; If an error occurs, the return list will contain the
+return values for all the successful operations up the erroneous one and
+the error code for that operation.&nbsp; Subsequent operations will be
+ignored.
+<p>As for the other operations, if we are doing a 'get' we will create
+the commands and if we are doing a 'put' we will have to delete the commands.&nbsp;
+Additionally, we will have to do this after the call to the DB lock_vec
+and iterate over the results, creating and/or deleting Tcl commands.&nbsp;
+It is possible that we may return a lock widget from a get operation that
+is considered invalid, if, for instance, there was a <b>put_all</b> operation
+performed later in the vector of operations.&nbsp; The arguments are:
+<ul>
+<li>
+<b><i>locker</i></b> specifies the locker ID returned from the <a href="#> <env> lock_id">lock_id</a>
+command</li>
+
+<li>
+<b>-nowait</b> selects the DB_LOCK_NOWAIT to indicate that we do not want
+to wait on the lock</li>
+
+<li>
+the lock vectors are tuples consisting of {an operation, lock object, lock
+mode, lock handle} where what is required is based on the operation desired:</li>
+
+<ul>
+<li>
+<b>get</b> specifies DB_LOCK_GET to get a lock.&nbsp; Requires a tuple <b>{get
+<i>obj mode</i>}
+</b>where
+<b><i>mode</i></b>
+is:</li>
+
+<ul>
+<li>
+<b>ng </b>specifies DB_LOCK_NG for not granted (always 0)</li>
+
+<li>
+<b>read</b> specifies DB_LOCK_READ for a read (shared) lock</li>
+
+<li>
+<b>write</b> specifies DB_LOCK_WRITE for an exclusive write lock</li>
+
+<li>
+<b>iwrite </b>specifies DB_LOCK_IWRITE for intent for exclusive write lock</li>
+
+<li>
+<b>iread </b>specifies DB_LOCK_IREAD for intent for shared read lock</li>
+
+<li>
+<b>iwr </b>specifies DB_LOCK_IWR for intent for read and write lock</li>
+</ul>
+
+<li>
+<b>put</b> specifies DB_LOCK_PUT to release a <b><i>lock</i></b>.&nbsp;
+Requires a tuple <b>{put <i>lock}</i></b></li>
+
+<li>
+<b>put_all </b>specifies DB_LOCK_PUT_ALL to release all locks held by <b><i>locker</i></b>.&nbsp;
+Requires a tuple <b>{put_all}</b></li>
+
+<li>
+<b>put_obj</b> specifies DB_LOCK_PUT_OBJ to release all locks held by <b><i>locker</i></b>
+associated with the given <b><i>obj</i></b>.&nbsp; Requires a tuple <b>{put_obj
+<i>obj}</i></b></li>
+</ul>
+</ul>
+
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_vec"></a><b>> &lt;env> lock_timeout <i>timeout</i></b>
+<p>This command sets the lock timeout for all future locks in this environment.&nbsp;
+The timeout is in microseconds.
+<br>&nbsp;
+<br>&nbsp;
+</body>
+</html>
diff --git a/storage/bdb/tcl/docs/log.html b/storage/bdb/tcl/docs/log.html
new file mode 100644
index 00000000000..49f2f0ad2e0
--- /dev/null
+++ b/storage/bdb/tcl/docs/log.html
@@ -0,0 +1,124 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Logging Commands"></A>Logging Commands</H2>
+Logging commands work from the environment handle to control the use of
+the log files.&nbsp; Log files are opened when the environment is opened
+and closed when the environment is closed.&nbsp; In all of the commands
+in the logging subsystem that take or return a log sequence number, it
+is of the form:
+<BR><B>{<I>fileid offset</I>}</B>
+<BR>where the <B><I>fileid</I></B> is an identifier of the log file, as
+returned from the <A HREF="#> <env> log_get">log_get</A> call.
+<P><B>> &lt;env> log_archive [-arch_abs] [-arch_data] [-arch_log]</B>
+<P>This command returns&nbsp; a list of log files that are no longer in
+use.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_archive.html">log_archive</A>
+function. The arguments are:
+<UL>
+<LI>
+<B>-arch_abs </B>selects DB_ARCH_ABS to return all pathnames as absolute
+pathnames</LI>
+
+<LI>
+<B>-arch_data </B>selects DB_ARCH_DATA to return a list of database files</LI>
+
+<LI>
+<B>-arch_log </B>selects DB_ARCH_LOG to return a list of log files</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_compare <I>lsn1 lsn2</I></B>
+<P>This command compares two log sequence numbers, given as <B><I>lsn1</I></B>
+and <B><I>lsn2</I></B>.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_compare.html">log_compare</A>
+function.&nbsp; It will return a -1, 0, 1 to indicate if <B><I>lsn1</I></B>
+is less than, equal to or greater than <B><I>lsn2</I></B> respectively.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_file <I>lsn</I></B>
+<P>This command returns&nbsp; the file name associated with the given <B><I>lsn</I></B>.&nbsp;
+It is a direct call to the <A HREF="../../docs/api_c/log_file.html">log_file</A>
+function.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_flush [<I>lsn</I>]</B>
+<P>This command&nbsp; flushes the log up to the specified <B><I>lsn</I></B>
+or flushes all records if none is given.&nbsp; It is a direct call to the
+<A HREF="../../docs/api_c/log_flush.html">log_flush</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<BR>
+<HR WIDTH="100%">
+<BR><A NAME="<env> log_get"></A><B>> &lt;env> log_get<I> </I>[-checkpoint]
+[-current] [-first] [-last] [-next] [-prev] [-set <I>lsn</I>]</B>
+<P>This command retrieves a record from the log according to the <B><I>lsn</I></B>
+given and returns it and the data.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_get.html">log_get</A>
+function.&nbsp; It is a way of implementing a manner of log iteration similar
+to <A HREF="../../docs/api_tcl/db_cursor.html">cursors</A>.&nbsp;&nbsp;
+The information we return is similar to database information.&nbsp; We
+return a list where the first item is the LSN (which is a list itself)
+and the second item is the data.&nbsp; So it looks like, fully expanded,
+<B>{{<I>fileid</I>
+<I>offset</I>}
+<I>data</I>}.</B>&nbsp;
+In the case where DB_NOTFOUND is returned, we return an empty list <B>{}</B>.&nbsp;
+All other errors return a Tcl error.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-checkpoint </B>selects the DB_CHECKPOINT flag to return the LSN/data
+pair of the last record written through <A HREF="#> <env> log_put">log_put</A>
+with DB_CHECKPOINT specified</LI>
+
+<LI>
+<B>-current</B> selects the DB_CURRENT flag to return the current record</LI>
+
+<LI>
+<B>-first</B> selects the DB_FIRST flag to return the first record in the
+log.</LI>
+
+<LI>
+<B>-last </B>selects the DB_LAST flag to return the last record in the
+log.</LI>
+
+<LI>
+<B>-next</B> selects the DB_NEXT flag to return the next record in the
+log.</LI>
+
+<LI>
+<B>-prev </B>selects the DB_PREV flag to return the&nbsp; previous record
+in the log.</LI>
+
+<LI>
+<B>-set</B> selects the DB_SET flag to return the record specified by the
+given <B><I>lsn</I></B></LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><A NAME="> <env> log_put"></A><B>> &lt;env> log_put<I> </I>[-checkpoint]
+[-flush] <I>record</I></B>
+<P>This command stores a <B><I>record</I></B> into the log and returns
+the LSN of the log record.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_put.html">log_put</A>
+function.&nbsp; It returns either an LSN or it throws a Tcl error with
+a system message.&nbsp;<B> </B>The arguments are:
+<UL>
+<LI>
+<B>-checkpoint </B>selects the DB_CHECKPOINT flag</LI>
+
+<LI>
+<B>-flush </B>selects the DB_FLUSH flag to flush the log to disk.</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;env> log_stat</B>
+<P>This command returns&nbsp; the statistics associated with the logging
+subsystem.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_stat.html">log_stat</A>
+function.&nbsp; It returns a list of name/value pairs of the DB_LOG_STAT
+structure.
+</BODY>
+</HTML>
diff --git a/storage/bdb/tcl/docs/mpool.html b/storage/bdb/tcl/docs/mpool.html
new file mode 100644
index 00000000000..7f2359b36e9
--- /dev/null
+++ b/storage/bdb/tcl/docs/mpool.html
@@ -0,0 +1,190 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Memory Pool Commands"></A>Memory Pool Commands</H2>
+Memory pools are used in a manner similar to the other subsystems.&nbsp;
+We create a handle to the pool and&nbsp; then use it for a variety of operations.&nbsp;
+Some of the memory pool commands use the environment instead. Those are
+presented first.
+<P><B>> &lt;env> mpool_stat</B>
+<P>This command returns&nbsp; the statistics associated with the memory
+pool subsystem.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/memp_stat.html">memp_stat</A>
+function.&nbsp; It returns a list of name/value pairs of the DB_MPOOL_STAT
+structure.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> mpool_sync <I>lsn</I></B>
+<P>This command flushes the memory pool for all pages with a log sequence
+number less than <B><I>lsn</I></B>.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/memp_sync.html">memp_sync&nbsp;</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<BR>
+<HR WIDTH="100%">
+<BR><B>> &lt;env> mpool_trickle <I>percent</I></B>
+<P>This command tells DB to ensure that at least <B><I>percent</I></B>
+percent of the pages are clean by writing out enough to dirty pages to
+achieve that percentage.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/memp_trickle.html">memp_trickle</A>
+function.&nbsp; The command will return the number of pages actually written.&nbsp;
+It returns either the number of pages on success, or it throws a Tcl error
+with a system message.
+<BR>
+<HR WIDTH="100%">
+<P><B>> &lt;env> mpool [-create] [-nommap] [-rdonly] [-mode <I>mode</I>]
+-pagesize <I>size</I> [<I>file</I>]</B>
+<P>This command creates a new memory pool.&nbsp; It invokes the <A HREF="../../docs/api_c/memp_fopen.html">memp_fopen</A>
+function.&nbsp; After it successfully gets a handle to a memory pool, we
+bind it to a new Tcl command of the form <B><I>$env.mpX</I></B>, where
+X is an integer starting at&nbsp; 0 (e.g. <B>$env.mp0, $env.mp1, </B>etc).&nbsp;
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level memory
+pool functions.&nbsp; It is through this handle that the user can manipulate
+the pool.&nbsp; Internally, the handle we get back from DB will be stored
+as the <I>ClientData</I> portion of the new command set so that future
+memory pool calls will have that handle readily available.&nbsp; Additionally,
+we need to maintain this handle in relation to the environment so that
+if the user calls <A HREF="../../docs/api_tcl/env_close.html">&lt;env> close</A> without closing
+the memory pool we can properly clean up.&nbsp; The arguments are:
+<UL>
+<LI>
+<B><I>file</I></B> is the name of the file to open</LI>
+
+<LI>
+<B>-create </B>selects the DB_CREATE flag to create underlying file</LI>
+
+<LI>
+<B>-mode <I>mode </I></B>sets the permissions of created file to <B><I>mode</I></B></LI>
+
+<LI>
+<B>-nommap</B> selects the DB_NOMMAP flag to disallow using mmap'ed files</LI>
+
+<LI>
+<B>-pagesize</B> sets the underlying file page size to <B><I>size</I></B></LI>
+
+<LI>
+<B>-rdonly </B>selects the DB_RDONLY flag for read only access</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;mp> close</B>
+<P>This command closes the memory pool.&nbsp; It is a direct call to the
+<A HREF="../../docs/api_c/memp_fclose.html">memp_close</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<P>Additionally, since the handle is no longer valid, we will call
+<I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+We must also remove the reference to this handle from the environment.&nbsp;
+We will go through the list of pinned pages that were acquired by the <A HREF="#> <mp> get">get</A>
+command and
+<A HREF="#> <pg> put">put</A> them back.
+<HR WIDTH="100%">
+<BR><B>> &lt;mp> fsync</B>
+<P>This command flushes all of the file's dirty pages to disk.&nbsp; It
+is a direct call to the <A HREF="../../docs/api_c/memp_fsync.html">memp_fsync</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message.
+<HR WIDTH="100%">
+<BR><A NAME="> <mp> get"></A><B>> &lt;mp> get [-create] [-last] [-new]
+[<I>pgno</I>]</B>
+<P>This command gets the&nbsp; <B><I>pgno </I></B>page from the memory
+pool.&nbsp; It invokes the <A HREF="../../docs/api_c/memp_fget.html">memp_fget</A>
+function and possibly the <A HREF="../../docs/api_c/memp_fset.html">memp_fset</A>
+function if any options are chosen to set the page characteristics.&nbsp;
+After it successfully gets a handle to a page,&nbsp; we bind it to and
+return a new Tcl command of the form <B><I>$env.mpN.pX</I></B>, where X
+is an integer starting at&nbsp; 0 (e.g. <B>$env.mp0.p0, $env.mp1.p0, </B>etc).&nbsp;
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level page functions.&nbsp;
+It is through this handle that the user can manipulate the page.&nbsp;
+Internally, the handle we get back from DB will be stored as the <I>ClientData</I>
+portion of the new command set.&nbsp; We need to store this handle in&nbsp;
+relation to the memory pool handle so that if the memory pool is closed,
+we will <A HREF="#> <pg> put">put</A> back the pages (setting the discard
+flag) and delete that set of commands.
+<P>The arguments are:
+<UL>
+<LI>
+<B>-create </B>selects the DB_MPOOL_CREATE flag&nbsp; to create the page
+if it does not exist.</LI>
+
+<LI>
+<B>-last</B> selects the DB_MPOOL_LAST flag to return the last page in
+the file</LI>
+
+<LI>
+<B>-new</B> selects the DB_MPOOL_NEW flag to create a new page</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;pg> pgnum</B>
+<P>This command returns the page number associated with this memory pool
+page.&nbsp; Primarily it will be used after an <A HREF="#> <mp> get">&lt;mp>
+get</A> call.
+<BR>
+<HR WIDTH="100%"><B>> &lt;pg> pgsize</B>
+<P>This command returns the page size associated with this memory pool
+page.&nbsp; Primarily it will be used after an <A HREF="#> <mp> get">&lt;mp>
+get</A> call.
+<BR>
+<HR WIDTH="100%"><B>> &lt;pg> set [-clean] [-dirty] [-discard]</B>
+<P>This command sets the characteristics of the page.&nbsp; It is a direct
+call to the <A HREF="../../docs/api_c/memp_fset.html">memp_fset</A> function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<UL>
+<LI>
+<B>-clean</B> selects the DB_MPOOL_CLEAN flag to indicate this is a clean
+page</LI>
+
+<LI>
+<B>-dirty</B> selects the DB_MPOOL_DIRTY flag to indicate this page should
+be flushed before eviction</LI>
+
+<LI>
+<B>-discard</B> selects the DB_MPOOL_DISCARD flag to indicate this page
+is unimportant</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><A NAME="> <pg> put"></A><B>> &lt;pg> put [-clean] [-dirty] [-discard]</B>
+<P>This command will put back the page to the memory pool.&nbsp; It is
+a direct call to the <A HREF="../../docs/api_c/memp_fput.html">memp_fput</A>
+function.&nbsp; It returns either a 0 (for success), a DB error message
+or it throws a Tcl error with a system message. Additionally, since the
+handle is no longer valid, we will call
+<I>Tcl_DeleteCommand()
+</I>so that
+further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+We must also remove the reference to this handle from the memory pool.
+<P>The arguments are:
+<UL>
+<LI>
+<B>-clean</B> selects the DB_MPOOL_CLEAN flag to indicate this is a clean
+page</LI>
+
+<LI>
+<B>-dirty</B> selects the DB_MPOOL_DIRTY flag to indicate this page should
+be flushed before eviction</LI>
+
+<LI>
+<B>-discard</B> selects the DB_MPOOL_DISCARD flag to indicate this page
+is unimportant</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;pg> init <I>val|string</I></B>
+<P>This command initializes the page to the <B><I>val</I></B> given or
+places the <B><I>string</I></B> given at the beginning of the page.&nbsp;
+It returns a 0 for success or it throws a Tcl error with an error message.
+<P>
+<HR WIDTH="100%">
+<BR><B>> &lt;pg> is_setto <I>val|string</I></B>
+<P>This command verifies the page contains the <B><I>val</I></B> given
+or checks that the <B>string</B> given is at the beginning of the page.&nbsp;
+It returns a 1 if the page is correctly set to the value and a 0 otherwise.
diff --git a/storage/bdb/tcl/docs/rep.html b/storage/bdb/tcl/docs/rep.html
new file mode 100644
index 00000000000..079fe443a63
--- /dev/null
+++ b/storage/bdb/tcl/docs/rep.html
@@ -0,0 +1,51 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <title>Replication commands</title>
+</head>
+<body>
+
+<h2>
+<a NAME="Replication Commands"></a>Replication Commands</h2>
+Replication commands are invoked from the environment handle, after
+it has been opened with the appropriate flags defined
+<a href="./env.html">here</a>.<br>
+<hr WIDTH="100%">
+<p><b>> &lt;env> rep_process_message <i>machid</i> <i>control</i>
+<i>rec</i></b>
+<p>This command processes a single incoming replication message.&nbsp; It
+is a direct translation of the <a
+href="../../docs/api_c/rep_process_message.html">rep_process_message</a>
+function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<ul>
+<li>
+<b>machid </b>is the machine ID of the machine that <i>sent</i> this
+message.</li>
+
+<li>
+<b>control</b> is a binary string containing the exact contents of the
+<b><i>control</i></b> argument to the <b><i>sendproc</i></b> function
+that was passed this message on another site.</li>
+
+<li>
+<b>rec</b> is a binary string containing the exact contents of the
+<b><i>rec</i></b> argument to the <b><i>sendproc</i></b> function
+that was passed this message on another site.</li>
+</ul>
+
+<hr WIDTH="100%">
+<br><b>> &lt;env> rep_elect <i>nsites</i> <i>pri</i> <i>wait</i>
+<i>sleep</i></b>
+<p>This command causes a replication election.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/rep_elect.html">rep_elect</a> function.&nbsp;
+Its arguments, all integers, correspond exactly to that C function's
+parameters.
+It will return a list containing two integers, which contain,
+respectively, the integer values returned in the C function's
+<i><b>midp</b></i> and <i><b>selfp</b></i> parameters.
+</body>
+</html>
diff --git a/storage/bdb/tcl/docs/test.html b/storage/bdb/tcl/docs/test.html
new file mode 100644
index 00000000000..603ae56a51e
--- /dev/null
+++ b/storage/bdb/tcl/docs/test.html
@@ -0,0 +1,150 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<HTML>
+<HEAD>
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+ <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
+</HEAD>
+<BODY>
+
+<H2>
+<A NAME="Debugging"></A>Debugging and Testing</H2>
+We have imported the debugging system from the old test suite into the
+new interface to aid in debugging problems.&nbsp; There are several variables
+that are available both in gdb as globals to the C code, and variables
+in Tcl that the user can set.&nbsp; These variables are linked together
+so that changes in one venue are reflected in the other.&nbsp; The names
+of the variables have been modified a bit to reduce the likelihood
+<BR>of namespace trampling.&nbsp; We have added a double underscore to
+all the names.
+<P>The variables are all initialized to zero (0) thus resulting in debugging
+being turned off.&nbsp; The purpose of the debugging, fundamentally, is
+to allow the user to set a breakpoint prior to making a DB call.&nbsp;
+This breakpoint is set in the <I>__db_loadme() </I>function.&nbsp; The
+user may selectively turn on various debugging areas each controlled by
+a separate variable (note they all have two (2) underscores prepended to
+the name):
+<UL>
+<LI>
+<B>__debug_on</B> - Turns on the debugging system.&nbsp; This must be on
+for any debugging to occur</LI>
+
+<LI>
+<B>__debug_print - </B>Turns on printing a debug count statement on each
+call</LI>
+
+<LI>
+<B>__debug_test -</B> Hits the breakpoint in <I>__db_loadme</I> on the
+specific iteration</LI>
+
+<LI>
+<B>__debug_stop </B>- Hits the breakpoint in <I>__db_loadme</I> on every
+(or the next) iteration</LI>
+</UL>
+<B>Note to developers:</B>&nbsp; Anyone extending this interface must place
+a call to <B>_debug_check()</B> (no arguments) before every call into the
+DB library.
+<P>There is also a command available that will force a call to the _debug_check
+function.
+<P><B>> berkdb debug_check</B>
+<P>
+<HR WIDTH="100%">
+<BR>For testing purposes we have added several hooks into the DB library
+and a small interface into the environment and/or database commands to
+manipulate the hooks.&nbsp; This command interface and the hooks and everything
+that goes with it is only enabled when the test option is configured into
+DB.
+<P><B>> &lt;env> test copy <I>location</I></B>
+<BR><B>> &lt;db> test copy <I>location</I></B>
+<BR><B>> &lt;env> test abort <I>location</I></B>
+<BR><B>> &lt;db> test abort <I>location</I></B>
+<P>In order to test recovery we need to be able to abort the creation or
+deletion process at various points.&nbsp; Also we want to invoke a copy
+function to copy the database file(s)&nbsp; at various points as well so
+that we can obtain before/after snapshots of the databases.&nbsp; The interface
+provides the test command to specify a <B><I>location</I></B> where we
+wish to invoke a <B>copy</B> or an <B>abort</B>.&nbsp; The command is available
+from either the environment or the database for convenience.&nbsp; The
+<B><I>location</I></B>
+can be one of the following:
+<UL>
+<LI>
+<B>none -</B> Clears the location</LI>
+
+<LI>
+<B>preopen -</B> Sets the location prior to the __os_open call in the creation
+process</LI>
+
+<LI>
+<B>postopen</B> - Sets the location to immediately following the __os_open
+call in creation</LI>
+
+<LI>
+<B>postlogmeta</B> - Sets the location to immediately following the __db_log_page
+call to log the meta data in creation.&nbsp; Only valid for Btree.</LI>
+
+<LI>
+<B>postlog</B> - Sets the location to immediately following the last (or
+only) __db_log_page call in creation.</LI>
+
+<LI>
+<B>postsync</B> - Sets the location to immediately following the sync of
+the log page in creation.</LI>
+
+<LI>
+<B>prerename</B> - Sets the location prior to the __os_rename call in the
+deletion process.</LI>
+
+<LI>
+<B>postrename</B> - Sets the location to immediately following the __os_rename
+call in deletion</LI>
+</UL>
+
+<HR WIDTH="100%">
+<BR><B>> &lt;env> mutex <I>mode nitems</I></B>
+<P>This command creates a mutex region for testing.&nbsp; It sets the mode
+of the region to <B><I>mode</I></B> and sets up for <B><I>nitems</I></B>
+number of mutex entries.&nbsp; After we successfully get a handle to a
+mutex we create a command of the form <B><I>$env.mutexX</I></B>, where
+X is an integer starting at&nbsp; 0 (e.g. <B>$env.mutex0, $env.mutex1,
+</B>etc).&nbsp;&nbsp;
+We use the <I>Tcl_CreateObjCommand()&nbsp;</I> to create the top level
+mutex function.&nbsp; It is through this handle that the user can access
+all of the commands described below.&nbsp; Internally, the mutex handle
+is sent as the <I>ClientData</I> portion of the new command set so that
+all future mutex calls access the appropriate handle.
+<P>
+<HR WIDTH="100%"><B>> &lt;mutex> close</B>
+<P>This command closes the mutex and renders the handle invalid.&nbsp;&nbsp;
+This command directly translates to the __db_r_detach function call.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<P>Additionally, since the handle is no longer valid, we will call <I>Tcl_DeleteCommand()
+</I>so
+that further uses of the handle will be dealt with properly by Tcl itself.&nbsp;
+<HR WIDTH="100%"><B>> &lt;mutex> get <I>id</I></B>
+<P>This command locks the mutex identified by <B><I>id</I></B>.&nbsp; It
+returns either a 0 (for success),&nbsp; or it throws a Tcl error with a
+system message.
+<BR>
+<HR WIDTH="100%"><B>> &lt;mutex> release <I>id</I></B>
+<P>This command releases the mutex identified by <B><I>id</I></B>.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<BR>
+<HR WIDTH="100%"><B>> &lt;mutex> getval <I>id</I></B>
+<P>This command gets the value stored for the mutex identified by <B><I>id</I></B>.&nbsp;
+It returns either the value,&nbsp; or it throws a Tcl error with a system
+message.
+<BR>
+<HR WIDTH="100%"><B>> &lt;mutex> setval <I>id val</I></B>
+<P>This command sets the value stored for the mutex identified by <B><I>id
+</I></B>to
+<B><I>val</I></B>.&nbsp;
+It returns either a 0 (for success),&nbsp; or it throws a Tcl error with
+a system message.
+<BR>
+<HR WIDTH="100%">
+<BR>&nbsp;
+</BODY>
+</HTML>
diff --git a/storage/bdb/tcl/docs/txn.html b/storage/bdb/tcl/docs/txn.html
new file mode 100644
index 00000000000..07c88c0fe1d
--- /dev/null
+++ b/storage/bdb/tcl/docs/txn.html
@@ -0,0 +1,67 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.75 [en] (X11; U; Linux 2.2.16-22 i686) [Netscape]">
+</head>
+<body>
+
+<h2>
+<a NAME="Transaction Commands"></a>Transaction Commands</h2>
+Transactions are used in a manner similar to the other subsystems.&nbsp;
+We create a handle to the transaction and&nbsp; then use it for a variety
+of operations.&nbsp; Some of the transaction commands use the environment
+instead.&nbsp; Those are presented first.&nbsp; The transaction command
+handle returned is the handle used by the various commands that can be
+transaction protected, such as <a href="../../docs/api_tcl/db_cursor.html">cursors</a>.
+<br>
+<hr WIDTH="100%">
+<p><b>> &lt;env> txn_checkpoint [-kbyte <i>kb</i>] [-min <i>min</i>]</b>
+<p>This command causes a checkpoint of the transaction region.&nbsp; It
+is a direct translation of the <a href="../../docs/api_c/txn_checkpoint.html">txn_checkpoint
+</a>function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<ul>
+<li>
+<b>-kbyte </b>causes the checkpoint to occur only if <b><i>kb</i></b> kilobytes
+of log data has been written since the last checkpoint</li>
+
+<li>
+<b>-min</b> causes the checkpoint to occur only if <b><i>min</i></b> minutes
+have passed since the last checkpoint</li>
+</ul>
+
+<hr WIDTH="100%">
+<br><b>> &lt;env> txn_stat</b>
+<p>This command returns transaction statistics.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/txn_stat.html">txn_stat</a> function.&nbsp;
+It will return a list of name/value pairs that correspond to the DB_TXN_STAT
+structure.
+<hr WIDTH="100%">
+<br><b>> &lt;env> txn_id_set&nbsp;</b><i> current max</i>
+<p>This is a diagnostic command that sets the next transaction id to be
+allocated and the maximum transaction
+<br>id, which is the point at which the reclamation algorithm is triggered.
+<hr WIDTH="100%">
+<br><b>>&nbsp; &lt;txn> id</b>
+<p>This command returns the transaction id.&nbsp; It is a direct call to
+the <a href="../../docs/api_c/txn_id.html">txn_id</a> function.&nbsp; The
+typical use of this identifier is as the <b><i>locker</i></b> value for
+the <a href="lock.html">lock_get</a> and <a href="lock.html">lock_vec</a>
+calls.
+<hr WIDTH="100%">
+<br><b>> &lt;txn> prepare</b>
+<p>This command initiates a two-phase commit.&nbsp; It is a direct call
+to the <a href="../../docs/api_c/txn_prepare.html">txn_prepare</a> function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.
+<hr WIDTH="100%"><a NAME="> <env> lock_vec"></a><b>> &lt;env> txn_timeout
+<i>timeout</i></b>
+<p>This command sets the transaction timeout for transactions started in
+the future in this environment.&nbsp; The timeout is in microseconds.
+<br>&nbsp;
+<br>&nbsp;
+</body>
+</html>
diff --git a/storage/bdb/tcl/tcl_compat.c b/storage/bdb/tcl/tcl_compat.c
new file mode 100644
index 00000000000..e77bc32aedf
--- /dev/null
+++ b/storage/bdb/tcl/tcl_compat.c
@@ -0,0 +1,746 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_compat.c,v 11.39 2002/08/15 14:05:38 bostic Exp $";
+#endif /* not lint */
+
+#if CONFIG_TEST
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#define DB_DBM_HSEARCH 1
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * bdb_HCommand --
+ * Implements h* functions.
+ *
+ * PUBLIC: int bdb_HCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+bdb_HCommand(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *hcmds[] = {
+ "hcreate",
+ "hdestroy",
+ "hsearch",
+ NULL
+ };
+ enum hcmds {
+ HHCREATE,
+ HHDESTROY,
+ HHSEARCH
+ };
+ static char *srchacts[] = {
+ "enter",
+ "find",
+ NULL
+ };
+ enum srchacts {
+ ACT_ENTER,
+ ACT_FIND
+ };
+ ENTRY item, *hres;
+ ACTION action;
+ int actindex, cmdindex, nelem, result, ret;
+ Tcl_Obj *res;
+
+ result = TCL_OK;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], hcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum hcmds)cmdindex) {
+ case HHCREATE:
+ /*
+ * Must be 1 arg, nelem. Error if not.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "nelem");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &nelem);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = hcreate(nelem) == 0 ? 1: 0;
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "hcreate");
+ }
+ break;
+ case HHSEARCH:
+ /*
+ * 3 args for this. Error if different.
+ */
+ if (objc != 5) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key data action");
+ return (TCL_ERROR);
+ }
+ item.key = Tcl_GetStringFromObj(objv[2], NULL);
+ item.data = Tcl_GetStringFromObj(objv[3], NULL);
+ if (Tcl_GetIndexFromObj(interp, objv[4], srchacts,
+ "action", TCL_EXACT, &actindex) != TCL_OK)
+ return (IS_HELP(objv[4]));
+ switch ((enum srchacts)actindex) {
+ case ACT_ENTER:
+ action = ENTER;
+ break;
+ default:
+ case ACT_FIND:
+ action = FIND;
+ break;
+ }
+ _debug_check();
+ hres = hsearch(item, action);
+ if (hres == NULL)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else if (action == FIND)
+ Tcl_SetResult(interp, (char *)hres->data, TCL_STATIC);
+ else
+ /* action is ENTER */
+ Tcl_SetResult(interp, "0", TCL_STATIC);
+
+ break;
+ case HHDESTROY:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ (void)hdestroy();
+ res = Tcl_NewIntObj(0);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ *
+ * bdb_NdbmOpen --
+ * Opens an ndbm database.
+ *
+ * PUBLIC: #if DB_DBM_HSEARCH != 0
+ * PUBLIC: int bdb_NdbmOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBM **));
+ * PUBLIC: #endif
+ */
+int
+bdb_NdbmOpen(interp, objc, objv, dbpp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBM **dbpp; /* Dbm pointer */
+{
+ static char *ndbopen[] = {
+ "-create",
+ "-mode",
+ "-rdonly",
+ "-truncate",
+ "--",
+ NULL
+ };
+ enum ndbopen {
+ NDB_CREATE,
+ NDB_MODE,
+ NDB_RDONLY,
+ NDB_TRUNC,
+ NDB_ENDARG
+ };
+
+ u_int32_t open_flags;
+ int endarg, i, mode, optindex, read_only, result, ret;
+ char *arg, *db;
+
+ result = TCL_OK;
+ open_flags = 0;
+ endarg = mode = 0;
+ read_only = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the option name index from the object based on the args
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], ndbopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum ndbopen)optindex) {
+ case NDB_CREATE:
+ open_flags |= O_CREAT;
+ break;
+ case NDB_RDONLY:
+ read_only = 1;
+ break;
+ case NDB_TRUNC:
+ open_flags |= O_TRUNC;
+ break;
+ case NDB_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case NDB_ENDARG:
+ endarg = 1;
+ break;
+ } /* switch */
+
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+
+ /*
+ * Any args we have left, (better be 0, or 1 left) is a
+ * file name. If we have 0, then an in-memory db. If
+ * there is 1, a db name.
+ */
+ db = NULL;
+ if (i != objc && i != objc - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? ?file?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ if (i != objc)
+ db = Tcl_GetStringFromObj(objv[objc - 1], NULL);
+
+ /*
+ * When we get here, we have already parsed all of our args
+ * and made all our calls to set up the database. Everything
+ * is okay so far, no errors, if we get here.
+ *
+ * Now open the database.
+ */
+ if (read_only)
+ open_flags |= O_RDONLY;
+ else
+ open_flags |= O_RDWR;
+ _debug_check();
+ if ((*dbpp = dbm_open(db, open_flags, mode)) == NULL) {
+ ret = Tcl_GetErrno();
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db open");
+ goto error;
+ }
+ return (TCL_OK);
+
+error:
+ *dbpp = NULL;
+ return (result);
+}
+
+/*
+ * bdb_DbmCommand --
+ * Implements "dbm" commands.
+ *
+ * PUBLIC: #if DB_DBM_HSEARCH != 0
+ * PUBLIC: int bdb_DbmCommand
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST*, int, DBM *));
+ * PUBLIC: #endif
+ */
+int
+bdb_DbmCommand(interp, objc, objv, flag, dbm)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ int flag; /* Which db interface */
+ DBM *dbm; /* DBM pointer */
+{
+ static char *dbmcmds[] = {
+ "dbmclose",
+ "dbminit",
+ "delete",
+ "fetch",
+ "firstkey",
+ "nextkey",
+ "store",
+ NULL
+ };
+ enum dbmcmds {
+ DBMCLOSE,
+ DBMINIT,
+ DBMDELETE,
+ DBMFETCH,
+ DBMFIRST,
+ DBMNEXT,
+ DBMSTORE
+ };
+ static char *stflag[] = {
+ "insert", "replace",
+ NULL
+ };
+ enum stflag {
+ STINSERT, STREPLACE
+ };
+ datum key, data;
+ void *dtmp, *ktmp;
+ u_int32_t size;
+ int cmdindex, freedata, freekey, stindex, result, ret;
+ char *name, *t;
+
+ result = TCL_OK;
+ freekey = freedata = 0;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], dbmcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ switch ((enum dbmcmds)cmdindex) {
+ case DBMCLOSE:
+ /*
+ * No arg for this. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ ret = dbmclose();
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbmclose");
+ break;
+ case DBMINIT:
+ /*
+ * Must be 1 arg - file.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "file");
+ return (TCL_ERROR);
+ }
+ name = Tcl_GetStringFromObj(objv[2], NULL);
+ if (flag == DBTCL_DBM)
+ ret = dbminit(name);
+ else {
+ Tcl_SetResult(interp, "Bad interface flag for command",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbminit");
+ break;
+ case DBMFETCH:
+ /*
+ * 1 arg for this. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key");
+ return (TCL_ERROR);
+ }
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ data = fetch(key);
+ else if (flag == DBTCL_NDBM)
+ data = dbm_fetch(dbm, key);
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (data.dptr == NULL ||
+ (ret = __os_malloc(NULL, data.dsize + 1, &t)) != 0)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else {
+ memcpy(t, data.dptr, data.dsize);
+ t[data.dsize] = '\0';
+ Tcl_SetResult(interp, t, TCL_VOLATILE);
+ __os_free(NULL, t);
+ }
+ break;
+ case DBMSTORE:
+ /*
+ * 2 args for this. Error if different.
+ */
+ if (objc != 4 && flag == DBTCL_DBM) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key data");
+ return (TCL_ERROR);
+ }
+ if (objc != 5 && flag == DBTCL_NDBM) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key data action");
+ return (TCL_ERROR);
+ }
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
+ if ((ret = _CopyObjBytes(
+ interp, objv[3], &dtmp, &size, &freedata)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ data.dsize = size;
+ data.dptr = (char *)dtmp;
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ ret = store(key, data);
+ else if (flag == DBTCL_NDBM) {
+ if (Tcl_GetIndexFromObj(interp, objv[4], stflag,
+ "flag", TCL_EXACT, &stindex) != TCL_OK)
+ return (IS_HELP(objv[4]));
+ switch ((enum stflag)stindex) {
+ case STINSERT:
+ flag = DBM_INSERT;
+ break;
+ case STREPLACE:
+ flag = DBM_REPLACE;
+ break;
+ }
+ ret = dbm_store(dbm, key, data, flag);
+ } else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "store");
+ break;
+ case DBMDELETE:
+ /*
+ * 1 arg for this. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "key");
+ return (TCL_ERROR);
+ }
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ ret = delete(key);
+ else if (flag == DBTCL_NDBM)
+ ret = dbm_delete(dbm, key);
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "delete");
+ break;
+ case DBMFIRST:
+ /*
+ * No arg for this. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ if (flag == DBTCL_DBM)
+ key = firstkey();
+ else if (flag == DBTCL_NDBM)
+ key = dbm_firstkey(dbm);
+ else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (key.dptr == NULL ||
+ (ret = __os_malloc(NULL, key.dsize + 1, &t)) != 0)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else {
+ memcpy(t, key.dptr, key.dsize);
+ t[key.dsize] = '\0';
+ Tcl_SetResult(interp, t, TCL_VOLATILE);
+ __os_free(NULL, t);
+ }
+ break;
+ case DBMNEXT:
+ /*
+ * 0 or 1 arg for this. Error if different.
+ */
+ _debug_check();
+ if (flag == DBTCL_DBM) {
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
+ data = nextkey(key);
+ } else if (flag == DBTCL_NDBM) {
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ data = dbm_nextkey(dbm);
+ } else {
+ Tcl_SetResult(interp,
+ "Bad interface flag for command", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (data.dptr == NULL ||
+ (ret = __os_malloc(NULL, data.dsize + 1, &t)) != 0)
+ Tcl_SetResult(interp, "-1", TCL_STATIC);
+ else {
+ memcpy(t, data.dptr, data.dsize);
+ t[data.dsize] = '\0';
+ Tcl_SetResult(interp, t, TCL_VOLATILE);
+ __os_free(NULL, t);
+ }
+ break;
+ }
+out:
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ if (freekey)
+ (void)__os_free(NULL, ktmp);
+ return (result);
+}
+
+/*
+ * ndbm_Cmd --
+ * Implements the "ndbm" widget.
+ *
+ * PUBLIC: int ndbm_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+ndbm_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* DB handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *ndbcmds[] = {
+ "clearerr",
+ "close",
+ "delete",
+ "dirfno",
+ "error",
+ "fetch",
+ "firstkey",
+ "nextkey",
+ "pagfno",
+ "rdonly",
+ "store",
+ NULL
+ };
+ enum ndbcmds {
+ NDBCLRERR,
+ NDBCLOSE,
+ NDBDELETE,
+ NDBDIRFNO,
+ NDBERR,
+ NDBFETCH,
+ NDBFIRST,
+ NDBNEXT,
+ NDBPAGFNO,
+ NDBRDONLY,
+ NDBSTORE
+ };
+ DBM *dbp;
+ DBTCL_INFO *dbip;
+ Tcl_Obj *res;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ dbp = (DBM *)clientData;
+ dbip = _PtrToInfo((void *)dbp);
+ result = TCL_OK;
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbp == NULL) {
+ Tcl_SetResult(interp, "NULL db pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "NULL db info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], ndbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum ndbcmds)cmdindex) {
+ case NDBCLOSE:
+ _debug_check();
+ dbm_close(dbp);
+ (void)Tcl_DeleteCommand(interp, dbip->i_name);
+ _DeleteInfo(dbip);
+ res = Tcl_NewIntObj(0);
+ break;
+ case NDBDELETE:
+ case NDBFETCH:
+ case NDBFIRST:
+ case NDBNEXT:
+ case NDBSTORE:
+ result = bdb_DbmCommand(interp, objc, objv, DBTCL_NDBM, dbp);
+ break;
+ case NDBCLRERR:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_clearerr(dbp);
+ if (ret)
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "clearerr");
+ else
+ res = Tcl_NewIntObj(ret);
+ break;
+ case NDBDIRFNO:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_dirfno(dbp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case NDBPAGFNO:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_pagfno(dbp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case NDBERR:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_error(dbp);
+ Tcl_SetErrno(ret);
+ Tcl_SetResult(interp, Tcl_PosixError(interp), TCL_STATIC);
+ break;
+ case NDBRDONLY:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbm_rdonly(dbp);
+ if (ret)
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "rdonly");
+ else
+ res = Tcl_NewIntObj(ret);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+#endif /* CONFIG_TEST */
diff --git a/storage/bdb/tcl/tcl_db.c b/storage/bdb/tcl/tcl_db.c
new file mode 100644
index 00000000000..7df2e48311c
--- /dev/null
+++ b/storage/bdb/tcl/tcl_db.c
@@ -0,0 +1,2421 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_db.c,v 11.107 2002/08/06 06:20:31 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int tcl_DbAssociate __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbClose __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *, DBTCL_INFO *));
+static int tcl_DbDelete __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *, int));
+static int tcl_DbKeyRange __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbTruncate __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbCursor __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *, DBC **));
+static int tcl_DbJoin __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *, DBC **));
+static int tcl_DbGetjoin __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbCount __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_second_call __P((DB *, const DBT *, const DBT *, DBT *));
+
+/*
+ * _DbInfoDelete --
+ *
+ * PUBLIC: void _DbInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+ */
+void
+_DbInfoDelete(interp, dbip)
+ Tcl_Interp *interp;
+ DBTCL_INFO *dbip;
+{
+ DBTCL_INFO *nextp, *p;
+ /*
+ * First we have to close any open cursors. Then we close
+ * our db.
+ */
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ nextp = LIST_NEXT(p, entries);
+ /*
+ * Check if this is a cursor info structure and if
+ * it is, if it belongs to this DB. If so, remove
+ * its commands and info structure.
+ */
+ if (p->i_parent == dbip && p->i_type == I_DBC) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+ (void)Tcl_DeleteCommand(interp, dbip->i_name);
+ _DeleteInfo(dbip);
+}
+
+/*
+ *
+ * PUBLIC: int db_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ *
+ * db_Cmd --
+ * Implements the "db" widget.
+ */
+int
+db_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* DB handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *dbcmds[] = {
+#if CONFIG_TEST
+ "keyrange",
+ "pget",
+ "rpcid",
+ "test",
+#endif
+ "associate",
+ "close",
+ "count",
+ "cursor",
+ "del",
+ "get",
+ "get_join",
+ "get_type",
+ "is_byteswapped",
+ "join",
+ "put",
+ "stat",
+ "sync",
+ "truncate",
+ NULL
+ };
+ enum dbcmds {
+#if CONFIG_TEST
+ DBKEYRANGE,
+ DBPGET,
+ DBRPCID,
+ DBTEST,
+#endif
+ DBASSOCIATE,
+ DBCLOSE,
+ DBCOUNT,
+ DBCURSOR,
+ DBDELETE,
+ DBGET,
+ DBGETJOIN,
+ DBGETTYPE,
+ DBSWAPPED,
+ DBJOIN,
+ DBPUT,
+ DBSTAT,
+ DBSYNC,
+ DBTRUNCATE
+ };
+ DB *dbp;
+ DBC *dbc;
+ DBTCL_INFO *dbip;
+ DBTCL_INFO *ip;
+ DBTYPE type;
+ Tcl_Obj *res;
+ int cmdindex, isswapped, result, ret;
+ char newname[MSG_SIZE];
+
+ Tcl_ResetResult(interp);
+ dbp = (DB *)clientData;
+ dbip = _PtrToInfo((void *)dbp);
+ memset(newname, 0, MSG_SIZE);
+ result = TCL_OK;
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbp == NULL) {
+ Tcl_SetResult(interp, "NULL db pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "NULL db info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], dbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum dbcmds)cmdindex) {
+#if CONFIG_TEST
+ case DBKEYRANGE:
+ result = tcl_DbKeyRange(interp, objc, objv, dbp);
+ break;
+ case DBPGET:
+ result = tcl_DbGet(interp, objc, objv, dbp, 1);
+ break;
+ case DBRPCID:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * !!! Retrieve the client ID from the dbp handle directly.
+ * This is for testing purposes only. It is dbp-private data.
+ */
+ res = Tcl_NewLongObj(dbp->cl_id);
+ break;
+ case DBTEST:
+ result = tcl_EnvTest(interp, objc, objv, dbp->dbenv);
+ break;
+#endif
+ case DBASSOCIATE:
+ result = tcl_DbAssociate(interp, objc, objv, dbp);
+ break;
+ case DBCLOSE:
+ result = tcl_DbClose(interp, objc, objv, dbp, dbip);
+ break;
+ case DBDELETE:
+ result = tcl_DbDelete(interp, objc, objv, dbp);
+ break;
+ case DBGET:
+ result = tcl_DbGet(interp, objc, objv, dbp, 0);
+ break;
+ case DBPUT:
+ result = tcl_DbPut(interp, objc, objv, dbp);
+ break;
+ case DBCOUNT:
+ result = tcl_DbCount(interp, objc, objv, dbp);
+ break;
+ case DBSWAPPED:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbp->get_byteswapped(dbp, &isswapped);
+ res = Tcl_NewIntObj(isswapped);
+ break;
+ case DBGETTYPE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbp->get_type(dbp, &type);
+ if (type == DB_BTREE)
+ res = Tcl_NewStringObj("btree", strlen("btree"));
+ else if (type == DB_HASH)
+ res = Tcl_NewStringObj("hash", strlen("hash"));
+ else if (type == DB_RECNO)
+ res = Tcl_NewStringObj("recno", strlen("recno"));
+ else if (type == DB_QUEUE)
+ res = Tcl_NewStringObj("queue", strlen("queue"));
+ else {
+ Tcl_SetResult(interp,
+ "db gettype: Returned unknown type\n", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBSTAT:
+ result = tcl_DbStat(interp, objc, objv, dbp);
+ break;
+ case DBSYNC:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbp->sync(dbp, 0);
+ res = Tcl_NewIntObj(ret);
+ if (ret != 0) {
+ Tcl_SetObjResult(interp, res);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBCURSOR:
+ snprintf(newname, sizeof(newname),
+ "%s.c%d", dbip->i_name, dbip->i_dbdbcid);
+ ip = _NewInfo(interp, NULL, newname, I_DBC);
+ if (ip != NULL) {
+ result = tcl_DbCursor(interp, objc, objv, dbp, &dbc);
+ if (result == TCL_OK) {
+ dbip->i_dbdbcid++;
+ ip->i_parent = dbip;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)dbc_Cmd,
+ (ClientData)dbc, NULL);
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, dbc);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp,
+ "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBJOIN:
+ snprintf(newname, sizeof(newname),
+ "%s.c%d", dbip->i_name, dbip->i_dbdbcid);
+ ip = _NewInfo(interp, NULL, newname, I_DBC);
+ if (ip != NULL) {
+ result = tcl_DbJoin(interp, objc, objv, dbp, &dbc);
+ if (result == TCL_OK) {
+ dbip->i_dbdbcid++;
+ ip->i_parent = dbip;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)dbc_Cmd,
+ (ClientData)dbc, NULL);
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(ip, dbc);
+ } else
+ _DeleteInfo(ip);
+ } else {
+ Tcl_SetResult(interp,
+ "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBGETJOIN:
+ result = tcl_DbGetjoin(interp, objc, objv, dbp);
+ break;
+ case DBTRUNCATE:
+ result = tcl_DbTruncate(interp, objc, objv, dbp);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * tcl_db_stat --
+ */
+static int
+tcl_DbStat(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ DB_BTREE_STAT *bsp;
+ DB_HASH_STAT *hsp;
+ DB_QUEUE_STAT *qsp;
+ void *sp;
+ Tcl_Obj *res, *flaglist, *myobjv[2];
+ DBTYPE type;
+ u_int32_t flag;
+ int result, ret;
+ char *arg;
+
+ result = TCL_OK;
+ flag = 0;
+
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-faststat?");
+ return (TCL_ERROR);
+ }
+
+ if (objc == 3) {
+ arg = Tcl_GetStringFromObj(objv[2], NULL);
+ if (strcmp(arg, "-faststat") == 0)
+ flag = DB_FAST_STAT;
+ else {
+ Tcl_SetResult(interp,
+ "db stat: unknown arg", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+
+ _debug_check();
+ ret = dbp->stat(dbp, &sp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ (void)dbp->get_type(dbp, &type);
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ if (type == DB_HASH) {
+ hsp = (DB_HASH_STAT *)sp;
+ MAKE_STAT_LIST("Magic", hsp->hash_magic);
+ MAKE_STAT_LIST("Version", hsp->hash_version);
+ MAKE_STAT_LIST("Page size", hsp->hash_pagesize);
+ MAKE_STAT_LIST("Number of keys", hsp->hash_nkeys);
+ MAKE_STAT_LIST("Number of records", hsp->hash_ndata);
+ MAKE_STAT_LIST("Fill factor", hsp->hash_ffactor);
+ MAKE_STAT_LIST("Buckets", hsp->hash_buckets);
+ if (flag != DB_FAST_STAT) {
+ MAKE_STAT_LIST("Free pages", hsp->hash_free);
+ MAKE_STAT_LIST("Bytes free", hsp->hash_bfree);
+ MAKE_STAT_LIST("Number of big pages",
+ hsp->hash_bigpages);
+ MAKE_STAT_LIST("Big pages bytes free",
+ hsp->hash_big_bfree);
+ MAKE_STAT_LIST("Overflow pages", hsp->hash_overflows);
+ MAKE_STAT_LIST("Overflow bytes free",
+ hsp->hash_ovfl_free);
+ MAKE_STAT_LIST("Duplicate pages", hsp->hash_dup);
+ MAKE_STAT_LIST("Duplicate pages bytes free",
+ hsp->hash_dup_free);
+ }
+ } else if (type == DB_QUEUE) {
+ qsp = (DB_QUEUE_STAT *)sp;
+ MAKE_STAT_LIST("Magic", qsp->qs_magic);
+ MAKE_STAT_LIST("Version", qsp->qs_version);
+ MAKE_STAT_LIST("Page size", qsp->qs_pagesize);
+ MAKE_STAT_LIST("Extent size", qsp->qs_extentsize);
+ MAKE_STAT_LIST("Number of records", qsp->qs_nkeys);
+ MAKE_STAT_LIST("Record length", qsp->qs_re_len);
+ MAKE_STAT_LIST("Record pad", qsp->qs_re_pad);
+ MAKE_STAT_LIST("First record number", qsp->qs_first_recno);
+ MAKE_STAT_LIST("Last record number", qsp->qs_cur_recno);
+ if (flag != DB_FAST_STAT) {
+ MAKE_STAT_LIST("Number of pages", qsp->qs_pages);
+ MAKE_STAT_LIST("Bytes free", qsp->qs_pgfree);
+ }
+ } else { /* BTREE and RECNO are same stats */
+ bsp = (DB_BTREE_STAT *)sp;
+ MAKE_STAT_LIST("Magic", bsp->bt_magic);
+ MAKE_STAT_LIST("Version", bsp->bt_version);
+ MAKE_STAT_LIST("Number of keys", bsp->bt_nkeys);
+ MAKE_STAT_LIST("Number of records", bsp->bt_ndata);
+ MAKE_STAT_LIST("Minimum keys per page", bsp->bt_minkey);
+ MAKE_STAT_LIST("Fixed record length", bsp->bt_re_len);
+ MAKE_STAT_LIST("Record pad", bsp->bt_re_pad);
+ MAKE_STAT_LIST("Page size", bsp->bt_pagesize);
+ if (flag != DB_FAST_STAT) {
+ MAKE_STAT_LIST("Levels", bsp->bt_levels);
+ MAKE_STAT_LIST("Internal pages", bsp->bt_int_pg);
+ MAKE_STAT_LIST("Leaf pages", bsp->bt_leaf_pg);
+ MAKE_STAT_LIST("Duplicate pages", bsp->bt_dup_pg);
+ MAKE_STAT_LIST("Overflow pages", bsp->bt_over_pg);
+ MAKE_STAT_LIST("Pages on freelist", bsp->bt_free);
+ MAKE_STAT_LIST("Internal pages bytes free",
+ bsp->bt_int_pgfree);
+ MAKE_STAT_LIST("Leaf pages bytes free",
+ bsp->bt_leaf_pgfree);
+ MAKE_STAT_LIST("Duplicate pages bytes free",
+ bsp->bt_dup_pgfree);
+ MAKE_STAT_LIST("Bytes free in overflow pages",
+ bsp->bt_over_pgfree);
+ }
+ }
+
+ /*
+ * Construct a {name {flag1 flag2 ... flagN}} list for the
+ * dbp flags. These aren't access-method dependent, but they
+ * include all the interesting flags, and the integer value
+ * isn't useful from Tcl--return the strings instead.
+ */
+ myobjv[0] = Tcl_NewStringObj("Flags", strlen("Flags"));
+ myobjv[1] = _GetFlagsList(interp, dbp->flags, __db_inmemdbflags);
+ flaglist = Tcl_NewListObj(2, myobjv);
+ if (flaglist == NULL) {
+ result = TCL_ERROR;
+ goto error;
+ }
+ if ((result =
+ Tcl_ListObjAppendElement(interp, res, flaglist)) != TCL_OK)
+ goto error;
+
+ Tcl_SetObjResult(interp, res);
+error:
+ free(sp);
+ return (result);
+}
+
+/*
+ * tcl_db_close --
+ *	Implement the Tcl "db close" command.  Accepts an optional
+ *	-nosync flag (skip flushing dirty pages) and an optional "--"
+ *	end-of-options marker, deletes the Tcl-side DBTCL_INFO for the
+ *	handle, then calls DB->close.  Returns a standard Tcl result.
+ */
+static int
+tcl_DbClose(interp, objc, objv, dbp, dbip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ DBTCL_INFO *dbip; /* Info pointer */
+{
+ static char *dbclose[] = {
+ "-nosync", "--", NULL
+ };
+ enum dbclose {
+ TCL_DBCLOSE_NOSYNC,
+ TCL_DBCLOSE_ENDARG
+ };
+ u_int32_t flag;
+ int endarg, i, optindex, result, ret;
+ char *arg;
+
+ result = TCL_OK;
+ endarg = 0;
+ flag = 0;
+ /* "db close" takes at most one option; anything more is a usage error. */
+ if (objc > 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-nosync?");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbclose,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Unknown word: if it looks like a flag, show help;
+ * otherwise clear the error and stop option parsing.
+ */
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-')
+ return (IS_HELP(objv[i]));
+ else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbclose)optindex) {
+ case TCL_DBCLOSE_NOSYNC:
+ flag = DB_NOSYNC;
+ break;
+ case TCL_DBCLOSE_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ return (result);
+ if (endarg)
+ break;
+ }
+ /* Discard the Tcl info struct before closing the DB handle. */
+ _DbInfoDelete(interp, dbip);
+ _debug_check();
+
+ /* Paranoia. */
+ dbp->api_internal = NULL;
+
+ /* DB->close destroys the handle regardless of its return value. */
+ ret = (dbp)->close(dbp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db close");
+ return (result);
+}
+
+/*
+ * tcl_db_put --
+ *	Implement the Tcl "db put" command.  Parses put options
+ *	(-append, -auto_commit, -nooverwrite, -partial, -txn and,
+ *	under CONFIG_TEST, -nodupdata), builds key/data DBTs and calls
+ *	DB->put.  For QUEUE/RECNO databases the key is a db_recno_t;
+ *	with -append the allocated record number is returned to Tcl.
+ */
+static int
+tcl_DbPut(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbputopts[] = {
+#if CONFIG_TEST
+ "-nodupdata",
+#endif
+ "-append",
+ "-auto_commit",
+ "-nooverwrite",
+ "-partial",
+ "-txn",
+ NULL
+ };
+ enum dbputopts {
+#if CONFIG_TEST
+ DBGET_NODUPDATA,
+#endif
+ DBPUT_APPEND,
+ DBPUT_AUTO_COMMIT,
+ DBPUT_NOOVER,
+ DBPUT_PART,
+ DBPUT_TXN
+ };
+ static char *dbputapp[] = {
+ "-append", NULL
+ };
+ enum dbputapp { DBPUT_APPEND0 };
+ DBT key, data;
+ DBTYPE type;
+ DB_TXN *txn;
+ Tcl_Obj **elemv, *res;
+ void *dtmp, *ktmp;
+ db_recno_t recno;
+ u_int32_t flag;
+ int auto_commit, elemc, end, freekey, freedata;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ txn = NULL;
+ result = TCL_OK;
+ flag = 0;
+ if (objc <= 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key data");
+ return (TCL_ERROR);
+ }
+
+ freekey = freedata = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * If it is a QUEUE or RECNO database, the key is a record number
+ * and must be setup up to contain a db_recno_t. Otherwise the
+ * key is a "string".
+ */
+ (void)dbp->get_type(dbp, &type);
+
+ /*
+ * We need to determine where the end of required args are. If we
+ * are using a QUEUE/RECNO db and -append, then there is just one
+ * req arg (data). Otherwise there are two (key data).
+ *
+ * We preparse the list to determine this since we need to know
+ * to properly check # of args for other options below.
+ */
+ end = objc - 2;
+ if (type == DB_QUEUE || type == DB_RECNO) {
+ i = 2;
+ while (i < objc - 1) {
+ if (Tcl_GetIndexFromObj(interp, objv[i++], dbputapp,
+ "option", TCL_EXACT, &optindex) != TCL_OK)
+ continue;
+ switch ((enum dbputapp)optindex) {
+ case DBPUT_APPEND0:
+ end = objc - 1;
+ break;
+ }
+ }
+ }
+ /* Clear any error message left behind by the preparse pass. */
+ Tcl_ResetResult(interp);
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ auto_commit = 0;
+ while (i < end) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ dbputopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum dbputopts)optindex) {
+#if CONFIG_TEST
+ case DBGET_NODUPDATA:
+ FLAG_CHECK(flag);
+ flag = DB_NODUPDATA;
+ break;
+#endif
+ case DBPUT_TXN:
+ if (i > (end - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Put: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBPUT_AUTO_COMMIT:
+ auto_commit = 1;
+ break;
+ case DBPUT_APPEND:
+ FLAG_CHECK(flag);
+ flag = DB_APPEND;
+ break;
+ case DBPUT_NOOVER:
+ FLAG_CHECK(flag);
+ flag = DB_NOOVERWRITE;
+ break;
+ case DBPUT_PART:
+ if (i > (end - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ /*
+ * NOTE(review): elemc is read below even if the
+ * list conversion above failed; presumably the
+ * argument is always a valid list here -- confirm.
+ */
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ data.flags = DB_DBT_PARTIAL;
+ result = _GetUInt32(interp, elemv[0], &data.doff);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, elemv[1], &data.dlen);
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+ * for result. (See the check for save.doff, a few
+ * lines above and copy that.)
+ */
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (auto_commit)
+ flag |= DB_AUTO_COMMIT;
+
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * If we are a recno db and we are NOT using append, then the 2nd
+ * last arg is the key.
+ */
+ if (type == DB_QUEUE || type == DB_RECNO) {
+ key.data = &recno;
+ key.ulen = key.size = sizeof(db_recno_t);
+ key.flags = DB_DBT_USERMEM;
+ if (flag == DB_APPEND)
+ recno = 0;
+ else {
+ result = _GetUInt32(interp, objv[objc-2], &recno);
+ if (result != TCL_OK)
+ return (result);
+ }
+ } else {
+ /* Copy key bytes; freekey tells us to release ktmp later. */
+ ret = _CopyObjBytes(interp, objv[objc-2], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBPUT(ret), "db put");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ ret = _CopyObjBytes(interp, objv[objc-1], &dtmp,
+ &data.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBPUT(ret), "db put");
+ goto out;
+ }
+ data.data = dtmp;
+ _debug_check();
+ ret = dbp->put(dbp, txn, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBPUT(ret), "db put");
+ /* On a successful -append, hand the new record number back to Tcl. */
+ if (ret == 0 &&
+ (type == DB_RECNO || type == DB_QUEUE) && flag == DB_APPEND) {
+ res = Tcl_NewLongObj((long)recno);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ if (freedata)
+ (void)__os_free(dbp->dbenv, dtmp);
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ return (result);
+}
+
+/*
+ * tcl_db_get --
+ *	Implement the Tcl "db get" (and, with ispget set, "db pget")
+ *	command.  Parses the get options, then either performs a single
+ *	DB->get/DB->pget (no-duplicate, bulk, SET_RECNO, GET_BOTH and
+ *	CONSUME cases) or walks a cursor to collect duplicates and/or
+ *	-glob prefix matches.  The Tcl result is a list of {key data}
+ *	(or {key pkey data} for pget) pairs.
+ */
+static int
+tcl_DbGet(interp, objc, objv, dbp, ispget)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ int ispget; /* 1 for pget, 0 for get */
+{
+ static char *dbgetopts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-multi",
+#endif
+ "-consume",
+ "-consume_wait",
+ "-get_both",
+ "-glob",
+ "-partial",
+ "-recno",
+ "-rmw",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum dbgetopts {
+#if CONFIG_TEST
+ DBGET_DIRTY,
+ DBGET_MULTI,
+#endif
+ DBGET_CONSUME,
+ DBGET_CONSUME_WAIT,
+ DBGET_BOTH,
+ DBGET_GLOB,
+ DBGET_PART,
+ DBGET_RECNO,
+ DBGET_RMW,
+ DBGET_TXN,
+ DBGET_ENDARG
+ };
+ DBC *dbc;
+ DBT key, pkey, data, save;
+ DBTYPE type;
+ DB_TXN *txn;
+ Tcl_Obj **elemv, *retlist;
+ void *dtmp, *ktmp;
+ u_int32_t flag, cflag, isdup, mflag, rmw;
+ int bufsize, elemc, end, endarg, freekey, freedata, i;
+ int optindex, result, ret, useglob, useprecno, userecno;
+ char *arg, *pattern, *prefix, msg[MSG_SIZE];
+ db_recno_t precno, recno;
+
+ result = TCL_OK;
+ freekey = freedata = 0;
+ cflag = endarg = flag = mflag = rmw = 0;
+ useglob = userecno = useprecno = 0;
+ txn = NULL;
+ pattern = prefix = NULL;
+
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ /* "save" holds -partial settings and is copied into data per call. */
+ memset(&save, 0, sizeof(save));
+
+ /* For the primary key in a pget call. */
+ memset(&pkey, 0, sizeof(pkey));
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ (void)dbp->get_type(dbp, &type);
+ end = objc;
+ while (i < end) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbgetopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto out;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbgetopts)optindex) {
+#if CONFIG_TEST
+ case DBGET_DIRTY:
+ rmw |= DB_DIRTY_READ;
+ break;
+ case DBGET_MULTI:
+ /*
+ * -multi takes a buffer size for the bulk get;
+ * NOTE(review): objv[i] is consumed without an
+ * explicit i < end check -- confirm callers always
+ * supply the size argument.
+ */
+ mflag |= DB_MULTIPLE;
+ result = Tcl_GetIntFromObj(interp, objv[i], &bufsize);
+ if (result != TCL_OK)
+ goto out;
+ i++;
+ break;
+#endif
+ case DBGET_BOTH:
+ /*
+ * Change 'end' and make sure we aren't already past
+ * the new end.
+ */
+ if (i > objc - 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-get_both key data?");
+ result = TCL_ERROR;
+ break;
+ }
+ end = objc - 2;
+ FLAG_CHECK(flag);
+ flag = DB_GET_BOTH;
+ break;
+ case DBGET_TXN:
+ if (i >= end) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Get: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBGET_GLOB:
+ useglob = 1;
+ end = objc - 1;
+ break;
+ case DBGET_CONSUME:
+ FLAG_CHECK(flag);
+ flag = DB_CONSUME;
+ break;
+ case DBGET_CONSUME_WAIT:
+ FLAG_CHECK(flag);
+ flag = DB_CONSUME_WAIT;
+ break;
+ case DBGET_RECNO:
+ end = objc - 1;
+ userecno = 1;
+ /* For non-recno access methods this maps to SET_RECNO. */
+ if (type != DB_RECNO && type != DB_QUEUE) {
+ FLAG_CHECK(flag);
+ flag = DB_SET_RECNO;
+ }
+ break;
+ case DBGET_RMW:
+ rmw |= DB_RMW;
+ break;
+ case DBGET_PART:
+ end = objc - 1;
+ if (i == end) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ save.flags = DB_DBT_PARTIAL;
+ result = _GetUInt32(interp, elemv[0], &save.doff);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, elemv[1], &save.dlen);
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+ * for result. (See the check for save.doff, a few
+ * lines above and copy that.)
+ */
+ break;
+ case DBGET_ENDARG:
+ endarg = 1;
+ break;
+ } /* switch */
+ if (result != TCL_OK)
+ break;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ if (type == DB_RECNO || type == DB_QUEUE)
+ userecno = 1;
+
+ /*
+ * Check args we have left versus the flags we were given.
+ * We might have 0, 1 or 2 left. If we have 0, it must
+ * be DB_CONSUME*, if 2, then DB_GET_BOTH, all others should
+ * be 1.
+ */
+ if (((flag == DB_CONSUME || flag == DB_CONSUME_WAIT) && i != objc) ||
+ (flag == DB_GET_BOTH && i != objc - 2)) {
+ Tcl_SetResult(interp,
+ "Wrong number of key/data given based on flags specified\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ } else if (flag == 0 && i != objc - 1) {
+ Tcl_SetResult(interp,
+ "Wrong number of key/data given\n", TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+
+ /*
+ * XXX
+ * We technically shouldn't be looking inside the dbp like this,
+ * but this is the only way to figure out whether the primary
+ * key should also be a recno.
+ */
+ if (ispget) {
+ if (dbp->s_primary != NULL &&
+ (dbp->s_primary->type == DB_RECNO ||
+ dbp->s_primary->type == DB_QUEUE))
+ useprecno = 1;
+ }
+
+ /*
+ * Check for illegal combos of options.
+ */
+ if (useglob && (userecno || flag == DB_SET_RECNO ||
+ type == DB_RECNO || type == DB_QUEUE)) {
+ Tcl_SetResult(interp,
+ "Cannot use -glob and record numbers.\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (useglob && flag == DB_GET_BOTH) {
+ Tcl_SetResult(interp,
+ "Only one of -glob or -get_both can be specified.\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+
+ if (useglob)
+ pattern = Tcl_GetStringFromObj(objv[objc - 1], NULL);
+
+ /*
+ * This is the list we return
+ */
+ retlist = Tcl_NewListObj(0, NULL);
+ save.flags |= DB_DBT_MALLOC;
+
+ /*
+ * isdup is used to know if we support duplicates. If not, we
+ * can just do a db->get call and avoid using cursors.
+ * XXX
+ * When there is a db->get_flags method, it should be used.
+ * isdup = dbp->get_flags(dbp) & DB_DUP;
+ * For now we illegally peek.
+ * XXX
+ */
+ isdup = dbp->flags & DB_AM_DUP;
+
+ /*
+ * If the database doesn't support duplicates or we're performing
+ * ops that don't require returning multiple items, use DB->get
+ * instead of a cursor operation.
+ */
+ if (pattern == NULL && (isdup == 0 || mflag != 0 ||
+ flag == DB_SET_RECNO || flag == DB_GET_BOTH ||
+ flag == DB_CONSUME || flag == DB_CONSUME_WAIT)) {
+ if (flag == DB_GET_BOTH) {
+ if (userecno) {
+ result = _GetUInt32(interp,
+ objv[(objc - 2)], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-2],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ goto out;
+ }
+ key.data = ktmp;
+ }
+ /*
+ * Already checked args above. Fill in key and save.
+ * Save is used in the dbp->get call below to fill in
+ * data.
+ *
+ * If the "data" here is really a primary key--that
+ * is, if we're in a pget--and that primary key
+ * is a recno, treat it appropriately as an int.
+ */
+ if (useprecno) {
+ result = _GetUInt32(interp,
+ objv[objc - 1], &precno);
+ if (result == TCL_OK) {
+ save.data = &precno;
+ save.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &dtmp, &save.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ goto out;
+ }
+ save.data = dtmp;
+ }
+ } else if (flag != DB_CONSUME && flag != DB_CONSUME_WAIT) {
+ if (userecno) {
+ result = _GetUInt32(
+ interp, objv[(objc - 1)], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ goto out;
+ }
+ key.data = ktmp;
+ }
+ if (mflag & DB_MULTIPLE) {
+ /*
+ * Bulk get: caller-supplied buffer, so swap
+ * DB_DBT_MALLOC for DB_DBT_USERMEM.
+ * NOTE(review): on __os_malloc failure we
+ * goto out without setting result to
+ * TCL_ERROR -- looks like a latent bug;
+ * confirm against later BDB releases.
+ */
+ if ((ret = __os_malloc(dbp->dbenv,
+ bufsize, &save.data)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ goto out;
+ }
+ save.ulen = bufsize;
+ F_CLR(&save, DB_DBT_MALLOC);
+ F_SET(&save, DB_DBT_USERMEM);
+ }
+ }
+
+ data = save;
+
+ if (ispget) {
+ if (flag == DB_GET_BOTH) {
+ pkey.data = save.data;
+ pkey.size = save.size;
+ data.data = NULL;
+ data.size = 0;
+ }
+ F_SET(&pkey, DB_DBT_MALLOC);
+ _debug_check();
+ ret = dbp->pget(dbp,
+ txn, &key, &pkey, &data, flag | rmw);
+ } else {
+ _debug_check();
+ ret = dbp->get(dbp,
+ txn, &key, &data, flag | rmw | mflag);
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBGET(ret),
+ "db get");
+ if (ret == 0) {
+ /*
+ * Success. Return a list of the form {name value}
+ * If it was a recno in key.data, we need to convert
+ * into a string/object representation of that recno.
+ */
+ if (mflag & DB_MULTIPLE)
+ result = _SetMultiList(interp,
+ retlist, &key, &data, type, flag);
+ else if (type == DB_RECNO || type == DB_QUEUE)
+ if (ispget)
+ result = _Set3DBTList(interp,
+ retlist, &key, 1, &pkey,
+ useprecno, &data);
+ else
+ result = _SetListRecnoElem(interp,
+ retlist, *(db_recno_t *)key.data,
+ data.data, data.size);
+ else {
+ if (ispget)
+ result = _Set3DBTList(interp,
+ retlist, &key, 0, &pkey,
+ useprecno, &data);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size,
+ data.data, data.size);
+ }
+ }
+ /*
+ * Free space from DBT.
+ *
+ * If we set DB_DBT_MALLOC, we need to free the space if
+ * and only if we succeeded (and thus if DB allocated
+ * anything). If DB_DBT_MALLOC is not set, this is a bulk
+ * get buffer, and needs to be freed no matter what.
+ */
+ if (F_ISSET(&data, DB_DBT_MALLOC) && ret == 0)
+ __os_ufree(dbp->dbenv, data.data);
+ else if (!F_ISSET(&data, DB_DBT_MALLOC))
+ __os_free(dbp->dbenv, data.data);
+ if (ispget && ret == 0)
+ __os_ufree(dbp->dbenv, pkey.data);
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+ goto out;
+ }
+
+ /* Cursor path: duplicates and/or -glob prefix scans. */
+ if (userecno) {
+ result = _GetUInt32(interp, objv[(objc - 1)], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-1], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ ret = dbp->cursor(dbp, txn, &dbc, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db cursor");
+ if (result == TCL_ERROR)
+ goto out;
+
+ /*
+ * At this point, we have a cursor, if we have a pattern,
+ * we go to the nearest one and step forward until we don't
+ * have any more that match the pattern prefix. If we have
+ * an exact key, we go to that key position, and step through
+ * all the duplicates. In either case we build up a list of
+ * the form {{key data} {key data}...} along the way.
+ */
+ memset(&data, 0, sizeof(data));
+ /*
+ * Restore any "partial" info we have saved.
+ */
+ data = save;
+ if (pattern) {
+ /*
+ * Note, prefix is returned in new space. Must free it.
+ */
+ ret = _GetGlobPrefix(pattern, &prefix);
+ if (ret) {
+ result = TCL_ERROR;
+ Tcl_SetResult(interp,
+ "Unable to allocate pattern space", TCL_STATIC);
+ goto out1;
+ }
+ key.data = prefix;
+ key.size = strlen(prefix);
+ /*
+ * If they give us an empty pattern string
+ * (i.e. -glob *), go through entire DB.
+ */
+ if (strlen(prefix) == 0)
+ cflag = DB_FIRST;
+ else
+ cflag = DB_SET_RANGE;
+ } else
+ cflag = DB_SET;
+ if (ispget) {
+ _debug_check();
+ F_SET(&pkey, DB_DBT_MALLOC);
+ ret = dbc->c_pget(dbc, &key, &pkey, &data, cflag | rmw);
+ } else {
+ _debug_check();
+ ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret),
+ "db get (cursor)");
+ if (result == TCL_ERROR)
+ goto out1;
+ if (ret == 0 && pattern &&
+ memcmp(key.data, prefix, strlen(prefix)) != 0) {
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ free(data.data);
+ goto out1;
+ }
+ /* Glob scans walk all keys; exact-key scans walk duplicates only. */
+ if (pattern)
+ cflag = DB_NEXT;
+ else
+ cflag = DB_NEXT_DUP;
+
+ while (ret == 0 && result == TCL_OK) {
+ /*
+ * Build up our {name value} sublist
+ */
+ if (ispget)
+ result = _Set3DBTList(interp, retlist, &key, 0,
+ &pkey, useprecno, &data);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size, data.data, data.size);
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ if (ispget)
+ free(pkey.data);
+ free(data.data);
+ if (result != TCL_OK)
+ break;
+ /*
+ * Append {name value} to return list
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&pkey, 0, sizeof(pkey));
+ memset(&data, 0, sizeof(data));
+ /*
+ * Restore any "partial" info we have saved.
+ */
+ data = save;
+ if (ispget) {
+ F_SET(&pkey, DB_DBT_MALLOC);
+ ret = dbc->c_pget(dbc, &key, &pkey, &data, cflag | rmw);
+ } else
+ ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
+ if (ret == 0 && pattern &&
+ memcmp(key.data, prefix, strlen(prefix)) != 0) {
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ free(data.data);
+ break;
+ }
+ }
+out1:
+ dbc->c_close(dbc);
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ /*
+ * _GetGlobPrefix(), the function which allocates prefix, works
+ * by copying and condensing another string. Thus prefix may
+ * have multiple nuls at the end, so we free using __os_free().
+ */
+ if (prefix != NULL)
+ __os_free(dbp->dbenv, prefix);
+ if (freedata)
+ (void)__os_free(dbp->dbenv, dtmp);
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ return (result);
+}
+
+/*
+ * tcl_db_delete --
+ *	Implement the Tcl "db del" command.  Accepts -auto_commit,
+ *	-txn, and either one or more exact keys (DB->del per key) or a
+ *	-glob pattern, in which case a cursor walks all keys matching
+ *	the pattern prefix and deletes each one.
+ */
+static int
+tcl_DbDelete(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbdelopts[] = {
+ "-auto_commit",
+ "-glob",
+ "-txn",
+ NULL
+ };
+ enum dbdelopts {
+ DBDEL_AUTO_COMMIT,
+ DBDEL_GLOB,
+ DBDEL_TXN
+ };
+ DBC *dbc;
+ DBT key, data;
+ DBTYPE type;
+ DB_TXN *txn;
+ void *ktmp;
+ db_recno_t recno;
+ int freekey, i, optindex, result, ret;
+ u_int32_t flag;
+ char *arg, *pattern, *prefix, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ freekey = 0;
+ flag = 0;
+ pattern = prefix = NULL;
+ txn = NULL;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ /*
+ * The first arg must be -auto_commit, -glob, -txn or a list of keys.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbdelopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * If we don't have a -auto_commit, -glob or -txn,
+ * then the remaining args must be exact keys.
+ * Reset the result so we don't get an errant error
+ * message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbdelopts)optindex) {
+ case DBDEL_TXN:
+ if (i == objc) {
+ /*
+ * Someone could conceivably have a key of
+ * the same name. So just break and use it.
+ */
+ i--;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Delete: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case DBDEL_AUTO_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case DBDEL_GLOB:
+ /*
+ * Get the pattern. Get the prefix and use cursors to
+ * get all the data items.
+ */
+ if (i == objc) {
+ /*
+ * Someone could conceivably have a key of
+ * the same name. So just break and use it.
+ */
+ i--;
+ break;
+ }
+ pattern = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+
+ if (result != TCL_OK)
+ goto out;
+ /*
+ * XXX
+ * For consistency with get, we have decided for the moment, to
+ * allow -glob, or one key, not many. The code was originally
+ * written to take many keys and we'll leave it that way, because
+ * tcl_DbGet may one day accept many disjoint keys to get, rather
+ * than one, and at that time we'd make delete be consistent. In
+ * any case, the code is already here and there is no need to remove,
+ * just check that we only have one arg left.
+ *
+ * If we have a pattern AND more keys to process, there is an error.
+ * Either we have some number of exact keys, or we have a pattern.
+ *
+ * If we have a pattern and an auto commit flag, there is an error.
+ */
+ if (pattern == NULL) {
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(
+ interp, 2, objv, "?args? -glob pattern | key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ } else {
+ if (i != objc) {
+ Tcl_WrongNumArgs(
+ interp, 2, objv, "?args? -glob pattern | key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (flag & DB_AUTO_COMMIT) {
+ Tcl_SetResult(interp,
+ "Cannot use -auto_commit and patterns.\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ }
+
+ /*
+ * If we have remaining args, they are all exact keys. Call
+ * DB->del on each of those keys.
+ *
+ * If it is a RECNO database, the key is a record number and must be
+ * setup up to contain a db_recno_t. Otherwise the key is a "string".
+ */
+ (void)dbp->get_type(dbp, &type);
+ ret = 0;
+ while (i < objc && ret == 0) {
+ memset(&key, 0, sizeof(key));
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = _GetUInt32(interp, objv[i++], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ ret = _CopyObjBytes(interp, objv[i++], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBDEL(ret), "db del");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ _debug_check();
+ ret = dbp->del(dbp, txn, &key, flag);
+ /*
+ * If we have any error, set up return result and stop
+ * processing keys.
+ */
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ if (ret != 0)
+ break;
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBDEL(ret), "db del");
+
+ /*
+ * At this point we've either finished or, if we have a pattern,
+ * we go to the nearest one and step forward until we don't
+ * have any more that match the pattern prefix.
+ */
+ if (pattern) {
+ ret = dbp->cursor(dbp, txn, &dbc, 0);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db cursor");
+ goto out;
+ }
+ /*
+ * Note, prefix is returned in new space. Must free it.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ ret = _GetGlobPrefix(pattern, &prefix);
+ if (ret) {
+ result = TCL_ERROR;
+ Tcl_SetResult(interp,
+ "Unable to allocate pattern space", TCL_STATIC);
+ goto out;
+ }
+ key.data = prefix;
+ key.size = strlen(prefix);
+ /* Empty prefix (e.g. -glob *) means scan the whole DB. */
+ if (strlen(prefix) == 0)
+ flag = DB_FIRST;
+ else
+ flag = DB_SET_RANGE;
+ ret = dbc->c_get(dbc, &key, &data, flag);
+ while (ret == 0 &&
+ memcmp(key.data, prefix, strlen(prefix)) == 0) {
+ /*
+ * Each time through here the cursor is pointing
+ * at the current valid item. Delete it and
+ * move ahead.
+ */
+ _debug_check();
+ ret = dbc->c_del(dbc, 0);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCDEL(ret), "db c_del");
+ break;
+ }
+ /*
+ * Deleted the current, now move to the next item
+ * in the list, check if it matches the prefix pattern.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ ret = dbc->c_get(dbc, &key, &data, DB_NEXT);
+ }
+ /* Running off the end of the matches is success, not error. */
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ /*
+ * _GetGlobPrefix(), the function which allocates prefix, works
+ * by copying and condensing another string. Thus prefix may
+ * have multiple nuls at the end, so we free using __os_free().
+ */
+ __os_free(dbp->dbenv, prefix);
+ dbc->c_close(dbc);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db del");
+ }
+out:
+ return (result);
+}
+
+/*
+ * tcl_db_cursor --
+ *	Implement the Tcl "db cursor" command.  Parses -txn (and, under
+ *	CONFIG_TEST, -dirty/-update), then opens a cursor via
+ *	DB->cursor and returns it through dbcp.
+ */
+static int
+tcl_DbCursor(interp, objc, objv, dbp, dbcp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+ DBC **dbcp; /* Return cursor pointer */
+{
+ static char *dbcuropts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-update",
+#endif
+ "-txn",
+ NULL
+ };
+ enum dbcuropts {
+#if CONFIG_TEST
+ DBCUR_DIRTY,
+ DBCUR_UPDATE,
+#endif
+ DBCUR_TXN
+ };
+ DB_TXN *txn;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ txn = NULL;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcuropts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto out;
+ }
+ i++;
+ switch ((enum dbcuropts)optindex) {
+#if CONFIG_TEST
+ case DBCUR_DIRTY:
+ flag |= DB_DIRTY_READ;
+ break;
+ case DBCUR_UPDATE:
+ flag |= DB_WRITECURSOR;
+ break;
+#endif
+ case DBCUR_TXN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Cursor: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ _debug_check();
+ ret = dbp->cursor(dbp, txn, dbcp, flag);
+ if (ret != 0)
+ result = _ErrorSetup(interp, ret, "db cursor");
+out:
+ return (result);
+}
+
+/*
+ * tcl_DbAssociate --
+ *	Call DB->associate().  Parses -auto_commit/-create/-txn; the
+ *	last argument is the secondary DB handle name, optionally
+ *	preceded by a Tcl callback proc name.  The callback object is
+ *	stored in the secondary's DBTCL_INFO and invoked through
+ *	tcl_second_call to derive secondary keys.
+ */
+static int
+tcl_DbAssociate(interp, objc, objv, dbp)
+ Tcl_Interp *interp;
+ int objc;
+ Tcl_Obj *CONST objv[];
+ DB *dbp;
+{
+ static char *dbaopts[] = {
+ "-auto_commit",
+ "-create",
+ "-txn",
+ NULL
+ };
+ enum dbaopts {
+ DBA_AUTO_COMMIT,
+ DBA_CREATE,
+ DBA_TXN
+ };
+ DB *sdbp;
+ DB_TXN *txn;
+ DBTCL_INFO *sdbip;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+ u_int32_t flag;
+
+ txn = NULL;
+ result = TCL_OK;
+ flag = 0;
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "[callback] secondary");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbaopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ /* Non-option word: stop parsing, it's the callback/sdb. */
+ result = IS_HELP(objv[i]);
+ if (result == TCL_OK)
+ return (result);
+ result = TCL_OK;
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbaopts)optindex) {
+ case DBA_AUTO_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case DBA_CREATE:
+ flag |= DB_CREATE;
+ break;
+ case DBA_TXN:
+ if (i > (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Associate: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ }
+ if (result != TCL_OK)
+ return (result);
+
+ /*
+ * Better be 1 or 2 args left. The last arg must be the sdb
+ * handle. If 2 args then objc-2 is the callback proc, else
+ * we have a NULL callback.
+ */
+ /* Get the secondary DB handle. */
+ arg = Tcl_GetStringFromObj(objv[objc - 1], NULL);
+ sdbp = NAME_TO_DB(arg);
+ if (sdbp == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Associate: Invalid database handle: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * The callback is simply a Tcl object containing the name
+ * of the callback proc, which is the second-to-last argument.
+ *
+ * Note that the callback needs to go in the *secondary* DB handle's
+ * info struct; we may have multiple secondaries with different
+ * callbacks.
+ */
+ sdbip = (DBTCL_INFO *)sdbp->api_internal;
+ if (i != objc - 1) {
+ /*
+ * We have 2 args, get the callback.
+ */
+ sdbip->i_second_call = objv[objc - 2];
+ /* Hold a reference so the proc name object outlives this call. */
+ Tcl_IncrRefCount(sdbip->i_second_call);
+
+ /* Now call associate. */
+ _debug_check();
+ ret = dbp->associate(dbp, txn, sdbp, tcl_second_call, flag);
+ } else {
+ /*
+ * We have a NULL callback.
+ */
+ sdbip->i_second_call = NULL;
+ ret = dbp->associate(dbp, txn, sdbp, NULL, flag);
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "associate");
+
+ return (result);
+}
+
+/*
+ * tcl_second_call --
+ *	Callback function for secondary indices. Get the callback
+ *	out of ip->i_second_call and call it.
+ *
+ *	Invokes the stored Tcl proc with the primary key and data as
+ *	byte arrays; the proc's result becomes the secondary key.
+ *	Returns 0 on success, EINVAL if the Tcl proc fails, or an
+ *	allocation error from __os_umalloc.
+ */
+static int
+tcl_second_call(dbp, pkey, data, skey)
+ DB *dbp;
+ const DBT *pkey, *data;
+ DBT *skey;
+{
+ DBTCL_INFO *ip;
+ Tcl_Interp *interp;
+ Tcl_Obj *pobj, *dobj, *objv[3];
+ int len, result, ret;
+ void *retbuf, *databuf;
+
+ ip = (DBTCL_INFO *)dbp->api_internal;
+ interp = ip->i_interp;
+ objv[0] = ip->i_second_call;
+
+ /*
+ * Create two ByteArray objects, with the contents of the pkey
+ * and data DBTs that are our inputs.
+ */
+ pobj = Tcl_NewByteArrayObj(pkey->data, pkey->size);
+ Tcl_IncrRefCount(pobj);
+ dobj = Tcl_NewByteArrayObj(data->data, data->size);
+ Tcl_IncrRefCount(dobj);
+
+ objv[1] = pobj;
+ objv[2] = dobj;
+
+ /* Call: <proc> <pkey-bytes> <data-bytes> */
+ result = Tcl_EvalObjv(interp, 3, objv, 0);
+
+ Tcl_DecrRefCount(pobj);
+ Tcl_DecrRefCount(dobj);
+
+ if (result != TCL_OK) {
+ __db_err(dbp->dbenv,
+ "Tcl callback function failed with code %d", result);
+ return (EINVAL);
+ }
+
+ retbuf =
+ Tcl_GetByteArrayFromObj(Tcl_GetObjResult(interp), &len);
+
+ /*
+ * retbuf is owned by Tcl; copy it into malloc'ed memory.
+ * We need to use __os_umalloc rather than ufree because this will
+ * be freed by DB using __os_ufree--the DB_DBT_APPMALLOC flag
+ * tells DB to free application-allocated memory.
+ */
+ if ((ret = __os_umalloc(dbp->dbenv, len, &databuf)) != 0)
+ return (ret);
+ memcpy(databuf, retbuf, len);
+
+ skey->data = databuf;
+ skey->size = len;
+ F_SET(skey, DB_DBT_APPMALLOC);
+
+ return (0);
+}
+
+/*
+ * tcl_DbJoin --
+ *	Implements the "db join" command: join a list of open cursors.
+ */
+static int
+tcl_DbJoin(interp, objc, objv, dbp, dbcp)
+	Tcl_Interp *interp;		/* Interpreter */
+	int objc;			/* How many arguments? */
+	Tcl_Obj *CONST objv[];		/* The argument objects */
+	DB *dbp;			/* Database pointer */
+	DBC **dbcp;			/* Cursor pointer */
+{
+	static char *dbjopts[] = {
+		"-nosort",
+		NULL
+	};
+	enum dbjopts {
+		DBJ_NOSORT
+	};
+	DBC **listp;
+	u_int32_t flag;
+	int adj, i, j, optindex, size, result, ret;
+	char *arg, msg[MSG_SIZE];
+
+	result = TCL_OK;
+	flag = 0;
+	if (objc < 3) {
+		Tcl_WrongNumArgs(interp, 2, objv, "curs1 curs2 ...");
+		return (TCL_ERROR);
+	}
+
+	/*
+	 * Parse leading options; "adj" tracks where the cursor-handle
+	 * arguments begin.  The first non-option arg ends option parsing.
+	 */
+	i = 2;
+	adj = i;
+	while (i < objc) {
+		if (Tcl_GetIndexFromObj(interp, objv[i], dbjopts, "option",
+		    TCL_EXACT, &optindex) != TCL_OK) {
+			result = IS_HELP(objv[i]);
+			if (result == TCL_OK)
+				return (result);
+			result = TCL_OK;
+			Tcl_ResetResult(interp);
+			break;
+		}
+		i++;
+		switch ((enum dbjopts)optindex) {
+		case DBJ_NOSORT:
+			flag |= DB_JOIN_NOSORT;
+			adj++;
+			break;
+		}
+	}
+	if (result != TCL_OK)
+		return (result);
+	/*
+	 * Allocate one more for NULL ptr at end of list.
+	 */
+	size = sizeof(DBC *) * ((objc - adj) + 1);
+	ret = __os_malloc(dbp->dbenv, size, &listp);
+	if (ret != 0) {
+		Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+		return (TCL_ERROR);
+	}
+
+	/* Translate each remaining arg to an open cursor handle. */
+	memset(listp, 0, size);
+	for (j = 0, i = adj; i < objc; i++, j++) {
+		arg = Tcl_GetStringFromObj(objv[i], NULL);
+		listp[j] = NAME_TO_DBC(arg);
+		if (listp[j] == NULL) {
+			snprintf(msg, MSG_SIZE,
+			    "Join: Invalid cursor: %s\n", arg);
+			Tcl_SetResult(interp, msg, TCL_VOLATILE);
+			result = TCL_ERROR;
+			goto out;
+		}
+	}
+	listp[j] = NULL;
+	_debug_check();
+	/* The join cursor is returned to the caller through dbcp. */
+	ret = dbp->join(dbp, listp, dbcp, flag);
+	result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db join");
+
+out:
+	__os_free(dbp->dbenv, listp);
+	return (result);
+}
+
+/*
+ * tcl_DbGetjoin --
+ *	Implements the "db get_join" command: open a cursor per {db key}
+ *	pair, join them, and return the matching {key value} list.
+ */
+static int
+tcl_DbGetjoin(interp, objc, objv, dbp)
+	Tcl_Interp *interp;		/* Interpreter */
+	int objc;			/* How many arguments? */
+	Tcl_Obj *CONST objv[];		/* The argument objects */
+	DB *dbp;			/* Database pointer */
+{
+	static char *dbgetjopts[] = {
+#if CONFIG_TEST
+		"-nosort",
+#endif
+		"-txn",
+		NULL
+	};
+	enum dbgetjopts {
+#if CONFIG_TEST
+		DBGETJ_NOSORT,
+#endif
+		DBGETJ_TXN
+	};
+	DB_TXN *txn;
+	DB *elemdbp;
+	DBC **listp;
+	DBC *dbc;
+	DBT key, data;
+	Tcl_Obj **elemv, *retlist;
+	void *ktmp;
+	u_int32_t flag;
+	int adj, elemc, freekey, i, j, optindex, result, ret, size;
+	char *arg, msg[MSG_SIZE];
+
+	result = TCL_OK;
+	flag = 0;
+	freekey = 0;
+	if (objc < 3) {
+		Tcl_WrongNumArgs(interp, 2, objv, "{db1 key1} {db2 key2} ...");
+		return (TCL_ERROR);
+	}
+
+	/*
+	 * Parse leading options; "adj" tracks where the {db key}
+	 * sublists begin.  A non-option arg ends option parsing.
+	 */
+	txn = NULL;
+	i = 2;
+	adj = i;
+	while (i < objc) {
+		if (Tcl_GetIndexFromObj(interp, objv[i], dbgetjopts, "option",
+		    TCL_EXACT, &optindex) != TCL_OK) {
+			result = IS_HELP(objv[i]);
+			if (result == TCL_OK)
+				return (result);
+			result = TCL_OK;
+			Tcl_ResetResult(interp);
+			break;
+		}
+		i++;
+		switch ((enum dbgetjopts)optindex) {
+#if CONFIG_TEST
+		case DBGETJ_NOSORT:
+			flag |= DB_JOIN_NOSORT;
+			adj++;
+			break;
+#endif
+		case DBGETJ_TXN:
+			if (i == objc) {
+				Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+				result = TCL_ERROR;
+				break;
+			}
+			arg = Tcl_GetStringFromObj(objv[i++], NULL);
+			txn = NAME_TO_TXN(arg);
+			adj += 2;
+			if (txn == NULL) {
+				snprintf(msg, MSG_SIZE,
+				    "GetJoin: Invalid txn: %s\n", arg);
+				Tcl_SetResult(interp, msg, TCL_VOLATILE);
+				result = TCL_ERROR;
+			}
+			break;
+		}
+	}
+	if (result != TCL_OK)
+		return (result);
+	/* One extra slot for the NULL terminator DB->join requires. */
+	size = sizeof(DBC *) * ((objc - adj) + 1);
+	ret = __os_malloc(NULL, size, &listp);
+	if (ret != 0) {
+		Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+		return (TCL_ERROR);
+	}
+
+	memset(listp, 0, size);
+	for (j = 0, i = adj; i < objc; i++, j++) {
+		/*
+		 * Get each sublist as {db key}
+		 *
+		 * NOTE(review): the Tcl_ListObjGetElements result is not
+		 * checked before elemc is used -- confirm elemc is defined
+		 * on failure, or check "result" first.
+		 */
+		result = Tcl_ListObjGetElements(interp, objv[i],
+		    &elemc, &elemv);
+		if (elemc != 2) {
+			Tcl_SetResult(interp, "Lists must be {db key}",
+			    TCL_STATIC);
+			result = TCL_ERROR;
+			goto out;
+		}
+		/*
+		 * Get a pointer to that open db. Then, open a cursor in
+		 * that db, and go to the "key" place.
+		 */
+		elemdbp = NAME_TO_DB(Tcl_GetStringFromObj(elemv[0], NULL));
+		if (elemdbp == NULL) {
+			snprintf(msg, MSG_SIZE, "Get_join: Invalid db: %s\n",
+			    Tcl_GetStringFromObj(elemv[0], NULL));
+			Tcl_SetResult(interp, msg, TCL_VOLATILE);
+			result = TCL_ERROR;
+			goto out;
+		}
+		ret = elemdbp->cursor(elemdbp, txn, &listp[j], 0);
+		if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+		    "db cursor")) == TCL_ERROR)
+			goto out;
+		memset(&key, 0, sizeof(key));
+		memset(&data, 0, sizeof(data));
+		/*
+		 * NOTE(review): ktmp/freekey are reused each iteration but
+		 * only the final copy is freed at "out" -- earlier copies
+		 * appear to leak when several sublists need a copy; confirm.
+		 */
+		ret = _CopyObjBytes(interp, elemv[elemc-1], &ktmp,
+		    &key.size, &freekey);
+		if (ret != 0) {
+			result = _ReturnSetup(interp, ret,
+			    DB_RETOK_STD(ret), "db join");
+			goto out;
+		}
+		key.data = ktmp;
+		ret = (listp[j])->c_get(listp[j], &key, &data, DB_SET);
+		if ((result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret),
+		    "db cget")) == TCL_ERROR)
+			goto out;
+	}
+	listp[j] = NULL;
+	_debug_check();
+	ret = dbp->join(dbp, listp, &dbc, flag);
+	result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db join");
+	if (result == TCL_ERROR)
+		goto out;
+
+	/* Walk the join cursor, collecting {key value} pairs. */
+	retlist = Tcl_NewListObj(0, NULL);
+	while (ret == 0 && result == TCL_OK) {
+		memset(&key, 0, sizeof(key));
+		memset(&data, 0, sizeof(data));
+		key.flags |= DB_DBT_MALLOC;
+		data.flags |= DB_DBT_MALLOC;
+		ret = dbc->c_get(dbc, &key, &data, 0);
+		/*
+		 * Build up our {name value} sublist
+		 */
+		if (ret == 0) {
+			result = _SetListElem(interp, retlist,
+			    key.data, key.size,
+			    data.data, data.size);
+			free(key.data);
+			free(data.data);
+		}
+	}
+	dbc->c_close(dbc);
+	if (result == TCL_OK)
+		Tcl_SetObjResult(interp, retlist);
+out:
+	if (freekey)
+		(void)__os_free(dbp->dbenv, ktmp);
+	/*
+	 * Close the per-database cursors we opened.
+	 * NOTE(review): this loop stops before index 0, so listp[0] is
+	 * never closed -- looks like a cursor leak; confirm.
+	 */
+	while (j) {
+		if (listp[j])
+			(listp[j])->c_close(listp[j]);
+		j--;
+	}
+	__os_free(dbp->dbenv, listp);
+	return (result);
+}
+
+/*
+ * tcl_DbCount --
+ */
+static int
+tcl_DbCount(interp, objc, objv, dbp)
+	Tcl_Interp *interp;		/* Interpreter */
+	int objc;			/* How many arguments? */
+	Tcl_Obj *CONST objv[];		/* The argument objects */
+	DB *dbp;			/* Database pointer */
+{
+	Tcl_Obj *res;
+	DBC *dbc;
+	DBT key, data;
+	void *ktmp;
+	db_recno_t count, recno;
+	int freekey, result, ret;
+
+	result = TCL_OK;
+	count = 0;
+	freekey = 0;
+	res = NULL;
+	if (objc != 3) {
+		Tcl_WrongNumArgs(interp, 2, objv, "key");
+		return (TCL_ERROR);
+	}
+
+	memset(&key, 0, sizeof(key));
+
+	/*
+	 * Get the count for our key.
+	 * We do this by getting a cursor for this DB. Moving the cursor
+	 * to the set location, and getting a count on that cursor.
+	 */
+	ret = 0;
+	memset(&key, 0, sizeof(key));
+	memset(&data, 0, sizeof(data));
+	/*
+	 * If it's a queue or recno database, we must make sure to
+	 * treat the key as a recno rather than as a byte string.
+	 */
+	if (dbp->type == DB_RECNO || dbp->type == DB_QUEUE) {
+		result = _GetUInt32(interp, objv[2], &recno);
+		if (result == TCL_OK) {
+			key.data = &recno;
+			key.size = sizeof(db_recno_t);
+		} else
+			return (result);
+	} else {
+		/* Byte-string key: copy the Tcl object's bytes. */
+		ret = _CopyObjBytes(interp, objv[2], &ktmp,
+		    &key.size, &freekey);
+		if (ret != 0) {
+			result = _ReturnSetup(interp, ret,
+			    DB_RETOK_STD(ret), "db count");
+			return (result);
+		}
+		key.data = ktmp;
+	}
+	_debug_check();
+	/*
+	 * NOTE(review): if this cursor open fails, the "out" path below
+	 * calls dbc->c_close on an uninitialized handle -- confirm, and
+	 * consider initializing dbc to NULL and guarding the close.
+	 */
+	ret = dbp->cursor(dbp, NULL, &dbc, 0);
+	if (ret != 0) {
+		result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+		    "db cursor");
+		goto out;
+	}
+	/*
+	 * Move our cursor to the key.
+	 */
+	ret = dbc->c_get(dbc, &key, &data, DB_SET);
+	if (ret == DB_NOTFOUND)
+		count = 0;
+	else {
+		ret = dbc->c_count(dbc, &count, 0);
+		if (ret != 0) {
+			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+			    "db c count");
+			goto out;
+		}
+	}
+	/* Return the duplicate count as a Tcl long. */
+	res = Tcl_NewLongObj((long)count);
+	Tcl_SetObjResult(interp, res);
+out:
+	if (freekey)
+		(void)__os_free(dbp->dbenv, ktmp);
+	(void)dbc->c_close(dbc);
+	return (result);
+}
+
+#if CONFIG_TEST
+/*
+ * tcl_DbKeyRange --
+ */
+static int
+tcl_DbKeyRange(interp, objc, objv, dbp)
+	Tcl_Interp *interp;		/* Interpreter */
+	int objc;			/* How many arguments? */
+	Tcl_Obj *CONST objv[];		/* The argument objects */
+	DB *dbp;			/* Database pointer */
+{
+	static char *dbkeyropts[] = {
+		"-txn",
+		NULL
+	};
+	enum dbkeyropts {
+		DBKEYR_TXN
+	};
+	DB_TXN *txn;
+	DB_KEY_RANGE range;
+	DBT key;
+	DBTYPE type;
+	Tcl_Obj *myobjv[3], *retlist;
+	void *ktmp;
+	db_recno_t recno;
+	u_int32_t flag;
+	int freekey, i, myobjc, optindex, result, ret;
+	char *arg, msg[MSG_SIZE];
+
+	result = TCL_OK;
+	flag = 0;
+	freekey = 0;
+	if (objc < 3) {
+		Tcl_WrongNumArgs(interp, 2, objv, "?-txn id? key");
+		return (TCL_ERROR);
+	}
+
+	/* Parse the optional -txn; a non-option arg ends parsing. */
+	txn = NULL;
+	i = 2;
+	while (i < objc) {
+		if (Tcl_GetIndexFromObj(interp, objv[i], dbkeyropts, "option",
+		    TCL_EXACT, &optindex) != TCL_OK) {
+			result = IS_HELP(objv[i]);
+			if (result == TCL_OK)
+				return (result);
+			result = TCL_OK;
+			Tcl_ResetResult(interp);
+			break;
+		}
+		i++;
+		switch ((enum dbkeyropts)optindex) {
+		case DBKEYR_TXN:
+			if (i == objc) {
+				Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+				result = TCL_ERROR;
+				break;
+			}
+			arg = Tcl_GetStringFromObj(objv[i++], NULL);
+			txn = NAME_TO_TXN(arg);
+			if (txn == NULL) {
+				snprintf(msg, MSG_SIZE,
+				    "KeyRange: Invalid txn: %s\n", arg);
+				Tcl_SetResult(interp, msg, TCL_VOLATILE);
+				result = TCL_ERROR;
+			}
+			break;
+		}
+	}
+	if (result != TCL_OK)
+		return (result);
+	(void)dbp->get_type(dbp, &type);
+	ret = 0;
+	/*
+	 * Make sure we have a key.
+	 */
+	if (i != (objc - 1)) {
+		Tcl_WrongNumArgs(interp, 2, objv, "?args? key");
+		result = TCL_ERROR;
+		goto out;
+	}
+	memset(&key, 0, sizeof(key));
+	/* Recno/queue keys are record numbers, not byte strings. */
+	if (type == DB_RECNO || type == DB_QUEUE) {
+		result = _GetUInt32(interp, objv[i], &recno);
+		if (result == TCL_OK) {
+			key.data = &recno;
+			key.size = sizeof(db_recno_t);
+		} else
+			return (result);
+	} else {
+		ret = _CopyObjBytes(interp, objv[i++], &ktmp,
+		    &key.size, &freekey);
+		if (ret != 0) {
+			result = _ReturnSetup(interp, ret,
+			    DB_RETOK_STD(ret), "db keyrange");
+			return (result);
+		}
+		key.data = ktmp;
+	}
+	_debug_check();
+	/* flag is always 0 here; DB->key_range takes no flags. */
+	ret = dbp->key_range(dbp, txn, &key, &range, flag);
+	result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db keyrange");
+	if (result == TCL_ERROR)
+		goto out;
+
+	/*
+	 * If we succeeded, set up return list: {less equal greater}.
+	 */
+	myobjc = 3;
+	myobjv[0] = Tcl_NewDoubleObj(range.less);
+	myobjv[1] = Tcl_NewDoubleObj(range.equal);
+	myobjv[2] = Tcl_NewDoubleObj(range.greater);
+	retlist = Tcl_NewListObj(myobjc, myobjv);
+	if (result == TCL_OK)
+		Tcl_SetObjResult(interp, retlist);
+out:
+	if (freekey)
+		(void)__os_free(dbp->dbenv, ktmp);
+	return (result);
+}
+#endif
+
+/*
+ * tcl_DbTruncate --
+ */
+static int
+tcl_DbTruncate(interp, objc, objv, dbp)
+	Tcl_Interp *interp;		/* Interpreter */
+	int objc;			/* How many arguments? */
+	Tcl_Obj *CONST objv[];		/* The argument objects */
+	DB *dbp;			/* Database pointer */
+{
+	static char *dbcuropts[] = {
+		"-auto_commit",
+		"-txn",
+		NULL
+	};
+	enum dbcuropts {
+		DBTRUNC_AUTO_COMMIT,
+		DBTRUNC_TXN
+	};
+	DB_TXN *txn;
+	Tcl_Obj *res;
+	u_int32_t count, flag;
+	int i, optindex, result, ret;
+	char *arg, msg[MSG_SIZE];
+
+	txn = NULL;
+	flag = 0;
+	result = TCL_OK;
+
+	/* Parse options; unlike the join commands, an unknown
+	 * option here is an error (IS_HELP result is returned). */
+	i = 2;
+	while (i < objc) {
+		if (Tcl_GetIndexFromObj(interp, objv[i], dbcuropts, "option",
+		    TCL_EXACT, &optindex) != TCL_OK) {
+			result = IS_HELP(objv[i]);
+			goto out;
+		}
+		i++;
+		switch ((enum dbcuropts)optindex) {
+		case DBTRUNC_AUTO_COMMIT:
+			flag |= DB_AUTO_COMMIT;
+			break;
+		case DBTRUNC_TXN:
+			if (i == objc) {
+				Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+				result = TCL_ERROR;
+				break;
+			}
+			arg = Tcl_GetStringFromObj(objv[i++], NULL);
+			txn = NAME_TO_TXN(arg);
+			if (txn == NULL) {
+				snprintf(msg, MSG_SIZE,
+				    "Truncate: Invalid txn: %s\n", arg);
+				Tcl_SetResult(interp, msg, TCL_VOLATILE);
+				result = TCL_ERROR;
+			}
+			break;
+		}
+		if (result != TCL_OK)
+			break;
+	}
+	if (result != TCL_OK)
+		goto out;
+
+	_debug_check();
+	ret = dbp->truncate(dbp, txn, &count, flag);
+	if (ret != 0)
+		result = _ErrorSetup(interp, ret, "db truncate");
+
+	else {
+		/* On success, return the number of discarded records. */
+		res = Tcl_NewLongObj((long)count);
+		Tcl_SetObjResult(interp, res);
+	}
+out:
+	return (result);
+}
diff --git a/storage/bdb/tcl/tcl_db_pkg.c b/storage/bdb/tcl/tcl_db_pkg.c
new file mode 100644
index 00000000000..ce37598dc1a
--- /dev/null
+++ b/storage/bdb/tcl/tcl_db_pkg.c
@@ -0,0 +1,3117 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_db_pkg.c,v 11.141 2002/08/14 20:15:47 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#if CONFIG_TEST
+#define DB_DBM_HSEARCH 1
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/tcl_db.h"
+
+/* XXX we must declare global data in just one place */
+DBTCL_GLOBAL __dbtcl_global;
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int berkdb_Cmd __P((ClientData, Tcl_Interp *, int,
+ Tcl_Obj * CONST*));
+static int bdb_EnvOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DBTCL_INFO *, DB_ENV **));
+static int bdb_DbOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DBTCL_INFO *, DB **));
+static int bdb_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_DbUpgrade __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_DbVerify __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_Version __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int bdb_Handles __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+
+static int tcl_bt_compare __P((DB *, const DBT *, const DBT *));
+static int tcl_compare_callback __P((DB *, const DBT *, const DBT *,
+ Tcl_Obj *, char *));
+static int tcl_dup_compare __P((DB *, const DBT *, const DBT *));
+static u_int32_t tcl_h_hash __P((DB *, const void *, u_int32_t));
+static int tcl_rep_send __P((DB_ENV *,
+ const DBT *, const DBT *, int, u_int32_t));
+
+#ifdef TEST_ALLOC
+static void * tcl_db_malloc __P((size_t));
+static void * tcl_db_realloc __P((void *, size_t));
+static void tcl_db_free __P((void *));
+#endif
+
+/*
+ * Db_tcl_Init --
+ *
+ * This is a package initialization procedure, which is called by Tcl when
+ * this package is to be added to an interpreter. The name is based on the
+ * name of the shared library, currently libdb_tcl-X.Y.so, which Tcl uses
+ * to determine the name of this function.
+ */
+int
+Db_tcl_Init(interp)
+	Tcl_Interp *interp;		/* Interpreter in which the package is
+					 * to be made available. */
+{
+	int code;
+
+	/* Register the package, then the single "berkdb" entry command. */
+	code = Tcl_PkgProvide(interp, "Db_tcl", "1.0");
+	if (code != TCL_OK)
+		return (code);
+
+	Tcl_CreateObjCommand(interp, "berkdb", (Tcl_ObjCmdProc *)berkdb_Cmd,
+	    (ClientData)0, NULL);
+	/*
+	 * Create shared global debugging variables; linking them lets
+	 * Tcl test scripts toggle the C-level debug flags directly.
+	 */
+	Tcl_LinkVar(interp, "__debug_on", (char *)&__debug_on, TCL_LINK_INT);
+	Tcl_LinkVar(interp, "__debug_print", (char *)&__debug_print,
+	    TCL_LINK_INT);
+	Tcl_LinkVar(interp, "__debug_stop", (char *)&__debug_stop,
+	    TCL_LINK_INT);
+	Tcl_LinkVar(interp, "__debug_test", (char *)&__debug_test,
+	    TCL_LINK_INT);
+	/* Initialize the global list of DBTCL_INFO handle records. */
+	LIST_INIT(&__db_infohead);
+	return (TCL_OK);
+}
+
+/*
+ * berkdb_cmd --
+ * Implements the "berkdb" command.
+ * This command supports three sub commands:
+ * berkdb version - Returns a list {major minor patch}
+ * berkdb env - Creates a new DB_ENV and returns a binding
+ * to a new command of the form dbenvX, where X is an
+ * integer starting at 0 (dbenv0, dbenv1, ...)
+ * berkdb open - Creates a new DB (optionally within
+ * the given environment. Returns a binding to a new
+ * command of the form dbX, where X is an integer
+ * starting at 0 (db0, db1, ...)
+ */
+static int
+berkdb_Cmd(notused, interp, objc, objv)
+	ClientData notused;		/* Not used. */
+	Tcl_Interp *interp;		/* Interpreter */
+	int objc;			/* How many arguments? */
+	Tcl_Obj *CONST objv[];		/* The argument objects */
+{
+	static char *berkdbcmds[] = {
+#if CONFIG_TEST
+		"dbverify",
+		"handles",
+		"upgrade",
+#endif
+		"dbremove",
+		"dbrename",
+		"env",
+		"envremove",
+		"open",
+		"version",
+#if CONFIG_TEST
+		/* All below are compatibility functions */
+		"hcreate",	"hsearch",	"hdestroy",
+		"dbminit",	"fetch",	"store",
+		"delete",	"firstkey",	"nextkey",
+		"ndbm_open",	"dbmclose",
+#endif
+		/* All below are convenience functions */
+		"rand",	"random_int",	"srand",
+		"debug_check",
+		NULL
+	};
+	/*
+	 * All commands enums below ending in X are compatibility
+	 */
+	enum berkdbcmds {
+#if CONFIG_TEST
+		BDB_DBVERIFY,
+		BDB_HANDLES,
+		BDB_UPGRADE,
+#endif
+		BDB_DBREMOVE,
+		BDB_DBRENAME,
+		BDB_ENV,
+		BDB_ENVREMOVE,
+		BDB_OPEN,
+		BDB_VERSION,
+#if CONFIG_TEST
+		BDB_HCREATEX,	BDB_HSEARCHX,	BDB_HDESTROYX,
+		BDB_DBMINITX,	BDB_FETCHX,	BDB_STOREX,
+		BDB_DELETEX,	BDB_FIRSTKEYX,	BDB_NEXTKEYX,
+		BDB_NDBMOPENX,	BDB_DBMCLOSEX,
+#endif
+		BDB_RANDX,	BDB_RAND_INTX,	BDB_SRANDX,
+		BDB_DBGCKX
+	};
+	/* Counters used to generate unique widget names (env0, db0, ...). */
+	static int env_id = 0;
+	static int db_id = 0;
+
+	DB *dbp;
+#if CONFIG_TEST
+	DBM *ndbmp;
+	static int ndbm_id = 0;
+#endif
+	DBTCL_INFO *ip;
+	DB_ENV *envp;
+	Tcl_Obj *res;
+	int cmdindex, result;
+	char newname[MSG_SIZE];
+
+	COMPQUIET(notused, NULL);
+
+	Tcl_ResetResult(interp);
+	memset(newname, 0, MSG_SIZE);
+	result = TCL_OK;
+	if (objc <= 1) {
+		Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+		return (TCL_ERROR);
+	}
+
+	/*
+	 * Get the command name index from the object based on the berkdbcmds
+	 * defined above.
+	 */
+	if (Tcl_GetIndexFromObj(interp,
+	    objv[1], berkdbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+		return (IS_HELP(objv[1]));
+	res = NULL;
+	switch ((enum berkdbcmds)cmdindex) {
+#if CONFIG_TEST
+	case BDB_DBVERIFY:
+		result = bdb_DbVerify(interp, objc, objv);
+		break;
+	case BDB_HANDLES:
+		result = bdb_Handles(interp, objc, objv);
+		break;
+	case BDB_UPGRADE:
+		result = bdb_DbUpgrade(interp, objc, objv);
+		break;
+#endif
+	case BDB_VERSION:
+		_debug_check();
+		result = bdb_Version(interp, objc, objv);
+		break;
+	case BDB_ENV:
+		/*
+		 * Open a DB_ENV and register a new Tcl command named
+		 * after it (env<N>) whose ClientData is the handle.
+		 */
+		snprintf(newname, sizeof(newname), "env%d", env_id);
+		ip = _NewInfo(interp, NULL, newname, I_ENV);
+		if (ip != NULL) {
+			result = bdb_EnvOpen(interp, objc, objv, ip, &envp);
+			if (result == TCL_OK && envp != NULL) {
+				env_id++;
+				Tcl_CreateObjCommand(interp, newname,
+				    (Tcl_ObjCmdProc *)env_Cmd,
+				    (ClientData)envp, NULL);
+				/* Use ip->i_name - newname is overwritten */
+				res =
+				    Tcl_NewStringObj(newname, strlen(newname));
+				_SetInfoData(ip, envp);
+			} else
+				_DeleteInfo(ip);
+		} else {
+			Tcl_SetResult(interp, "Could not set up info",
+			    TCL_STATIC);
+			result = TCL_ERROR;
+		}
+		break;
+	case BDB_DBREMOVE:
+		result = bdb_DbRemove(interp, objc, objv);
+		break;
+	case BDB_DBRENAME:
+		result = bdb_DbRename(interp, objc, objv);
+		break;
+	case BDB_ENVREMOVE:
+		result = tcl_EnvRemove(interp, objc, objv, NULL, NULL);
+		break;
+	case BDB_OPEN:
+		/* Same pattern as BDB_ENV, for a DB handle (db<N>). */
+		snprintf(newname, sizeof(newname), "db%d", db_id);
+		ip = _NewInfo(interp, NULL, newname, I_DB);
+		if (ip != NULL) {
+			result = bdb_DbOpen(interp, objc, objv, ip, &dbp);
+			if (result == TCL_OK && dbp != NULL) {
+				db_id++;
+				Tcl_CreateObjCommand(interp, newname,
+				    (Tcl_ObjCmdProc *)db_Cmd,
+				    (ClientData)dbp, NULL);
+				/* Use ip->i_name - newname is overwritten */
+				res =
+				    Tcl_NewStringObj(newname, strlen(newname));
+				_SetInfoData(ip, dbp);
+			} else
+				_DeleteInfo(ip);
+		} else {
+			Tcl_SetResult(interp, "Could not set up info",
+			    TCL_STATIC);
+			result = TCL_ERROR;
+		}
+		break;
+#if CONFIG_TEST
+	case BDB_HCREATEX:
+	case BDB_HSEARCHX:
+	case BDB_HDESTROYX:
+		result = bdb_HCommand(interp, objc, objv);
+		break;
+	case BDB_DBMINITX:
+	case BDB_DBMCLOSEX:
+	case BDB_FETCHX:
+	case BDB_STOREX:
+	case BDB_DELETEX:
+	case BDB_FIRSTKEYX:
+	case BDB_NEXTKEYX:
+		result = bdb_DbmCommand(interp, objc, objv, DBTCL_DBM, NULL);
+		break;
+	case BDB_NDBMOPENX:
+		/* ndbm compatibility: same widget pattern (ndbm<N>). */
+		snprintf(newname, sizeof(newname), "ndbm%d", ndbm_id);
+		ip = _NewInfo(interp, NULL, newname, I_NDBM);
+		if (ip != NULL) {
+			result = bdb_NdbmOpen(interp, objc, objv, &ndbmp);
+			if (result == TCL_OK) {
+				ndbm_id++;
+				Tcl_CreateObjCommand(interp, newname,
+				    (Tcl_ObjCmdProc *)ndbm_Cmd,
+				    (ClientData)ndbmp, NULL);
+				/* Use ip->i_name - newname is overwritten */
+				res =
+				    Tcl_NewStringObj(newname, strlen(newname));
+				_SetInfoData(ip, ndbmp);
+			} else
+				_DeleteInfo(ip);
+		} else {
+			Tcl_SetResult(interp, "Could not set up info",
+			    TCL_STATIC);
+			result = TCL_ERROR;
+		}
+		break;
+#endif
+	case BDB_RANDX:
+	case BDB_RAND_INTX:
+	case BDB_SRANDX:
+		result = bdb_RandCommand(interp, objc, objv);
+		break;
+	case BDB_DBGCKX:
+		_debug_check();
+		res = Tcl_NewIntObj(0);
+		break;
+	}
+	/*
+	 * For each different arg call different function to create
+	 * new commands (or if version, get/return it).
+	 */
+	if (result == TCL_OK && res != NULL)
+		Tcl_SetObjResult(interp, res);
+	return (result);
+}
+
+/*
+ * bdb_EnvOpen -
+ * Implements the environment open command.
+ * There are many, many options to the open command.
+ * Here is the general flow:
+ *
+ * 1. Call db_env_create to create the env handle.
+ * 2. Parse args tracking options.
+ * 3. Make any pre-open setup calls necessary.
+ * 4. Call DB_ENV->open to open the env.
+ * 5. Return env widget handle to user.
+ */
+static int
+bdb_EnvOpen(interp, objc, objv, ip, env)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBTCL_INFO *ip; /* Our internal info */
+ DB_ENV **env; /* Environment pointer */
+{
+ static char *envopen[] = {
+#if CONFIG_TEST
+ "-auto_commit",
+ "-cdb",
+ "-cdb_alldb",
+ "-client_timeout",
+ "-lock",
+ "-lock_conflict",
+ "-lock_detect",
+ "-lock_max",
+ "-lock_max_locks",
+ "-lock_max_lockers",
+ "-lock_max_objects",
+ "-lock_timeout",
+ "-log",
+ "-log_buffer",
+ "-log_max",
+ "-log_regionmax",
+ "-mmapsize",
+ "-nommap",
+ "-overwrite",
+ "-region_init",
+ "-rep_client",
+ "-rep_logsonly",
+ "-rep_master",
+ "-rep_transport",
+ "-server",
+ "-server_timeout",
+ "-txn_timeout",
+ "-txn_timestamp",
+ "-verbose",
+ "-wrnosync",
+#endif
+ "-cachesize",
+ "-create",
+ "-data_dir",
+ "-encryptaes",
+ "-encryptany",
+ "-errfile",
+ "-errpfx",
+ "-home",
+ "-log_dir",
+ "-mode",
+ "-private",
+ "-recover",
+ "-recover_fatal",
+ "-shm_key",
+ "-system_mem",
+ "-tmp_dir",
+ "-txn",
+ "-txn_max",
+ "-use_environ",
+ "-use_environ_root",
+ NULL
+ };
+ /*
+ * !!!
+ * These have to be in the same order as the above,
+ * which is close to but not quite alphabetical.
+ */
+ enum envopen {
+#if CONFIG_TEST
+ ENV_AUTO_COMMIT,
+ ENV_CDB,
+ ENV_CDB_ALLDB,
+ ENV_CLIENT_TO,
+ ENV_LOCK,
+ ENV_CONFLICT,
+ ENV_DETECT,
+ ENV_LOCK_MAX,
+ ENV_LOCK_MAX_LOCKS,
+ ENV_LOCK_MAX_LOCKERS,
+ ENV_LOCK_MAX_OBJECTS,
+ ENV_LOCK_TIMEOUT,
+ ENV_LOG,
+ ENV_LOG_BUFFER,
+ ENV_LOG_MAX,
+ ENV_LOG_REGIONMAX,
+ ENV_MMAPSIZE,
+ ENV_NOMMAP,
+ ENV_OVERWRITE,
+ ENV_REGION_INIT,
+ ENV_REP_CLIENT,
+ ENV_REP_LOGSONLY,
+ ENV_REP_MASTER,
+ ENV_REP_TRANSPORT,
+ ENV_SERVER,
+ ENV_SERVER_TO,
+ ENV_TXN_TIMEOUT,
+ ENV_TXN_TIME,
+ ENV_VERBOSE,
+ ENV_WRNOSYNC,
+#endif
+ ENV_CACHESIZE,
+ ENV_CREATE,
+ ENV_DATA_DIR,
+ ENV_ENCRYPT_AES,
+ ENV_ENCRYPT_ANY,
+ ENV_ERRFILE,
+ ENV_ERRPFX,
+ ENV_HOME,
+ ENV_LOG_DIR,
+ ENV_MODE,
+ ENV_PRIVATE,
+ ENV_RECOVER,
+ ENV_RECOVER_FATAL,
+ ENV_SHM_KEY,
+ ENV_SYSTEM_MEM,
+ ENV_TMP_DIR,
+ ENV_TXN,
+ ENV_TXN_MAX,
+ ENV_USE_ENVIRON,
+ ENV_USE_ENVIRON_ROOT
+ };
+ Tcl_Obj **myobjv, **myobjv1;
+ time_t timestamp;
+ u_int32_t detect, gbytes, bytes, ncaches, logbufset, logmaxset;
+ u_int32_t open_flags, rep_flags, set_flags, size, uintarg;
+ u_int8_t *conflicts;
+ int i, intarg, j, mode, myobjc, nmodes, optindex;
+ int result, ret, temp;
+ long client_to, server_to, shm;
+ char *arg, *home, *passwd, *server;
+
+ result = TCL_OK;
+ mode = 0;
+ rep_flags = set_flags = 0;
+ home = NULL;
+
+ /*
+ * XXX
+ * If/when our Tcl interface becomes thread-safe, we should enable
+ * DB_THREAD here in all cases. For now, turn it on only when testing
+ * so that we exercise MUTEX_THREAD_LOCK cases.
+ *
+ * Historically, a key stumbling block was the log_get interface,
+ * which could only do relative operations in a non-threaded
+ * environment. This is no longer an issue, thanks to log cursors,
+ * but we need to look at making sure DBTCL_INFO structs
+ * are safe to share across threads (they're not mutex-protected)
+ * before we declare the Tcl interface thread-safe. Meanwhile,
+ * there's no strong reason to enable DB_THREAD.
+ */
+ open_flags = DB_JOINENV |
+#ifdef TEST_THREAD
+ DB_THREAD;
+#else
+ 0;
+#endif
+ logmaxset = logbufset = 0;
+
+ if (objc <= 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Server code must go before the call to db_env_create.
+ */
+ server = NULL;
+ server_to = client_to = 0;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i++], envopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ Tcl_ResetResult(interp);
+ continue;
+ }
+ switch ((enum envopen)optindex) {
+#if CONFIG_TEST
+ case ENV_SERVER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server hostname");
+ result = TCL_ERROR;
+ break;
+ }
+ server = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENV_SERVER_TO:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server_to secs");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ &server_to);
+ break;
+ case ENV_CLIENT_TO:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-client_to secs");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ &client_to);
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ if (server != NULL) {
+ ret = db_env_create(env, DB_CLIENT);
+ if (ret)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_env_create"));
+ (*env)->set_errpfx((*env), ip->i_name);
+ (*env)->set_errcall((*env), _ErrorFunc);
+ if ((ret = (*env)->set_rpc_server((*env), NULL, server,
+ client_to, server_to, 0)) != 0) {
+ result = TCL_ERROR;
+ goto error;
+ }
+ } else {
+ /*
+ * Create the environment handle before parsing the args
+ * since we'll be modifying the environment as we parse.
+ */
+ ret = db_env_create(env, 0);
+ if (ret)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_env_create"));
+ (*env)->set_errpfx((*env), ip->i_name);
+ (*env)->set_errcall((*env), _ErrorFunc);
+ }
+
+ /* Hang our info pointer on the env handle, so we can do callbacks. */
+ (*env)->app_private = ip;
+
+ /*
+ * Use a Tcl-local alloc and free function so that we're sure to
+ * test whether we use umalloc/ufree in the right places.
+ */
+#ifdef TEST_ALLOC
+ (*env)->set_alloc(*env, tcl_db_malloc, tcl_db_realloc, tcl_db_free);
+#endif
+
+ /*
+ * Get the command name index from the object based on the bdbcmds
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ Tcl_ResetResult(interp);
+ if (Tcl_GetIndexFromObj(interp, objv[i], envopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto error;
+ }
+ i++;
+ switch ((enum envopen)optindex) {
+#if CONFIG_TEST
+ case ENV_SERVER:
+ case ENV_SERVER_TO:
+ case ENV_CLIENT_TO:
+ /*
+ * Already handled these, skip them and their arg.
+ */
+ i++;
+ break;
+ case ENV_AUTO_COMMIT:
+ FLD_SET(set_flags, DB_AUTO_COMMIT);
+ break;
+ case ENV_CDB:
+ FLD_SET(open_flags, DB_INIT_CDB | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_CDB_ALLDB:
+ FLD_SET(set_flags, DB_CDB_ALLDB);
+ break;
+ case ENV_LOCK:
+ FLD_SET(open_flags, DB_INIT_LOCK | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_CONFLICT:
+ /*
+ * Get conflict list. List is:
+ * {nmodes {matrix}}
+ *
+ * Where matrix must be nmodes*nmodes big.
+ * Set up conflicts array to pass.
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_conflict {nmodes {matrix}}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, myobjv[0], &nmodes);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_ListObjGetElements(interp, myobjv[1],
+ &myobjc, &myobjv1);
+ if (myobjc != (nmodes * nmodes)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_conflict {nmodes {matrix}}?");
+ result = TCL_ERROR;
+ break;
+ }
+ size = sizeof(u_int8_t) * nmodes*nmodes;
+ ret = __os_malloc(*env, size, &conflicts);
+ if (ret != 0) {
+ result = TCL_ERROR;
+ break;
+ }
+ for (j = 0; j < myobjc; j++) {
+ result = Tcl_GetIntFromObj(interp, myobjv1[j],
+ &temp);
+ conflicts[j] = temp;
+ if (result != TCL_OK) {
+ __os_free(NULL, conflicts);
+ break;
+ }
+ }
+ _debug_check();
+ ret = (*env)->set_lk_conflicts(*env,
+ (u_int8_t *)conflicts, nmodes);
+ __os_free(NULL, conflicts);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_lk_conflicts");
+ break;
+ case ENV_DETECT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_detect policy?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (strcmp(arg, "default") == 0)
+ detect = DB_LOCK_DEFAULT;
+ else if (strcmp(arg, "expire") == 0)
+ detect = DB_LOCK_EXPIRE;
+ else if (strcmp(arg, "maxlocks") == 0)
+ detect = DB_LOCK_MAXLOCKS;
+ else if (strcmp(arg, "minlocks") == 0)
+ detect = DB_LOCK_MINLOCKS;
+ else if (strcmp(arg, "minwrites") == 0)
+ detect = DB_LOCK_MINWRITE;
+ else if (strcmp(arg, "oldest") == 0)
+ detect = DB_LOCK_OLDEST;
+ else if (strcmp(arg, "youngest") == 0)
+ detect = DB_LOCK_YOUNGEST;
+ else if (strcmp(arg, "random") == 0)
+ detect = DB_LOCK_RANDOM;
+ else {
+ Tcl_AddErrorInfo(interp,
+ "lock_detect: illegal policy");
+ result = TCL_ERROR;
+ break;
+ }
+ _debug_check();
+ ret = (*env)->set_lk_detect(*env, detect);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock_detect");
+ break;
+ case ENV_LOCK_MAX:
+ case ENV_LOCK_MAX_LOCKS:
+ case ENV_LOCK_MAX_LOCKERS:
+ case ENV_LOCK_MAX_OBJECTS:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-lock_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ switch ((enum envopen)optindex) {
+ case ENV_LOCK_MAX:
+ ret = (*env)->set_lk_max(*env,
+ uintarg);
+ break;
+ case ENV_LOCK_MAX_LOCKS:
+ ret = (*env)->set_lk_max_locks(*env,
+ uintarg);
+ break;
+ case ENV_LOCK_MAX_LOCKERS:
+ ret = (*env)->set_lk_max_lockers(*env,
+ uintarg);
+ break;
+ case ENV_LOCK_MAX_OBJECTS:
+ ret = (*env)->set_lk_max_objects(*env,
+ uintarg);
+ break;
+ default:
+ break;
+ }
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock_max");
+ }
+ break;
+ case ENV_TXN_TIME:
+ case ENV_TXN_TIMEOUT:
+ case ENV_LOCK_TIMEOUT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_timestamp time?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ (long *)&timestamp);
+ if (result == TCL_OK) {
+ _debug_check();
+ if (optindex == ENV_TXN_TIME)
+ ret = (*env)->
+ set_tx_timestamp(*env, &timestamp);
+ else
+ ret = (*env)->set_timeout(*env,
+ (db_timeout_t)timestamp,
+ optindex == ENV_TXN_TIMEOUT ?
+ DB_SET_TXN_TIMEOUT :
+ DB_SET_LOCK_TIMEOUT);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "txn_timestamp");
+ }
+ break;
+ case ENV_LOG:
+ FLD_SET(open_flags, DB_INIT_LOG | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_LOG_BUFFER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_buffer size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_lg_bsize(*env, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log_bsize");
+ logbufset = 1;
+ if (logmaxset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env,
+ logmaxset);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log_max");
+ logmaxset = 0;
+ logbufset = 0;
+ }
+ }
+ break;
+ case ENV_LOG_MAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK && logbufset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log_max");
+ logbufset = 0;
+ } else
+ logmaxset = uintarg;
+ break;
+ case ENV_LOG_REGIONMAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_regionmax size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_lg_regionmax(*env, uintarg);
+ result =
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "log_regionmax");
+ }
+ break;
+ case ENV_MMAPSIZE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mmapsize size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_mp_mmapsize(*env,
+ (size_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "mmapsize");
+ }
+ break;
+ case ENV_NOMMAP:
+ FLD_SET(set_flags, DB_NOMMAP);
+ break;
+ case ENV_OVERWRITE:
+ FLD_SET(set_flags, DB_OVERWRITE);
+ break;
+ case ENV_REGION_INIT:
+ _debug_check();
+ ret = (*env)->set_flags(*env, DB_REGION_INIT, 1);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "region_init");
+ break;
+ case ENV_REP_CLIENT:
+ rep_flags = DB_REP_CLIENT;
+ break;
+ case ENV_REP_LOGSONLY:
+ rep_flags = DB_REP_LOGSONLY;
+ break;
+ case ENV_REP_MASTER:
+ rep_flags = DB_REP_MASTER;
+ break;
+ case ENV_REP_TRANSPORT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-rep_transport {envid sendproc}");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the objects containing the machine ID
+ * and the procedure name. We don't need to crack
+ * the send procedure out now, but we do convert the
+ * machine ID to an int, since set_rep_transport needs
+ * it. Even so, it'll be easier later to deal with
+ * the Tcl_Obj *, so we save that, not the int.
+ *
+ * Note that we Tcl_IncrRefCount both objects
+ * independently; Tcl is free to discard the list
+ * that they're bundled into.
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &myobjc, &myobjv);
+ if (myobjc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {envid sendproc}",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Check that the machine ID is an int. Note that
+ * we do want to use GetIntFromObj; the machine
+ * ID is explicitly an int, not a u_int32_t.
+ */
+ ip->i_rep_eid = myobjv[0];
+ Tcl_IncrRefCount(ip->i_rep_eid);
+ result = Tcl_GetIntFromObj(interp,
+ ip->i_rep_eid, &intarg);
+ if (result != TCL_OK)
+ break;
+
+ ip->i_rep_send = myobjv[1];
+ Tcl_IncrRefCount(ip->i_rep_send);
+ _debug_check();
+ ret = (*env)->set_rep_transport(*env,
+ intarg, tcl_rep_send);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_rep_transport");
+ break;
+ case ENV_VERBOSE:
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-verbose {which on|off}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = tcl_EnvVerbose(interp, *env,
+ myobjv[0], myobjv[1]);
+ break;
+ case ENV_WRNOSYNC:
+ FLD_SET(set_flags, DB_TXN_WRITE_NOSYNC);
+ break;
+#endif
+ case ENV_TXN:
+ FLD_SET(open_flags, DB_INIT_LOCK |
+ DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN);
+ FLD_CLR(open_flags, DB_JOINENV);
+ /* Make sure we have an arg to check against! */
+ if (i < objc) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (strcmp(arg, "nosync") == 0) {
+ FLD_SET(set_flags, DB_TXN_NOSYNC);
+ i++;
+ }
+ }
+ break;
+ case ENV_CREATE:
+ FLD_SET(open_flags, DB_CREATE | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_encrypt(*env, passwd, DB_ENCRYPT_AES);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case ENV_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_encrypt(*env, passwd, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case ENV_HOME:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-home dir?");
+ result = TCL_ERROR;
+ break;
+ }
+ home = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENV_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case ENV_PRIVATE:
+ FLD_SET(open_flags, DB_PRIVATE | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_RECOVER:
+ FLD_SET(open_flags, DB_RECOVER);
+ break;
+ case ENV_RECOVER_FATAL:
+ FLD_SET(open_flags, DB_RECOVER_FATAL);
+ break;
+ case ENV_SYSTEM_MEM:
+ FLD_SET(open_flags, DB_SYSTEM_MEM);
+ break;
+ case ENV_USE_ENVIRON_ROOT:
+ FLD_SET(open_flags, DB_USE_ENVIRON_ROOT);
+ break;
+ case ENV_USE_ENVIRON:
+ FLD_SET(open_flags, DB_USE_ENVIRON);
+ break;
+ case ENV_CACHESIZE:
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-cachesize {gbytes bytes ncaches}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, myobjv[0], &gbytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[1], &bytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[2], &ncaches);
+ if (result != TCL_OK)
+ break;
+ _debug_check();
+ ret = (*env)->set_cachesize(*env, gbytes, bytes,
+ ncaches);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_cachesize");
+ break;
+ case ENV_SHM_KEY:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-shm_key key?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++], &shm);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_shm_key(*env, shm);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "shm_key");
+ }
+ break;
+ case ENV_TXN_MAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_tx_max(*env, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "txn_max");
+ }
+ break;
+ case ENV_ERRFILE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errfile file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, close it.
+ */
+ if (ip->i_err != NULL)
+ fclose(ip->i_err);
+ ip->i_err = fopen(arg, "a");
+ if (ip->i_err != NULL) {
+ _debug_check();
+ (*env)->set_errfile(*env, ip->i_err);
+ }
+ break;
+ case ENV_ERRPFX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errpfx prefix");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, free it.
+ */
+ if (ip->i_errpfx != NULL)
+ __os_free(NULL, ip->i_errpfx);
+ if ((ret =
+ __os_strdup(*env, arg, &ip->i_errpfx)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "__os_strdup");
+ break;
+ }
+ if (ip->i_errpfx != NULL) {
+ _debug_check();
+ (*env)->set_errpfx(*env, ip->i_errpfx);
+ }
+ break;
+ case ENV_DATA_DIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-data_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_data_dir(*env, arg);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_data_dir");
+ break;
+ case ENV_LOG_DIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-log_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_lg_dir(*env, arg);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_lg_dir");
+ break;
+ case ENV_TMP_DIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-tmp_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_tmp_dir(*env, arg);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_tmp_dir");
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+
+ /*
+ * We have to check this here. We want to set the log buffer
+ * size first, if it is specified. So if the user did so,
+ * then we took care of it above. But, if we get out here and
+ * logmaxset is non-zero, then they set the log_max without
+ * resetting the log buffer size, so we now have to do the
+ * call to set_lg_max, since we didn't do it above.
+ */
+ if (logmaxset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env, (u_int32_t)logmaxset);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "log_max");
+ }
+
+ if (result != TCL_OK)
+ goto error;
+
+ if (set_flags) {
+ ret = (*env)->set_flags(*env, set_flags, 1);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ if (result == TCL_ERROR)
+ goto error;
+ /*
+ * If we are successful, clear the result so that the
+ * return from set_flags isn't part of the result.
+ */
+ Tcl_ResetResult(interp);
+ }
+ /*
+ * When we get here, we have already parsed all of our args
+ * and made all our calls to set up the environment. Everything
+ * is okay so far, no errors, if we get here.
+ *
+ * Now open the environment.
+ */
+ _debug_check();
+ ret = (*env)->open(*env, home, open_flags, mode);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env open");
+
+ if (rep_flags != 0 && result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->rep_start(*env, NULL, rep_flags);
+ result = _ReturnSetup(interp,
+ ret, DB_RETOK_STD(ret), "rep_start");
+ }
+
+error: if (result == TCL_ERROR) {
+ if (ip->i_err) {
+ fclose(ip->i_err);
+ ip->i_err = NULL;
+ }
+ (void)(*env)->close(*env, 0);
+ *env = NULL;
+ }
+ return (result);
+}
+
+/*
+ * bdb_DbOpen --
+ * Implements the "db_create/db_open" command.
+ * There are many, many options to the open command.
+ * Here is the general flow:
+ *
+ * 0. Preparse args to determine if we have -env.
+ * 1. Call db_create to create the db handle.
+ * 2. Parse args tracking options.
+ * 3. Make any pre-open setup calls necessary.
+ * 4. Call DB->open to open the database.
+ * 5. Return db widget handle to user.
+ */
+static int
+bdb_DbOpen(interp, objc, objv, ip, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBTCL_INFO *ip; /* Our internal info */
+ DB **dbp; /* DB handle */
+{
+ static char *bdbenvopen[] = {
+ "-env", NULL
+ };
+ enum bdbenvopen {
+ TCL_DB_ENV0
+ };
+ static char *bdbopen[] = {
+#if CONFIG_TEST
+ "-btcompare",
+ "-dirty",
+ "-dupcompare",
+ "-hashproc",
+ "-lorder",
+ "-minkey",
+ "-nommap",
+ "-revsplitoff",
+ "-test",
+#endif
+ "-auto_commit",
+ "-btree",
+ "-cachesize",
+ "-chksum",
+ "-create",
+ "-delim",
+ "-dup",
+ "-dupsort",
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-errfile",
+ "-errpfx",
+ "-excl",
+ "-extent",
+ "-ffactor",
+ "-hash",
+ "-len",
+ "-mode",
+ "-nelem",
+ "-pad",
+ "-pagesize",
+ "-queue",
+ "-rdonly",
+ "-recno",
+ "-recnum",
+ "-renumber",
+ "-snapshot",
+ "-source",
+ "-truncate",
+ "-txn",
+ "-unknown",
+ "--",
+ NULL
+ };
+ enum bdbopen {
+#if CONFIG_TEST
+ TCL_DB_BTCOMPARE,
+ TCL_DB_DIRTY,
+ TCL_DB_DUPCOMPARE,
+ TCL_DB_HASHPROC,
+ TCL_DB_LORDER,
+ TCL_DB_MINKEY,
+ TCL_DB_NOMMAP,
+ TCL_DB_REVSPLIT,
+ TCL_DB_TEST,
+#endif
+ TCL_DB_AUTO_COMMIT,
+ TCL_DB_BTREE,
+ TCL_DB_CACHESIZE,
+ TCL_DB_CHKSUM,
+ TCL_DB_CREATE,
+ TCL_DB_DELIM,
+ TCL_DB_DUP,
+ TCL_DB_DUPSORT,
+ TCL_DB_ENCRYPT,
+ TCL_DB_ENCRYPT_AES,
+ TCL_DB_ENCRYPT_ANY,
+ TCL_DB_ENV,
+ TCL_DB_ERRFILE,
+ TCL_DB_ERRPFX,
+ TCL_DB_EXCL,
+ TCL_DB_EXTENT,
+ TCL_DB_FFACTOR,
+ TCL_DB_HASH,
+ TCL_DB_LEN,
+ TCL_DB_MODE,
+ TCL_DB_NELEM,
+ TCL_DB_PAD,
+ TCL_DB_PAGESIZE,
+ TCL_DB_QUEUE,
+ TCL_DB_RDONLY,
+ TCL_DB_RECNO,
+ TCL_DB_RECNUM,
+ TCL_DB_RENUMBER,
+ TCL_DB_SNAPSHOT,
+ TCL_DB_SOURCE,
+ TCL_DB_TRUNCATE,
+ TCL_DB_TXN,
+ TCL_DB_UNKNOWN,
+ TCL_DB_ENDARG
+ };
+
+ DBTCL_INFO *envip, *errip;
+ DB_TXN *txn;
+ DBTYPE type;
+ DB_ENV *envp;
+ Tcl_Obj **myobjv;
+ u_int32_t gbytes, bytes, ncaches, open_flags, uintarg;
+ int endarg, i, intarg, mode, myobjc;
+ int optindex, result, ret, set_err, set_flags, set_pfx, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *passwd, *subdb, msg[MSG_SIZE];
+
+ type = DB_UNKNOWN;
+ endarg = mode = set_err = set_flags = set_pfx = 0;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = subdb = NULL;
+
+ /*
+ * XXX
+ * If/when our Tcl interface becomes thread-safe, we should enable
+ * DB_THREAD here in all cases. See comment in bdb_EnvOpen().
+ * For now, just turn it on when testing so that we exercise
+ * MUTEX_THREAD_LOCK cases.
+ */
+ open_flags =
+#ifdef TEST_THREAD
+ DB_THREAD;
+#else
+ 0;
+#endif
+ envp = NULL;
+ txn = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i++], bdbenvopen,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ Tcl_ResetResult(interp);
+ continue;
+ }
+ switch ((enum bdbenvopen)optindex) {
+ case TCL_DB_ENV0:
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db open: illegal environment", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+ break;
+ }
+
+ /*
+ * Create the db handle before parsing the args
+ * since we'll be modifying the database options as we parse.
+ */
+ ret = db_create(dbp, envp, 0);
+ if (ret)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create"));
+
+ /* Hang our info pointer on the DB handle, so we can do callbacks. */
+ (*dbp)->api_internal = ip;
+
+ /*
+ * XXX Remove restriction when err stuff is not tied to env.
+ *
+ * The DB->set_err* functions actually overwrite in the
+ * environment. So, if we are explicitly using an env,
+ * don't overwrite what we have already set up. If we are
+ * not using one, then we set up since we get a private
+ * default env.
+ */
+ /* XXX - remove this conditional if/when err is not tied to env */
+ if (envp == NULL) {
+ (*dbp)->set_errpfx((*dbp), ip->i_name);
+ (*dbp)->set_errcall((*dbp), _ErrorFunc);
+ }
+ envip = _PtrToInfo(envp); /* XXX */
+ /*
+ * If we are using an env, we keep track of err info in the env's ip.
+ * Otherwise use the DB's ip.
+ */
+ if (envip)
+ errip = envip;
+ else
+ errip = ip;
+ /*
+ * Get the option name index from the object based on the args
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ Tcl_ResetResult(interp);
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbopen, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbopen)optindex) {
+#if CONFIG_TEST
+ case TCL_DB_BTCOMPARE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-btcompare compareproc");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the object containing the procedure name.
+ * We don't need to crack it out now--we'll want
+ * to bundle it up to pass into Tcl_EvalObjv anyway.
+ * Tcl's object refcounting will--I hope--take care
+ * of the memory management here.
+ */
+ ip->i_btcompare = objv[i++];
+ Tcl_IncrRefCount(ip->i_btcompare);
+ _debug_check();
+ ret = (*dbp)->set_bt_compare(*dbp, tcl_bt_compare);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_bt_compare");
+ break;
+ case TCL_DB_DIRTY:
+ open_flags |= DB_DIRTY_READ;
+ break;
+ case TCL_DB_DUPCOMPARE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-dupcompare compareproc");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the object containing the procedure name.
+ * See TCL_DB_BTCOMPARE.
+ */
+ ip->i_dupcompare = objv[i++];
+ Tcl_IncrRefCount(ip->i_dupcompare);
+ _debug_check();
+ ret = (*dbp)->set_dup_compare(*dbp, tcl_dup_compare);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_dup_compare");
+ break;
+ case TCL_DB_HASHPROC:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-hashproc hashproc");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the object containing the procedure name.
+ * See TCL_DB_BTCOMPARE.
+ */
+ ip->i_hashproc = objv[i++];
+ Tcl_IncrRefCount(ip->i_hashproc);
+ _debug_check();
+ ret = (*dbp)->set_h_hash(*dbp, tcl_h_hash);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_h_hash");
+ break;
+ case TCL_DB_LORDER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-lorder 1234|4321");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_lorder(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_lorder");
+ }
+ break;
+ case TCL_DB_MINKEY:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-minkey minkey");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_bt_minkey(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_bt_minkey");
+ }
+ break;
+ case TCL_DB_NOMMAP:
+ open_flags |= DB_NOMMAP;
+ break;
+ case TCL_DB_REVSPLIT:
+ set_flags |= DB_REVSPLITOFF;
+ break;
+ case TCL_DB_TEST:
+ (*dbp)->set_h_hash(*dbp, __ham_test);
+ break;
+#endif
+ case TCL_DB_AUTO_COMMIT:
+ open_flags |= DB_AUTO_COMMIT;
+ break;
+ case TCL_DB_ENV:
+ /*
+ * Already parsed this, skip it and the env pointer.
+ */
+ i++;
+ continue;
+ case TCL_DB_TXN:
+ if (i > (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Put: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ case TCL_DB_BTREE:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_BTREE;
+ break;
+ case TCL_DB_HASH:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_HASH;
+ break;
+ case TCL_DB_RECNO:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_RECNO;
+ break;
+ case TCL_DB_QUEUE:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ type = DB_QUEUE;
+ break;
+ case TCL_DB_UNKNOWN:
+ if (type != DB_UNKNOWN) {
+ Tcl_SetResult(interp,
+ "Too many DB types specified", TCL_STATIC);
+ result = TCL_ERROR;
+ goto error;
+ }
+ break;
+ case TCL_DB_CREATE:
+ open_flags |= DB_CREATE;
+ break;
+ case TCL_DB_EXCL:
+ open_flags |= DB_EXCL;
+ break;
+ case TCL_DB_RDONLY:
+ open_flags |= DB_RDONLY;
+ break;
+ case TCL_DB_TRUNCATE:
+ open_flags |= DB_TRUNCATE;
+ break;
+ case TCL_DB_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case TCL_DB_DUP:
+ set_flags |= DB_DUP;
+ break;
+ case TCL_DB_DUPSORT:
+ set_flags |= DB_DUPSORT;
+ break;
+ case TCL_DB_RECNUM:
+ set_flags |= DB_RECNUM;
+ break;
+ case TCL_DB_RENUMBER:
+ set_flags |= DB_RENUMBER;
+ break;
+ case TCL_DB_SNAPSHOT:
+ set_flags |= DB_SNAPSHOT;
+ break;
+ case TCL_DB_CHKSUM:
+ set_flags |= DB_CHKSUM_SHA1;
+ break;
+ case TCL_DB_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ break;
+ case TCL_DB_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*dbp)->set_encrypt(*dbp, passwd, DB_ENCRYPT_AES);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case TCL_DB_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*dbp)->set_encrypt(*dbp, passwd, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case TCL_DB_FFACTOR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-ffactor density");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_h_ffactor(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_h_ffactor");
+ }
+ break;
+ case TCL_DB_NELEM:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-nelem nelem");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_h_nelem(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_h_nelem");
+ }
+ break;
+ case TCL_DB_DELIM:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-delim delim");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_re_delim(*dbp, intarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_re_delim");
+ }
+ break;
+ case TCL_DB_LEN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-len length");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_re_len(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_re_len");
+ }
+ break;
+ case TCL_DB_PAD:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-pad pad");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_re_pad(*dbp, intarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_re_pad");
+ }
+ break;
+ case TCL_DB_SOURCE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-source file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*dbp)->set_re_source(*dbp, arg);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_re_source");
+ break;
+ case TCL_DB_EXTENT:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-extent size");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_q_extentsize(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_q_extentsize");
+ }
+ break;
+ case TCL_DB_CACHESIZE:
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &myobjc, &myobjv);
+ if (result != TCL_OK)
+ break;
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-cachesize {gbytes bytes ncaches}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, myobjv[0], &gbytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[1], &bytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[2], &ncaches);
+ if (result != TCL_OK)
+ break;
+ _debug_check();
+ ret = (*dbp)->set_cachesize(*dbp, gbytes, bytes,
+ ncaches);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_cachesize");
+ break;
+ case TCL_DB_PAGESIZE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-pagesize size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_pagesize(*dbp,
+ (size_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set pagesize");
+ }
+ break;
+ case TCL_DB_ERRFILE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errfile file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, close it.
+ */
+ if (errip->i_err != NULL)
+ fclose(errip->i_err);
+ errip->i_err = fopen(arg, "a");
+ if (errip->i_err != NULL) {
+ _debug_check();
+ (*dbp)->set_errfile(*dbp, errip->i_err);
+ set_err = 1;
+ }
+ break;
+ case TCL_DB_ERRPFX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errpfx prefix");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, free it.
+ */
+ if (errip->i_errpfx != NULL)
+ __os_free(NULL, errip->i_errpfx);
+ if ((ret = __os_strdup((*dbp)->dbenv,
+ arg, &errip->i_errpfx)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "__os_strdup");
+ break;
+ }
+ if (errip->i_errpfx != NULL) {
+ _debug_check();
+ (*dbp)->set_errpfx(*dbp, errip->i_errpfx);
+ set_pfx = 1;
+ }
+ break;
+ case TCL_DB_ENDARG:
+ endarg = 1;
+ break;
+ } /* switch */
+
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+
+ /*
+ * Any args we have left, (better be 0, 1 or 2 left) are
+ * file names. If we have 0, then an in-memory db. If
+ * there is 1, a db name, if 2 a db and subdb name.
+ */
+ if (i != objc) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i != objc) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(envp,
+ subdblen + 1, &subdb)) != 0) {
+ Tcl_SetResult(interp, db_strerror(ret),
+ TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ }
+ if (set_flags) {
+ ret = (*dbp)->set_flags(*dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ if (result == TCL_ERROR)
+ goto error;
+ /*
+ * If we are successful, clear the result so that the
+ * return from set_flags isn't part of the result.
+ */
+ Tcl_ResetResult(interp);
+ }
+
+ /*
+ * When we get here, we have already parsed all of our args and made
+ * all our calls to set up the database. Everything is okay so far,
+ * no errors, if we get here.
+ */
+ _debug_check();
+
+ /* Open the database. */
+ ret = (*dbp)->open(*dbp, txn, db, subdb, type, open_flags, mode);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db open");
+
+error:
+ if (subdb)
+ __os_free(envp, subdb);
+ if (result == TCL_ERROR) {
+ (void)(*dbp)->close(*dbp, 0);
+ /*
+ * If we opened and set up the error file in the environment
+ * on this open, but we failed for some other reason, clean
+ * up and close the file.
+ *
+ * XXX when err stuff isn't tied to env, change to use ip,
+ * instead of envip. Also, set_err is irrelevant when that
+ * happens. It will just read:
+ * if (ip->i_err)
+ * fclose(ip->i_err);
+ */
+ if (set_err && errip && errip->i_err != NULL) {
+ fclose(errip->i_err);
+ errip->i_err = NULL;
+ }
+ if (set_pfx && errip && errip->i_errpfx != NULL) {
+ __os_free(envp, errip->i_errpfx);
+ errip->i_errpfx = NULL;
+ }
+ *dbp = NULL;
+ }
+ return (result);
+}
+
+/*
+ * bdb_DbRemove --
+ * Implements the DB_ENV->remove and DB->remove command.
+ */
+static int
+bdb_DbRemove(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbrem[] = {
+ "-auto_commit",
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum bdbrem {
+ TCL_DBREM_AUTOCOMMIT,
+ TCL_DBREM_ENCRYPT,
+ TCL_DBREM_ENCRYPT_AES,
+ TCL_DBREM_ENCRYPT_ANY,
+ TCL_DBREM_ENV,
+ TCL_DBREM_TXN,
+ TCL_DBREM_ENDARG
+ };
+ DB *dbp;
+ DB_ENV *envp;
+ DB_TXN *txn;
+ int endarg, i, optindex, result, ret, subdblen;
+ u_int32_t enc_flag, iflags, set_flags;
+ u_char *subdbtmp;
+ char *arg, *db, msg[MSG_SIZE], *passwd, *subdb;
+
+ db = subdb = NULL;
+ dbp = NULL;
+ endarg = 0;
+ envp = NULL;
+ iflags = enc_flag = set_flags = 0;
+ passwd = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ txn = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbrem,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbrem)optindex) {
+ case TCL_DBREM_AUTOCOMMIT:
+ iflags |= DB_AUTO_COMMIT;
+ _debug_check();
+ break;
+ case TCL_DBREM_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ _debug_check();
+ break;
+ case TCL_DBREM_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case TCL_DBREM_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
+ case TCL_DBREM_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db remove: illegal environment",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_DBREM_ENDARG:
+ endarg = 1;
+ break;
+ case TCL_DBREM_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Put: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left, (better be 1 or 2 left) are
+ * file names. If there is 1, a db name, if 2 a db and subdb name.
+ */
+ if ((i != (objc - 1)) || (i != (objc - 2))) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i != objc) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(envp, subdblen + 1,
+ &subdb)) != 0) { Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ } else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ if (envp == NULL) {
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+
+ if (passwd != NULL) {
+ ret = dbp->set_encrypt(dbp, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+ if (set_flags != 0) {
+ ret = dbp->set_flags(dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ }
+ }
+
+ /*
+ * No matter what, we NULL out dbp after this call.
+ */
+ _debug_check();
+ if (dbp == NULL)
+ ret = envp->dbremove(envp, txn, db, subdb, iflags);
+ else
+ ret = dbp->remove(dbp, db, subdb, 0);
+
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db remove");
+ dbp = NULL;
+error:
+ if (subdb)
+ __os_free(envp, subdb);
+ if (result == TCL_ERROR && dbp != NULL)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+
+/*
+ * bdb_DbRename --
+ * Implements the DBENV->dbrename and DB->rename commands.
+ */
+static int
+bdb_DbRename(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbmv[] = {
+ "-auto_commit",
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum bdbmv {
+ TCL_DBMV_AUTOCOMMIT,
+ TCL_DBMV_ENCRYPT,
+ TCL_DBMV_ENCRYPT_AES,
+ TCL_DBMV_ENCRYPT_ANY,
+ TCL_DBMV_ENV,
+ TCL_DBMV_TXN,
+ TCL_DBMV_ENDARG
+ };
+ DB *dbp;
+ DB_ENV *envp;
+ DB_TXN *txn;
+ u_int32_t enc_flag, iflags, set_flags;
+ int endarg, i, newlen, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, msg[MSG_SIZE], *newname, *passwd, *subdb;
+
+ db = newname = subdb = NULL;
+ dbp = NULL;
+ endarg = 0;
+ envp = NULL;
+ iflags = enc_flag = set_flags = 0;
+ passwd = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ txn = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp,
+ 3, objv, "?args? filename ?database? ?newname?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbmv,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbmv)optindex) {
+ case TCL_DBMV_AUTOCOMMIT:
+ iflags |= DB_AUTO_COMMIT;
+ _debug_check();
+ break;
+ case TCL_DBMV_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ _debug_check();
+ break;
+ case TCL_DBMV_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case TCL_DBMV_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
+ case TCL_DBMV_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db rename: illegal environment",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_DBMV_ENDARG:
+ endarg = 1;
+ break;
+ case TCL_DBMV_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Put: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left, (better be 2 or 3 left) are
+ * file names. If there is 2, a file name, if 3 a file and db name.
+ */
+ if ((i != (objc - 2)) || (i != (objc - 3))) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i == objc - 2) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(envp, subdblen + 1,
+ &subdb)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &newlen);
+ if ((ret = __os_malloc(envp, newlen + 1,
+ &newname)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(newname, subdbtmp, newlen);
+ newname[newlen] = '\0';
+ } else {
+ Tcl_WrongNumArgs(
+ interp, 3, objv, "?args? filename ?database? ?newname?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ if (envp == NULL) {
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+ if (passwd != NULL) {
+ ret = dbp->set_encrypt(dbp, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+ if (set_flags != 0) {
+ ret = dbp->set_flags(dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ }
+ }
+
+ /*
+ * No matter what, we NULL out dbp after this call.
+ */
+ if (dbp == NULL)
+ ret = envp->dbrename(envp, txn, db, subdb, newname, iflags);
+ else
+ ret = dbp->rename(dbp, db, subdb, newname, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db rename");
+ dbp = NULL;
+error:
+ if (subdb)
+ __os_free(envp, subdb);
+ if (newname)
+ __os_free(envp, newname);
+ if (result == TCL_ERROR && dbp != NULL)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+
+#if CONFIG_TEST
+/*
+ * bdb_DbVerify --
+ * Implements the DB->verify command.
+ */
+static int
+bdb_DbVerify(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbverify[] = {
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-errfile",
+ "-errpfx",
+ "--",
+ NULL
+ };
+ enum bdbvrfy {
+ TCL_DBVRFY_ENCRYPT,
+ TCL_DBVRFY_ENCRYPT_AES,
+ TCL_DBVRFY_ENCRYPT_ANY,
+ TCL_DBVRFY_ENV,
+ TCL_DBVRFY_ERRFILE,
+ TCL_DBVRFY_ERRPFX,
+ TCL_DBVRFY_ENDARG
+ };
+ DB_ENV *envp;
+ DB *dbp;
+ FILE *errf;
+ u_int32_t enc_flag, flags, set_flags;
+ int endarg, i, optindex, result, ret;
+ char *arg, *db, *errpfx, *passwd;
+
+ envp = NULL;
+ dbp = NULL;
+ passwd = NULL;
+ result = TCL_OK;
+ db = errpfx = NULL;
+ errf = NULL;
+ flags = endarg = 0;
+ enc_flag = set_flags = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbverify,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbvrfy)optindex) {
+ case TCL_DBVRFY_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ _debug_check();
+ break;
+ case TCL_DBVRFY_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case TCL_DBVRFY_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
+ case TCL_DBVRFY_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db verify: illegal environment",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ break;
+ case TCL_DBVRFY_ERRFILE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errfile file");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, close it.
+ */
+ if (errf != NULL)
+ fclose(errf);
+ errf = fopen(arg, "a");
+ break;
+ case TCL_DBVRFY_ERRPFX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-errpfx prefix");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ /*
+ * If the user already set one, free it.
+ */
+ if (errpfx != NULL)
+ __os_free(envp, errpfx);
+ if ((ret = __os_strdup(NULL, arg, &errpfx)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "__os_strdup");
+ break;
+ }
+ break;
+ case TCL_DBVRFY_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * The remaining arg is the db filename.
+ */
+ if (i == (objc - 1))
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+
+ if (passwd != NULL) {
+ ret = dbp->set_encrypt(dbp, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+
+ if (set_flags != 0) {
+ ret = dbp->set_flags(dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ }
+ if (errf != NULL)
+ dbp->set_errfile(dbp, errf);
+ if (errpfx != NULL)
+ dbp->set_errpfx(dbp, errpfx);
+
+ ret = dbp->verify(dbp, db, NULL, NULL, flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db verify");
+error:
+ if (errf != NULL)
+ fclose(errf);
+ if (errpfx != NULL)
+ __os_free(envp, errpfx);
+ if (dbp)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+#endif
+
+/*
+ * bdb_Version --
+ * Implements the version command.
+ */
+static int
+bdb_Version(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbver[] = {
+ "-string", NULL
+ };
+ enum bdbver {
+ TCL_VERSTRING
+ };
+ int i, optindex, maj, min, patch, result, string, verobjc;
+ char *arg, *v;
+ Tcl_Obj *res, *verobjv[3];
+
+ result = TCL_OK;
+ string = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbver,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbver)optindex) {
+ case TCL_VERSTRING:
+ string = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (result != TCL_OK)
+ goto error;
+
+ v = db_version(&maj, &min, &patch);
+ if (string)
+ res = Tcl_NewStringObj(v, strlen(v));
+ else {
+ verobjc = 3;
+ verobjv[0] = Tcl_NewIntObj(maj);
+ verobjv[1] = Tcl_NewIntObj(min);
+ verobjv[2] = Tcl_NewIntObj(patch);
+ res = Tcl_NewListObj(verobjc, verobjv);
+ }
+ Tcl_SetObjResult(interp, res);
+error:
+ return (result);
+}
+
+#if CONFIG_TEST
+/*
+ * bdb_Handles --
+ * Implements the handles command.
+ */
+static int
+bdb_Handles(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ DBTCL_INFO *p;
+ Tcl_Obj *res, *handle;
+
+ /*
+ * No args. Error if we have some
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "");
+ return (TCL_ERROR);
+ }
+ res = Tcl_NewListObj(0, NULL);
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries)) {
+ handle = Tcl_NewStringObj(p->i_name, strlen(p->i_name));
+ if (Tcl_ListObjAppendElement(interp, res, handle) != TCL_OK)
+ return (TCL_ERROR);
+ }
+ Tcl_SetObjResult(interp, res);
+ return (TCL_OK);
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * bdb_DbUpgrade --
+ * Implements the DB->upgrade command.
+ */
+static int
+bdb_DbUpgrade(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *bdbupg[] = {
+ "-dupsort", "-env", "--", NULL
+ };
+ enum bdbupg {
+ TCL_DBUPG_DUPSORT,
+ TCL_DBUPG_ENV,
+ TCL_DBUPG_ENDARG
+ };
+ DB_ENV *envp;
+ DB *dbp;
+ u_int32_t flags;
+ int endarg, i, optindex, result, ret;
+ char *arg, *db;
+
+ envp = NULL;
+ dbp = NULL;
+ result = TCL_OK;
+ db = NULL;
+ flags = endarg = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], bdbupg,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum bdbupg)optindex) {
+ case TCL_DBUPG_DUPSORT:
+ flags |= DB_DUPSORT;
+ break;
+ case TCL_DBUPG_ENV:
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ envp = NAME_TO_ENV(arg);
+ if (envp == NULL) {
+ Tcl_SetResult(interp,
+ "db upgrade: illegal environment",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_DBUPG_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * The remaining arg is the db filename.
+ */
+ if (i == (objc - 1))
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+
+ ret = dbp->upgrade(dbp, db, flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db upgrade");
+error:
+ if (dbp)
+ (void)dbp->close(dbp, 0);
+ return (result);
+}
+#endif
+
+/*
+ * tcl_bt_compare and tcl_dup_compare --
+ * These two are basically identical internally, so may as well
+ * share code. The only differences are the name used in error
+ * reporting and the Tcl_Obj representing their respective procs.
+ */
+static int
+tcl_bt_compare(dbp, dbta, dbtb)
+ DB *dbp;
+ const DBT *dbta, *dbtb;
+{
+ return (tcl_compare_callback(dbp, dbta, dbtb,
+ ((DBTCL_INFO *)dbp->api_internal)->i_btcompare, "bt_compare"));
+}
+
+static int
+tcl_dup_compare(dbp, dbta, dbtb)
+ DB *dbp;
+ const DBT *dbta, *dbtb;
+{
+ return (tcl_compare_callback(dbp, dbta, dbtb,
+ ((DBTCL_INFO *)dbp->api_internal)->i_dupcompare, "dup_compare"));
+}
+
/*
 * tcl_compare_callback --
 *	Tcl callback for set_bt_compare and set_dup_compare. What this
 * function does is stuff the data fields of the two DBTs into Tcl ByteArray
 * objects, then call the procedure stored in ip->i_btcompare on the two
 * objects. Then we return that procedure's result as the comparison.
 *
 * Returns the integer produced by the Tcl proc named by procobj; if the
 * proc fails or returns a non-integer, the environment is panicked (see
 * the XXX comment below) and DB_RUNRECOVERY's panic value is returned.
 */
static int
tcl_compare_callback(dbp, dbta, dbtb, procobj, errname)
	DB *dbp;
	const DBT *dbta, *dbtb;
	Tcl_Obj *procobj;		/* Tcl proc to evaluate */
	char *errname;			/* Callback name for error messages */
{
	DBTCL_INFO *ip;
	Tcl_Interp *interp;
	Tcl_Obj *a, *b, *resobj, *objv[3];
	int result, cmp;

	ip = (DBTCL_INFO *)dbp->api_internal;
	interp = ip->i_interp;
	/* objv becomes the Tcl command: {proc bytes-a bytes-b}. */
	objv[0] = procobj;

	/*
	 * Create two ByteArray objects, with the two data we've been passed.
	 * This will involve a copy, which is unpleasantly slow, but there's
	 * little we can do to avoid this (I think).
	 */
	a = Tcl_NewByteArrayObj(dbta->data, dbta->size);
	Tcl_IncrRefCount(a);
	b = Tcl_NewByteArrayObj(dbtb->data, dbtb->size);
	Tcl_IncrRefCount(b);

	objv[1] = a;
	objv[2] = b;

	result = Tcl_EvalObjv(interp, 3, objv, 0);
	if (result != TCL_OK) {
		/*
		 * XXX
		 * If this or the next Tcl call fails, we're doomed.
		 * There's no way to return an error from comparison functions,
		 * no way to determine what the correct sort order is, and
		 * so no way to avoid corrupting the database if we proceed.
		 * We could play some games stashing return values on the
		 * DB handle, but it's not worth the trouble--no one with
		 * any sense is going to be using this other than for testing,
		 * and failure typically means that the bt_compare proc
		 * had a syntax error in it or something similarly dumb.
		 *
		 * So, drop core. If we're not running with diagnostic
		 * mode, panic--and always return a negative number. :-)
		 *
		 * NOTE(review): the refcounts on a and b are never
		 * released on this path; harmless, since the environment
		 * is being panicked anyway.
		 */
panic:	__db_err(dbp->dbenv, "Tcl %s callback failed", errname);
		DB_ASSERT(0);
		return (__db_panic(dbp->dbenv, DB_RUNRECOVERY));
	}

	/* Convert the proc's result to the integer comparison value. */
	resobj = Tcl_GetObjResult(interp);
	result = Tcl_GetIntFromObj(interp, resobj, &cmp);
	if (result != TCL_OK)
		goto panic;

	Tcl_DecrRefCount(a);
	Tcl_DecrRefCount(b);
	return (cmp);
}
+
/*
 * tcl_h_hash --
 *	Tcl callback for the hashing function. See tcl_compare_callback--
 * this works much the same way, only we're given a buffer and a length
 * instead of two DBTs.
 *
 * Returns the integer produced by the Tcl proc stored in ip->i_hashproc
 * (implicitly converted to u_int32_t); on any Tcl failure the
 * environment is panicked, as in tcl_compare_callback.
 */
static u_int32_t
tcl_h_hash(dbp, buf, len)
	DB *dbp;
	const void *buf;		/* Bytes to hash */
	u_int32_t len;			/* Number of bytes at buf */
{
	DBTCL_INFO *ip;
	Tcl_Interp *interp;
	Tcl_Obj *objv[2];
	int result, hval;

	ip = (DBTCL_INFO *)dbp->api_internal;
	interp = ip->i_interp;
	/* objv becomes the Tcl command: {hashproc bytes}. */
	objv[0] = ip->i_hashproc;

	/*
	 * Create a ByteArray for the buffer.
	 */
	objv[1] = Tcl_NewByteArrayObj((void *)buf, len);
	Tcl_IncrRefCount(objv[1]);
	result = Tcl_EvalObjv(interp, 2, objv, 0);
	if (result != TCL_OK) {
		/*
		 * XXX
		 * We drop core on error. See the comment in
		 * tcl_compare_callback.
		 */
panic:	__db_err(dbp->dbenv, "Tcl h_hash callback failed");
		DB_ASSERT(0);
		return (__db_panic(dbp->dbenv, DB_RUNRECOVERY));
	}

	/* Convert the proc's result to the hash value. */
	result = Tcl_GetIntFromObj(interp, Tcl_GetObjResult(interp), &hval);
	if (result != TCL_OK)
		goto panic;

	Tcl_DecrRefCount(objv[1]);
	return (hval);
}
+
/*
 * tcl_rep_send --
 *	Replication send callback.
 *
 * Invokes the Tcl proc stored in ip->i_rep_send with the control and
 * record byte arrays plus the from/to environment IDs, and returns the
 * proc's integer result as the send status.  The interpreter's prior
 * result is saved and restored around the callback so scripts don't see
 * it clobbered.
 */
static int
tcl_rep_send(dbenv, control, rec, eid, flags)
	DB_ENV *dbenv;
	const DBT *control, *rec;
	int eid;			/* Destination environment ID */
	u_int32_t flags;		/* Unused (see COMPQUIET below) */
{
	DBTCL_INFO *ip;
	Tcl_Interp *interp;
	Tcl_Obj *control_o, *eid_o, *origobj, *rec_o, *resobj, *objv[5];
	int result, ret;

	/* flags is deliberately ignored; COMPQUIET silences the warning. */
	COMPQUIET(flags, 0);

	ip = (DBTCL_INFO *)dbenv->app_private;
	interp = ip->i_interp;
	objv[0] = ip->i_rep_send;

	control_o = Tcl_NewByteArrayObj(control->data, control->size);
	Tcl_IncrRefCount(control_o);

	rec_o = Tcl_NewByteArrayObj(rec->data, rec->size);
	Tcl_IncrRefCount(rec_o);

	eid_o = Tcl_NewIntObj(eid);
	Tcl_IncrRefCount(eid_o);

	objv[1] = control_o;
	objv[2] = rec_o;
	objv[3] = ip->i_rep_eid;	/* From ID */
	objv[4] = eid_o;		/* To ID */

	/*
	 * We really want to return the original result to the
	 * user. So, save the result obj here, and then after
	 * we've taken care of the Tcl_EvalObjv, set the result
	 * back to this original result.
	 */
	origobj = Tcl_GetObjResult(interp);
	Tcl_IncrRefCount(origobj);
	result = Tcl_EvalObjv(interp, 5, objv, 0);
	if (result != TCL_OK) {
		/*
		 * XXX
		 * This probably isn't the right error behavior, but
		 * this error should only happen if the Tcl callback is
		 * somehow invalid, which is a fatal scripting bug.
		 *
		 * NOTE(review): on this path the refcounts taken above
		 * (control_o, rec_o, eid_o, origobj) are never released,
		 * so those objects leak; acceptable only because this is
		 * a fatal test-harness condition.
		 */
err:	__db_err(dbenv, "Tcl rep_send failure");
		return (EINVAL);
	}

	/* The proc's integer result is the value we hand back to rep. */
	resobj = Tcl_GetObjResult(interp);
	result = Tcl_GetIntFromObj(interp, resobj, &ret);
	if (result != TCL_OK)
		goto err;

	/* Restore the caller-visible interpreter result. */
	Tcl_SetObjResult(interp, origobj);
	Tcl_DecrRefCount(origobj);
	Tcl_DecrRefCount(control_o);
	Tcl_DecrRefCount(rec_o);
	Tcl_DecrRefCount(eid_o);

	return (ret);
}
+
+#ifdef TEST_ALLOC
+/*
+ * tcl_db_malloc, tcl_db_realloc, tcl_db_free --
+ * Tcl-local malloc, realloc, and free functions to use for user data
+ * to exercise umalloc/urealloc/ufree. Allocate the memory as a Tcl object
+ * so we're sure to exacerbate and catch any shared-library issues.
+ */
+static void *
+tcl_db_malloc(size)
+ size_t size;
+{
+ Tcl_Obj *obj;
+ void *buf;
+
+ obj = Tcl_NewObj();
+ if (obj == NULL)
+ return (NULL);
+ Tcl_IncrRefCount(obj);
+
+ Tcl_SetObjLength(obj, size + sizeof(Tcl_Obj *));
+ buf = Tcl_GetString(obj);
+ memcpy(buf, &obj, sizeof(&obj));
+
+ buf = (Tcl_Obj **)buf + 1;
+ return (buf);
+}
+
+static void *
+tcl_db_realloc(ptr, size)
+ void *ptr;
+ size_t size;
+{
+ Tcl_Obj *obj;
+
+ if (ptr == NULL)
+ return (tcl_db_malloc(size));
+
+ obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1);
+ Tcl_SetObjLength(obj, size + sizeof(Tcl_Obj *));
+
+ ptr = Tcl_GetString(obj);
+ memcpy(ptr, &obj, sizeof(&obj));
+
+ ptr = (Tcl_Obj **)ptr + 1;
+ return (ptr);
+}
+
+static void
+tcl_db_free(ptr)
+ void *ptr;
+{
+ Tcl_Obj *obj;
+
+ obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1);
+ Tcl_DecrRefCount(obj);
+}
+#endif
diff --git a/storage/bdb/tcl/tcl_dbcursor.c b/storage/bdb/tcl/tcl_dbcursor.c
new file mode 100644
index 00000000000..fb426e53f48
--- /dev/null
+++ b/storage/bdb/tcl/tcl_dbcursor.c
@@ -0,0 +1,924 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_dbcursor.c,v 11.51 2002/08/06 06:20:59 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int tcl_DbcDup __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
+static int tcl_DbcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *, int));
+static int tcl_DbcPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
+
/*
 * PUBLIC: int dbc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
 *
 * dbc_cmd --
 *	Implements the cursor command.
 *
 * clientData is the DBC handle bound when the cursor's Tcl command was
 * created; every subcommand below operates on that cursor.
 */
int
dbc_Cmd(clientData, interp, objc, objv)
	ClientData clientData;		/* Cursor handle */
	Tcl_Interp *interp;		/* Interpreter */
	int objc;			/* How many arguments? */
	Tcl_Obj *CONST objv[];		/* The argument objects */
{
	static char *dbccmds[] = {
#if CONFIG_TEST
		"pget",
#endif
		"close",
		"del",
		"dup",
		"get",
		"put",
		NULL
	};
	enum dbccmds {
#if CONFIG_TEST
		DBCPGET,
#endif
		DBCCLOSE,
		DBCDELETE,
		DBCDUP,
		DBCGET,
		DBCPUT
	};
	DBC *dbc;
	DBTCL_INFO *dbip;
	int cmdindex, result, ret;

	Tcl_ResetResult(interp);
	dbc = (DBC *)clientData;
	/* Look up our bookkeeping entry for this cursor. */
	dbip = _PtrToInfo((void *)dbc);
	result = TCL_OK;

	if (objc <= 1) {
		Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
		return (TCL_ERROR);
	}
	if (dbc == NULL) {
		Tcl_SetResult(interp, "NULL dbc pointer", TCL_STATIC);
		return (TCL_ERROR);
	}
	if (dbip == NULL) {
		Tcl_SetResult(interp, "NULL dbc info pointer", TCL_STATIC);
		return (TCL_ERROR);
	}

	/*
	 * Get the command name index from the object based on the berkdbcmds
	 * defined above.
	 */
	if (Tcl_GetIndexFromObj(interp, objv[1], dbccmds, "command",
	    TCL_EXACT, &cmdindex) != TCL_OK)
		return (IS_HELP(objv[1]));
	switch ((enum dbccmds)cmdindex) {
#if CONFIG_TEST
	case DBCPGET:
		/* pget: the 1 selects tcl_DbcGet's pget behavior. */
		result = tcl_DbcGet(interp, objc, objv, dbc, 1);
		break;
#endif
	case DBCCLOSE:
		/*
		 * No args for this.  Error if there are some.
		 */
		if (objc > 2) {
			Tcl_WrongNumArgs(interp, 2, objv, NULL);
			return (TCL_ERROR);
		}
		_debug_check();
		ret = dbc->c_close(dbc);
		result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
		    "dbc close");
		/*
		 * On a successful close, the cursor handle is dead:
		 * remove its Tcl command and free the bookkeeping entry.
		 */
		if (result == TCL_OK) {
			(void)Tcl_DeleteCommand(interp, dbip->i_name);
			_DeleteInfo(dbip);
		}
		break;
	case DBCDELETE:
		/*
		 * No args for this.  Error if there are some.
		 */
		if (objc > 2) {
			Tcl_WrongNumArgs(interp, 2, objv, NULL);
			return (TCL_ERROR);
		}
		_debug_check();
		ret = dbc->c_del(dbc, 0);
		result = _ReturnSetup(interp, ret, DB_RETOK_DBCDEL(ret),
		    "dbc delete");
		break;
	case DBCDUP:
		result = tcl_DbcDup(interp, objc, objv, dbc);
		break;
	case DBCGET:
		result = tcl_DbcGet(interp, objc, objv, dbc, 0);
		break;
	case DBCPUT:
		result = tcl_DbcPut(interp, objc, objv, dbc);
		break;
	}
	return (result);
}
+
+/*
+ * tcl_DbcPut --
+ */
+static int
+tcl_DbcPut(interp, objc, objv, dbc)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBC *dbc; /* Cursor pointer */
+{
+ static char *dbcutopts[] = {
+#if CONFIG_TEST
+ "-nodupdata",
+#endif
+ "-after",
+ "-before",
+ "-current",
+ "-keyfirst",
+ "-keylast",
+ "-partial",
+ NULL
+ };
+ enum dbcutopts {
+#if CONFIG_TEST
+ DBCPUT_NODUPDATA,
+#endif
+ DBCPUT_AFTER,
+ DBCPUT_BEFORE,
+ DBCPUT_CURRENT,
+ DBCPUT_KEYFIRST,
+ DBCPUT_KEYLAST,
+ DBCPUT_PART
+ };
+ DB *thisdbp;
+ DBT key, data;
+ DBTCL_INFO *dbcip, *dbip;
+ DBTYPE type;
+ Tcl_Obj **elemv, *res;
+ void *dtmp, *ktmp;
+ db_recno_t recno;
+ u_int32_t flag;
+ int elemc, freekey, freedata, i, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+ freekey = freedata = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < (objc - 1)) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcutopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK) {
+ result = TCL_OK;
+ goto out;
+ }
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbcutopts)optindex) {
+#if CONFIG_TEST
+ case DBCPUT_NODUPDATA:
+ FLAG_CHECK(flag);
+ flag = DB_NODUPDATA;
+ break;
+#endif
+ case DBCPUT_AFTER:
+ FLAG_CHECK(flag);
+ flag = DB_AFTER;
+ break;
+ case DBCPUT_BEFORE:
+ FLAG_CHECK(flag);
+ flag = DB_BEFORE;
+ break;
+ case DBCPUT_CURRENT:
+ FLAG_CHECK(flag);
+ flag = DB_CURRENT;
+ break;
+ case DBCPUT_KEYFIRST:
+ FLAG_CHECK(flag);
+ flag = DB_KEYFIRST;
+ break;
+ case DBCPUT_KEYLAST:
+ FLAG_CHECK(flag);
+ flag = DB_KEYLAST;
+ break;
+ case DBCPUT_PART:
+ if (i > (objc - 2)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ data.flags |= DB_DBT_PARTIAL;
+ result = _GetUInt32(interp, elemv[0], &data.doff);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, elemv[1], &data.dlen);
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+ * for result. (See the check for save.doff, a few
+ * lines above and copy that.)
+ */
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+ * We need to determine if we are a recno database or not. If we are,
+ * then key.data is a recno, not a string.
+ */
+ dbcip = _PtrToInfo(dbc);
+ if (dbcip == NULL)
+ type = DB_UNKNOWN;
+ else {
+ dbip = dbcip->i_parent;
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "Cursor without parent database",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ return (result);
+ }
+ thisdbp = dbip->i_dbp;
+ (void)thisdbp->get_type(thisdbp, &type);
+ }
+ /*
+ * When we get here, we better have:
+ * 1 arg if -after, -before or -current
+ * 2 args in all other cases
+ */
+ if (flag == DB_AFTER || flag == DB_BEFORE || flag == DB_CURRENT) {
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-args? data");
+ result = TCL_ERROR;
+ goto out;
+ }
+ /*
+ * We want to get the key back, so we need to set
+ * up the location to get it back in.
+ */
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ recno = 0;
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ }
+ } else {
+ if (i != (objc - 2)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-args? key data");
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = _GetUInt32(interp, objv[objc-2], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ return (result);
+ } else {
+ ret = _CopyObjBytes(interp, objv[objc-2], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCPUT(ret), "dbc put");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ }
+ ret = _CopyObjBytes(interp, objv[objc-1], &dtmp,
+ &data.size, &freedata);
+ data.data = dtmp;
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCPUT(ret), "dbc put");
+ goto out;
+ }
+ _debug_check();
+ ret = dbc->c_put(dbc, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCPUT(ret),
+ "dbc put");
+ if (ret == 0 &&
+ (flag == DB_AFTER || flag == DB_BEFORE) && type == DB_RECNO) {
+ res = Tcl_NewLongObj((long)*(db_recno_t *)key.data);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ if (freekey)
+ (void)__os_free(NULL, ktmp);
+ return (result);
+}
+
+/*
+ * tcl_DbcGet --
+ *	Implements the cursor "get" (and, when ispget != 0, "pget")
+ *	commands: parse the option flags, set up the key/data DBTs
+ *	(recno/queue keys are record numbers, not byte strings), call
+ *	DBC->c_get or DBC->c_pget, and build the Tcl result list.
+ */
+static int
+tcl_DbcGet(interp, objc, objv, dbc, ispget)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBC *dbc; /* Cursor pointer */
+ int ispget; /* 1 for pget, 0 for get */
+{
+ static char *dbcgetopts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-get_both_range",
+ "-multi",
+ "-multi_key",
+#endif
+ "-current",
+ "-first",
+ "-get_both",
+ "-get_recno",
+ "-join_item",
+ "-last",
+ "-next",
+ "-nextdup",
+ "-nextnodup",
+ "-partial",
+ "-prev",
+ "-prevnodup",
+ "-rmw",
+ "-set",
+ "-set_range",
+ "-set_recno",
+ NULL
+ };
+ enum dbcgetopts {
+#if CONFIG_TEST
+ DBCGET_DIRTY,
+ DBCGET_BOTH_RANGE,
+ DBCGET_MULTI,
+ DBCGET_MULTI_KEY,
+#endif
+ DBCGET_CURRENT,
+ DBCGET_FIRST,
+ DBCGET_BOTH,
+ DBCGET_RECNO,
+ DBCGET_JOIN,
+ DBCGET_LAST,
+ DBCGET_NEXT,
+ DBCGET_NEXTDUP,
+ DBCGET_NEXTNODUP,
+ DBCGET_PART,
+ DBCGET_PREV,
+ DBCGET_PREVNODUP,
+ DBCGET_RMW,
+ DBCGET_SET,
+ DBCGET_SETRANGE,
+ DBCGET_SETRECNO
+ };
+ DB *thisdbp;
+ DBT key, data, pdata;
+ DBTCL_INFO *dbcip, *dbip;
+ DBTYPE ptype, type;
+ Tcl_Obj **elemv, *myobj, *retlist;
+ void *dtmp, *ktmp;
+ db_recno_t precno, recno;
+ u_int32_t flag, op;
+ int bufsize, elemc, freekey, freedata, i, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+ freekey = freedata = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?");
+ return (TCL_ERROR);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcgetopts,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK) {
+ result = TCL_OK;
+ goto out;
+ }
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbcgetopts)optindex) {
+#if CONFIG_TEST
+ case DBCGET_DIRTY:
+ flag |= DB_DIRTY_READ;
+ break;
+ case DBCGET_BOTH_RANGE:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_GET_BOTH_RANGE;
+ break;
+ case DBCGET_MULTI:
+ flag |= DB_MULTIPLE;
+ result = Tcl_GetIntFromObj(interp, objv[i], &bufsize);
+ if (result != TCL_OK)
+ goto out;
+ i++;
+ break;
+ case DBCGET_MULTI_KEY:
+ flag |= DB_MULTIPLE_KEY;
+ result = Tcl_GetIntFromObj(interp, objv[i], &bufsize);
+ if (result != TCL_OK)
+ goto out;
+ i++;
+ break;
+#endif
+ case DBCGET_RMW:
+ flag |= DB_RMW;
+ break;
+ case DBCGET_CURRENT:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_CURRENT;
+ break;
+ case DBCGET_FIRST:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_FIRST;
+ break;
+ case DBCGET_LAST:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_LAST;
+ break;
+ case DBCGET_NEXT:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_NEXT;
+ break;
+ case DBCGET_PREV:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_PREV;
+ break;
+ case DBCGET_PREVNODUP:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_PREV_NODUP;
+ break;
+ case DBCGET_NEXTNODUP:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_NEXT_NODUP;
+ break;
+ case DBCGET_NEXTDUP:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_NEXT_DUP;
+ break;
+ case DBCGET_BOTH:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_GET_BOTH;
+ break;
+ case DBCGET_RECNO:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_GET_RECNO;
+ break;
+ case DBCGET_JOIN:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_JOIN_ITEM;
+ break;
+ case DBCGET_SET:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_SET;
+ break;
+ case DBCGET_SETRANGE:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_SET_RANGE;
+ break;
+ case DBCGET_SETRECNO:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_SET_RECNO;
+ break;
+ case DBCGET_PART:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-partial {offset length}?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Get sublist as {offset length}
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &elemc, &elemv);
+ /*
+ * Check the parse result before looking at elemc:
+ * on failure elemc is not set.
+ */
+ if (result != TCL_OK)
+ break;
+ if (elemc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {offset length}", TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+ data.flags |= DB_DBT_PARTIAL;
+ result = _GetUInt32(interp, elemv[0], &data.doff);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, elemv[1], &data.dlen);
+ /*
+ * NOTE: We don't check result here because all we'd
+ * do is break anyway, and we are doing that. If you
+ * add code here, you WILL need to add the check
+ * for result. (See the check for save.doff, a few
+ * lines above and copy that.)
+ */
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+ * We need to determine if we are a recno database
+ * or not. If we are, then key.data is a recno, not
+ * a string.
+ */
+ dbcip = _PtrToInfo(dbc);
+ if (dbcip == NULL) {
+ type = DB_UNKNOWN;
+ ptype = DB_UNKNOWN;
+ } else {
+ dbip = dbcip->i_parent;
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "Cursor without parent database",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ thisdbp = dbip->i_dbp;
+ (void)thisdbp->get_type(thisdbp, &type);
+ if (ispget && thisdbp->s_primary != NULL)
+ (void)thisdbp->
+ s_primary->get_type(thisdbp->s_primary, &ptype);
+ else
+ ptype = DB_UNKNOWN;
+ }
+ /*
+ * When we get here, we better have:
+ * 2 args, key and data if GET_BOTH/GET_BOTH_RANGE was specified.
+ * 1 arg if -set, -set_range or -set_recno
+ * 0 in all other cases.
+ */
+ op = flag & DB_OPFLAGS_MASK;
+ switch (op) {
+ case DB_GET_BOTH:
+#if CONFIG_TEST
+ case DB_GET_BOTH_RANGE:
+#endif
+ if (i != (objc - 2)) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-args? -get_both key data");
+ result = TCL_ERROR;
+ goto out;
+ } else {
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result = _GetUInt32(
+ interp, objv[objc-2], &recno);
+ if (result == TCL_OK) {
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-2],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCGET(ret), "dbc get");
+ /* Use the common cleanup path. */
+ goto out;
+ }
+ key.data = ktmp;
+ }
+ if (ptype == DB_RECNO || ptype == DB_QUEUE) {
+ result = _GetUInt32(
+ interp, objv[objc-1], &precno);
+ if (result == TCL_OK) {
+ data.data = &precno;
+ data.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &dtmp, &data.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCGET(ret), "dbc get");
+ goto out;
+ }
+ data.data = dtmp;
+ }
+ }
+ break;
+ case DB_SET:
+ case DB_SET_RANGE:
+ case DB_SET_RECNO:
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) {
+ (void)__os_malloc(NULL, bufsize, &data.data);
+ data.ulen = bufsize;
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ if (op == DB_SET_RECNO ||
+ type == DB_RECNO || type == DB_QUEUE) {
+ result = _GetUInt32(interp, objv[objc - 1], &recno);
+ key.data = &recno;
+ key.size = sizeof(db_recno_t);
+ } else {
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCGET(ret), "dbc get");
+ /*
+ * Go through "out", not a direct return, so
+ * the -multi/-multi_key buffer allocated
+ * above is freed and not leaked.
+ */
+ goto out;
+ }
+ key.data = ktmp;
+ }
+ break;
+ default:
+ if (i != objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args?");
+ result = TCL_ERROR;
+ goto out;
+ }
+ key.flags |= DB_DBT_MALLOC;
+ if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) {
+ (void)__os_malloc(NULL, bufsize, &data.data);
+ data.ulen = bufsize;
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ }
+
+ _debug_check();
+ memset(&pdata, 0, sizeof(DBT));
+ if (ispget) {
+ F_SET(&pdata, DB_DBT_MALLOC);
+ ret = dbc->c_pget(dbc, &key, &data, &pdata, flag);
+ } else
+ ret = dbc->c_get(dbc, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret), "dbc get");
+ if (result == TCL_ERROR)
+ goto out;
+
+ retlist = Tcl_NewListObj(0, NULL);
+ if (ret == DB_NOTFOUND)
+ goto out1;
+ if (op == DB_GET_RECNO) {
+ recno = *((db_recno_t *)data.data);
+ myobj = Tcl_NewLongObj((long)recno);
+ result = Tcl_ListObjAppendElement(interp, retlist, myobj);
+ } else {
+ if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY))
+ result = _SetMultiList(interp,
+ retlist, &key, &data, type, flag);
+ else if ((type == DB_RECNO || type == DB_QUEUE) &&
+ key.data != NULL) {
+ if (ispget)
+ result = _Set3DBTList(interp, retlist, &key, 1,
+ &data,
+ (ptype == DB_RECNO || ptype == DB_QUEUE),
+ &pdata);
+ else
+ result = _SetListRecnoElem(interp, retlist,
+ *(db_recno_t *)key.data,
+ data.data, data.size);
+ } else {
+ if (ispget)
+ result = _Set3DBTList(interp, retlist, &key, 0,
+ &data,
+ (ptype == DB_RECNO || ptype == DB_QUEUE),
+ &pdata);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size, data.data, data.size);
+ }
+ }
+ if (key.data != NULL && F_ISSET(&key, DB_DBT_MALLOC))
+ __os_ufree(dbc->dbp->dbenv, key.data);
+ if (data.data != NULL && F_ISSET(&data, DB_DBT_MALLOC))
+ __os_ufree(dbc->dbp->dbenv, data.data);
+ if (pdata.data != NULL && F_ISSET(&pdata, DB_DBT_MALLOC))
+ __os_ufree(dbc->dbp->dbenv, pdata.data);
+out1:
+ if (result == TCL_OK)
+ Tcl_SetObjResult(interp, retlist);
+out:
+ if (data.data != NULL && flag & (DB_MULTIPLE|DB_MULTIPLE_KEY))
+ __os_free(dbc->dbp->dbenv, data.data);
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ if (freekey)
+ (void)__os_free(NULL, ktmp);
+ return (result);
+
+}
+
+/*
+ * tcl_DbcDup --
+ *	Implements the cursor "dup" command: duplicates the cursor via
+ *	DBC->c_dup (optionally keeping its position with -position) and
+ *	registers a new Tcl command for the duplicate.
+ */
+static int
+tcl_DbcDup(interp, objc, objv, dbc)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DBC *dbc; /* Cursor pointer */
+{
+ static char *dbcdupopts[] = {
+ "-position",
+ NULL
+ };
+ enum dbcdupopts {
+ DBCDUP_POS
+ };
+ DBC *newdbc;
+ DBTCL_INFO *dbcip, *newdbcip, *dbip;
+ Tcl_Obj *res;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char newname[MSG_SIZE];
+
+ result = TCL_OK;
+ flag = 0;
+ res = NULL;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcdupopts,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get
+ * an errant error message if there is another error.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK) {
+ result = TCL_OK;
+ goto out;
+ }
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbcdupopts)optindex) {
+ case DBCDUP_POS:
+ flag = DB_POSITION;
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ /*
+ * Validate that this cursor has an info structure and a parent
+ * database: the parent's name and cursor-id counter are used to
+ * generate the duplicate cursor's Tcl command name below.
+ */
+ dbcip = _PtrToInfo(dbc);
+ if (dbcip == NULL) {
+ Tcl_SetResult(interp, "Cursor without info structure",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ } else {
+ dbip = dbcip->i_parent;
+ if (dbip == NULL) {
+ Tcl_SetResult(interp, "Cursor without parent database",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+ }
+ /*
+ * Now duplicate the cursor. If successful, we need to create
+ * a new cursor command.
+ */
+
+ snprintf(newname, sizeof(newname),
+ "%s.c%d", dbip->i_name, dbip->i_dbdbcid);
+ newdbcip = _NewInfo(interp, NULL, newname, I_DBC);
+ if (newdbcip != NULL) {
+ ret = dbc->c_dup(dbc, &newdbc, flag);
+ if (ret == 0) {
+ /* Success: bump the id and register the command. */
+ dbip->i_dbdbcid++;
+ newdbcip->i_parent = dbip;
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)dbc_Cmd,
+ (ClientData)newdbc, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(newdbcip, newdbc);
+ Tcl_SetObjResult(interp, res);
+ } else {
+ /* Dup failed: discard the info we pre-allocated. */
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db dup");
+ _DeleteInfo(newdbcip);
+ }
+ } else {
+ Tcl_SetResult(interp, "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+out:
+ return (result);
+
+}
diff --git a/storage/bdb/tcl/tcl_env.c b/storage/bdb/tcl/tcl_env.c
new file mode 100644
index 00000000000..cdf4890e9fc
--- /dev/null
+++ b/storage/bdb/tcl/tcl_env.c
@@ -0,0 +1,1310 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_env.c,v 11.84 2002/08/06 06:21:03 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static void _EnvInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+static int env_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+static int env_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+
+/*
+ * PUBLIC: int env_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ *
+ * env_Cmd --
+ *	Implements the "env" command: dispatches each env widget
+ *	subcommand to the appropriate handler or DB_ENV method.
+ */
+int
+env_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Env handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *envcmds[] = {
+#if CONFIG_TEST
+ "attributes",
+ "lock_detect",
+ "lock_id",
+ "lock_id_free",
+ "lock_id_set",
+ "lock_get",
+ "lock_stat",
+ "lock_timeout",
+ "lock_vec",
+ "log_archive",
+ "log_compare",
+ "log_cursor",
+ "log_file",
+ "log_flush",
+ "log_get",
+ "log_put",
+ "log_stat",
+ "mpool",
+ "mpool_stat",
+ "mpool_sync",
+ "mpool_trickle",
+ "mutex",
+ "rep_elect",
+ "rep_flush",
+ "rep_limit",
+ "rep_process_message",
+ "rep_request",
+ "rep_start",
+ "rep_stat",
+ "rpcid",
+ "test",
+ "txn_checkpoint",
+ "txn_id_set",
+ "txn_recover",
+ "txn_stat",
+ "txn_timeout",
+ "verbose",
+#endif
+ "close",
+ "dbremove",
+ "dbrename",
+ "txn",
+ NULL
+ };
+ enum envcmds {
+#if CONFIG_TEST
+ ENVATTR,
+ ENVLKDETECT,
+ ENVLKID,
+ ENVLKFREEID,
+ ENVLKSETID,
+ ENVLKGET,
+ ENVLKSTAT,
+ ENVLKTIMEOUT,
+ ENVLKVEC,
+ ENVLOGARCH,
+ ENVLOGCMP,
+ ENVLOGCURSOR,
+ ENVLOGFILE,
+ ENVLOGFLUSH,
+ ENVLOGGET,
+ ENVLOGPUT,
+ ENVLOGSTAT,
+ ENVMP,
+ ENVMPSTAT,
+ ENVMPSYNC,
+ ENVTRICKLE,
+ ENVMUTEX,
+ ENVREPELECT,
+ ENVREPFLUSH,
+ ENVREPLIMIT,
+ ENVREPPROCMESS,
+ ENVREPREQUEST,
+ ENVREPSTART,
+ ENVREPSTAT,
+ ENVRPCID,
+ ENVTEST,
+ ENVTXNCKP,
+ ENVTXNSETID,
+ ENVTXNRECOVER,
+ ENVTXNSTAT,
+ ENVTXNTIMEOUT,
+ ENVVERB,
+#endif
+ ENVCLOSE,
+ ENVDBREMOVE,
+ ENVDBRENAME,
+ ENVTXN
+ };
+ DBTCL_INFO *envip, *logcip;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ Tcl_Obj *res;
+ char newname[MSG_SIZE];
+ int cmdindex, result, ret;
+ u_int32_t newval;
+#if CONFIG_TEST
+ u_int32_t otherval;
+#endif
+
+ Tcl_ResetResult(interp);
+ dbenv = (DB_ENV *)clientData;
+ envip = _PtrToInfo((void *)dbenv);
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (dbenv == NULL) {
+ Tcl_SetResult(interp, "NULL env pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (envip == NULL) {
+ Tcl_SetResult(interp, "NULL env info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the berkdbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[1], envcmds, "command",
+ TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ res = NULL;
+ switch ((enum envcmds)cmdindex) {
+#if CONFIG_TEST
+ case ENVLKDETECT:
+ result = tcl_LockDetect(interp, objc, objv, dbenv);
+ break;
+ case ENVLKSTAT:
+ result = tcl_LockStat(interp, objc, objv, dbenv);
+ break;
+ case ENVLKTIMEOUT:
+ result = tcl_LockTimeout(interp, objc, objv, dbenv);
+ break;
+ case ENVLKID:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = dbenv->lock_id(dbenv, &newval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock_id");
+ if (result == TCL_OK)
+ res = Tcl_NewLongObj((long)newval);
+ break;
+ case ENVLKFREEID:
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 3, objv, NULL);
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval);
+ if (result != TCL_OK)
+ return (result);
+ ret = dbenv->lock_id_free(dbenv, newval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock id_free");
+ break;
+ case ENVLKSETID:
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "current max");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval);
+ if (result != TCL_OK)
+ return (result);
+ result = Tcl_GetLongFromObj(interp, objv[3], (long *)&otherval);
+ if (result != TCL_OK)
+ return (result);
+ ret = dbenv->lock_id_set(dbenv, newval, otherval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock id_set");
+ break;
+ case ENVLKGET:
+ result = tcl_LockGet(interp, objc, objv, dbenv);
+ break;
+ case ENVLKVEC:
+ result = tcl_LockVec(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGARCH:
+ result = tcl_LogArchive(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGCMP:
+ result = tcl_LogCompare(interp, objc, objv);
+ break;
+ case ENVLOGCURSOR:
+ snprintf(newname, sizeof(newname),
+ "%s.logc%d", envip->i_name, envip->i_envlogcid);
+ logcip = _NewInfo(interp, NULL, newname, I_LOGC);
+ if (logcip != NULL) {
+ ret = dbenv->log_cursor(dbenv, &logc, 0);
+ if (ret == 0) {
+ result = TCL_OK;
+ envip->i_envlogcid++;
+ /*
+ * We do NOT want to set i_parent to
+ * envip here because log cursors are
+ * not "tied" to the env. That is, they
+ * are NOT closed if the env is closed.
+ */
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)logc_Cmd,
+ (ClientData)logc, NULL);
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(logcip, logc);
+ } else {
+ _DeleteInfo(logcip);
+ result = _ErrorSetup(interp, ret, "log cursor");
+ }
+ } else {
+ Tcl_SetResult(interp,
+ "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
+ case ENVLOGFILE:
+ result = tcl_LogFile(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGFLUSH:
+ result = tcl_LogFlush(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGGET:
+ result = tcl_LogGet(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGPUT:
+ result = tcl_LogPut(interp, objc, objv, dbenv);
+ break;
+ case ENVLOGSTAT:
+ result = tcl_LogStat(interp, objc, objv, dbenv);
+ break;
+ case ENVMPSTAT:
+ result = tcl_MpStat(interp, objc, objv, dbenv);
+ break;
+ case ENVMPSYNC:
+ result = tcl_MpSync(interp, objc, objv, dbenv);
+ break;
+ case ENVTRICKLE:
+ result = tcl_MpTrickle(interp, objc, objv, dbenv);
+ break;
+ case ENVMP:
+ result = tcl_Mp(interp, objc, objv, dbenv, envip);
+ break;
+ case ENVREPELECT:
+ result = tcl_RepElect(interp, objc, objv, dbenv);
+ break;
+ case ENVREPFLUSH:
+ result = tcl_RepFlush(interp, objc, objv, dbenv);
+ break;
+ case ENVREPLIMIT:
+ result = tcl_RepLimit(interp, objc, objv, dbenv);
+ break;
+ case ENVREPPROCMESS:
+ result = tcl_RepProcessMessage(interp, objc, objv, dbenv);
+ break;
+ case ENVREPREQUEST:
+ result = tcl_RepRequest(interp, objc, objv, dbenv);
+ break;
+ case ENVREPSTART:
+ result = tcl_RepStart(interp, objc, objv, dbenv);
+ break;
+ case ENVREPSTAT:
+ result = tcl_RepStat(interp, objc, objv, dbenv);
+ break;
+ case ENVRPCID:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * !!! Retrieve the client ID from the dbp handle directly.
+ * This is for testing purposes only. It is dbp-private data.
+ */
+ res = Tcl_NewLongObj(dbenv->cl_id);
+ break;
+ case ENVTXNCKP:
+ result = tcl_TxnCheckpoint(interp, objc, objv, dbenv);
+ break;
+ case ENVTXNSETID:
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "current max");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval);
+ if (result != TCL_OK)
+ return (result);
+ result = Tcl_GetLongFromObj(interp, objv[3], (long *)&otherval);
+ if (result != TCL_OK)
+ return (result);
+ ret = dbenv->txn_id_set(dbenv, newval, otherval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn id_set");
+ break;
+ case ENVTXNRECOVER:
+ result = tcl_TxnRecover(interp, objc, objv, dbenv, envip);
+ break;
+ case ENVTXNSTAT:
+ result = tcl_TxnStat(interp, objc, objv, dbenv);
+ break;
+ case ENVTXNTIMEOUT:
+ result = tcl_TxnTimeout(interp, objc, objv, dbenv);
+ break;
+ case ENVMUTEX:
+ result = tcl_Mutex(interp, objc, objv, dbenv, envip);
+ break;
+ case ENVATTR:
+ result = tcl_EnvAttr(interp, objc, objv, dbenv);
+ break;
+ case ENVTEST:
+ result = tcl_EnvTest(interp, objc, objv, dbenv);
+ break;
+ case ENVVERB:
+ /*
+ * Two args for this. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ result = tcl_EnvVerbose(interp, dbenv, objv[2], objv[3]);
+ break;
+#endif
+ case ENVCLOSE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * Any transactions will be aborted, and any mpools
+ * closed automatically. We must delete any txn
+ * and mp widgets we have here too for this env.
+ * NOTE: envip is freed when we come back from
+ * this function. Set it to NULL to make sure no
+ * one tries to use it later.
+ */
+ _debug_check();
+ ret = dbenv->close(dbenv, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env close");
+ _EnvInfoDelete(interp, envip);
+ envip = NULL;
+ break;
+ case ENVDBREMOVE:
+ result = env_DbRemove(interp, objc, objv, dbenv);
+ break;
+ case ENVDBRENAME:
+ result = env_DbRename(interp, objc, objv, dbenv);
+ break;
+ case ENVTXN:
+ result = tcl_Txn(interp, objc, objv, dbenv, envip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * PUBLIC: int tcl_EnvRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ * PUBLIC: DB_ENV *, DBTCL_INFO *));
+ *
+ * tcl_EnvRemove --
+ *	Implements the "berkdb envremove" command: parses the option
+ *	arguments, creates a temporary handle when no open env was
+ *	supplied, and calls DB_ENV->remove on the target home directory.
+ */
+int
+tcl_EnvRemove(interp, objc, objv, dbenv, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Env pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ static char *envremopts[] = {
+#if CONFIG_TEST
+ "-overwrite",
+ "-server",
+#endif
+ "-data_dir",
+ "-encryptaes",
+ "-encryptany",
+ "-force",
+ "-home",
+ "-log_dir",
+ "-tmp_dir",
+ "-use_environ",
+ "-use_environ_root",
+ NULL
+ };
+ enum envremopts {
+#if CONFIG_TEST
+ ENVREM_OVERWRITE,
+ ENVREM_SERVER,
+#endif
+ ENVREM_DATADIR,
+ ENVREM_ENCRYPT_AES,
+ ENVREM_ENCRYPT_ANY,
+ ENVREM_FORCE,
+ ENVREM_HOME,
+ ENVREM_LOGDIR,
+ ENVREM_TMPDIR,
+ ENVREM_USE_ENVIRON,
+ ENVREM_USE_ENVIRON_ROOT
+ };
+ DB_ENV *e;
+ u_int32_t cflag, enc_flag, flag, forceflag, sflag;
+ int i, optindex, result, ret;
+ char *datadir, *home, *logdir, *passwd, *server, *tmpdir;
+
+ result = TCL_OK;
+ cflag = flag = forceflag = sflag = 0;
+ /* NULL until we create a handle; used for cleanup at "error". */
+ e = NULL;
+ home = NULL;
+ passwd = NULL;
+ datadir = logdir = tmpdir = NULL;
+ server = NULL;
+ enc_flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args?");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envremopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto error;
+ }
+ i++;
+ switch ((enum envremopts)optindex) {
+#if CONFIG_TEST
+ case ENVREM_SERVER:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server name?");
+ result = TCL_ERROR;
+ break;
+ }
+ server = Tcl_GetStringFromObj(objv[i++], NULL);
+ cflag = DB_CLIENT;
+ break;
+#endif
+ case ENVREM_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case ENVREM_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
+ case ENVREM_FORCE:
+ forceflag |= DB_FORCE;
+ break;
+ case ENVREM_HOME:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-home dir?");
+ result = TCL_ERROR;
+ break;
+ }
+ home = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+#if CONFIG_TEST
+ case ENVREM_OVERWRITE:
+ sflag |= DB_OVERWRITE;
+ break;
+#endif
+ case ENVREM_USE_ENVIRON:
+ flag |= DB_USE_ENVIRON;
+ break;
+ case ENVREM_USE_ENVIRON_ROOT:
+ flag |= DB_USE_ENVIRON_ROOT;
+ break;
+ case ENVREM_DATADIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-data_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ datadir = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENVREM_LOGDIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-log_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ logdir = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENVREM_TMPDIR:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-tmp_dir dir");
+ result = TCL_ERROR;
+ break;
+ }
+ tmpdir = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+
+ /*
+ * If dbenv is NULL, we don't have an open env and we need to open
+ * one of the user. Don't bother with the info stuff.
+ */
+ if (dbenv == NULL) {
+ if ((ret = db_env_create(&e, cflag)) != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_env_create");
+ goto error;
+ }
+ if (server != NULL) {
+ _debug_check();
+ ret = e->set_rpc_server(e, NULL, server, 0, 0, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_rpc_server");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (datadir != NULL) {
+ _debug_check();
+ ret = e->set_data_dir(e, datadir);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_data_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (logdir != NULL) {
+ _debug_check();
+ ret = e->set_lg_dir(e, logdir);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_log_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (tmpdir != NULL) {
+ _debug_check();
+ ret = e->set_tmp_dir(e, tmpdir);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_tmp_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (passwd != NULL) {
+ ret = e->set_encrypt(e, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ /*
+ * Like the other settings above, don't go on to
+ * remove the environment if this failed.
+ */
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (sflag != 0 && (ret = e->set_flags(e, sflag, 1)) != 0) {
+ _debug_check();
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ if (result != TCL_OK)
+ goto error;
+ }
+ } else {
+ /*
+ * We have to clean up any info associated with this env,
+ * regardless of the result of the remove so do it first.
+ * NOTE: envip is freed when we come back from this function.
+ */
+ _EnvInfoDelete(interp, envip);
+ envip = NULL;
+ e = dbenv;
+ }
+
+ flag |= forceflag;
+ /*
+ * When we get here we have parsed all the args. Now remove
+ * the environment.
+ */
+ _debug_check();
+ ret = e->remove(e, home, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env remove");
+ /*
+ * DB_ENV->remove destroys the handle whether or not it succeeds,
+ * so clear "e" to keep the cleanup below from touching it.
+ */
+ e = NULL;
+error:
+ /*
+ * If we created a handle above and bailed out before the remove,
+ * close it so it isn't leaked. (A user-supplied dbenv is the
+ * caller's to close.)
+ */
+ if (e != NULL && dbenv == NULL)
+ (void)e->close(e, 0);
+ return (result);
+}
+
+/*
+ * _EnvInfoDelete --
+ *	Remove the Tcl commands and info structures for an environment
+ *	and for every widget (txn, mpool, etc.) that belongs to it.
+ */
+static void
+_EnvInfoDelete(interp, envip)
+ Tcl_Interp *interp; /* Tcl Interpreter */
+ DBTCL_INFO *envip; /* Info for env */
+{
+ DBTCL_INFO *nextp, *p;
+
+ /*
+ * Before we can delete the environment info, we must close
+ * any open subsystems in this env. We will:
+ * 1. Abort any transactions (which aborts any nested txns).
+ * 2. Close any mpools (which will put any pages itself).
+ * 3. Put any locks and close log cursors.
+ * 4. Close the error file.
+ */
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ /*
+ * Check if this info structure "belongs" to this
+ * env. If so, remove its commands and info structure.
+ * We do not close/abort/whatever here, because we
+ * don't want to replicate DB behavior.
+ *
+ * NOTE: Only those types that can nest need to be
+ * itemized in the switch below. That is txns and mps.
+ * Other types like log cursors and locks will just
+ * get cleaned up here.
+ */
+ if (p->i_parent == envip) {
+ switch (p->i_type) {
+ case I_TXN:
+ _TxnInfoDelete(interp, p);
+ break;
+ case I_MP:
+ _MpInfoDelete(interp, p);
+ break;
+ default:
+ /*
+ * NOTE(review): any other child type still
+ * has its command/info removed below, but
+ * raises this error result — presumably only
+ * txns and mpools ever have i_parent ==
+ * envip; confirm before relying on that.
+ */
+ Tcl_SetResult(interp,
+ "_EnvInfoDelete: bad info type",
+ TCL_STATIC);
+ break;
+ }
+ /* Grab the next link before freeing this node. */
+ nextp = LIST_NEXT(p, entries);
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ } else
+ nextp = LIST_NEXT(p, entries);
+ }
+ (void)Tcl_DeleteCommand(interp, envip->i_name);
+ _DeleteInfo(envip);
+}
+
+#if CONFIG_TEST
+/*
+ * PUBLIC: int tcl_EnvVerbose __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *,
+ * PUBLIC: Tcl_Obj *));
+ *
+ * tcl_EnvVerbose --
+ *	Turn one of the environment's verbose-message categories
+ *	on or off via DB_ENV->set_verbose.
+ */
+int
+tcl_EnvVerbose(interp, dbenv, which, onoff)
+ Tcl_Interp *interp; /* Interpreter */
+ DB_ENV *dbenv; /* Env pointer */
+ Tcl_Obj *which; /* Which subsystem */
+ Tcl_Obj *onoff; /* On or off */
+{
+ static char *subsys[] = {
+ "chkpt",
+ "deadlock",
+ "recovery",
+ "rep",
+ "wait",
+ NULL
+ };
+ /* Parallel table: sysflag[i] is the DB_VERB_* flag for subsys[i]. */
+ static const u_int32_t sysflag[] = {
+ DB_VERB_CHKPOINT,
+ DB_VERB_DEADLOCK,
+ DB_VERB_RECOVERY,
+ DB_VERB_REPLICATION,
+ DB_VERB_WAITSFOR
+ };
+ static char *state[] = {
+ "off",
+ "on",
+ NULL
+ };
+ int stateidx, sysidx, ret;
+
+ if (Tcl_GetIndexFromObj(interp, which, subsys, "option",
+ TCL_EXACT, &sysidx) != TCL_OK)
+ return (IS_HELP(which));
+ if (Tcl_GetIndexFromObj(interp, onoff, state, "option",
+ TCL_EXACT, &stateidx) != TCL_OK)
+ return (IS_HELP(onoff));
+ /* "off" is index 0 and "on" is index 1, so the index is the value. */
+ ret = dbenv->set_verbose(dbenv, sysflag[sysidx], stateidx);
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env set verbose"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * PUBLIC: int tcl_EnvAttr __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ *
+ * tcl_EnvAttr --
+ *	Return a list of the env's attributes
+ */
+int
+tcl_EnvAttr(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Env pointer */
+{
+ Tcl_Obj *attrlist, *item;
+ const char *names[7];
+ int avail[7], cnt, j, result;
+
+ result = TCL_OK;
+
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ attrlist = Tcl_NewListObj(0, NULL);
+ /*
+ * XXX
+ * We peek at the dbenv to determine what subsystems
+ * we have available in this env.
+ */
+ /* The home directory is always reported, as "-home <dir>". */
+ item = Tcl_NewStringObj("-home", strlen("-home"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ attrlist, item)) != TCL_OK)
+ goto err;
+ item = Tcl_NewStringObj(dbenv->db_home, strlen(dbenv->db_home));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ attrlist, item)) != TCL_OK)
+ goto err;
+ /*
+ * Build a table of subsystem names and their availability, then
+ * emit the name of each subsystem configured in this environment.
+ */
+ cnt = 0;
+ names[cnt] = "-cdb"; avail[cnt++] = CDB_LOCKING(dbenv);
+ names[cnt] = "-crypto"; avail[cnt++] = CRYPTO_ON(dbenv);
+ names[cnt] = "-lock"; avail[cnt++] = LOCKING_ON(dbenv);
+ names[cnt] = "-log"; avail[cnt++] = LOGGING_ON(dbenv);
+ names[cnt] = "-mpool"; avail[cnt++] = MPOOL_ON(dbenv);
+ names[cnt] = "-rpc"; avail[cnt++] = RPC_ON(dbenv);
+ names[cnt] = "-txn"; avail[cnt++] = TXN_ON(dbenv);
+ for (j = 0; j < cnt; j++) {
+ if (!avail[j])
+ continue;
+ item = Tcl_NewStringObj(names[j], strlen(names[j]));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ attrlist, item)) != TCL_OK)
+ goto err;
+ }
+ Tcl_SetObjResult(interp, attrlist);
+err:
+ return (result);
+}
+
+/*
+ * PUBLIC: int tcl_EnvTest __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ *
+ * tcl_EnvTest --
+ */
int
tcl_EnvTest(interp, objc, objv, dbenv)
	Tcl_Interp *interp;		/* Interpreter */
	int objc;			/* How many arguments? */
	Tcl_Obj *CONST objv[];		/* The argument objects */
	DB_ENV *dbenv;			/* Env pointer */
{
	/*
	 * "env test abort|copy <location>": store a DB_TEST_* trigger
	 * value into the env's test_abort or test_copy field so the test
	 * suite can force failures at specific points.
	 */
	static char *envtestcmd[] = {
		"abort",
		"copy",
		NULL
	};
	enum envtestcmd {
		ENVTEST_ABORT,
		ENVTEST_COPY
	};
	/* Keep this table and the enum below in the same order. */
	static char *envtestat[] = {
		"electinit",
		"electsend",
		"electvote1",
		"electvote2",
		"electwait1",
		"electwait2",
		"none",
		"predestroy",
		"preopen",
		"postdestroy",
		"postlog",
		"postlogmeta",
		"postopen",
		"postsync",
		"subdb_lock",
		NULL
	};
	enum envtestat {
		ENVTEST_ELECTINIT,
		ENVTEST_ELECTSEND,
		ENVTEST_ELECTVOTE1,
		ENVTEST_ELECTVOTE2,
		ENVTEST_ELECTWAIT1,
		ENVTEST_ELECTWAIT2,
		ENVTEST_NONE,
		ENVTEST_PREDESTROY,
		ENVTEST_PREOPEN,
		ENVTEST_POSTDESTROY,
		ENVTEST_POSTLOG,
		ENVTEST_POSTLOGMETA,
		ENVTEST_POSTOPEN,
		ENVTEST_POSTSYNC,
		ENVTEST_SUBDB_LOCKS
	};
	int *loc, optindex, result, testval;

	result = TCL_OK;
	loc = NULL;

	if (objc != 4) {
		Tcl_WrongNumArgs(interp, 2, objv, "abort|copy location");
		return (TCL_ERROR);
	}

	/*
	 * This must be the "copy" or "abort" portion of the command.
	 */
	if (Tcl_GetIndexFromObj(interp, objv[2], envtestcmd, "command",
	    TCL_EXACT, &optindex) != TCL_OK) {
		result = IS_HELP(objv[2]);
		return (result);
	}
	/* Select which DB_ENV test field the value will be stored in. */
	switch ((enum envtestcmd)optindex) {
	case ENVTEST_ABORT:
		loc = &dbenv->test_abort;
		break;
	case ENVTEST_COPY:
		loc = &dbenv->test_copy;
		break;
	default:
		Tcl_SetResult(interp, "Illegal store location", TCL_STATIC);
		return (TCL_ERROR);
	}

	/*
	 * This must be the location portion of the command.
	 */
	if (Tcl_GetIndexFromObj(interp, objv[3], envtestat, "location",
	    TCL_EXACT, &optindex) != TCL_OK) {
		result = IS_HELP(objv[3]);
		return (result);
	}
	/*
	 * Map the location name to its DB_TEST_* value.  The election
	 * and subdb_lock hooks only make sense for "abort", hence the
	 * DB_ASSERTs on loc.
	 */
	switch ((enum envtestat)optindex) {
	case ENVTEST_ELECTINIT:
		DB_ASSERT(loc == &dbenv->test_abort);
		testval = DB_TEST_ELECTINIT;
		break;
	case ENVTEST_ELECTSEND:
		DB_ASSERT(loc == &dbenv->test_abort);
		testval = DB_TEST_ELECTSEND;
		break;
	case ENVTEST_ELECTVOTE1:
		DB_ASSERT(loc == &dbenv->test_abort);
		testval = DB_TEST_ELECTVOTE1;
		break;
	case ENVTEST_ELECTVOTE2:
		DB_ASSERT(loc == &dbenv->test_abort);
		testval = DB_TEST_ELECTVOTE2;
		break;
	case ENVTEST_ELECTWAIT1:
		DB_ASSERT(loc == &dbenv->test_abort);
		testval = DB_TEST_ELECTWAIT1;
		break;
	case ENVTEST_ELECTWAIT2:
		DB_ASSERT(loc == &dbenv->test_abort);
		testval = DB_TEST_ELECTWAIT2;
		break;
	case ENVTEST_NONE:
		/* "none" clears any previously set trigger. */
		testval = 0;
		break;
	case ENVTEST_PREOPEN:
		testval = DB_TEST_PREOPEN;
		break;
	case ENVTEST_PREDESTROY:
		testval = DB_TEST_PREDESTROY;
		break;
	case ENVTEST_POSTLOG:
		testval = DB_TEST_POSTLOG;
		break;
	case ENVTEST_POSTLOGMETA:
		testval = DB_TEST_POSTLOGMETA;
		break;
	case ENVTEST_POSTOPEN:
		testval = DB_TEST_POSTOPEN;
		break;
	case ENVTEST_POSTDESTROY:
		testval = DB_TEST_POSTDESTROY;
		break;
	case ENVTEST_POSTSYNC:
		testval = DB_TEST_POSTSYNC;
		break;
	case ENVTEST_SUBDB_LOCKS:
		DB_ASSERT(loc == &dbenv->test_abort);
		testval = DB_TEST_SUBDB_LOCKS;
		break;
	default:
		Tcl_SetResult(interp, "Illegal test location", TCL_STATIC);
		return (TCL_ERROR);
	}

	*loc = testval;
	Tcl_SetResult(interp, "0", TCL_STATIC);
	return (result);
}
+#endif
+
+/*
+ * env_DbRemove --
+ * Implements the ENV->dbremove command.
+ */
+static int
+env_DbRemove(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ static char *envdbrem[] = {
+ "-auto_commit",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum envdbrem {
+ TCL_EDBREM_COMMIT,
+ TCL_EDBREM_TXN,
+ TCL_EDBREM_ENDARG
+ };
+ DB_TXN *txn;
+ u_int32_t flag;
+ int endarg, i, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *subdb, msg[MSG_SIZE];
+
+ txn = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = subdb = NULL;
+ endarg = 0;
+ flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envdbrem,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum envdbrem)optindex) {
+ case TCL_EDBREM_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case TCL_EDBREM_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "env dbremove: Invalid txn %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_EDBREM_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left, (better be 1 or 2 left) are
+ * file names. If there is 1, a db name, if 2 a db and subdb name.
+ */
+ if ((i != (objc - 1)) || (i != (objc - 2))) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i != objc) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(dbenv, subdblen + 1,
+ &subdb)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ } else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = dbenv->dbremove(dbenv, txn, db, subdb, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env dbremove");
+error:
+ if (subdb)
+ __os_free(dbenv, subdb);
+ return (result);
+}
+
+/*
+ * env_DbRename --
+ * Implements the ENV->dbrename command.
+ */
+static int
+env_DbRename(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ static char *envdbmv[] = {
+ "-auto_commit",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum envdbmv {
+ TCL_EDBMV_COMMIT,
+ TCL_EDBMV_TXN,
+ TCL_EDBMV_ENDARG
+ };
+ DB_TXN *txn;
+ u_int32_t flag;
+ int endarg, i, newlen, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *newname, *subdb, msg[MSG_SIZE];
+
+ txn = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = newname = subdb = NULL;
+ endarg = 0;
+ flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 3, objv,
+ "?args? filename ?database? ?newname?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * We must first parse for the environment flag, since that
+ * is needed for db_create. Then create the db handle.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envdbmv,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum envdbmv)optindex) {
+ case TCL_EDBMV_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case TCL_EDBMV_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "env dbrename: Invalid txn %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_EDBMV_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left, (better be 2 or 3 left) are
+ * file names. If there is 2, a db name, if 3 a db and subdb name.
+ */
+ if ((i != (objc - 2)) || (i != (objc - 3))) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i == objc - 2) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(dbenv, subdblen + 1,
+ &subdb)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &newlen);
+ if ((ret = __os_malloc(dbenv, newlen + 1,
+ &newname)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(newname, subdbtmp, newlen);
+ newname[newlen] = '\0';
+ } else {
+ Tcl_WrongNumArgs(interp, 3, objv,
+ "?args? filename ?database? ?newname?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = dbenv->dbrename(dbenv, txn, db, subdb, newname, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env dbrename");
+error:
+ if (subdb)
+ __os_free(dbenv, subdb);
+ if (newname)
+ __os_free(dbenv, newname);
+ return (result);
+}
diff --git a/storage/bdb/tcl/tcl_internal.c b/storage/bdb/tcl/tcl_internal.c
new file mode 100644
index 00000000000..2d6ad4df444
--- /dev/null
+++ b/storage/bdb/tcl/tcl_internal.c
@@ -0,0 +1,717 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_internal.c,v 11.54 2002/08/15 02:47:46 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc_auto/db_ext.h"
+
+/*
+ *
+ * internal.c --
+ *
+ * This file contains internal functions we need to maintain
+ * state for our Tcl interface.
+ *
+ * NOTE: This all uses a linear linked list. If we end up with
+ * too many info structs such that this is a performance hit, it
+ * should be redone using hashes or a list per type. The assumption
+ * is that the user won't have more than a few dozen info structs
+ * in operation at any given point in time. Even a complicated
+ * application with a few environments, nested transactions, locking,
+ * and several databases open, using cursors should not have a
+ * negative performance impact, in terms of searching the list to
+ * get/manipulate the info structure.
+ */
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static void tcl_flag_callback __P((u_int32_t, const FN *, void *));
+
+/*
+ * Private structure type used to pass both an interp and an object into
+ * a callback's single void *.
+ */
+struct __tcl_callback_bundle {
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+};
+
+#define GLOB_CHAR(c) ((c) == '*' || (c) == '?')
+
+/*
+ * PUBLIC: DBTCL_INFO *_NewInfo __P((Tcl_Interp *,
+ * PUBLIC: void *, char *, enum INFOTYPE));
+ *
+ * _NewInfo --
+ *
+ * This function will create a new info structure and fill it in
+ * with the name and pointer, id and type.
+ */
+DBTCL_INFO *
+_NewInfo(interp, anyp, name, type)
+ Tcl_Interp *interp;
+ void *anyp;
+ char *name;
+ enum INFOTYPE type;
+{
+ DBTCL_INFO *p;
+ int i, ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(DBTCL_INFO), &p)) != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ return (NULL);
+ }
+
+ if ((ret = __os_strdup(NULL, name, &p->i_name)) != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ __os_free(NULL, p);
+ return (NULL);
+ }
+ p->i_interp = interp;
+ p->i_anyp = anyp;
+ p->i_data = 0;
+ p->i_data2 = 0;
+ p->i_type = type;
+ p->i_parent = NULL;
+ p->i_err = NULL;
+ p->i_errpfx = NULL;
+ p->i_lockobj.data = NULL;
+ p->i_btcompare = NULL;
+ p->i_dupcompare = NULL;
+ p->i_hashproc = NULL;
+ p->i_second_call = NULL;
+ p->i_rep_eid = NULL;
+ p->i_rep_send = NULL;
+ for (i = 0; i < MAX_ID; i++)
+ p->i_otherid[i] = 0;
+
+ LIST_INSERT_HEAD(&__db_infohead, p, entries);
+ return (p);
+}
+
+/*
+ * PUBLIC: void *_NameToPtr __P((CONST char *));
+ */
+void *
+_NameToPtr(name)
+ CONST char *name;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (strcmp(name, p->i_name) == 0)
+ return (p->i_anyp);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: DBTCL_INFO *_PtrToInfo __P((CONST void *));
+ */
+DBTCL_INFO *
+_PtrToInfo(ptr)
+ CONST void *ptr;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (p->i_anyp == ptr)
+ return (p);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: DBTCL_INFO *_NameToInfo __P((CONST char *));
+ */
+DBTCL_INFO *
+_NameToInfo(name)
+ CONST char *name;
+{
+ DBTCL_INFO *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL;
+ p = LIST_NEXT(p, entries))
+ if (strcmp(name, p->i_name) == 0)
+ return (p);
+ return (NULL);
+}
+
+/*
+ * PUBLIC: void _SetInfoData __P((DBTCL_INFO *, void *));
+ */
+void
+_SetInfoData(p, data)
+ DBTCL_INFO *p;
+ void *data;
+{
+ if (p == NULL)
+ return;
+ p->i_anyp = data;
+ return;
+}
+
+/*
+ * PUBLIC: void _DeleteInfo __P((DBTCL_INFO *));
+ */
void
_DeleteInfo(p)
	DBTCL_INFO *p;
{
	if (p == NULL)
		return;
	/* Unlink first so no one can find the struct mid-teardown. */
	LIST_REMOVE(p, entries);
	if (p->i_lockobj.data != NULL)
		__os_free(NULL, p->i_lockobj.data);
	/* i_err is a FILE * opened for error output; close it here. */
	if (p->i_err != NULL) {
		fclose(p->i_err);
		p->i_err = NULL;
	}
	if (p->i_errpfx != NULL)
		__os_free(NULL, p->i_errpfx);
	/* Release the Tcl callback objects we held references on. */
	if (p->i_btcompare != NULL)
		Tcl_DecrRefCount(p->i_btcompare);
	if (p->i_dupcompare != NULL)
		Tcl_DecrRefCount(p->i_dupcompare);
	if (p->i_hashproc != NULL)
		Tcl_DecrRefCount(p->i_hashproc);
	if (p->i_second_call != NULL)
		Tcl_DecrRefCount(p->i_second_call);
	if (p->i_rep_eid != NULL)
		Tcl_DecrRefCount(p->i_rep_eid);
	if (p->i_rep_send != NULL)
		Tcl_DecrRefCount(p->i_rep_send);
	/* i_name was strdup'd in _NewInfo; free it and the struct last. */
	__os_free(NULL, p->i_name);
	__os_free(NULL, p);

	return;
}
+
+/*
+ * PUBLIC: int _SetListElem __P((Tcl_Interp *,
+ * PUBLIC: Tcl_Obj *, void *, int, void *, int));
+ */
+int
+_SetListElem(interp, list, elem1, e1cnt, elem2, e2cnt)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ void *elem1, *elem2;
+ int e1cnt, e2cnt;
+{
+ Tcl_Obj *myobjv[2], *thislist;
+ int myobjc;
+
+ myobjc = 2;
+ myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, e1cnt);
+ myobjv[1] = Tcl_NewByteArrayObj((u_char *)elem2, e2cnt);
+ thislist = Tcl_NewListObj(myobjc, myobjv);
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+
+}
+
+/*
+ * PUBLIC: int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, int));
+ */
+int
+_SetListElemInt(interp, list, elem1, elem2)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ void *elem1;
+ int elem2;
+{
+ Tcl_Obj *myobjv[2], *thislist;
+ int myobjc;
+
+ myobjc = 2;
+ myobjv[0] = Tcl_NewByteArrayObj((u_char *)elem1, strlen((char *)elem1));
+ myobjv[1] = Tcl_NewIntObj(elem2);
+ thislist = Tcl_NewListObj(myobjc, myobjv);
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+}
+
+/*
+ * PUBLIC: int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *,
+ * PUBLIC: db_recno_t, u_char *, int));
+ */
+int
+_SetListRecnoElem(interp, list, elem1, elem2, e2size)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ db_recno_t elem1;
+ u_char *elem2;
+ int e2size;
+{
+ Tcl_Obj *myobjv[2], *thislist;
+ int myobjc;
+
+ myobjc = 2;
+ myobjv[0] = Tcl_NewLongObj((long)elem1);
+ myobjv[1] = Tcl_NewByteArrayObj(elem2, e2size);
+ thislist = Tcl_NewListObj(myobjc, myobjv);
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+
+}
+
+/*
+ * _Set3DBTList --
+ * This is really analogous to both _SetListElem and
+ * _SetListRecnoElem--it's used for three-DBT lists returned by
+ * DB->pget and DBC->pget(). We'd need a family of four functions
+ * to handle all the recno/non-recno cases, however, so we make
+ * this a little more aware of the internals and do the logic inside.
+ *
+ * XXX
+ * One of these days all these functions should probably be cleaned up
+ * to eliminate redundancy and bring them into the standard DB
+ * function namespace.
+ *
+ * PUBLIC: int _Set3DBTList __P((Tcl_Interp *, Tcl_Obj *, DBT *, int,
+ * PUBLIC: DBT *, int, DBT *));
+ */
+int
+_Set3DBTList(interp, list, elem1, is1recno, elem2, is2recno, elem3)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ DBT *elem1, *elem2, *elem3;
+ int is1recno, is2recno;
+{
+
+ Tcl_Obj *myobjv[3], *thislist;
+
+ if (is1recno)
+ myobjv[0] = Tcl_NewLongObj((long)*(db_recno_t *)elem1->data);
+ else
+ myobjv[0] =
+ Tcl_NewByteArrayObj((u_char *)elem1->data, elem1->size);
+
+ if (is2recno)
+ myobjv[1] = Tcl_NewLongObj((long)*(db_recno_t *)elem2->data);
+ else
+ myobjv[1] =
+ Tcl_NewByteArrayObj((u_char *)elem2->data, elem2->size);
+
+ myobjv[2] = Tcl_NewByteArrayObj((u_char *)elem3->data, elem3->size);
+
+ thislist = Tcl_NewListObj(3, myobjv);
+
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+}
+
+/*
+ * _SetMultiList -- build a list for return from multiple get.
+ *
+ * PUBLIC: int _SetMultiList __P((Tcl_Interp *,
+ * PUBLIC: Tcl_Obj *, DBT *, DBT*, int, int));
+ */
int
_SetMultiList(interp, list, key, data, type, flag)
	Tcl_Interp *interp;
	Tcl_Obj *list;
	DBT *key, *data;
	int type, flag;
{
	db_recno_t recno;
	u_int32_t dlen, klen;
	int result;
	void *pointer, *dp, *kp;

	recno = 0;
	dlen = 0;
	kp = NULL;

	/* "pointer" is the cursor the DB_MULTIPLE macros advance. */
	DB_MULTIPLE_INIT(pointer, data);
	result = TCL_OK;

	/* Recno/queue access methods key by record number, not bytes. */
	if (type == DB_RECNO || type == DB_QUEUE)
		recno = *(db_recno_t *) key->data;
	else
		kp = key->data;
	klen = key->size;
	do {
		/*
		 * With DB_MULTIPLE_KEY the buffer carries key/data pairs;
		 * otherwise only data items and the key stays fixed.
		 */
		if (flag & DB_MULTIPLE_KEY) {
			if (type == DB_RECNO || type == DB_QUEUE)
				DB_MULTIPLE_RECNO_NEXT(pointer,
				    data, recno, dp, dlen);
			else
				DB_MULTIPLE_KEY_NEXT(pointer,
				    data, kp, klen, dp, dlen);
		} else
			DB_MULTIPLE_NEXT(pointer, data, dp, dlen);

		/* The macros NULL the cursor when the buffer is consumed. */
		if (pointer == NULL)
			break;

		if (type == DB_RECNO || type == DB_QUEUE) {
			result =
			    _SetListRecnoElem(interp, list, recno, dp, dlen);
			/*
			 * NOTE(review): recno is incremented per item even in
			 * the DB_MULTIPLE_KEY case where the macro already
			 * extracted it -- presumably only the non-KEY path
			 * relies on this; confirm against callers.
			 */
			recno++;
		} else
			result = _SetListElem(interp, list, kp, klen, dp, dlen);
	} while (result == TCL_OK);

	return (result);
}
+/*
+ * PUBLIC: int _GetGlobPrefix __P((char *, char **));
+ */
int
_GetGlobPrefix(pattern, prefix)
	char *pattern;
	char **prefix;
{
	int i, j;
	char *p;

	/*
	 * Copy the literal prefix of a glob pattern (everything before
	 * the first unescaped '*' or '?') into a freshly allocated
	 * string.  Returns 0 on success, 1 on allocation failure; the
	 * caller owns *prefix.
	 *
	 * Duplicate it, we get enough space and most of the work is done.
	 */
	if (__os_strdup(NULL, pattern, prefix) != 0)
		return (1);

	/*
	 * Compact in place: j trails i whenever a backslash escape is
	 * collapsed to its escaped character.
	 */
	p = *prefix;
	for (i = 0, j = 0; p[i] && !GLOB_CHAR(p[i]); i++, j++)
		/*
		 * Check for an escaped character and adjust
		 */
		if (p[i] == '\\' && p[i+1]) {
			p[j] = p[i+1];
			i++;
		} else
			p[j] = p[i];
	/* Terminate at the write position, truncating the glob tail. */
	p[j] = 0;
	return (0);
}
+
+/*
+ * PUBLIC: int _ReturnSetup __P((Tcl_Interp *, int, int, char *));
+ */
+int
+_ReturnSetup(interp, ret, ok, errmsg)
+ Tcl_Interp *interp;
+ int ret, ok;
+ char *errmsg;
+{
+ char *msg;
+
+ if (ret > 0)
+ return (_ErrorSetup(interp, ret, errmsg));
+
+ /*
+ * We either have success or a DB error. If a DB error, set up the
+ * string. We return an error if not one of the errors we catch.
+ * If anyone wants to reset the result to return anything different,
+ * then the calling function is responsible for doing so via
+ * Tcl_ResetResult or another Tcl_SetObjResult.
+ */
+ if (ret == 0) {
+ Tcl_SetResult(interp, "0", TCL_STATIC);
+ return (TCL_OK);
+ }
+
+ msg = db_strerror(ret);
+ Tcl_AppendResult(interp, msg, NULL);
+
+ if (ok)
+ return (TCL_OK);
+ else {
+ Tcl_SetErrorCode(interp, "BerkeleyDB", msg, NULL);
+ return (TCL_ERROR);
+ }
+}
+
+/*
+ * PUBLIC: int _ErrorSetup __P((Tcl_Interp *, int, char *));
+ */
int
_ErrorSetup(interp, ret, errmsg)
	Tcl_Interp *interp;
	int ret;
	char *errmsg;
{
	/* Report a system error: "errmsg:<posix message>" via errno. */
	Tcl_SetErrno(ret);
	Tcl_AppendResult(interp, errmsg, ":", Tcl_PosixError(interp), NULL);
	return (TCL_ERROR);
}
+
+/*
+ * PUBLIC: void _ErrorFunc __P((CONST char *, char *));
+ */
+void
+_ErrorFunc(pfx, msg)
+ CONST char *pfx;
+ char *msg;
+{
+ DBTCL_INFO *p;
+ Tcl_Interp *interp;
+ int size;
+ char *err;
+
+ p = _NameToInfo(pfx);
+ if (p == NULL)
+ return;
+ interp = p->i_interp;
+
+ size = strlen(pfx) + strlen(msg) + 4;
+ /*
+ * If we cannot allocate enough to put together the prefix
+ * and message then give them just the message.
+ */
+ if (__os_malloc(NULL, size, &err) != 0) {
+ Tcl_AddErrorInfo(interp, msg);
+ Tcl_AppendResult(interp, msg, "\n", NULL);
+ return;
+ }
+ snprintf(err, size, "%s: %s", pfx, msg);
+ Tcl_AddErrorInfo(interp, err);
+ Tcl_AppendResult(interp, err, "\n", NULL);
+ __os_free(NULL, err);
+ return;
+}
+
+#define INVALID_LSNMSG "Invalid LSN with %d parts. Should have 2.\n"
+
+/*
+ * PUBLIC: int _GetLsn __P((Tcl_Interp *, Tcl_Obj *, DB_LSN *));
+ */
+int
+_GetLsn(interp, obj, lsn)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ DB_LSN *lsn;
+{
+ Tcl_Obj **myobjv;
+ char msg[MSG_SIZE];
+ int myobjc, result;
+ u_int32_t tmp;
+
+ result = Tcl_ListObjGetElements(interp, obj, &myobjc, &myobjv);
+ if (result == TCL_ERROR)
+ return (result);
+ if (myobjc != 2) {
+ result = TCL_ERROR;
+ snprintf(msg, MSG_SIZE, INVALID_LSNMSG, myobjc);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (result);
+ }
+ result = _GetUInt32(interp, myobjv[0], &tmp);
+ if (result == TCL_ERROR)
+ return (result);
+ lsn->file = tmp;
+ result = _GetUInt32(interp, myobjv[1], &tmp);
+ lsn->offset = tmp;
+ return (result);
+}
+
+/*
+ * _GetUInt32 --
+ * Get a u_int32_t from a Tcl object. Tcl_GetIntFromObj does the
+ * right thing most of the time, but on machines where a long is 8 bytes
+ * and an int is 4 bytes, it errors on integers between the maximum
+ * int32_t and the maximum u_int32_t. This is correct, but we generally
+ * want a u_int32_t in the end anyway, so we use Tcl_GetLongFromObj and do
+ * the bounds checking ourselves.
+ *
+ * This code looks much like Tcl_GetIntFromObj, only with a different
+ * bounds check. It's essentially Tcl_GetUnsignedIntFromObj, which
+ * unfortunately doesn't exist.
+ *
+ * PUBLIC: int _GetUInt32 __P((Tcl_Interp *, Tcl_Obj *, u_int32_t *));
+ */
int
_GetUInt32(interp, obj, resp)
	Tcl_Interp *interp;
	Tcl_Obj *obj;
	u_int32_t *resp;
{
	int result;
	long ltmp;

	result = Tcl_GetLongFromObj(interp, obj, &ltmp);
	if (result != TCL_OK)
		return (result);

	/*
	 * Round-trip through u_int32_t: if the value survives the
	 * narrowing cast it fits; otherwise it was negative or too
	 * large for 32 bits.
	 */
	if ((unsigned long)ltmp != (u_int32_t)ltmp) {
		if (interp != NULL) {
			Tcl_ResetResult(interp);
			Tcl_AppendToObj(Tcl_GetObjResult(interp),
			    "integer value too large for u_int32_t", -1);
		}
		return (TCL_ERROR);
	}

	*resp = (u_int32_t)ltmp;
	return (TCL_OK);
}
+
+/*
+ * tcl_flag_callback --
+ * Callback for db_pr.c functions that contain the FN struct mapping
+ * flag values to meaningful strings. This function appends a Tcl_Obj
+ * containing each pertinent flag string to the specified Tcl list.
+ */
static void
tcl_flag_callback(flags, fn, vtcbp)
	u_int32_t flags;
	const FN *fn;
	void *vtcbp;
{
	const FN *fnp;
	Tcl_Interp *interp;
	Tcl_Obj *newobj, *listobj;
	int result;
	struct __tcl_callback_bundle *tcbp;

	/* Unpack the interp/list pair smuggled through the void *. */
	tcbp = (struct __tcl_callback_bundle *)vtcbp;
	interp = tcbp->interp;
	listobj = tcbp->obj;

	/* Append the name of every FN entry whose mask bit is set. */
	for (fnp = fn; fnp->mask != 0; ++fnp)
		if (LF_ISSET(fnp->mask)) {
			newobj = Tcl_NewStringObj(fnp->name, strlen(fnp->name));
			result =
			    Tcl_ListObjAppendElement(interp, listobj, newobj);

			/*
			 * Tcl_ListObjAppendElement is defined to return TCL_OK
			 * unless listobj isn't actually a list (or convertible
			 * into one).  If this is the case, we screwed up badly
			 * somehow.
			 */
			DB_ASSERT(result == TCL_OK);
		}
}
+
+/*
+ * _GetFlagsList --
+ * Get a new Tcl object, containing a list of the string values
+ * associated with a particular set of flag values, given a function
+ * that can extract the right names for the right flags.
+ *
+ * PUBLIC: Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t,
+ * PUBLIC: void (*)(u_int32_t, void *,
+ * PUBLIC: void (*)(u_int32_t, const FN *, void *))));
+ */
+Tcl_Obj *
+_GetFlagsList(interp, flags, func)
+ Tcl_Interp *interp;
+ u_int32_t flags;
+ void (*func)
+ __P((u_int32_t, void *, void (*)(u_int32_t, const FN *, void *)));
+{
+ Tcl_Obj *newlist;
+ struct __tcl_callback_bundle tcb;
+
+ newlist = Tcl_NewObj();
+
+ memset(&tcb, 0, sizeof(tcb));
+ tcb.interp = interp;
+ tcb.obj = newlist;
+
+ func(flags, &tcb, tcl_flag_callback);
+
+ return (newlist);
+}
+
+int __debug_stop, __debug_on, __debug_print, __debug_test;
+
+/*
+ * PUBLIC: void _debug_check __P((void));
+ */
void
_debug_check()
{
	/*
	 * Test-harness hook: counts calls in __debug_on, optionally
	 * echoing the count, and drops into __db_loadme() (a debugger
	 * breakpoint target) when the count hits __debug_test or
	 * __debug_stop is set.
	 */
	if (__debug_on == 0)
		return;

	if (__debug_print != 0) {
		printf("\r%7d:", __debug_on);
		fflush(stdout);
	}
	if (__debug_on++ == __debug_test || __debug_stop)
		__db_loadme();
}
+
+/*
+ * XXX
+ * Tcl 8.1+ Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj bug.
+ *
+ * There is a bug in Tcl 8.1+ and byte arrays in that if it happens
+ * to use an object as both a byte array and something else like
+ * an int, and you've done a Tcl_GetByteArrayFromObj, then you
+ * do a Tcl_GetIntFromObj, your memory is deleted.
+ *
+ * Workaround is for all byte arrays we want to use, if it can be
+ * represented as an integer, we copy it so that we don't lose the
+ * memory.
+ */
+/*
+ * PUBLIC: int _CopyObjBytes __P((Tcl_Interp *, Tcl_Obj *obj, void **,
+ * PUBLIC: u_int32_t *, int *));
+ */
int
_CopyObjBytes(interp, obj, newp, sizep, freep)
	Tcl_Interp *interp;
	Tcl_Obj *obj;
	void **newp;
	u_int32_t *sizep;
	int *freep;
{
	void *tmp, *new;
	int i, len, ret;

	/*
	 * If the object is not an int, then just return the byte
	 * array because it won't be transformed out from under us.
	 * If it is a number, we need to copy it.
	 *
	 * NOTE: the Tcl_GetIntFromObj probe must run before
	 * Tcl_GetByteArrayFromObj -- see the Tcl 8.1+ shimmering bug
	 * described above this function.
	 */
	*freep = 0;
	ret = Tcl_GetIntFromObj(interp, obj, &i);
	tmp = Tcl_GetByteArrayFromObj(obj, &len);
	*sizep = len;
	if (ret == TCL_ERROR) {
		/* Not an int: clear the probe's error and share the bytes. */
		Tcl_ResetResult(interp);
		*newp = tmp;
		return (0);
	}

	/*
	 * If we get here, we have an integer that might be reused
	 * at some other point so we cannot count on GetByteArray
	 * keeping our pointer valid.
	 */
	if ((ret = __os_malloc(NULL, len, &new)) != 0)
		return (ret);
	memcpy(new, tmp, len);
	*newp = new;
	/* Caller must __os_free(*newp) when *freep is set. */
	*freep = 1;
	return (0);
}
diff --git a/storage/bdb/tcl/tcl_lock.c b/storage/bdb/tcl/tcl_lock.c
new file mode 100644
index 00000000000..6cb96dbb0da
--- /dev/null
+++ b/storage/bdb/tcl/tcl_lock.c
@@ -0,0 +1,739 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_lock.c,v 11.47 2002/08/08 15:27:10 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int lock_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int _LockMode __P((Tcl_Interp *, Tcl_Obj *, db_lockmode_t *));
+static int _GetThisLock __P((Tcl_Interp *, DB_ENV *, u_int32_t,
+ u_int32_t, DBT *, db_lockmode_t, char *));
+static void _LockPutInfo __P((Tcl_Interp *, db_lockop_t, DB_LOCK *,
+ u_int32_t, DBT *));
+#ifdef CONFIG_TEST
+/*
+ * Lock-mode option strings; order must match enum lkmode below.
+ * Use #ifdef, not #if: CONFIG_TEST may be defined with an empty
+ * expansion, which makes "#if CONFIG_TEST" a preprocessing error,
+ * and tcl_log.c already tests it with #ifdef.
+ */
+static char *lkmode[] = {
+ "ng",
+ "read",
+ "write",
+ "iwrite",
+ "iread",
+ "iwr",
+ NULL
+};
+enum lkmode {
+ LK_NG,
+ LK_READ,
+ LK_WRITE,
+ LK_IWRITE,
+ LK_IREAD,
+ LK_IWR
+};
+
+/*
+ * tcl_LockDetect --
+ *
+ * PUBLIC: int tcl_LockDetect __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockDetect(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *ldopts[] = {
+ "expire",
+ "default",
+ "maxlocks",
+ "minlocks",
+ "minwrites",
+ "oldest",
+ "random",
+ "youngest",
+ NULL
+ };
+ enum ldopts {
+ LD_EXPIRE,
+ LD_DEFAULT,
+ LD_MAXLOCKS,
+ LD_MINLOCKS,
+ LD_MINWRITES,
+ LD_OLDEST,
+ LD_RANDOM,
+ LD_YOUNGEST
+ };
+ u_int32_t flag, policy;
+ int i, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = policy = 0;
+ i = 2;
+ /*
+ * Map the single requested deadlock-resolution policy to its
+ * DB_LOCK_* constant; FLAG_CHECK rejects a second policy arg.
+ */
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ ldopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum ldopts)optindex) {
+ case LD_EXPIRE:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_EXPIRE;
+ break;
+ case LD_DEFAULT:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_DEFAULT;
+ break;
+ case LD_MAXLOCKS:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_MAXLOCKS;
+ break;
+ case LD_MINWRITES:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_MINWRITE;
+ break;
+ case LD_MINLOCKS:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_MINLOCKS;
+ break;
+ case LD_OLDEST:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_OLDEST;
+ break;
+ case LD_YOUNGEST:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_YOUNGEST;
+ break;
+ case LD_RANDOM:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_RANDOM;
+ break;
+ }
+ }
+
+ _debug_check();
+ /* flag is always 0 here; only the policy is user-selectable. */
+ ret = envp->lock_detect(envp, flag, policy, NULL);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock detect");
+ return (result);
+}
+
+/*
+ * tcl_LockGet --
+ *
+ * PUBLIC: int tcl_LockGet __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockGet(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *lgopts[] = {
+ "-nowait",
+ NULL
+ };
+ enum lgopts {
+ LGNOWAIT
+ };
+ DBT obj;
+ Tcl_Obj *res;
+ void *otmp;
+ db_lockmode_t mode;
+ u_int32_t flag, lockid;
+ int freeobj, optindex, result, ret;
+ char newname[MSG_SIZE];
+
+ result = TCL_OK;
+ freeobj = 0;
+ memset(newname, 0, MSG_SIZE);
+ if (objc != 5 && objc != 6) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-nowait? mode id obj");
+ return (TCL_ERROR);
+ }
+ /*
+ * Work back from required args.
+ * Last arg is obj.
+ * Second last is lock id.
+ * Third last is lock mode.
+ */
+ memset(&obj, 0, sizeof(obj));
+
+ if ((result =
+ _GetUInt32(interp, objv[objc-2], &lockid)) != TCL_OK)
+ return (result);
+
+ ret = _CopyObjBytes(interp, objv[objc-1], &otmp,
+ &obj.size, &freeobj);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock get");
+ return (result);
+ }
+ obj.data = otmp;
+ if ((result = _LockMode(interp, objv[(objc - 3)], &mode)) != TCL_OK)
+ goto out;
+
+ /*
+ * Any left over arg is the flag.
+ *
+ * Error paths from here on go through "out" so the object bytes
+ * copied above are released; the previous early return on a bad
+ * option leaked otmp whenever freeobj was set.
+ */
+ flag = 0;
+ if (objc == 6) {
+ if (Tcl_GetIndexFromObj(interp, objv[(objc - 4)],
+ lgopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[(objc - 4)]);
+ goto out;
+ }
+ switch ((enum lgopts)optindex) {
+ case LGNOWAIT:
+ flag |= DB_LOCK_NOWAIT;
+ break;
+ }
+ }
+
+ result = _GetThisLock(interp, envp, lockid, flag, &obj, mode, newname);
+ if (result == TCL_OK) {
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ if (freeobj)
+ (void)__os_free(envp, otmp);
+ return (result);
+}
+
+/*
+ * tcl_LockStat --
+ *
+ * PUBLIC: int tcl_LockStat __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LOCK_STAT *sp;
+ Tcl_Obj *res;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ /* lock_stat allocates *sp; released at the end of this function. */
+ ret = envp->lock_stat(envp, &sp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock stat");
+ if (result == TCL_ERROR)
+ return (result);
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LIST("Last allocated locker ID", sp->st_id);
+ MAKE_STAT_LIST("Current maximum unused locker ID", sp->st_cur_maxid);
+ MAKE_STAT_LIST("Maximum locks", sp->st_maxlocks);
+ MAKE_STAT_LIST("Maximum lockers", sp->st_maxlockers);
+ MAKE_STAT_LIST("Maximum objects", sp->st_maxobjects);
+ MAKE_STAT_LIST("Lock modes", sp->st_nmodes);
+ MAKE_STAT_LIST("Current number of locks", sp->st_nlocks);
+ MAKE_STAT_LIST("Maximum number of locks so far", sp->st_maxnlocks);
+ MAKE_STAT_LIST("Current number of lockers", sp->st_nlockers);
+ MAKE_STAT_LIST("Maximum number of lockers so far", sp->st_maxnlockers);
+ MAKE_STAT_LIST("Current number of objects", sp->st_nobjects);
+ MAKE_STAT_LIST("Maximum number of objects so far", sp->st_maxnobjects);
+ MAKE_STAT_LIST("Number of conflicts", sp->st_nconflicts);
+ MAKE_STAT_LIST("Lock requests", sp->st_nrequests);
+ MAKE_STAT_LIST("Lock releases", sp->st_nreleases);
+ MAKE_STAT_LIST("Lock requests that would have waited", sp->st_nnowaits);
+ MAKE_STAT_LIST("Deadlocks detected", sp->st_ndeadlocks);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ MAKE_STAT_LIST("Lock timeout value", sp->st_locktimeout);
+ MAKE_STAT_LIST("Number of lock timeouts", sp->st_nlocktimeouts);
+ MAKE_STAT_LIST("Transaction timeout value", sp->st_txntimeout);
+ MAKE_STAT_LIST("Number of transaction timeouts", sp->st_ntxntimeouts);
+ Tcl_SetObjResult(interp, res);
+error:
+ /*
+ * NOTE(review): released with plain free(); __os_ufree may be the
+ * matching allocator for stat buffers -- confirm.
+ */
+ free(sp);
+ return (result);
+}
+
+/*
+ * tcl_LockTimeout --
+ *
+ * PUBLIC: int tcl_LockTimeout __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockTimeout(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ long timeout;
+ int result, ret;
+
+ /*
+ * One arg, the timeout.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?timeout?");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], &timeout);
+ if (result != TCL_OK)
+ return (result);
+ _debug_check();
+ /* Set the environment-wide lock timeout (microsecond units per DB). */
+ ret = envp->set_timeout(envp, (u_int32_t)timeout, DB_SET_LOCK_TIMEOUT);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock timeout");
+ return (result);
+}
+
+/*
+ * lock_Cmd --
+ * Implements the "lock" widget.
+ */
+static int
+lock_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Lock handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *lkcmds[] = {
+ "put",
+ NULL
+ };
+ enum lkcmds {
+ LKPUT
+ };
+ DB_ENV *env;
+ DB_LOCK *lock;
+ DBTCL_INFO *lkip;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ /* clientData is the DB_LOCK installed by _GetThisLock. */
+ lock = (DB_LOCK *)clientData;
+ lkip = _PtrToInfo((void *)lock);
+ result = TCL_OK;
+
+ if (lock == NULL) {
+ Tcl_SetResult(interp, "NULL lock", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (lkip == NULL) {
+ Tcl_SetResult(interp, "NULL lock info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ env = NAME_TO_ENV(lkip->i_parent->i_name);
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], lkcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ switch ((enum lkcmds)cmdindex) {
+ case LKPUT:
+ _debug_check();
+ ret = env->lock_put(env, lock);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock put");
+ /*
+ * The lock is gone regardless of lock_put's return: tear
+ * down the Tcl command, its info struct, and the handle.
+ */
+ (void)Tcl_DeleteCommand(interp, lkip->i_name);
+ _DeleteInfo(lkip);
+ __os_free(env, lock);
+ break;
+ }
+ return (result);
+}
+
+/*
+ * tcl_LockVec --
+ *
+ * PUBLIC: int tcl_LockVec __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockVec(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* environment pointer */
+{
+ static char *lvopts[] = {
+ "-nowait",
+ NULL
+ };
+ enum lvopts {
+ LVNOWAIT
+ };
+ static char *lkops[] = {
+ "get",
+ "put",
+ "put_all",
+ "put_obj",
+ "timeout",
+ NULL
+ };
+ enum lkops {
+ LKGET,
+ LKPUT,
+ LKPUTALL,
+ LKPUTOBJ,
+ LKTIMEOUT
+ };
+ DB_LOCK *lock;
+ DB_LOCKREQ list;
+ DBT obj;
+ Tcl_Obj **myobjv, *res, *thisop;
+ void *otmp;
+ u_int32_t flag, lockid;
+ int freeobj, i, myobjc, optindex, result, ret;
+ char *lockname, msg[MSG_SIZE], newname[MSG_SIZE];
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+ flag = 0;
+ freeobj = 0;
+
+ /*
+ * NOTE(review): unlike tcl_LockGet, the DBT "obj" is never
+ * memset here; only size/data are filled in before use.
+ * Confirm lock_vec ignores the remaining DBT fields.
+ */
+ /*
+ * If -nowait is given, it MUST be first arg.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[2],
+ lvopts, "option", TCL_EXACT, &optindex) == TCL_OK) {
+ switch ((enum lvopts)optindex) {
+ case LVNOWAIT:
+ flag |= DB_LOCK_NOWAIT;
+ break;
+ }
+ i = 3;
+ } else {
+ if (IS_HELP(objv[2]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ i = 2;
+ }
+
+ /*
+ * Our next arg MUST be the locker ID.
+ */
+ result = _GetUInt32(interp, objv[i++], &lockid);
+ if (result != TCL_OK)
+ return (result);
+
+ /*
+ * All other remaining args are operation tuples.
+ * Go through sequentially to decode, execute and build
+ * up list of return values.
+ */
+ res = Tcl_NewListObj(0, NULL);
+ while (i < objc) {
+ /*
+ * Get the list of the tuple.
+ */
+ lock = NULL;
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ /*
+ * First we will set up the list of requests.
+ * We will make a "second pass" after we get back
+ * the results from the lock_vec call to create
+ * the return list.
+ */
+ if (Tcl_GetIndexFromObj(interp, myobjv[0],
+ lkops, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(myobjv[0]);
+ goto error;
+ }
+ switch ((enum lkops)optindex) {
+ case LKGET:
+ /* Gets are executed inline via _GetThisLock, not lock_vec. */
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{get obj mode}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ result = _LockMode(interp, myobjv[2], &list.mode);
+ if (result != TCL_OK)
+ goto error;
+ ret = _CopyObjBytes(interp, myobjv[1], &otmp,
+ &obj.size, &freeobj);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock vec");
+ return (result);
+ }
+ obj.data = otmp;
+ ret = _GetThisLock(interp, envp, lockid, flag,
+ &obj, list.mode, newname);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock vec");
+ thisop = Tcl_NewIntObj(ret);
+ (void)Tcl_ListObjAppendElement(interp, res,
+ thisop);
+ goto error;
+ }
+ thisop = Tcl_NewStringObj(newname, strlen(newname));
+ (void)Tcl_ListObjAppendElement(interp, res, thisop);
+ if (freeobj) {
+ (void)__os_free(envp, otmp);
+ freeobj = 0;
+ }
+ continue;
+ case LKPUT:
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{put lock}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.op = DB_LOCK_PUT;
+ lockname = Tcl_GetStringFromObj(myobjv[1], NULL);
+ lock = NAME_TO_LOCK(lockname);
+ if (lock == NULL) {
+ snprintf(msg, MSG_SIZE, "Invalid lock: %s\n",
+ lockname);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.lock = *lock;
+ break;
+ case LKPUTALL:
+ if (myobjc != 1) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{put_all}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.op = DB_LOCK_PUT_ALL;
+ break;
+ case LKPUTOBJ:
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 1, myobjv,
+ "{put_obj obj}");
+ result = TCL_ERROR;
+ goto error;
+ }
+ list.op = DB_LOCK_PUT_OBJ;
+ ret = _CopyObjBytes(interp, myobjv[1], &otmp,
+ &obj.size, &freeobj);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock vec");
+ return (result);
+ }
+ obj.data = otmp;
+ list.obj = &obj;
+ break;
+ case LKTIMEOUT:
+ list.op = DB_LOCK_TIMEOUT;
+ break;
+
+ }
+ /*
+ * We get here, we have set up our request, now call
+ * lock_vec.
+ */
+ _debug_check();
+ ret = envp->lock_vec(envp, lockid, flag, &list, 1, NULL);
+ /*
+ * Now deal with whether or not the operation succeeded.
+ * Get's were done above, all these are only puts.
+ */
+ thisop = Tcl_NewIntObj(ret);
+ result = Tcl_ListObjAppendElement(interp, res, thisop);
+ if (ret != 0 && result == TCL_OK)
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock put");
+ if (freeobj) {
+ (void)__os_free(envp, otmp);
+ freeobj = 0;
+ }
+ /*
+ * We did a put of some kind. Since we did that,
+ * we have to delete the commands associated with
+ * any of the locks we just put.
+ */
+ _LockPutInfo(interp, list.op, lock, lockid, &obj);
+ }
+
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+error:
+ return (result);
+}
+
+static int
+_LockMode(interp, obj, mode)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ db_lockmode_t *mode;
+{
+ int optindex;
+
+ /* Translate a Tcl mode keyword (see lkmode[]) to a DB_LOCK_* mode. */
+ if (Tcl_GetIndexFromObj(interp, obj, lkmode, "option",
+ TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(obj));
+ switch ((enum lkmode)optindex) {
+ case LK_NG:
+ *mode = DB_LOCK_NG;
+ break;
+ case LK_READ:
+ *mode = DB_LOCK_READ;
+ break;
+ case LK_WRITE:
+ *mode = DB_LOCK_WRITE;
+ break;
+ case LK_IREAD:
+ *mode = DB_LOCK_IREAD;
+ break;
+ case LK_IWRITE:
+ *mode = DB_LOCK_IWRITE;
+ break;
+ case LK_IWR:
+ *mode = DB_LOCK_IWR;
+ break;
+ }
+ return (TCL_OK);
+}
+
+static void
+_LockPutInfo(interp, op, lock, lockid, objp)
+ Tcl_Interp *interp;
+ db_lockop_t op;
+ DB_LOCK *lock;
+ u_int32_t lockid;
+ DBT *objp;
+{
+ DBTCL_INFO *p, *nextp;
+ int found;
+
+ /*
+ * Walk every info struct and retire the Tcl commands for locks
+ * released by the put: a single lock (PUT), all locks owned by a
+ * locker (PUT_ALL), or all locks on an object (PUT_OBJ).
+ */
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ found = 0;
+ nextp = LIST_NEXT(p, entries);
+ if ((op == DB_LOCK_PUT && (p->i_lock == lock)) ||
+ (op == DB_LOCK_PUT_ALL && p->i_locker == lockid) ||
+ (op == DB_LOCK_PUT_OBJ && p->i_lockobj.data &&
+ memcmp(p->i_lockobj.data, objp->data, objp->size) == 0))
+ found = 1;
+ if (found) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ __os_free(NULL, p->i_lock);
+ _DeleteInfo(p);
+ }
+ }
+}
+
+static int
+_GetThisLock(interp, envp, lockid, flag, objp, mode, newname)
+ Tcl_Interp *interp; /* Interpreter */
+ DB_ENV *envp; /* Env handle */
+ u_int32_t lockid; /* Locker ID */
+ u_int32_t flag; /* Lock flag */
+ DBT *objp; /* Object to lock */
+ db_lockmode_t mode; /* Lock mode */
+ char *newname; /* New command name */
+{
+ DB_LOCK *lock;
+ DBTCL_INFO *envip, *ip;
+ int result, ret;
+
+ result = TCL_OK;
+ envip = _PtrToInfo((void *)envp);
+ if (envip == NULL) {
+ Tcl_SetResult(interp, "Could not find env info\n", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ /* Name the new lock command after its environment. */
+ snprintf(newname, MSG_SIZE, "%s.lock%d",
+ envip->i_name, envip->i_envlockid);
+ ip = _NewInfo(interp, NULL, newname, I_LOCK);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ ret = __os_malloc(envp, sizeof(DB_LOCK), &lock);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ /* Release the info struct created above (was leaked here). */
+ _DeleteInfo(ip);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->lock_get(envp, lockid, flag, objp, mode, lock);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock get");
+ if (result == TCL_ERROR) {
+ __os_free(envp, lock);
+ _DeleteInfo(ip);
+ return (result);
+ }
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this lock.
+ */
+ ret = __os_malloc(envp, objp->size, &ip->i_lockobj.data);
+ if (ret != 0) {
+ Tcl_SetResult(interp, "Could not duplicate obj",
+ TCL_STATIC);
+ (void)envp->lock_put(envp, lock);
+ __os_free(envp, lock);
+ _DeleteInfo(ip);
+ result = TCL_ERROR;
+ goto error;
+ }
+ /* Keep a private copy of the locked object for _LockPutInfo matching. */
+ memcpy(ip->i_lockobj.data, objp->data, objp->size);
+ ip->i_lockobj.size = objp->size;
+ envip->i_envlockid++;
+ ip->i_parent = envip;
+ ip->i_locker = lockid;
+ _SetInfoData(ip, lock);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)lock_Cmd, (ClientData)lock, NULL);
+error:
+ return (result);
+}
+#endif
diff --git a/storage/bdb/tcl/tcl_log.c b/storage/bdb/tcl/tcl_log.c
new file mode 100644
index 00000000000..be6eebfb013
--- /dev/null
+++ b/storage/bdb/tcl/tcl_log.c
@@ -0,0 +1,610 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_log.c,v 11.52 2002/08/14 20:11:57 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/tcl_db.h"
+#include "dbinc/txn.h"
+
+#ifdef CONFIG_TEST
+static int tcl_LogcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_LOGC *));
+
+/*
+ * tcl_LogArchive --
+ *
+ * PUBLIC: int tcl_LogArchive __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogArchive(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *archopts[] = {
+ "-arch_abs", "-arch_data", "-arch_log",
+ NULL
+ };
+ enum archopts {
+ ARCH_ABS, ARCH_DATA, ARCH_LOG
+ };
+ Tcl_Obj *fileobj, *res;
+ u_int32_t flag;
+ int i, optindex, result, ret;
+ char **file, **list;
+
+ result = TCL_OK;
+ flag = 0;
+ /*
+ * Get the flag index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ archopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum archopts)optindex) {
+ case ARCH_ABS:
+ flag |= DB_ARCH_ABS;
+ break;
+ case ARCH_DATA:
+ flag |= DB_ARCH_DATA;
+ break;
+ case ARCH_LOG:
+ flag |= DB_ARCH_LOG;
+ break;
+ }
+ }
+ _debug_check();
+ /* log_archive allocates the NULL-terminated list; freed below. */
+ list = NULL;
+ ret = envp->log_archive(envp, &list, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log archive");
+ if (result == TCL_OK) {
+ res = Tcl_NewListObj(0, NULL);
+ for (file = list; file != NULL && *file != NULL; file++) {
+ fileobj = Tcl_NewStringObj(*file, strlen(*file));
+ result = Tcl_ListObjAppendElement(interp, res, fileobj);
+ if (result != TCL_OK)
+ break;
+ }
+ Tcl_SetObjResult(interp, res);
+ }
+ if (list != NULL)
+ __os_ufree(envp, list);
+ return (result);
+}
+
+/*
+ * tcl_LogCompare --
+ *
+ * PUBLIC: int tcl_LogCompare __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*));
+ */
+int
+tcl_LogCompare(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ DB_LSN lsn0, lsn1;
+ Tcl_Obj *res;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 4 args.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lsn1 lsn2");
+ return (TCL_ERROR);
+ }
+
+ result = _GetLsn(interp, objv[2], &lsn0);
+ if (result == TCL_ERROR)
+ return (result);
+ result = _GetLsn(interp, objv[3], &lsn1);
+ if (result == TCL_ERROR)
+ return (result);
+
+ _debug_check();
+ /* Return log_compare's -1/0/1 ordering result directly to Tcl. */
+ ret = log_compare(&lsn0, &lsn1);
+ res = Tcl_NewIntObj(ret);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * tcl_LogFile --
+ *
+ * PUBLIC: int tcl_LogFile __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogFile(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LSN lsn;
+ Tcl_Obj *res;
+ size_t len;
+ int result, ret;
+ char *name;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 3 args.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lsn");
+ return (TCL_ERROR);
+ }
+
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * log_file reports ENOMEM when the name buffer is too small,
+ * so retry with a doubled buffer until it fits (or malloc fails).
+ */
+ len = MSG_SIZE;
+ ret = ENOMEM;
+ name = NULL;
+ while (ret == ENOMEM) {
+ if (name != NULL)
+ __os_free(envp, name);
+ ret = __os_malloc(envp, len, &name);
+ if (ret != 0) {
+ Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
+ break;
+ }
+ _debug_check();
+ ret = envp->log_file(envp, &lsn, name, len);
+ len *= 2;
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_file");
+ if (ret == 0) {
+ res = Tcl_NewStringObj(name, strlen(name));
+ Tcl_SetObjResult(interp, res);
+ }
+
+ if (name != NULL)
+ __os_free(envp, name);
+
+ return (result);
+}
+
+/*
+ * tcl_LogFlush --
+ *
+ * PUBLIC: int tcl_LogFlush __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogFlush(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LSN lsn, *lsnp;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 2 or 3 args.
+ */
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?lsn?");
+ return (TCL_ERROR);
+ }
+
+ /* Optional LSN argument; NULL flushes the whole log. */
+ if (objc == 3) {
+ lsnp = &lsn;
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+ } else
+ lsnp = NULL;
+
+ _debug_check();
+ ret = envp->log_flush(envp, lsnp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_flush");
+ return (result);
+}
+
+/*
+ * tcl_LogGet --
+ *
+ * PUBLIC: int tcl_LogGet __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogGet(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+
+ /* Deprecated stub: log cursors (logc_Cmd/tcl_LogcGet) replace log_get. */
+ COMPQUIET(objv, NULL);
+ COMPQUIET(objc, 0);
+ COMPQUIET(envp, NULL);
+
+ Tcl_SetResult(interp, "FAIL: log_get deprecated\n", TCL_STATIC);
+ return (TCL_ERROR);
+}
+
+/*
+ * tcl_LogPut --
+ *
+ * PUBLIC: int tcl_LogPut __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogPut(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *logputopts[] = {
+ "-flush",
+ NULL
+ };
+ enum logputopts {
+ LOGPUT_FLUSH
+ };
+ DB_LSN lsn;
+ DBT data;
+ Tcl_Obj *intobj, *res;
+ void *dtmp;
+ u_int32_t flag;
+ int freedata, optindex, result, ret;
+
+ result = TCL_OK;
+ flag = 0;
+ freedata = 0;
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? record");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Data/record must be the last arg.
+ */
+ memset(&data, 0, sizeof(data));
+ ret = _CopyObjBytes(interp, objv[objc-1], &dtmp,
+ &data.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log put");
+ return (result);
+ }
+ data.data = dtmp;
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above. All error paths from here on go through "out"
+ * so the record bytes copied above are released; the previous
+ * early returns leaked dtmp whenever freedata was set.
+ */
+ if (objc == 4) {
+ if (Tcl_GetIndexFromObj(interp, objv[2],
+ logputopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[2]);
+ goto out;
+ }
+ switch ((enum logputopts)optindex) {
+ case LOGPUT_FLUSH:
+ flag = DB_FLUSH;
+ break;
+ }
+ }
+
+ if (result == TCL_ERROR)
+ goto out;
+
+ _debug_check();
+ ret = envp->log_put(envp, &lsn, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_put");
+ if (result == TCL_ERROR)
+ goto out;
+ /* Return the record's LSN as a {file offset} list. */
+ res = Tcl_NewListObj(0, NULL);
+ intobj = Tcl_NewLongObj((long)lsn.file);
+ result = Tcl_ListObjAppendElement(interp, res, intobj);
+ intobj = Tcl_NewLongObj((long)lsn.offset);
+ result = Tcl_ListObjAppendElement(interp, res, intobj);
+ Tcl_SetObjResult(interp, res);
+out:
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ return (result);
+}
+/*
+ * tcl_LogStat --
+ *
+ * PUBLIC: int tcl_LogStat __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LogStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_LOG_STAT *sp;
+ Tcl_Obj *res;
+ int result, ret;
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ /* log_stat allocates *sp; released at the end of this function. */
+ ret = envp->log_stat(envp, &sp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Magic", sp->st_magic);
+ MAKE_STAT_LIST("Log file Version", sp->st_version);
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LIST("Log file mode", sp->st_mode);
+ MAKE_STAT_LIST("Log record cache size", sp->st_lg_bsize);
+ MAKE_STAT_LIST("Current log file size", sp->st_lg_size);
+ MAKE_STAT_LIST("Mbytes written", sp->st_w_mbytes);
+ MAKE_STAT_LIST("Bytes written (over Mb)", sp->st_w_bytes);
+ MAKE_STAT_LIST("Mbytes written since checkpoint", sp->st_wc_mbytes);
+ MAKE_STAT_LIST("Bytes written (over Mb) since checkpoint",
+ sp->st_wc_bytes);
+ MAKE_STAT_LIST("Times log written", sp->st_wcount);
+ MAKE_STAT_LIST("Times log written because cache filled up",
+ sp->st_wcount_fill);
+ MAKE_STAT_LIST("Times log flushed", sp->st_scount);
+ MAKE_STAT_LIST("Current log file number", sp->st_cur_file);
+ MAKE_STAT_LIST("Current log file offset", sp->st_cur_offset);
+ MAKE_STAT_LIST("On-disk log file number", sp->st_disk_file);
+ MAKE_STAT_LIST("On-disk log file offset", sp->st_disk_offset);
+ MAKE_STAT_LIST("Max commits in a log flush", sp->st_maxcommitperflush);
+ MAKE_STAT_LIST("Min commits in a log flush", sp->st_mincommitperflush);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ Tcl_SetObjResult(interp, res);
+error:
+ /*
+ * NOTE(review): released with plain free(); __os_ufree may be the
+ * matching allocator for stat buffers -- confirm.
+ */
+ free(sp);
+ return (result);
+}
+
+/*
+ * logc_Cmd --
+ * Implements the log cursor command.
+ *
+ * PUBLIC: int logc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+logc_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Cursor handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *logccmds[] = {
+ "close",
+ "get",
+ NULL
+ };
+ enum logccmds {
+ LOGCCLOSE,
+ LOGCGET
+ };
+ DB_LOGC *logc;
+ DBTCL_INFO *logcip;
+ int cmdindex, result, ret;
+
+ Tcl_ResetResult(interp);
+ /* clientData is the DB_LOGC cursor this Tcl command wraps. */
+ logc = (DB_LOGC *)clientData;
+ logcip = _PtrToInfo((void *)logc);
+ result = TCL_OK;
+
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (logc == NULL) {
+ Tcl_SetResult(interp, "NULL logc pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (logcip == NULL) {
+ Tcl_SetResult(interp, "NULL logc info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the berkdbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp, objv[1], logccmds, "command",
+ TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ switch ((enum logccmds)cmdindex) {
+ case LOGCCLOSE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = logc->close(logc, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "logc close");
+ /* Only tear down the Tcl command on a successful close. */
+ if (result == TCL_OK) {
+ (void)Tcl_DeleteCommand(interp, logcip->i_name);
+ _DeleteInfo(logcip);
+ }
+ break;
+ case LOGCGET:
+ result = tcl_LogcGet(interp, objc, objv, logc);
+ break;
+ }
+ return (result);
+}
+
+static int
+tcl_LogcGet(interp, objc, objv, logc)
+ Tcl_Interp *interp;
+ int objc;
+ Tcl_Obj * CONST *objv;
+ DB_LOGC *logc;
+{
+ static char *logcgetopts[] = {
+ "-current",
+ "-first",
+ "-last",
+ "-next",
+ "-prev",
+ "-set",
+ NULL
+ };
+ enum logcgetopts {
+ LOGCGET_CURRENT,
+ LOGCGET_FIRST,
+ LOGCGET_LAST,
+ LOGCGET_NEXT,
+ LOGCGET_PREV,
+ LOGCGET_SET
+ };
+ DB_LSN lsn;
+ DBT data;
+ Tcl_Obj *dataobj, *lsnlist, *myobjv[2], *res;
+ u_int32_t flag;
+ int i, myobjc, optindex, result, ret;
+
+ result = TCL_OK;
+ res = NULL;
+ flag = 0;
+
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? lsn");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above. FLAG_CHECK allows exactly one positioning flag;
+ * only -set consumes a following lsn argument.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ logcgetopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum logcgetopts)optindex) {
+ case LOGCGET_CURRENT:
+ FLAG_CHECK(flag);
+ flag |= DB_CURRENT;
+ break;
+ case LOGCGET_FIRST:
+ FLAG_CHECK(flag);
+ flag |= DB_FIRST;
+ break;
+ case LOGCGET_LAST:
+ FLAG_CHECK(flag);
+ flag |= DB_LAST;
+ break;
+ case LOGCGET_NEXT:
+ FLAG_CHECK(flag);
+ flag |= DB_NEXT;
+ break;
+ case LOGCGET_PREV:
+ FLAG_CHECK(flag);
+ flag |= DB_PREV;
+ break;
+ case LOGCGET_SET:
+ FLAG_CHECK(flag);
+ flag |= DB_SET;
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-set lsn?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetLsn(interp, objv[i++], &lsn);
+ break;
+ }
+ }
+
+ if (result == TCL_ERROR)
+ return (result);
+
+ memset(&data, 0, sizeof(data));
+
+ _debug_check();
+ ret = logc->get(logc, &lsn, &data, flag);
+
+ res = Tcl_NewListObj(0, NULL);
+ if (res == NULL)
+ goto memerr;
+
+ if (ret == 0) {
+ /*
+ * Success. Set up return list as {LSN data} where LSN
+ * is a sublist {file offset}.
+ */
+ myobjc = 2;
+ myobjv[0] = Tcl_NewLongObj((long)lsn.file);
+ myobjv[1] = Tcl_NewLongObj((long)lsn.offset);
+ lsnlist = Tcl_NewListObj(myobjc, myobjv);
+ if (lsnlist == NULL)
+ goto memerr;
+
+ /*
+ * NOTE(review): result from this first append is overwritten
+ * by the second append below, so a failure here goes
+ * undetected -- confirm intended.
+ */
+ result = Tcl_ListObjAppendElement(interp, res, lsnlist);
+ dataobj = Tcl_NewStringObj(data.data, data.size);
+ if (dataobj == NULL) {
+ goto memerr;
+ }
+ result = Tcl_ListObjAppendElement(interp, res, dataobj);
+ } else
+ result = _ReturnSetup(interp, ret, DB_RETOK_LGGET(ret),
+ "DB_LOGC->get");
+
+ Tcl_SetObjResult(interp, res);
+
+ if (0) {
+memerr: if (res != NULL)
+ Tcl_DecrRefCount(res);
+ Tcl_SetResult(interp, "allocation failed", TCL_STATIC);
+ }
+
+ return (result);
+}
+#endif
diff --git a/storage/bdb/tcl/tcl_mp.c b/storage/bdb/tcl/tcl_mp.c
new file mode 100644
index 00000000000..0c4411cb58a
--- /dev/null
+++ b/storage/bdb/tcl/tcl_mp.c
@@ -0,0 +1,864 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_mp.c,v 11.39 2002/08/06 06:21:27 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int mp_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int pg_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int tcl_MpGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ DB_MPOOLFILE *, DBTCL_INFO *));
+static int tcl_Pg __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ void *, DB_MPOOLFILE *, DBTCL_INFO *, int));
+static int tcl_PgInit __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ void *, DBTCL_INFO *));
+static int tcl_PgIsset __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
+ void *, DBTCL_INFO *));
+
+/*
+ * _MpInfoDelete --
+ * Removes "sub" mp page info structures that are children
+ * of this mp.
+ *
+ * PUBLIC: void _MpInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+ */
+void
+_MpInfoDelete(interp, mpip)
+ Tcl_Interp *interp; /* Interpreter */
+ DBTCL_INFO *mpip; /* Info for mp */
+{
+ DBTCL_INFO *nextp, *p;
+
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ /*
+ * Check if this info structure "belongs" to this
+ * mp. Remove its commands and info structure.
+ */
+ nextp = LIST_NEXT(p, entries);
+ if (p->i_parent == mpip && p->i_type == I_PG) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+}
+
+#if CONFIG_TEST
+/*
+ * tcl_MpSync --
+ *
+ * PUBLIC: int tcl_MpSync __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_MpSync(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+
+ DB_LSN lsn, *lsnp;
+ int result, ret;
+
+ result = TCL_OK;
+ lsnp = NULL;
+ /*
+ * No flags, must be 3 args.
+ */
+ if (objc == 3) {
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+ lsnp = &lsn;
+ }
+ else if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lsn");
+ return (TCL_ERROR);
+ }
+
+ _debug_check();
+ ret = envp->memp_sync(envp, lsnp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp sync");
+ return (result);
+}
+
+/*
+ * tcl_MpTrickle --
+ *
+ * PUBLIC: int tcl_MpTrickle __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_MpTrickle(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+
+ int pages;
+ int percent;
+ int result;
+ int ret;
+ Tcl_Obj *res;
+
+ result = TCL_OK;
+ /*
+ * No flags, must be 3 args.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "percent");
+ return (TCL_ERROR);
+ }
+
+ result = Tcl_GetIntFromObj(interp, objv[2], &percent);
+ if (result == TCL_ERROR)
+ return (result);
+
+ _debug_check();
+ ret = envp->memp_trickle(envp, percent, &pages);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp trickle");
+ if (result == TCL_ERROR)
+ return (result);
+
+ res = Tcl_NewIntObj(pages);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+
+}
+
+/*
+ * tcl_Mp --
+ *
+ * PUBLIC: int tcl_Mp __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+ */
+int
+tcl_Mp(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ static char *mpopts[] = {
+ "-create",
+ "-mode",
+ "-nommap",
+ "-pagesize",
+ "-rdonly",
+ NULL
+ };
+ enum mpopts {
+ MPCREATE,
+ MPMODE,
+ MPNOMMAP,
+ MPPAGE,
+ MPRDONLY
+ };
+ DBTCL_INFO *ip;
+ DB_MPOOLFILE *mpf;
+ Tcl_Obj *res;
+ u_int32_t flag;
+ int i, pgsize, mode, optindex, result, ret;
+ char *file, newname[MSG_SIZE];
+
+ result = TCL_OK;
+ i = 2;
+ flag = 0;
+ mode = 0;
+ pgsize = 0;
+ memset(newname, 0, MSG_SIZE);
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ mpopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get an errant
+ * error message if there is another error.
+ * This arg is the file name.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum mpopts)optindex) {
+ case MPCREATE:
+ flag |= DB_CREATE;
+ break;
+ case MPNOMMAP:
+ flag |= DB_NOMMAP;
+ break;
+ case MPPAGE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-pagesize size?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &pgsize);
+ break;
+ case MPRDONLY:
+ flag |= DB_RDONLY;
+ break;
+ case MPMODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ }
+ /*
+ * Any left over arg is a file name. It better be the last arg.
+ */
+ file = NULL;
+ if (i != objc) {
+ if (i != objc - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? ?file?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ file = Tcl_GetStringFromObj(objv[i++], NULL);
+ }
+
+ snprintf(newname, sizeof(newname), "%s.mp%d",
+ envip->i_name, envip->i_envmpid);
+ ip = _NewInfo(interp, NULL, newname, I_MP);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ _debug_check();
+ if ((ret = envp->memp_fcreate(envp, &mpf, 0)) != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mpool");
+ _DeleteInfo(ip);
+ goto error;
+ }
+
+ /*
+ * XXX
+ * Interface doesn't currently support DB_MPOOLFILE configuration.
+ */
+ if ((ret = mpf->open(mpf, file, flag, mode, (size_t)pgsize)) != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mpool");
+ _DeleteInfo(ip);
+
+ (void)mpf->close(mpf, 0);
+ goto error;
+ }
+
+ /*
+ * Success. Set up return. Set up new info and command widget for
+ * this mpool.
+ */
+ envip->i_envmpid++;
+ ip->i_parent = envip;
+ ip->i_pgsz = pgsize;
+ _SetInfoData(ip, mpf);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)mp_Cmd, (ClientData)mpf, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+
+error:
+ return (result);
+}
+
+/*
+ * tcl_MpStat --
+ *
+ * PUBLIC: int tcl_MpStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_MpStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DB_MPOOL_STAT *sp;
+ DB_MPOOL_FSTAT **fsp, **savefsp;
+ int result;
+ int ret;
+ Tcl_Obj *res;
+ Tcl_Obj *res1;
+
+ result = TCL_OK;
+ savefsp = NULL;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->memp_stat(envp, &sp, &fsp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Cache size (gbytes)", sp->st_gbytes);
+ MAKE_STAT_LIST("Cache size (bytes)", sp->st_bytes);
+ MAKE_STAT_LIST("Number of caches", sp->st_ncache);
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LIST("Pages mapped into address space", sp->st_map);
+ MAKE_STAT_LIST("Cache hits", sp->st_cache_hit);
+ MAKE_STAT_LIST("Cache misses", sp->st_cache_miss);
+ MAKE_STAT_LIST("Pages created", sp->st_page_create);
+ MAKE_STAT_LIST("Pages read in", sp->st_page_in);
+ MAKE_STAT_LIST("Pages written", sp->st_page_out);
+ MAKE_STAT_LIST("Clean page evictions", sp->st_ro_evict);
+ MAKE_STAT_LIST("Dirty page evictions", sp->st_rw_evict);
+ MAKE_STAT_LIST("Dirty pages trickled", sp->st_page_trickle);
+ MAKE_STAT_LIST("Cached pages", sp->st_pages);
+ MAKE_STAT_LIST("Cached clean pages", sp->st_page_clean);
+ MAKE_STAT_LIST("Cached dirty pages", sp->st_page_dirty);
+ MAKE_STAT_LIST("Hash buckets", sp->st_hash_buckets);
+ MAKE_STAT_LIST("Hash lookups", sp->st_hash_searches);
+ MAKE_STAT_LIST("Longest hash chain found", sp->st_hash_longest);
+ MAKE_STAT_LIST("Hash elements examined", sp->st_hash_examined);
+ MAKE_STAT_LIST("Number of hash bucket nowaits", sp->st_hash_nowait);
+ MAKE_STAT_LIST("Number of hash bucket waits", sp->st_hash_wait);
+ MAKE_STAT_LIST("Maximum number of hash bucket waits",
+ sp->st_hash_max_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Page allocations", sp->st_alloc);
+ MAKE_STAT_LIST("Buckets examined during allocation",
+ sp->st_alloc_buckets);
+ MAKE_STAT_LIST("Maximum buckets examined during allocation",
+ sp->st_alloc_max_buckets);
+ MAKE_STAT_LIST("Pages examined during allocation", sp->st_alloc_pages);
+ MAKE_STAT_LIST("Maximum pages examined during allocation",
+ sp->st_alloc_max_pages);
+
+ /*
+ * Save global stat list as res1. The MAKE_STAT_LIST
+ * macro assumes 'res' so we'll use that to build up
+ * our per-file sublist.
+ */
+ res1 = res;
+ for (savefsp = fsp; fsp != NULL && *fsp != NULL; fsp++) {
+ res = Tcl_NewObj();
+ result = _SetListElem(interp, res, "File Name",
+ strlen("File Name"), (*fsp)->file_name,
+ strlen((*fsp)->file_name));
+ if (result != TCL_OK)
+ goto error;
+ MAKE_STAT_LIST("Page size", (*fsp)->st_pagesize);
+ MAKE_STAT_LIST("Pages mapped into address space",
+ (*fsp)->st_map);
+ MAKE_STAT_LIST("Cache hits", (*fsp)->st_cache_hit);
+ MAKE_STAT_LIST("Cache misses", (*fsp)->st_cache_miss);
+ MAKE_STAT_LIST("Pages created", (*fsp)->st_page_create);
+ MAKE_STAT_LIST("Pages read in", (*fsp)->st_page_in);
+ MAKE_STAT_LIST("Pages written", (*fsp)->st_page_out);
+ /*
+ * Now that we have a complete "per-file" stat list, append
+ * that to the other list.
+ */
+ result = Tcl_ListObjAppendElement(interp, res1, res);
+ if (result != TCL_OK)
+ goto error;
+ }
+ Tcl_SetObjResult(interp, res1);
+error:
+ free(sp);
+ if (savefsp != NULL)
+ free(savefsp);
+ return (result);
+}
+
+/*
+ * mp_Cmd --
+ * Implements the "mp" widget.
+ */
+static int
+mp_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Mp handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *mpcmds[] = {
+ "close",
+ "fsync",
+ "get",
+ NULL
+ };
+ enum mpcmds {
+ MPCLOSE,
+ MPFSYNC,
+ MPGET
+ };
+ DB_MPOOLFILE *mp;
+ int cmdindex, length, result, ret;
+ DBTCL_INFO *mpip;
+ Tcl_Obj *res;
+ char *obj_name;
+
+ Tcl_ResetResult(interp);
+ mp = (DB_MPOOLFILE *)clientData;
+ obj_name = Tcl_GetStringFromObj(objv[0], &length);
+ mpip = _NameToInfo(obj_name);
+ result = TCL_OK;
+
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mpip == NULL) {
+ Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], mpcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum mpcmds)cmdindex) {
+ case MPCLOSE:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = mp->close(mp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "mp close");
+ _MpInfoDelete(interp, mpip);
+ (void)Tcl_DeleteCommand(interp, mpip->i_name);
+ _DeleteInfo(mpip);
+ break;
+ case MPFSYNC:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = mp->sync(mp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case MPGET:
+ result = tcl_MpGet(interp, objc, objv, mp, mpip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ * tcl_MpGet --
+ */
+static int
+tcl_MpGet(interp, objc, objv, mp, mpip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_MPOOLFILE *mp; /* mp pointer */
+ DBTCL_INFO *mpip; /* mp info pointer */
+{
+ static char *mpget[] = {
+ "-create",
+ "-last",
+ "-new",
+ NULL
+ };
+ enum mpget {
+ MPGET_CREATE,
+ MPGET_LAST,
+ MPGET_NEW
+ };
+
+ DBTCL_INFO *ip;
+ Tcl_Obj *res;
+ db_pgno_t pgno;
+ u_int32_t flag;
+ int i, ipgno, optindex, result, ret;
+ char newname[MSG_SIZE];
+ void *page;
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+ i = 2;
+ flag = 0;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ mpget, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ /*
+ * Reset the result so we don't get an errant
+ * error message if there is another error.
+ * This arg is the page number.
+ */
+ if (IS_HELP(objv[i]) == TCL_OK)
+ return (TCL_OK);
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum mpget)optindex) {
+ case MPGET_CREATE:
+ flag |= DB_MPOOL_CREATE;
+ break;
+ case MPGET_LAST:
+ flag |= DB_MPOOL_LAST;
+ break;
+ case MPGET_NEW:
+ flag |= DB_MPOOL_NEW;
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ }
+ /*
+ * Any left over arg is a page number. It better be the last arg.
+ */
+ ipgno = 0;
+ if (i != objc) {
+ if (i != objc - 1) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? ?pgno?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &ipgno);
+ if (result != TCL_OK)
+ goto error;
+ }
+
+ snprintf(newname, sizeof(newname), "%s.pg%d",
+ mpip->i_name, mpip->i_mppgid);
+ ip = _NewInfo(interp, NULL, newname, I_PG);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ pgno = ipgno;
+ ret = mp->get(mp, &pgno, flag, &page);
+ result = _ReturnSetup(interp, ret, DB_RETOK_MPGET(ret), "mpool get");
+ if (result == TCL_ERROR)
+ _DeleteInfo(ip);
+ else {
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this mpool.
+ */
+ mpip->i_mppgid++;
+ ip->i_parent = mpip;
+ ip->i_pgno = pgno;
+ ip->i_pgsz = mpip->i_pgsz;
+ _SetInfoData(ip, page);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)pg_Cmd, (ClientData)page, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ }
+error:
+ return (result);
+}
+
+/*
+ * pg_Cmd --
+ * Implements the "pg" widget.
+ */
+static int
+pg_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Page handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *pgcmds[] = {
+ "init",
+ "is_setto",
+ "pgnum",
+ "pgsize",
+ "put",
+ "set",
+ NULL
+ };
+ enum pgcmds {
+ PGINIT,
+ PGISSET,
+ PGNUM,
+ PGSIZE,
+ PGPUT,
+ PGSET
+ };
+ DB_MPOOLFILE *mp;
+ int cmdindex, length, result;
+ char *obj_name;
+ void *page;
+ DBTCL_INFO *pgip;
+ Tcl_Obj *res;
+
+ Tcl_ResetResult(interp);
+ page = (void *)clientData;
+ obj_name = Tcl_GetStringFromObj(objv[0], &length);
+ pgip = _NameToInfo(obj_name);
+ mp = NAME_TO_MP(pgip->i_parent->i_name);
+ result = TCL_OK;
+
+ if (page == NULL) {
+ Tcl_SetResult(interp, "NULL page pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (pgip == NULL) {
+ Tcl_SetResult(interp, "NULL page info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], pgcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum pgcmds)cmdindex) {
+ case PGNUM:
+ res = Tcl_NewLongObj((long)pgip->i_pgno);
+ break;
+ case PGSIZE:
+ res = Tcl_NewLongObj(pgip->i_pgsz);
+ break;
+ case PGSET:
+ case PGPUT:
+ result = tcl_Pg(interp, objc, objv, page, mp, pgip,
+ cmdindex == PGSET ? 0 : 1);
+ break;
+ case PGINIT:
+ result = tcl_PgInit(interp, objc, objv, page, pgip);
+ break;
+ case PGISSET:
+ result = tcl_PgIsset(interp, objc, objv, page, pgip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+static int
+tcl_Pg(interp, objc, objv, page, mp, pgip, putop)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ void *page; /* Page pointer */
+ DB_MPOOLFILE *mp; /* Mpool pointer */
+ DBTCL_INFO *pgip; /* Info pointer */
+ int putop; /* Operation */
+{
+ static char *pgopt[] = {
+ "-clean",
+ "-dirty",
+ "-discard",
+ NULL
+ };
+ enum pgopt {
+ PGCLEAN,
+ PGDIRTY,
+ PGDISCARD
+ };
+ u_int32_t flag;
+ int i, optindex, result, ret;
+
+ result = TCL_OK;
+ i = 2;
+ flag = 0;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ pgopt, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum pgopt)optindex) {
+ case PGCLEAN:
+ flag |= DB_MPOOL_CLEAN;
+ break;
+ case PGDIRTY:
+ flag |= DB_MPOOL_DIRTY;
+ break;
+ case PGDISCARD:
+ flag |= DB_MPOOL_DISCARD;
+ break;
+ }
+ }
+
+ _debug_check();
+ if (putop)
+ ret = mp->put(mp, page, flag);
+ else
+ ret = mp->set(mp, page, flag);
+
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "page");
+
+ if (putop) {
+ (void)Tcl_DeleteCommand(interp, pgip->i_name);
+ _DeleteInfo(pgip);
+ }
+ return (result);
+}
+
+static int
+tcl_PgInit(interp, objc, objv, page, pgip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ void *page; /* Page pointer */
+ DBTCL_INFO *pgip; /* Info pointer */
+{
+ Tcl_Obj *res;
+ size_t pgsz;
+ long *p, *endp, newval;
+ int length, result;
+ u_char *s;
+
+ result = TCL_OK;
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "val");
+ return (TCL_ERROR);
+ }
+
+ pgsz = pgip->i_pgsz;
+ result = Tcl_GetLongFromObj(interp, objv[2], &newval);
+ if (result != TCL_OK) {
+ s = Tcl_GetByteArrayFromObj(objv[2], &length);
+ if (s == NULL)
+ return (TCL_ERROR);
+ memcpy(page, s,
+ ((size_t)length < pgsz) ? (size_t)length : pgsz);
+ result = TCL_OK;
+ } else {
+ p = (long *)page;
+ for (endp = p + (pgsz / sizeof(long)); p < endp; p++)
+ *p = newval;
+ }
+ res = Tcl_NewIntObj(0);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+static int
+tcl_PgIsset(interp, objc, objv, page, pgip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ void *page; /* Page pointer */
+ DBTCL_INFO *pgip; /* Info pointer */
+{
+ Tcl_Obj *res;
+ size_t pgsz;
+ long *p, *endp, newval;
+ int length, result;
+ u_char *s;
+
+ result = TCL_OK;
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "val");
+ return (TCL_ERROR);
+ }
+
+ pgsz = pgip->i_pgsz;
+ result = Tcl_GetLongFromObj(interp, objv[2], &newval);
+ if (result != TCL_OK) {
+ if ((s = Tcl_GetByteArrayFromObj(objv[2], &length)) == NULL)
+ return (TCL_ERROR);
+ result = TCL_OK;
+
+ if (memcmp(page, s,
+ ((size_t)length < pgsz) ? (size_t)length : pgsz ) != 0) {
+ res = Tcl_NewIntObj(0);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+ }
+ } else {
+ p = (long *)page;
+ /*
+ * If any value is not the same, return 0 (is not set to
+ * this value). Otherwise, if we finish the loop, we return 1
+ * (is set to this value).
+ */
+ for (endp = p + (pgsz/sizeof(long)); p < endp; p++)
+ if (*p != newval) {
+ res = Tcl_NewIntObj(0);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+ }
+ }
+
+ res = Tcl_NewIntObj(1);
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+#endif
diff --git a/storage/bdb/tcl/tcl_rep.c b/storage/bdb/tcl/tcl_rep.c
new file mode 100644
index 00000000000..c72c9971338
--- /dev/null
+++ b/storage/bdb/tcl/tcl_rep.c
@@ -0,0 +1,405 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_rep.c,v 11.85 2002/08/06 04:45:44 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+#if CONFIG_TEST
+/*
+ * tcl_RepElect --
+ * Call DB_ENV->rep_elect().
+ *
+ * PUBLIC: int tcl_RepElect
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepElect(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ int eid, nsites, pri, result, ret;
+ u_int32_t timeout;
+
+ if (objc != 5) {
+ Tcl_WrongNumArgs(interp, 5, objv, "nsites pri timeout");
+ return (TCL_ERROR);
+ }
+
+ if ((result = Tcl_GetIntFromObj(interp, objv[2], &nsites)) != TCL_OK)
+ return (result);
+ if ((result = Tcl_GetIntFromObj(interp, objv[3], &pri)) != TCL_OK)
+ return (result);
+ if ((result = _GetUInt32(interp, objv[4], &timeout)) != TCL_OK)
+ return (result);
+
+ _debug_check();
+ if ((ret = dbenv->rep_elect(dbenv, nsites, pri, timeout, &eid)) != 0)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env rep_elect"));
+
+ Tcl_SetObjResult(interp, Tcl_NewIntObj(eid));
+
+ return (TCL_OK);
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepFlush --
+ * Call DB_ENV->rep_flush().
+ *
+ * PUBLIC: int tcl_RepFlush
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepFlush(interp, objc, objv, dbenv)
+ Tcl_Interp *interp;
+ int objc;
+ Tcl_Obj *CONST objv[];
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "");
+ return TCL_ERROR;
+ }
+
+ _debug_check();
+ ret = dbenv->rep_flush(dbenv);
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_flush"));
+}
+#endif
+#if CONFIG_TEST
+/*
+ * tcl_RepLimit --
+ * Call DB_ENV->set_rep_limit().
+ *
+ * PUBLIC: int tcl_RepLimit
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepLimit(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ int result, ret;
+ u_int32_t bytes, gbytes;
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "gbytes bytes");
+ return (TCL_ERROR);
+ }
+
+ if ((result = _GetUInt32(interp, objv[2], &gbytes)) != TCL_OK)
+ return (result);
+ if ((result = _GetUInt32(interp, objv[3], &bytes)) != TCL_OK)
+ return (result);
+
+ _debug_check();
+ if ((ret = dbenv->set_rep_limit(dbenv, gbytes, bytes)) != 0)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env set_rep_limit"));
+
+ return (_ReturnSetup(interp,
+ ret, DB_RETOK_STD(ret), "env set_rep_limit"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepRequest --
+ * Call DB_ENV->set_rep_request().
+ *
+ * PUBLIC: int tcl_RepRequest
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepRequest(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ int result, ret;
+ u_int32_t min, max;
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "min max");
+ return (TCL_ERROR);
+ }
+
+ if ((result = _GetUInt32(interp, objv[2], &min)) != TCL_OK)
+ return (result);
+ if ((result = _GetUInt32(interp, objv[3], &max)) != TCL_OK)
+ return (result);
+
+ _debug_check();
+ if ((ret = dbenv->set_rep_request(dbenv, min, max)) != 0)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env set_rep_request"));
+
+ return (_ReturnSetup(interp,
+ ret, DB_RETOK_STD(ret), "env set_rep_request"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepStart --
+ * Call DB_ENV->rep_start().
+ *
+ * PUBLIC: int tcl_RepStart
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ *
+ * Note that this normally can/should be achieved as an argument to
+ * berkdb env, but we need to test forcible upgrading of clients, which
+ * involves calling this on an open environment handle.
+ */
+int
+tcl_RepStart(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ static char *tclrpstrt[] = {
+ "-client",
+ "-master",
+ NULL
+ };
+ enum tclrpstrt {
+ TCL_RPSTRT_CLIENT,
+ TCL_RPSTRT_MASTER
+ };
+ char *arg;
+ int i, optindex, ret;
+ u_int32_t flag;
+
+ flag = 0;
+
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 3, objv, "[-master/-client]");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], tclrpstrt,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-')
+ return (IS_HELP(objv[i]));
+ else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum tclrpstrt)optindex) {
+ case TCL_RPSTRT_CLIENT:
+ flag |= DB_REP_CLIENT;
+ break;
+ case TCL_RPSTRT_MASTER:
+ flag |= DB_REP_MASTER;
+ break;
+ }
+ }
+
+ _debug_check();
+ ret = dbenv->rep_start(dbenv, NULL, flag);
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_start"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepProcessMessage --
+ * Call DB_ENV->rep_process_message().
+ *
+ * PUBLIC: int tcl_RepProcessMessage
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepProcessMessage(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ DBT control, rec;
+ Tcl_Obj *res;
+ void *ctmp, *rtmp;
+ int eid;
+ int freectl, freerec, result, ret;
+
+ if (objc != 5) {
+ Tcl_WrongNumArgs(interp, 5, objv, "id control rec");
+ return (TCL_ERROR);
+ }
+ freectl = freerec = 0;
+
+ memset(&control, 0, sizeof(control));
+ memset(&rec, 0, sizeof(rec));
+
+ if ((result = Tcl_GetIntFromObj(interp, objv[2], &eid)) != TCL_OK)
+ return (result);
+
+ ret = _CopyObjBytes(interp, objv[3], &ctmp,
+ &control.size, &freectl);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_REPPMSG(ret), "rep_proc_msg");
+ return (result);
+ }
+ control.data = ctmp;
+ ret = _CopyObjBytes(interp, objv[4], &rtmp,
+ &rec.size, &freerec);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_REPPMSG(ret), "rep_proc_msg");
+ goto out;
+ }
+ rec.data = rtmp;
+ _debug_check();
+ ret = dbenv->rep_process_message(dbenv, &control, &rec, &eid);
+ result = _ReturnSetup(interp, ret, DB_RETOK_REPPMSG(ret),
+ "env rep_process_message");
+
+ /*
+ * If we have a new master, return its environment ID.
+ *
+ * XXX
+ * We should do something prettier to differentiate success
+ * from an env ID, and figure out how to represent HOLDELECTION.
+ */
+ if (result == TCL_OK && ret == DB_REP_NEWMASTER) {
+ res = Tcl_NewIntObj(eid);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ if (freectl)
+ (void)__os_free(NULL, ctmp);
+ if (freerec)
+ (void)__os_free(NULL, rtmp);
+
+ return (result);
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepStat --
+ * Call DB_ENV->rep_stat().
+ *
+ * PUBLIC: int tcl_RepStat
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepStat(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ DB_REP_STAT *sp;
+ Tcl_Obj *myobjv[2], *res, *thislist, *lsnlist;
+ u_int32_t flag;
+ int myobjc, result, ret;
+ char *arg;
+
+ result = TCL_OK;
+ flag = 0;
+
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ if (objc == 3) {
+ arg = Tcl_GetStringFromObj(objv[2], NULL);
+ if (strcmp(arg, "-clear") == 0)
+ flag = DB_STAT_CLEAR;
+ else {
+ Tcl_SetResult(interp,
+ "db stat: unknown arg", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+
+ _debug_check();
+ ret = dbenv->rep_stat(dbenv, &sp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "rep stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_* assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LSN("Next LSN expected", &sp->st_next_lsn);
+ MAKE_STAT_LSN("First missed LSN", &sp->st_waiting_lsn);
+ MAKE_STAT_LIST("Duplicate master conditions", sp->st_dupmasters);
+ MAKE_STAT_LIST("Environment ID", sp->st_env_id);
+ MAKE_STAT_LIST("Environment priority", sp->st_env_priority);
+ MAKE_STAT_LIST("Generation number", sp->st_gen);
+ MAKE_STAT_LIST("Duplicate log records received", sp->st_log_duplicated);
+ MAKE_STAT_LIST("Current log records queued", sp->st_log_queued);
+ MAKE_STAT_LIST("Maximum log records queued", sp->st_log_queued_max);
+ MAKE_STAT_LIST("Total log records queued", sp->st_log_queued_total);
+ MAKE_STAT_LIST("Log records received", sp->st_log_records);
+ MAKE_STAT_LIST("Log records requested", sp->st_log_requested);
+ MAKE_STAT_LIST("Master environment ID", sp->st_master);
+ MAKE_STAT_LIST("Master changes", sp->st_master_changes);
+ MAKE_STAT_LIST("Messages with bad generation number",
+ sp->st_msgs_badgen);
+ MAKE_STAT_LIST("Messages processed", sp->st_msgs_processed);
+ MAKE_STAT_LIST("Messages ignored for recovery", sp->st_msgs_recover);
+ MAKE_STAT_LIST("Message send failures", sp->st_msgs_send_failures);
+ MAKE_STAT_LIST("Messages sent", sp->st_msgs_sent);
+ MAKE_STAT_LIST("New site messages", sp->st_newsites);
+ MAKE_STAT_LIST("Transmission limited", sp->st_nthrottles);
+ MAKE_STAT_LIST("Outdated conditions", sp->st_outdated);
+ MAKE_STAT_LIST("Transactions applied", sp->st_txns_applied);
+ MAKE_STAT_LIST("Elections held", sp->st_elections);
+ MAKE_STAT_LIST("Elections won", sp->st_elections_won);
+ MAKE_STAT_LIST("Election phase", sp->st_election_status);
+ MAKE_STAT_LIST("Election winner", sp->st_election_cur_winner);
+ MAKE_STAT_LIST("Election generation number", sp->st_election_gen);
+ MAKE_STAT_LSN("Election max LSN", &sp->st_election_lsn);
+ MAKE_STAT_LIST("Election sites", sp->st_election_nsites);
+ MAKE_STAT_LIST("Election priority", sp->st_election_priority);
+ MAKE_STAT_LIST("Election tiebreaker", sp->st_election_tiebreaker);
+ MAKE_STAT_LIST("Election votes", sp->st_election_votes);
+
+ Tcl_SetObjResult(interp, res);
+error:
+ free(sp);
+ return (result);
+}
+#endif
diff --git a/storage/bdb/tcl/tcl_txn.c b/storage/bdb/tcl/tcl_txn.c
new file mode 100644
index 00000000000..b5fab637943
--- /dev/null
+++ b/storage/bdb/tcl/tcl_txn.c
@@ -0,0 +1,657 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_txn.c,v 11.57 2002/08/06 06:21:36 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+static int tcl_TxnCommit __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST *, DB_TXN *, DBTCL_INFO *));
+static int txn_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST *));
+
+/*
+ * _TxnInfoDelete --
+ * Removes nested txn info structures that are children
+ * of this txn.
+ * RECURSIVE: Transactions can be arbitrarily nested, so we
+ * must recurse down until we get them all.
+ *
+ * PUBLIC: void _TxnInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+ */
+void
+_TxnInfoDelete(interp, txnip)
+ Tcl_Interp *interp; /* Interpreter */
+ DBTCL_INFO *txnip; /* Info for txn */
+{
+ DBTCL_INFO *nextp, *p;
+
+ /*
+ * Walk the global info list looking for direct children of this
+ * txn; recursion (above the loop body) handles deeper nesting.
+ */
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ /*
+ * Check if this info structure "belongs" to this
+ * txn. Remove its commands and info structure.
+ */
+ /*
+ * Save the successor before acting on p: _DeleteInfo() below
+ * presumably frees p, so p's links must not be read afterward.
+ */
+ nextp = LIST_NEXT(p, entries);
+ if (p->i_parent == txnip && p->i_type == I_TXN) {
+ _TxnInfoDelete(interp, p);
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+}
+
+/*
+ * tcl_TxnCheckpoint --
+ *
+ * PUBLIC: int tcl_TxnCheckpoint __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_TxnCheckpoint(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ static char *txnckpopts[] = {
+ "-kbyte", "-min",
+ NULL
+ };
+ enum txnckpopts {
+ TXNCKP_KB, TXNCKP_MIN
+ };
+ int i, kb, min, optindex, result, ret;
+
+ result = TCL_OK;
+ kb = min = 0;
+
+ /*
+ * Get the flag index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ txnckpopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ return (IS_HELP(objv[i]));
+ }
+ i++;
+ switch ((enum txnckpopts)optindex) {
+ case TXNCKP_KB:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-kbyte kb?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &kb);
+ break;
+ case TXNCKP_MIN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-min min?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetIntFromObj(interp, objv[i++], &min);
+ break;
+ }
+ /*
+ * Stop on a missing or unparsable option value: previously
+ * the error was recorded in 'result' but the checkpoint was
+ * issued anyway with whatever had been parsed so far.
+ */
+ if (result != TCL_OK)
+ return (result);
+ }
+ _debug_check();
+ ret = envp->txn_checkpoint(envp, (u_int32_t)kb, (u_int32_t)min, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn checkpoint");
+ return (result);
+}
+
+/*
+ * tcl_Txn --
+ *
+ * PUBLIC: int tcl_Txn __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+ */
+int
+tcl_Txn(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ /*
+ * NB: the option table, the enum and the switch below must all be
+ * guarded by the same preprocessor test. This function previously
+ * mixed "#if CONFIG_TEST" here with "#ifdef CONFIG_TEST" at the
+ * switch; if CONFIG_TEST were defined empty or 0 the switch would
+ * reference enumerators that were never declared. Use #ifdef
+ * uniformly.
+ */
+ static char *txnopts[] = {
+#ifdef CONFIG_TEST
+ "-dirty",
+ "-lock_timeout",
+ "-txn_timeout",
+#endif
+ "-nosync",
+ "-nowait",
+ "-parent",
+ "-sync",
+ NULL
+ };
+ enum txnopts {
+#ifdef CONFIG_TEST
+ TXNDIRTY,
+ TXN_LOCK_TIMEOUT,
+ TXN_TIMEOUT,
+#endif
+ TXNNOSYNC,
+ TXNNOWAIT,
+ TXNPARENT,
+ TXNSYNC
+ };
+ DBTCL_INFO *ip;
+ DB_TXN *parent;
+ DB_TXN *txn;
+ Tcl_Obj *res;
+ db_timeout_t lk_time, tx_time;
+ u_int32_t flag, lk_timeflag, tx_timeflag;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE], newname[MSG_SIZE];
+
+ result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
+
+ parent = NULL;
+ flag = 0;
+ lk_timeflag = tx_timeflag = 0;
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ txnopts, "option", TCL_EXACT, &optindex) != TCL_OK) {
+ return (IS_HELP(objv[i]));
+ }
+ i++;
+ switch ((enum txnopts)optindex) {
+#ifdef CONFIG_TEST
+ case TXNDIRTY:
+ flag |= DB_DIRTY_READ;
+ break;
+ case TXN_LOCK_TIMEOUT:
+ lk_timeflag = DB_SET_LOCK_TIMEOUT;
+ goto getit;
+ case TXN_TIMEOUT:
+ tx_timeflag = DB_SET_TXN_TIMEOUT;
+getit:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_timestamp time?");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ (long *)(optindex == TXN_LOCK_TIMEOUT ?
+ &lk_time : &tx_time));
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+ break;
+#endif
+ case TXNNOSYNC:
+ FLAG_CHECK2(flag, DB_DIRTY_READ);
+ flag |= DB_TXN_NOSYNC;
+ break;
+ case TXNNOWAIT:
+ FLAG_CHECK2(flag, DB_DIRTY_READ);
+ flag |= DB_TXN_NOWAIT;
+ break;
+ case TXNPARENT:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-parent txn?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ parent = NAME_TO_TXN(arg);
+ if (parent == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Invalid parent txn: %s\n",
+ arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ break;
+ case TXNSYNC:
+ FLAG_CHECK2(flag, DB_DIRTY_READ);
+ flag |= DB_TXN_SYNC;
+ break;
+ }
+ }
+ snprintf(newname, sizeof(newname), "%s.txn%d",
+ envip->i_name, envip->i_envtxnid);
+ ip = _NewInfo(interp, NULL, newname, I_TXN);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->txn_begin(envp, parent, &txn, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn");
+ if (result == TCL_ERROR)
+ _DeleteInfo(ip);
+ else {
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this txn.
+ */
+ envip->i_envtxnid++;
+ if (parent)
+ ip->i_parent = _PtrToInfo(parent);
+ else
+ ip->i_parent = envip;
+ _SetInfoData(ip, txn);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)txn, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+ if (tx_timeflag != 0) {
+ ret = txn->set_timeout(txn, tx_time, tx_timeflag);
+ if (ret != 0) {
+ result =
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_timeout");
+ _DeleteInfo(ip);
+ }
+ }
+ if (lk_timeflag != 0) {
+ ret = txn->set_timeout(txn, lk_time, lk_timeflag);
+ if (ret != 0) {
+ result =
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_timeout");
+ _DeleteInfo(ip);
+ }
+ }
+ }
+ return (result);
+}
+
+/*
+ * tcl_TxnStat --
+ *
+ * PUBLIC: int tcl_TxnStat __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_TxnStat(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ DBTCL_INFO *ip;
+ DB_TXN_ACTIVE *p;
+ DB_TXN_STAT *sp;
+ Tcl_Obj *myobjv[2], *res, *thislist, *lsnlist;
+ u_int32_t i;
+ int myobjc, result, ret;
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->txn_stat(envp, &sp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_LIST assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
+ MAKE_STAT_LSN("LSN of last checkpoint", &sp->st_last_ckp);
+ MAKE_STAT_LIST("Time of last checkpoint", sp->st_time_ckp);
+ MAKE_STAT_LIST("Last txn ID allocated", sp->st_last_txnid);
+ MAKE_STAT_LIST("Max Txns", sp->st_maxtxns);
+ MAKE_STAT_LIST("Number aborted txns", sp->st_naborts);
+ MAKE_STAT_LIST("Number active txns", sp->st_nactive);
+ MAKE_STAT_LIST("Maximum active txns", sp->st_maxnactive);
+ MAKE_STAT_LIST("Number txns begun", sp->st_nbegins);
+ MAKE_STAT_LIST("Number committed txns", sp->st_ncommits);
+ MAKE_STAT_LIST("Number restored txns", sp->st_nrestores);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ /*
+ * For each active transaction reported in the stat array, find
+ * the Tcl info entry whose txn id matches and report its LSN and
+ * parent under the widget's name. The second i_type test below
+ * repeats the 'continue' guard and is redundant but harmless.
+ */
+ for (i = 0, p = sp->st_txnarray; i < sp->st_nactive; i++, p++)
+ for (ip = LIST_FIRST(&__db_infohead); ip != NULL;
+ ip = LIST_NEXT(ip, entries)) {
+ if (ip->i_type != I_TXN)
+ continue;
+ if (ip->i_type == I_TXN &&
+ (ip->i_txnp->id(ip->i_txnp) == p->txnid)) {
+ MAKE_STAT_LSN(ip->i_name, &p->lsn);
+ if (p->parentid != 0)
+ MAKE_STAT_STRLIST("Parent",
+ ip->i_parent->i_name);
+ else
+ MAKE_STAT_LIST("Parent", 0);
+ break;
+ }
+ }
+ Tcl_SetObjResult(interp, res);
+error:
+ free(sp);
+ return (result);
+}
+
+/*
+ * tcl_TxnTimeout --
+ *
+ * PUBLIC: int tcl_TxnTimeout __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_TxnTimeout(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ long timeout;
+ int result, ret;
+
+ /*
+ * One arg, the timeout.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?timeout?");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], &timeout);
+ if (result != TCL_OK)
+ return (result);
+ _debug_check();
+ ret = envp->set_timeout(envp, (u_int32_t)timeout, DB_SET_TXN_TIMEOUT);
+ /*
+ * This sets DB_SET_TXN_TIMEOUT; the error label used to read
+ * "lock timeout" (copied from the lock flavor of this routine).
+ */
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn timeout");
+ return (result);
+}
+
+/*
+ * txn_Cmd --
+ * Implements the "txn" widget.
+ */
+static int
+txn_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Txn handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ /*
+ * Use #ifdef CONFIG_TEST (not #if) so the guard behaves the same
+ * whether CONFIG_TEST is defined to a value or defined empty, and
+ * to match the #ifdef usage elsewhere in this file.
+ */
+ static char *txncmds[] = {
+#ifdef CONFIG_TEST
+ "discard",
+ "id",
+ "prepare",
+#endif
+ "abort",
+ "commit",
+ NULL
+ };
+ enum txncmds {
+#ifdef CONFIG_TEST
+ TXNDISCARD,
+ TXNID,
+ TXNPREPARE,
+#endif
+ TXNABORT,
+ TXNCOMMIT
+ };
+ DBTCL_INFO *txnip;
+ DB_TXN *txnp;
+ Tcl_Obj *res;
+ int cmdindex, result, ret;
+ u_int8_t *gid;
+
+ Tcl_ResetResult(interp);
+ txnp = (DB_TXN *)clientData;
+ txnip = _PtrToInfo((void *)txnp);
+ result = TCL_OK;
+ if (txnp == NULL) {
+ Tcl_SetResult(interp, "NULL txn pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (txnip == NULL) {
+ Tcl_SetResult(interp, "NULL txn info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], txncmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum txncmds)cmdindex) {
+#ifdef CONFIG_TEST
+ case TXNDISCARD:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txnp->discard(txnp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn discard");
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
+ case TXNID:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txnp->id(txnp);
+ res = Tcl_NewIntObj(ret);
+ break;
+ case TXNPREPARE:
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ gid = (u_int8_t *)Tcl_GetByteArrayFromObj(objv[2], NULL);
+ ret = txnp->prepare(txnp, gid);
+ /*
+ * !!!
+ * DB_TXN->prepare commits all outstanding children. But it
+ * does NOT destroy the current txn handle. So, we must call
+ * _TxnInfoDelete to recursively remove all nested txn handles,
+ * we do not call _DeleteInfo on ourselves.
+ */
+ _TxnInfoDelete(interp, txnip);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn prepare");
+ break;
+#endif
+ case TXNABORT:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txnp->abort(txnp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn abort");
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
+ case TXNCOMMIT:
+ result = tcl_TxnCommit(interp, objc, objv, txnp, txnip);
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+static int
+tcl_TxnCommit(interp, objc, objv, txnp, txnip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_TXN *txnp; /* Transaction pointer */
+ DBTCL_INFO *txnip; /* Info pointer */
+{
+ static char *commitopt[] = {
+ "-nosync",
+ "-sync",
+ NULL
+ };
+ /*
+ * The enumerators MUST be declared in the same order as the
+ * strings in commitopt[]: Tcl_GetIndexFromObj returns the array
+ * index. They were previously swapped, so "-nosync" selected
+ * DB_TXN_SYNC and "-sync" selected DB_TXN_NOSYNC.
+ */
+ enum commitopt {
+ COMNOSYNC,
+ COMSYNC
+ };
+ u_int32_t flag;
+ int optindex, result, ret;
+
+ COMPQUIET(txnip, NULL);
+
+ result = TCL_OK;
+ flag = 0;
+ if (objc != 2 && objc != 3) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ if (objc == 3) {
+ if (Tcl_GetIndexFromObj(interp, objv[2], commitopt,
+ "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[2]));
+ switch ((enum commitopt)optindex) {
+ case COMSYNC:
+ FLAG_CHECK(flag);
+ flag = DB_TXN_SYNC;
+ break;
+ case COMNOSYNC:
+ FLAG_CHECK(flag);
+ flag = DB_TXN_NOSYNC;
+ break;
+ }
+ }
+
+ _debug_check();
+ ret = txnp->commit(txnp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn commit");
+ return (result);
+}
+
+#if CONFIG_TEST
+/*
+ * tcl_TxnRecover --
+ *
+ * PUBLIC: int tcl_TxnRecover __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+ */
+int
+tcl_TxnRecover(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+/*
+ * For each prepared-but-unresolved txn returned by txn_recover,
+ * create a txn widget/info pair and append {name gid} to the result
+ * list. A macro (not a function) so 'res', 'error', and the locals
+ * below are in scope at each expansion site.
+ */
+#define DO_PREPLIST(count) \
+for (i = 0; i < count; i++) { \
+ snprintf(newname, sizeof(newname), "%s.txn%d", \
+ envip->i_name, envip->i_envtxnid); \
+ ip = _NewInfo(interp, NULL, newname, I_TXN); \
+ if (ip == NULL) { \
+ Tcl_SetResult(interp, "Could not set up info", \
+ TCL_STATIC); \
+ return (TCL_ERROR); \
+ } \
+ envip->i_envtxnid++; \
+ ip->i_parent = envip; \
+ p = &prep[i]; \
+ _SetInfoData(ip, p->txn); \
+ Tcl_CreateObjCommand(interp, newname, \
+ (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)p->txn, NULL); \
+ result = _SetListElem(interp, res, newname, strlen(newname), \
+ p->gid, DB_XIDDATASIZE); \
+ if (result != TCL_OK) \
+ goto error; \
+}
+
+ DBTCL_INFO *ip;
+ DB_PREPLIST prep[DBTCL_PREP], *p;
+ Tcl_Obj *res;
+ long count, i;
+ int result, ret;
+ char newname[MSG_SIZE];
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->txn_recover(envp, prep, DBTCL_PREP, &count, DB_FIRST);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn recover");
+ if (result == TCL_ERROR)
+ return (result);
+ res = Tcl_NewObj();
+ DO_PREPLIST(count);
+
+ /*
+ * If count returned is the maximum size we have, then there
+ * might be more. Keep going until we get them all.
+ */
+ while (count == DBTCL_PREP) {
+ ret = envp->txn_recover(
+ envp, prep, DBTCL_PREP, &count, DB_NEXT);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn recover");
+ if (result == TCL_ERROR)
+ return (result);
+ DO_PREPLIST(count);
+ }
+ Tcl_SetObjResult(interp, res);
+error:
+ return (result);
+}
+#endif
diff --git a/storage/bdb/tcl/tcl_util.c b/storage/bdb/tcl/tcl_util.c
new file mode 100644
index 00000000000..3c0665f9e38
--- /dev/null
+++ b/storage/bdb/tcl/tcl_util.c
@@ -0,0 +1,381 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_util.c,v 11.35 2002/08/06 06:21:42 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int mutex_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+
+/*
+ * bdb_RandCommand --
+ * Implements rand* functions.
+ *
+ * PUBLIC: int bdb_RandCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+bdb_RandCommand(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *rcmds[] = {
+ "rand", "random_int", "srand",
+ NULL
+ };
+ enum rcmds {
+ RRAND, RRAND_INT, RSRAND
+ };
+ long t;
+ int cmdindex, hi, lo, result, ret;
+ Tcl_Obj *res;
+ char msg[MSG_SIZE];
+
+ result = TCL_OK;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], rcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum rcmds)cmdindex) {
+ case RRAND:
+ /*
+ * Must be 0 args. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ ret = rand();
+ res = Tcl_NewIntObj(ret);
+ break;
+ case RRAND_INT:
+ /*
+ * Must be 4 args. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lo hi");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &lo);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, objv[3], &hi);
+ if (result == TCL_OK) {
+#ifndef RAND_MAX
+#define RAND_MAX 0x7fffffff
+#endif
+ t = rand();
+ /*
+ * Defensive check only: rand() is specified to
+ * return values in [0, RAND_MAX], so this branch
+ * should be unreachable unless RAND_MAX came from
+ * the fallback #define above.
+ */
+ if (t > RAND_MAX) {
+ snprintf(msg, MSG_SIZE,
+ "Max random is higher than %ld\n",
+ (long)RAND_MAX);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ break;
+ }
+ _debug_check();
+ /* Scale t into [lo, hi] inclusive. */
+ ret = (int)(((double)t / ((double)(RAND_MAX) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ res = Tcl_NewIntObj(ret);
+ }
+ break;
+ case RSRAND:
+ /*
+ * Must be 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "seed");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &lo);
+ if (result == TCL_OK) {
+ srand((u_int)lo);
+ res = Tcl_NewIntObj(0);
+ }
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ *
+ * tcl_Mutex --
+ * Opens an env mutex.
+ *
+ * PUBLIC: int tcl_Mutex __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *,
+ * PUBLIC: DBTCL_INFO *));
+ */
+int
+tcl_Mutex(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ DBTCL_INFO *ip;
+ Tcl_Obj *res;
+ _MUTEX_DATA *md;
+ int i, mode, nitems, result, ret;
+ char newname[MSG_SIZE];
+
+ md = NULL;
+ result = TCL_OK;
+ mode = nitems = ret = 0;
+ memset(newname, 0, MSG_SIZE);
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "mode nitems");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &mode);
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+ result = Tcl_GetIntFromObj(interp, objv[3], &nitems);
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+
+ snprintf(newname, sizeof(newname),
+ "%s.mutex%d", envip->i_name, envip->i_envmutexid);
+ ip = _NewInfo(interp, NULL, newname, I_MUTEX);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ /*
+ * Set up mutex.
+ */
+ /*
+ * Map in the region.
+ *
+ * XXX
+ * We don't bother doing this "right", i.e., using the shalloc
+ * functions, just grab some memory knowing that it's correctly
+ * aligned.
+ */
+ _debug_check();
+ if (__os_calloc(NULL, 1, sizeof(_MUTEX_DATA), &md) != 0)
+ goto posixout;
+ md->env = envp;
+ md->n_mutex = nitems;
+ md->size = sizeof(_MUTEX_ENTRY) * nitems;
+
+ md->reginfo.type = REGION_TYPE_MUTEX;
+ md->reginfo.id = INVALID_REGION_TYPE;
+ md->reginfo.mode = mode;
+ md->reginfo.flags = REGION_CREATE_OK | REGION_JOIN_OK;
+ if ((ret = __db_r_attach(envp, &md->reginfo, md->size)) != 0)
+ goto posixout;
+ md->marray = md->reginfo.addr;
+
+ /* Initialize a created region. */
+ if (F_ISSET(&md->reginfo, REGION_CREATE))
+ for (i = 0; i < nitems; i++) {
+ md->marray[i].val = 0;
+ if ((ret = __db_mutex_init_int(envp,
+ &md->marray[i].m, i, 0)) != 0)
+ goto posixout;
+ }
+ R_UNLOCK(envp, &md->reginfo);
+
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this mutex.
+ */
+ envip->i_envmutexid++;
+ ip->i_parent = envip;
+ _SetInfoData(ip, md);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)mutex_Cmd, (ClientData)md, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+
+ return (TCL_OK);
+
+ /*
+ * Error path: report the POSIX/DB error, then unwind the info
+ * entry and any partially attached region before returning.
+ */
+posixout:
+ if (ret > 0)
+ Tcl_PosixError(interp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mutex");
+ _DeleteInfo(ip);
+
+ if (md != NULL) {
+ if (md->reginfo.addr != NULL)
+ (void)__db_r_detach(md->env,
+ &md->reginfo, F_ISSET(&md->reginfo, REGION_CREATE));
+ __os_free(md->env, md);
+ }
+ return (result);
+}
+
+/*
+ * mutex_Cmd --
+ * Implements the "mutex" widget.
+ */
+static int
+mutex_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Mutex handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *mxcmds[] = {
+ "close",
+ "get",
+ "getval",
+ "release",
+ "setval",
+ NULL
+ };
+ /* NB: enumerators must stay in the same order as mxcmds[]. */
+ enum mxcmds {
+ MXCLOSE,
+ MXGET,
+ MXGETVAL,
+ MXRELE,
+ MXSETVAL
+ };
+ DB_ENV *dbenv;
+ DBTCL_INFO *envip, *mpip;
+ _MUTEX_DATA *mp;
+ Tcl_Obj *res;
+ int cmdindex, id, result, newval;
+
+ Tcl_ResetResult(interp);
+ mp = (_MUTEX_DATA *)clientData;
+ /*
+ * NOTE(review): mpip/envip are dereferenced here before the NULL
+ * checks below run; the checks only protect the code after them.
+ */
+ mpip = _PtrToInfo((void *)mp);
+ envip = mpip->i_parent;
+ dbenv = envip->i_envp;
+ result = TCL_OK;
+
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mpip == NULL) {
+ Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Get the command name index from the object based on the dbcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], mxcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum mxcmds)cmdindex) {
+ case MXCLOSE:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ (void)__db_r_detach(mp->env, &mp->reginfo, 0);
+ res = Tcl_NewIntObj(0);
+ (void)Tcl_DeleteCommand(interp, mpip->i_name);
+ _DeleteInfo(mpip);
+ __os_free(mp->env, mp);
+ break;
+ case MXRELE:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ MUTEX_UNLOCK(dbenv, &mp->marray[id].m);
+ res = Tcl_NewIntObj(0);
+ break;
+ case MXGET:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ MUTEX_LOCK(dbenv, &mp->marray[id].m);
+ res = Tcl_NewIntObj(0);
+ break;
+ case MXGETVAL:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ res = Tcl_NewLongObj((long)mp->marray[id].val);
+ break;
+ case MXSETVAL:
+ /*
+ * Check for 2 args. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id val");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, objv[3], &newval);
+ if (result != TCL_OK)
+ break;
+ mp->marray[id].val = newval;
+ res = Tcl_NewIntObj(0);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
diff --git a/storage/bdb/test/archive.tcl b/storage/bdb/test/archive.tcl
new file mode 100644
index 00000000000..9b5e764b2b4
--- /dev/null
+++ b/storage/bdb/test/archive.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: archive.tcl,v 11.20 2002/04/30 19:21:21 sue Exp $
+#
+# Options are:
+# -checkrec <checkpoint frequency"
+# -dir <dbhome directory>
+# -maxfilesize <maxsize of log file>
+proc archive { args } {
+ global alphabet
+ source ./include.tcl
+
+ # Set defaults
+ set maxbsize [expr 8 * 1024]
+ set maxfile [expr 32 * 1024]
+ set checkrec 500
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -c.* { incr i; set checkrec [lindex $args $i] }
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -m.* { incr i; set maxfile [lindex $args $i] }
+ default {
+ puts "FAIL:[timestamp] archive usage"
+ puts "usage: archive -checkrec <checkpt freq> \
+ -dir <directory> -maxfilesize <max size of log files>"
+ return
+ }
+
+ }
+ }
+
+ # Clean out old log if it existed
+ puts "Archive: Log archive test"
+ puts "Unlinking log: error message OK"
+ env_cleanup $testdir
+
+ # Now run the various functionality tests
+ set eflags "-create -txn -home $testdir \
+ -log_buffer $maxbsize -log_max $maxfile"
+ set dbenv [eval {berkdb_env} $eflags]
+ error_check_bad dbenv $dbenv NULL
+ error_check_good dbenv [is_substr $dbenv env] 1
+
+ set logc [$dbenv log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
+
+ # The basic test structure here is that we write a lot of log
+ # records (enough to fill up 100 log files; each log file it
+ # small). We take periodic checkpoints. Between each pair
+ # of checkpoints, we refer to 2 files, overlapping them each
+ # checkpoint. We also start transactions and let them overlap
+ # checkpoints as well. The pattern that we try to create is:
+ # ---- write log records----|||||--- write log records ---
+ # -T1 T2 T3 --- D1 D2 ------CHECK--- CT1 --- D2 D3 CD1 ----CHECK
+ # where TX is begin transaction, CTx is commit transaction, DX is
+ # open data file and CDx is close datafile.
+
+ set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet"
+ puts "\tArchive.a: Writing log records; checkpoint every $checkrec records"
+ set nrecs $maxfile
+ set rec 0:$baserec
+
+ # Begin transaction and write a log record
+ set t1 [$dbenv txn]
+ error_check_good t1:txn_begin [is_substr $t1 "txn"] 1
+
+ set l1 [$dbenv log_put $rec]
+ error_check_bad l1:log_put [llength $l1] 0
+
+ # lsnlist tracks, per open txn, the file number of its first log
+ # record; the oldest entry bounds what db_archive may remove.
+ set lsnlist [list [lindex $l1 0]]
+
+ set t2 [$dbenv txn]
+ error_check_good t2:txn_begin [is_substr $t2 "txn"] 1
+
+ set l1 [$dbenv log_put $rec]
+ lappend lsnlist [lindex $l1 0]
+
+ set t3 [$dbenv txn]
+ set l1 [$dbenv log_put $rec]
+ lappend lsnlist [lindex $l1 0]
+
+ set txnlist [list $t1 $t2 $t3]
+ set db1 [eval {berkdb_open} "-create -mode 0644 -hash -env $dbenv ar1"]
+ set db2 [eval {berkdb_open} "-create -mode 0644 -btree -env $dbenv ar2"]
+ set dbcount 3
+ set dblist [list $db1 $db2]
+
+ for { set i 1 } { $i <= $nrecs } { incr i } {
+ set rec $i:$baserec
+ set lsn [$dbenv log_put $rec]
+ error_check_bad log_put [llength $lsn] 0
+ # Every checkrec records: checkpoint, then cross-check the
+ # various db_archive output modes against each other.
+ if { [expr $i % $checkrec] == 0 } {
+ # Take a checkpoint
+ $dbenv txn_checkpoint
+ set ckp_file [lindex [lindex [$logc get -last] 0] 0]
+ catch { archive_command -h $testdir -a } res_log_full
+ if { [string first db_archive $res_log_full] == 0 } {
+ set res_log_full ""
+ }
+ catch { archive_command -h $testdir } res_log
+ if { [string first db_archive $res_log] == 0 } {
+ set res_log ""
+ }
+ catch { archive_command -h $testdir -l } res_alllog
+ catch { archive_command -h $testdir -a -s } \
+ res_data_full
+ catch { archive_command -h $testdir -s } res_data
+ error_check_good nlogfiles [llength $res_alllog] \
+ [lindex [lindex [$logc get -last] 0] 0]
+ error_check_good logs_match [llength $res_log_full] \
+ [llength $res_log]
+ error_check_good data_match [llength $res_data_full] \
+ [llength $res_data]
+
+ # Check right number of log files
+ error_check_good nlogs [llength $res_log] \
+ [expr [lindex $lsnlist 0] - 1]
+
+ # Check that the relative names are a subset of the
+ # full names
+ set n 0
+ foreach x $res_log {
+ error_check_bad log_name_match:$res_log \
+ [string first $x \
+ [lindex $res_log_full $n]] -1
+ incr n
+ }
+
+ set n 0
+ foreach x $res_data {
+ error_check_bad log_name_match:$res_data \
+ [string first $x \
+ [lindex $res_data_full $n]] -1
+ incr n
+ }
+
+ # Begin/commit any transactions
+ set t [lindex $txnlist 0]
+ if { [string length $t] != 0 } {
+ error_check_good txn_commit:$t [$t commit] 0
+ set txnlist [lrange $txnlist 1 end]
+ }
+ set lsnlist [lrange $lsnlist 1 end]
+
+ if { [llength $txnlist] == 0 } {
+ set t1 [$dbenv txn]
+ error_check_bad tx_begin $t1 NULL
+ error_check_good \
+ tx_begin [is_substr $t1 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set t2 [$dbenv txn]
+ error_check_bad tx_begin $t2 NULL
+ error_check_good \
+ tx_begin [is_substr $t2 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set t3 [$dbenv txn]
+ error_check_bad tx_begin $t3 NULL
+ error_check_good \
+ tx_begin [is_substr $t3 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set txnlist [list $t1 $t2 $t3]
+ }
+
+ # Open/close some DB files
+ if { [expr $dbcount % 2] == 0 } {
+ set type "-hash"
+ } else {
+ set type "-btree"
+ }
+ set db [eval {berkdb_open} \
+ "-create -mode 0644 $type -env $dbenv ar$dbcount"]
+ error_check_bad db_open:$dbcount $db NULL
+ error_check_good db_open:$dbcount [is_substr $db db] 1
+ incr dbcount
+
+ lappend dblist $db
+ set db [lindex $dblist 0]
+ error_check_good db_close:$db [$db close] 0
+ set dblist [lrange $dblist 1 end]
+
+ }
+ }
+ # Commit any transactions still running.
+ puts "\tArchive.b: Commit any transactions still running."
+ foreach t $txnlist {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Close any files that are still open.
+ puts "\tArchive.c: Close open files."
+ foreach d $dblist {
+ error_check_good db_close:$db [$d close] 0
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor_close [$logc close] 0
+ reset_env $dbenv
+}
+
+proc archive_command { args } {
+ source ./include.tcl
+
+ # Catch a list of files output by db_archive.
+ catch { eval exec $util_path/db_archive $args } output
+
+ # Previously this condition was "$is_windows_test == 1 || 1", which
+ # is always true and converted filenames on every platform, contrary
+ # to the comment below.
+ if { $is_windows_test == 1 } {
+ # On Windows, convert all filenames to use forward slashes.
+ regsub -all {[\\]} $output / output
+ }
+
+ # Output the [possibly-transformed] list.
+ return $output
+}
+
+proc min { a b } {
+ # Return the smaller of the two numeric arguments.
+ return [expr {$a < $b ? $a : $b}]
+}
diff --git a/storage/bdb/test/bigfile001.tcl b/storage/bdb/test/bigfile001.tcl
new file mode 100644
index 00000000000..78dcd940f5e
--- /dev/null
+++ b/storage/bdb/test/bigfile001.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: bigfile001.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
+#
+# TEST bigfile001
+# TEST Create a database greater than 4 GB in size. Close, verify.
+# TEST Grow the database somewhat. Close, reverify. Lather, rinse,
+# TEST repeat. Since it will not work on all systems, this test is
+# TEST not run by default.
+# Create a >4GB database (default: 1048576 items of 4096 bytes), verify it,
+# then grow it $growtms times by $growby items, re-verifying after each pass.
+# Not run by default since it needs several GB of disk.
+proc bigfile001 { method \
+    { itemsize 4096 } { nitems 1048576 } { growby 5000 } { growtms 2 } args } {
+	source ./include.tcl
+
+	set args [convert_args $method $args]
+	set omethod [convert_method $method]
+
+	puts "Bigfile: $method ($args) $nitems * $itemsize bytes of data"
+
+	env_cleanup $testdir
+
+	# Create the database.  Use 64K pages;  we want a good fill
+	# factor, and page size doesn't matter much.  Use a 50MB
+	# cache;  that should be manageable, and will help
+	# performance.
+	set dbname $testdir/big.db
+
+	set db [eval {berkdb_open -create} {-pagesize 65536 \
+	    -cachesize {0 50000000 0}} $omethod $args $dbname]
+	error_check_good db_open [is_valid_db $db] TRUE
+
+	puts -nonewline "\tBigfile.a: Creating database...0%..."
+	flush stdout
+
+	# All items carry identical payload; only keys differ.
+	set data [string repeat z $itemsize]
+
+	# Tracks whether the progress display has reached two digits, so we
+	# know how many backspaces are needed to overwrite "NN%...".
+	set more_than_ten_already 0
+	for { set i 0 } { $i < $nitems } { incr i } {
+		set key key[format %08u $i]
+
+		error_check_good db_put($i) [$db put $key $data] 0
+
+		# Refresh the percent-complete display every 5000 puts.
+		if { $i % 5000 == 0 } {
+			set pct [expr 100 * $i / $nitems]
+			puts -nonewline "\b\b\b\b\b"
+			if { $pct >= 10 } {
+				if { $more_than_ten_already } {
+					puts -nonewline "\b"
+				} else {
+					set more_than_ten_already 1
+				}
+			}
+
+			puts -nonewline "$pct%..."
+			flush stdout
+		}
+	}
+	puts "\b\b\b\b\b\b100%..."
+	error_check_good db_close [$db close] 0
+
+	puts "\tBigfile.b: Verifying database..."
+	error_check_good verify \
+	    [verify_dir $testdir "\t\t" 0 0 1 50000000] 0
+
+	puts "\tBigfile.c: Grow database $growtms times by $growby items"
+
+	for { set j 0 } { $j < $growtms } { incr j } {
+		set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname]
+		error_check_good db_open [is_valid_db $db] TRUE
+		puts -nonewline "\t\tBigfile.c.1: Adding $growby items..."
+		flush stdout
+		for { set i 0 } { $i < $growby } { incr i } {
+			# Suffix with .$j so each growth pass uses fresh keys.
+			set key key[format %08u $i].$j
+			error_check_good db_put($j.$i) [$db put $key $data] 0
+		}
+		error_check_good db_close [$db close] 0
+		puts "done."
+
+		puts "\t\tBigfile.c.2: Verifying database..."
+		error_check_good verify($j) \
+		    [verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0
+	}
+}
diff --git a/storage/bdb/test/bigfile002.tcl b/storage/bdb/test/bigfile002.tcl
new file mode 100644
index 00000000000..f3e6defeaba
--- /dev/null
+++ b/storage/bdb/test/bigfile002.tcl
@@ -0,0 +1,45 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: bigfile002.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
+#
+# TEST bigfile002
+# TEST This one should be faster and not require so much disk space,
+# TEST although it doesn't test as extensively. Create an mpool file
+# TEST with 1K pages. Dirty page 6000000. Sync.
+# Create a large, sparse file through mpool: open a 1K-page mpool file,
+# dirty page 6000000 (roughly a 6GB offset), and fsync.  Faster and less
+# disk-hungry than bigfile001, but tests less.
+proc bigfile002 { args } {
+	source ./include.tcl
+
+	puts -nonewline \
+	    "Bigfile002: Creating large, sparse file through mpool..."
+	flush stdout
+
+	env_cleanup $testdir
+
+	# Create env.
+	set env [berkdb_env -create -home $testdir]
+	error_check_good valid_env [is_valid_env $env] TRUE
+
+	# Create the file.
+	set name big002.file
+	set file [$env mpool -create -pagesize 1024 $name]
+
+	# Dirty page 6000000: fill it with 'A' and check it took.
+	set pg [$file get -create 6000000]
+	error_check_good pg_init [$pg init A] 0
+	error_check_good pg_set [$pg is_setto A] 1
+
+	# Put page back.
+	error_check_good pg_put [$pg put -dirty] 0
+
+	# Fsync.
+	error_check_good fsync [$file fsync] 0
+
+	puts "succeeded."
+
+	# Close.
+	error_check_good fclose [$file close] 0
+	error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/byteorder.tcl b/storage/bdb/test/byteorder.tcl
new file mode 100644
index 00000000000..823ca46270d
--- /dev/null
+++ b/storage/bdb/test/byteorder.tcl
@@ -0,0 +1,34 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: byteorder.tcl,v 11.12 2002/07/29 18:09:25 sue Exp $
+#
+# Byte Order Test
+# Use existing tests and run with both byte orders.
+# Byte Order Test
+# Run a selection of existing tests with both byte orders
+# (little-endian 1234 and big-endian 4321), verifying after each run.
+proc byteorder { method {nentries 1000} } {
+	source ./include.tcl
+	puts "Byteorder: $method $nentries"
+
+	foreach lorder { 1234 4321 } {
+		eval {test001 $method $nentries 0 "01" 0 -lorder $lorder}
+		eval {verify_dir $testdir}
+	}
+	foreach lorder { 1234 4321 } {
+		eval {test003 $method -lorder $lorder}
+		eval {verify_dir $testdir}
+	}
+	foreach lorder { 1234 4321 } {
+		eval {test010 $method $nentries 5 10 -lorder $lorder}
+		eval {verify_dir $testdir}
+	}
+	foreach lorder { 1234 4321 } {
+		eval {test011 $method $nentries 5 11 -lorder $lorder}
+		eval {verify_dir $testdir}
+	}
+	foreach lorder { 1234 4321 } {
+		eval {test018 $method $nentries -lorder $lorder}
+		eval {verify_dir $testdir}
+	}
+}
diff --git a/storage/bdb/test/conscript.tcl b/storage/bdb/test/conscript.tcl
new file mode 100644
index 00000000000..fd12c6e51a0
--- /dev/null
+++ b/storage/bdb/test/conscript.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: conscript.tcl,v 11.17 2002/03/22 21:43:06 krinsky Exp $
+#
+# Script for DB_CONSUME test (test070.tcl).
+# Usage: conscript dir file runtype nitems outputfile tnum args
+# dir: DBHOME directory
+# file: db file on which to operate
+# runtype: PRODUCE or CONSUME--which am I?
+# nitems: number of items to put or get
+# outputfile: where to log consumer results
+# tnum: test number
+
+# Producer half of the DB_CONSUME test: append $nitems records to the
+# queue database opened by $db_cmd and check that each -append returns a
+# record number greater than the previous one (modulo 32-bit wraparound).
+proc consumescript_produce { db_cmd nitems tnum args } {
+	source ./include.tcl
+	global mydata
+
+	set pid [pid]
+	puts "\tTest0$tnum: Producer $pid starting, producing $nitems items."
+
+	set db [eval $db_cmd]
+	error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+	# oret tracks the previous record number; -1 means "none yet".
+	set oret -1
+	set ret 0
+	for { set ndx 0 } { $ndx < $nitems } { incr ndx } {
+		set oret $ret
+		# On interpreters whose ints are wider than 32 bits
+		# (0xffffffff > 0), renormalize the previous recno into
+		# signed 32-bit range so the comparison below still models
+		# 32-bit recno wraparound -- presumably matching the
+		# library's recno width; verify against the Tcl build.
+		if { 0xffffffff > 0 && $oret > 0x7fffffff } {
+			incr oret [expr 0 - 0x100000000]
+		}
+		set ret [$db put -append [chop_data q $mydata]]
+		# Record numbers must grow, with special-casing around
+		# sign wraparound of the new/old values.
+		error_check_good db_put \
+		    [expr $ret > 0 ? $oret < $ret : \
+		    $oret < 0 ? $oret < $ret : $oret > $ret] 1
+
+	}
+
+	set ret [catch {$db close} res]
+	error_check_good db_close:$pid $ret 0
+	puts "\t\tTest0$tnum: Producer $pid finished."
+}
+
+# Consumer half of the DB_CONSUME test: repeatedly get with $mode
+# (-consume or -consume_wait) until $nitems records have been consumed,
+# logging each consumed record number to $outputfile for later checking.
+proc consumescript_consume { db_cmd nitems tnum outputfile mode args } {
+	source ./include.tcl
+	global mydata
+	set pid [pid]
+	puts "\tTest0$tnum: Consumer $pid starting, seeking $nitems items."
+
+	set db [eval $db_cmd]
+	error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+	set oid [open $outputfile w]
+
+	# Note: ndx is only advanced when a record is actually consumed,
+	# so empty gets simply retry (busy-wait with -consume).
+	for { set ndx 0 } { $ndx < $nitems } { } {
+		set ret [$db get $mode]
+		if { [llength $ret] > 0 } {
+			error_check_good correct_data:$pid \
+			    [lindex [lindex $ret 0] 1] [pad_data q $mydata]
+			set rno [lindex [lindex $ret 0] 0]
+			puts $oid $rno
+			incr ndx
+		} else {
+			# No data to consume; wait.
+		}
+	}
+
+	error_check_good output_close:$pid [close $oid] ""
+
+	set ret [catch {$db close} res]
+	error_check_good db_close:$pid $ret 0
+	puts "\t\tTest0$tnum: Consumer $pid finished."
+}
+
+source ./include.tcl
+source $test_path/test.tcl
+
+# Define the usage string before the argument check that prints it.
+# (Bug fix: the original set $usage *after* the check below, so a bad
+# invocation died on an undefined variable instead of printing usage.)
+set usage "conscript.tcl dir file runtype nitems outputfile tnum"
+
+# Verify usage
+if { $argc < 6 } {
+	puts stderr "FAIL:[timestamp] Usage: $usage"
+	exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set file [lindex $argv 1]
+set runtype [lindex $argv 2]
+set nitems [lindex $argv 3]
+set outputfile [lindex $argv 4]
+set tnum [lindex $argv 5]
+# args is the string "{ -len 20 -pad 0}", so we need to extract the
+# " -len 20 -pad 0" part.
+set args [lindex [lrange $argv 6 end] 0]
+
+set mydata "consumer data"
+
+# Open env
+set dbenv [berkdb_env -home $dir ]
+error_check_good db_env_create [is_valid_env $dbenv] TRUE
+
+# Figure out db opening command.
+set db_cmd [concat {berkdb_open -create -mode 0644 -queue -env}\
+    $dbenv $args $file]
+
+# Invoke consumescript_produce or consumescript_consume based on $runtype
+if { $runtype == "PRODUCE" } {
+	# Producers have nothing to log; make sure outputfile is null.
+	error_check_good no_producer_outputfile $outputfile ""
+	consumescript_produce $db_cmd $nitems $tnum $args
+} elseif { $runtype == "CONSUME" } {
+	consumescript_consume $db_cmd $nitems $tnum $outputfile -consume $args
+} elseif { $runtype == "WAIT" } {
+	consumescript_consume $db_cmd $nitems $tnum $outputfile -consume_wait \
+	    $args
+} else {
+	error_check_good bad_args $runtype "either PRODUCE, CONSUME or WAIT"
+}
+error_check_good env_close [$dbenv close] 0
+exit
diff --git a/storage/bdb/test/dbm.tcl b/storage/bdb/test/dbm.tcl
new file mode 100644
index 00000000000..a392c7a9f3a
--- /dev/null
+++ b/storage/bdb/test/dbm.tcl
@@ -0,0 +1,128 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dbm.tcl,v 11.15 2002/01/11 15:53:19 bostic Exp $
+#
+# TEST dbm
+# TEST Historic DBM interface test. Use the first 1000 entries from the
+# TEST dictionary. Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Then reopen the file, re-retrieve everything. Finally, delete
+# TEST everything.
+# Historic DBM interface test.  Store the first $nentries dictionary words
+# as self-keyed pairs, dump and compare against the dictionary, reopen and
+# re-dump, then delete everything and re-check.
+proc dbm { { nentries 1000 } } {
+	source ./include.tcl
+
+	puts "DBM interfaces test: $nentries"
+
+	# Create the database and open the dictionary
+	set testfile $testdir/dbmtest
+	set t1 $testdir/t1
+	set t2 $testdir/t2
+	set t3 $testdir/t3
+	cleanup $testdir NULL
+
+	error_check_good dbminit [berkdb dbminit $testfile] 0
+	set did [open $dict]
+
+	set flags ""
+	set txn ""
+	set count 0
+	set skippednullkey 0
+
+	puts "\tDBM.a: put/get loop"
+	# Here is the loop where we put and get each key/data pair
+	while { [gets $did str] != -1 && $count < $nentries } {
+		# DBM can't handle zero-length keys
+		if { [string length $str] == 0 } {
+			set skippednullkey 1
+			continue
+		}
+
+		set ret [berkdb store $str $str]
+		error_check_good dbm_store $ret 0
+
+		set d [berkdb fetch $str]
+		error_check_good dbm_fetch $d $str
+		incr count
+	}
+	close $did
+
+	# Now we will get each key from the DB and compare the results
+	# to the original.
+	puts "\tDBM.b: dump file"
+	set oid [open $t1 w]
+	for { set key [berkdb firstkey] } { $key != -1 } {\
+	    set key [berkdb nextkey $key] } {
+		puts $oid $key
+		set d [berkdb fetch $key]
+		error_check_good dbm_refetch $d $key
+	}
+
+	# If we had to skip a zero-length key, juggle things to cover up
+	# this fact in the dump.  (The extra blank line plus the incremented
+	# nentries make the dump line-count match the dictionary head.)
+	if { $skippednullkey == 1 } {
+		puts $oid ""
+		incr nentries 1
+	}
+
+	close $oid
+
+	# Now compare the keys to see if they match the dictionary (or ints)
+	# NOTE(review): $q appears unused below -- possibly a leftover from
+	# a copied test template.
+	set q q
+	filehead $nentries $dict $t3
+	filesort $t3 $t2
+	filesort $t1 $t3
+
+	error_check_good DBM:diff($t3,$t2) \
+	    [filecmp $t3 $t2] 0
+
+	puts "\tDBM.c: close, open, and dump file"
+
+	# Now, reopen the file and run the last test again.
+	error_check_good dbminit2 [berkdb dbminit $testfile] 0
+	set oid [open $t1 w]
+
+	for { set key [berkdb firstkey] } { $key != -1 } {\
+	    set key [berkdb nextkey $key] } {
+		puts $oid $key
+		set d [berkdb fetch $key]
+		error_check_good dbm_refetch $d $key
+	}
+	if { $skippednullkey == 1 } {
+		puts $oid ""
+	}
+	close $oid
+
+	# Now compare the keys to see if they match the dictionary (or ints)
+	filesort $t1 $t3
+
+	error_check_good DBM:diff($t2,$t3) \
+	    [filecmp $t2 $t3] 0
+
+	# Now, reopen the file and delete each entry
+	puts "\tDBM.d: sequential scan and delete"
+
+	error_check_good dbminit3 [berkdb dbminit $testfile] 0
+	set oid [open $t1 w]
+
+	for { set key [berkdb firstkey] } { $key != -1 } {\
+	    set key [berkdb nextkey $key] } {
+		puts $oid $key
+		set ret [berkdb delete $key]
+		error_check_good dbm_delete $ret 0
+	}
+	if { $skippednullkey == 1 } {
+		puts $oid ""
+	}
+	close $oid
+
+	# Now compare the keys to see if they match the dictionary (or ints)
+	filesort $t1 $t3
+
+	error_check_good DBM:diff($t2,$t3) \
+	    [filecmp $t2 $t3] 0
+
+	error_check_good "dbm_close" [berkdb dbmclose] 0
+}
diff --git a/storage/bdb/test/dbscript.tcl b/storage/bdb/test/dbscript.tcl
new file mode 100644
index 00000000000..5decc493e9e
--- /dev/null
+++ b/storage/bdb/test/dbscript.tcl
@@ -0,0 +1,357 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dbscript.tcl,v 11.14 2002/04/01 16:28:16 bostic Exp $
+#
+# Random db tester.
+# Usage: dbscript method file numops ncurs min_del max_add key_avg data_avg dups errpct
+# method: method (we pass this in so that fixed-length records work)
+# file: db file on which to operate
+# numops: number of operations to do
+# ncurs: number of cursors
+# min_del: minimum number of keys before you disable deletes.
+# max_add: maximum number of keys before you disable adds.
+# key_avg: average key size
+# data_avg: average data size
+# dups: 1 indicates dups allowed, 0 indicates no dups
+# errpct: What percent of operations should generate errors
+# seed: Random number generator seed (-1 means use pid)
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "dbscript file numops ncurs min_del max_add key_avg data_avg dups errpcnt"
+
+# Verify usage
+if { $argc != 10 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set method [lindex $argv 0]
+set file [lindex $argv 1]
+set numops [ lindex $argv 2 ]
+set ncurs [ lindex $argv 3 ]
+set min_del [ lindex $argv 4 ]
+set max_add [ lindex $argv 5 ]
+set key_avg [ lindex $argv 6 ]
+set data_avg [ lindex $argv 7 ]
+set dups [ lindex $argv 8 ]
+set errpct [ lindex $argv 9 ]
+
+berkdb srand $rand_init
+
+puts "Beginning execution for [pid]"
+puts "$file database"
+puts "$numops Operations"
+puts "$ncurs cursors"
+puts "$min_del keys before deletes allowed"
+puts "$max_add or fewer keys to add"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+if { $dups != 1 } {
+ puts "No dups"
+} else {
+ puts "Dups allowed"
+}
+puts "$errpct % Errors"
+
+flush stdout
+
+set db [berkdb_open $file]
+set cerr [catch {error_check_good dbopen [is_substr $db db] 1} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+# set method [$db get_type]
+set record_based [is_record_based $method]
+
+# Initialize globals including data
+global nkeys
+global l_keys
+global a_keys
+
+set nkeys [db_init $db 1]
+puts "Initial number of keys: $nkeys"
+
+set pflags ""
+set gflags ""
+set txn ""
+
+# Open the cursors
+set curslist {}
+for { set i 0 } { $i < $ncurs } { incr i } {
+ set dbc [$db cursor]
+ set cerr [catch {error_check_good dbopen [is_substr $dbc $db.c] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ set cerr [catch {error_check_bad cursor_create $dbc NULL} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ lappend curslist $dbc
+
+}
+
+# On each iteration we're going to generate random keys and
+# data. We'll select either a get/put/delete operation unless
+# we have fewer than min_del keys in which case, delete is not
+# an option or more than max_add in which case, add is not
+# an option. The tcl global arrays a_keys and l_keys keep track
+# of key-data pairs indexed by key and a list of keys, accessed
+# by integer.
+set adds 0
+set puts 0
+set gets 0
+set dels 0
+set bad_adds 0
+set bad_puts 0
+set bad_gets 0
+set bad_dels 0
+
+for { set iter 0 } { $iter < $numops } { incr iter } {
+ set op [pick_op $min_del $max_add $nkeys]
+ set err [is_err $errpct]
+
+ # The op0's indicate that there aren't any duplicates, so we
+ # exercise regular operations. If dups is 1, then we'll use
+ # cursor ops.
+ switch $op$dups$err {
+ add00 {
+ incr adds
+
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ newpair $k [pad_data $method $data]
+ }
+ add01 {
+ incr bad_adds
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ add10 {
+ incr adds
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ if { [berkdb random_int 1 2] == 1 } {
+ # Add a new key
+ set k [random_data $key_avg 1 a_keys \
+ $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn \
+ {-keyfirst $k $data}]
+ newpair $k [pad_data $method $data]
+ } else {
+ # Add a new duplicate
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+
+ set op [pick_cursput]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn {$op $k $data}]
+ adddup $k [lindex $dbcinfo 2] $data
+ }
+ }
+ add11 {
+ # TODO
+ incr bad_adds
+ set ret 1
+ }
+ put00 {
+ incr puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn {$k $data}]
+ changepair $k [pad_data $method $data]
+ }
+ put01 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ put10 {
+ incr puts
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+
+ set ret [eval {$dbc put} $txn {-current $data}]
+ changedup $k [lindex $dbcinfo 2] $data
+ }
+ put11 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set dbc [$db cursor]
+ set ret [eval {$dbc put} $txn {-current $data}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ get00 {
+ incr gets
+ set k [random_key]
+ set val [eval {$db get} $txn {$k}]
+ set data [pad_data $method [lindex [lindex $val 0] 1]]
+ if { $data == $a_keys($k) } {
+ set ret 0
+ } else {
+ set ret "FAIL: Error got |$data| expected |$a_keys($k)|"
+ }
+ # Get command requires no state change
+ }
+ get01 {
+ incr bad_gets
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db get} $txn {$k}]
+ # Error case so no change to data state
+ }
+	get10 {
+		incr gets
+		set dbcinfo [random_cursor $curslist]
+		# Bug fix: the original had a bare "else" *inside* the if
+		# body, which Tcl would have invoked as a command and
+		# errored on at runtime.  Both branches intentionally set
+		# ret to 0: a cursor get requires no state change.
+		if { [llength $dbcinfo] == 3 } {
+			set ret 0
+		} else {
+			set ret 0
+		}
+		# Get command requires no state change
+	}
+ get11 {
+ incr bad_gets
+ set k [random_key]
+ set dbc [$db cursor]
+ if { [berkdb random_int 1 2] == 1 } {
+ set dir -next
+ } else {
+ set dir -prev
+ }
+ set ret [eval {$dbc get} $txn {-next $k}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error and get case so no change to data state
+ }
+ del00 {
+ incr dels
+ set k [random_key]
+ set ret [eval {$db del} $txn {$k}]
+ rempair $k
+ }
+ del01 {
+ incr bad_dels
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db del} $txn {$k}]
+ # Error case so no change to data state
+ }
+	del10 {
+		incr dels
+		set dbcinfo [random_cursor $curslist]
+		set dbc [lindex $dbcinfo 0]
+		set ret [eval {$dbc del} $txn]
+		# Bug fix: "dbcinfo" was missing its $ sigil, so lindex
+		# operated on the literal string "dbcinfo" instead of the
+		# cursor-info list, corrupting the tracked key/data state.
+		remdup [lindex $dbcinfo 1] [lindex $dbcinfo 2]
+	}
+ del11 {
+ incr bad_dels
+ set c [$db cursor]
+ set ret [eval {$c del} $txn]
+ set cerr [catch {error_check_good curs_close \
+ [$c close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ }
+ if { $err == 1 } {
+ # Verify failure.
+ set cerr [catch {error_check_good $op$dups$err:$k \
+ [is_substr Error $ret] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ } else {
+ # Verify success
+ set cerr [catch {error_check_good $op$dups$err:$k $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ }
+
+ flush stdout
+}
+
+# Close cursors and file
+foreach i $curslist {
+ set r [$i close]
+ set cerr [catch {error_check_good cursor_close:$i $r 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+}
+
+set r [$db close]
+set cerr [catch {error_check_good db_close:$db $r 0} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: $adds adds $gets gets $puts puts $dels dels"
+puts "Error ops: $bad_adds adds $bad_gets gets $bad_puts puts $bad_dels dels"
+flush stdout
+
+filecheck $file $txn
+
+exit
diff --git a/storage/bdb/test/ddoyscript.tcl b/storage/bdb/test/ddoyscript.tcl
new file mode 100644
index 00000000000..5478a1a98e0
--- /dev/null
+++ b/storage/bdb/test/ddoyscript.tcl
@@ -0,0 +1,172 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ddoyscript.tcl,v 11.6 2002/02/20 16:35:18 sandstro Exp $
+#
+# Deadlock detector script tester.
+# Usage: ddoyscript dir lockerid numprocs
+# dir: DBHOME directory
+# lockerid: Lock id for this locker
+# numprocs: Total number of processes running
+# myid: id of this process --
+# the order that the processes are created is the same
+# in which their lockerid's were allocated so we know
+# that there is a locker age relationship that is isomorphic
+#	with the order relationship of myid's.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddoyscript dir lockerid numprocs oldoryoung"
+
+# Verify usage
+if { $argc != 5 } {
+	puts stderr "FAIL:[timestamp] Usage: $usage"
+	exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set lockerid [ lindex $argv 1 ]
+set numprocs [ lindex $argv 2 ]
+set old_or_young [lindex $argv 3]
+set myid [lindex $argv 4]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+# There are two cases here -- oldest/youngest or a ring locker.
+# Process 0 and process numprocs-1 play the oldest/youngest roles;
+# everyone else participates in the ring.  The final "puts $ret" line
+# is what the parent test parses: 1 = clean, DEADLOCK, or ERROR.
+
+if { $myid == 0 || $myid == [expr $numprocs - 1] } {
+	set waitobj NULL
+	set ret 0
+
+	# Decide which object this process guards (2 or 4) and -- depending
+	# on whether the oldest ("o") or youngest ("y") should be the
+	# victim -- which peer's self-object it will block on.
+	if { $myid == 0 } {
+		set objid 2
+		if { $old_or_young == "o" } {
+			set waitobj [expr $numprocs - 1]
+		}
+	} else {
+		if { $old_or_young == "y" } {
+			set waitobj 0
+		}
+		set objid 4
+	}
+
+	# Acquire own read lock
+	if {[catch {$myenv lock_get read $lockerid $myid} selflock] != 0} {
+		puts $errorInfo
+	} else {
+		error_check_good selfget:$objid [is_substr $selflock $myenv] 1
+	}
+
+	# Acquire read lock
+	if {[catch {$myenv lock_get read $lockerid $objid} lock1] != 0} {
+		puts $errorInfo
+	} else {
+		error_check_good lockget:$objid [is_substr $lock1 $myenv] 1
+	}
+
+	tclsleep 10
+
+	if { $waitobj == "NULL" } {
+		# Sleep for a good long while
+		tclsleep 90
+	} else {
+		# Acquire write lock
+		if {[catch {$myenv lock_get write $lockerid $waitobj} lock2]
+		    != 0} {
+			puts $errorInfo
+			set ret ERROR
+		} else {
+			error_check_good lockget:$waitobj \
+			    [is_substr $lock2 $myenv] 1
+
+			# Now release it
+			if {[catch {$lock2 put} err] != 0} {
+				puts $errorInfo
+				set ret ERROR
+			} else {
+				error_check_good lockput:oy:$objid $err 0
+			}
+		}
+
+	}
+
+	# Release self lock.  A successful release upgrades ret from 0
+	# (undecided) to 1 (clean); any failure pins it at ERROR.
+	if {[catch {$selflock put} err] != 0} {
+		puts $errorInfo
+		if { $ret == 0 } {
+			set ret ERROR
+		}
+	} else {
+		error_check_good selfput:oy:$myid $err 0
+		if { $ret == 0 } {
+			set ret 1
+		}
+	}
+
+	# Release first lock
+	if {[catch {$lock1 put} err] != 0} {
+		puts $errorInfo
+		if { $ret == 0 } {
+			set ret ERROR
+		}
+	} else {
+		error_check_good lockput:oy:$objid $err 0
+		if { $ret == 0 } {
+			set ret 1
+		}
+	}
+
+} else {
+	# Make sure that we succeed if we're locking the same object as
+	# oldest or youngest.
+	if { [expr $myid % 2] == 0 } {
+		set mode read
+	} else {
+		set mode write
+	}
+	# Obtain first lock (should always succeed).
+	if {[catch {$myenv lock_get $mode $lockerid $myid} lock1] != 0} {
+		puts $errorInfo
+	} else {
+		error_check_good lockget:$myid [is_substr $lock1 $myenv] 1
+	}
+
+	tclsleep 30
+
+	# Block on the next ring member's object; wrap to 1 (not 0,
+	# which belongs to the oldest/youngest pair).
+	set nextobj [expr $myid + 1]
+	if { $nextobj == [expr $numprocs - 1] } {
+		set nextobj 1
+	}
+
+	set ret 1
+	if {[catch {$myenv lock_get write $lockerid $nextobj} lock2] != 0} {
+		if {[string match "*DEADLOCK*" $lock2] == 1} {
+			set ret DEADLOCK
+		} else {
+			set ret ERROR
+		}
+	} else {
+		error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+	}
+
+	# Now release the first lock
+	error_check_good lockput:$lock1 [$lock1 put] 0
+
+	if {$ret == 1} {
+		error_check_bad lockget:$nextobj $lock2 NULL
+		error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+		error_check_good lockput:$lock2 [$lock2 put] 0
+	}
+}
+
+puts $ret
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+exit
diff --git a/storage/bdb/test/ddscript.tcl b/storage/bdb/test/ddscript.tcl
new file mode 100644
index 00000000000..621906233a9
--- /dev/null
+++ b/storage/bdb/test/ddscript.tcl
@@ -0,0 +1,44 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ddscript.tcl,v 11.12 2002/02/20 16:35:18 sandstro Exp $
+#
+# Deadlock detector script tester.
+# Usage: ddscript dir test lockerid objid numprocs
+# dir: DBHOME directory
+# test: Which test to run
+# lockerid: Lock id for this locker
+# objid: Object id to lock.
+# numprocs: Total number of processes running
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddscript dir test lockerid objid numprocs"
+
+# Verify usage
+if { $argc != 5 } {
+	puts stderr "FAIL:[timestamp] Usage: $usage"
+	exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set tnum [ lindex $argv 1 ]
+set lockerid [ lindex $argv 2 ]
+set objid [ lindex $argv 3 ]
+set numprocs [ lindex $argv 4 ]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644 ]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+# $tnum names a test proc (e.g. "ring" or "clump") defined in the sourced
+# test utilities; its result is printed for the parent test to parse.
+puts [eval $tnum $myenv $lockerid $objid $numprocs]
+
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+
+exit
diff --git a/storage/bdb/test/dead001.tcl b/storage/bdb/test/dead001.tcl
new file mode 100644
index 00000000000..e9853a87e53
--- /dev/null
+++ b/storage/bdb/test/dead001.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead001.tcl,v 11.33 2002/09/05 17:23:05 sandstro Exp $
+#
+# TEST dead001
+# TEST Use two different configurations to test deadlock detection among a
+# TEST variable number of processes. One configuration has the processes
+# TEST deadlocked in a ring. The other has the processes all deadlocked on
+# TEST a single resource.
+# Deadlock detection among a variable number of processes, with the
+# processes deadlocked either in a ring or clumped on a single resource,
+# using a separate db_deadlock detector process.
+proc dead001 { { procs "2 4 10" } {tests "ring clump" } \
+    {timeout 0} {tnum "001"} } {
+	source ./include.tcl
+	global lock_curid
+	global lock_maxid
+
+	puts "Dead$tnum: Deadlock detector tests"
+
+	env_cleanup $testdir
+
+	# Create the environment.
+	puts "\tDead$tnum.a: creating environment"
+	set env [berkdb_env -create \
+	    -mode 0644 -lock -txn_timeout $timeout -home $testdir]
+	error_check_good lock_env:open [is_valid_env $env] TRUE
+
+	foreach t $tests {
+		foreach n $procs {
+			# Start the deadlock detector; with a timeout, run
+			# it in expiration (-ae) mode.
+			if {$timeout == 0 } {
+				set dpid [exec $util_path/db_deadlock -vw \
+				    -h $testdir >& $testdir/dd.out &]
+			} else {
+				set dpid [exec $util_path/db_deadlock -vw \
+				    -ae -h $testdir >& $testdir/dd.out &]
+			}
+
+			sentinel_init
+			set pidlist ""
+			set ret [$env lock_id_set $lock_curid $lock_maxid]
+			error_check_good lock_id_set $ret 0
+
+			# Fire off the tests
+			puts "\tDead$tnum: $n procs of test $t"
+			for { set i 0 } { $i < $n } { incr i } {
+				set locker [$env lock_id]
+				# NOTE(review): this informational message
+				# lists the log file before ddscript.tcl,
+				# but the exec below passes ddscript.tcl
+				# first -- the message does not match the
+				# actual command line.
+				puts "$tclsh_path $test_path/wrap.tcl \
+				    $testdir/dead$tnum.log.$i \
+				    ddscript.tcl $testdir $t $locker $i $n"
+				set p [exec $tclsh_path \
+				    $test_path/wrap.tcl \
+				    ddscript.tcl $testdir/dead$tnum.log.$i \
+				    $testdir $t $locker $i $n &]
+				lappend pidlist $p
+			}
+			watch_procs $pidlist 5
+
+			# Now check output: tally per-process results
+			# (DEADLOCK / 1 = clean / anything else).
+			set dead 0
+			set clean 0
+			set other 0
+			for { set i 0 } { $i < $n } { incr i } {
+				set did [open $testdir/dead$tnum.log.$i]
+				while { [gets $did val] != -1 } {
+					switch $val {
+						DEADLOCK { incr dead }
+						1 { incr clean }
+						default { incr other }
+					}
+				}
+				close $did
+			}
+			tclkill $dpid
+			puts "dead check..."
+			dead_check $t $n $timeout $dead $clean $other
+		}
+	}
+
+	# Windows needs files closed before deleting files, so pause a little
+	tclsleep 3
+	fileremove -f $testdir/dd.out
+	# Remove log files (n here is the last value from the loops above,
+	# i.e. the largest process count).
+	for { set i 0 } { $i < $n } { incr i } {
+		fileremove -f $testdir/dead$tnum.log.$i
+	}
+	error_check_good lock_env:close [$env close] 0
+}
diff --git a/storage/bdb/test/dead002.tcl b/storage/bdb/test/dead002.tcl
new file mode 100644
index 00000000000..bc19e7127e5
--- /dev/null
+++ b/storage/bdb/test/dead002.tcl
@@ -0,0 +1,75 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead002.tcl,v 11.23 2002/09/05 17:23:05 sandstro Exp $
+#
+# TEST dead002
+# TEST Same test as dead001, but use "detect on every collision" instead
+# TEST of separate deadlock detector.
+# Same test as dead001, but uses "detect on every collision"
+# (-lock_detect on the environment) instead of a separate deadlock
+# detector process.
+proc dead002 { { procs "2 4 10" } {tests "ring clump" } \
+    {timeout 0} {tnum 002} } {
+	source ./include.tcl
+
+	puts "Dead$tnum: Deadlock detector tests"
+
+	env_cleanup $testdir
+
+	# Create the environment.  With a timeout, use expiration-based
+	# detection; otherwise the default policy.
+	puts "\tDead$tnum.a: creating environment"
+	set lmode "default"
+	if { $timeout != 0 } {
+		set lmode "expire"
+	}
+	set env [berkdb_env \
+	    -create -mode 0644 -home $testdir \
+	    -lock -txn_timeout $timeout -lock_detect $lmode]
+	error_check_good lock_env:open [is_valid_env $env] TRUE
+
+	foreach t $tests {
+		foreach n $procs {
+			set pidlist ""
+			sentinel_init
+
+			# Fire off the tests
+			puts "\tDead$tnum: $n procs of test $t"
+			for { set i 0 } { $i < $n } { incr i } {
+				set locker [$env lock_id]
+				# NOTE(review): as in dead001, this message
+				# lists the log file before ddscript.tcl,
+				# unlike the actual exec command line below.
+				puts "$tclsh_path $test_path/wrap.tcl \
+				    $testdir/dead$tnum.log.$i \
+				    ddscript.tcl $testdir $t $locker $i $n"
+				set p [exec $tclsh_path \
+				    $test_path/wrap.tcl \
+				    ddscript.tcl $testdir/dead$tnum.log.$i \
+				    $testdir $t $locker $i $n &]
+				lappend pidlist $p
+			}
+			watch_procs $pidlist 5
+
+			# Now check output
+			set dead 0
+			set clean 0
+			set other 0
+			for { set i 0 } { $i < $n } { incr i } {
+				set did [open $testdir/dead$tnum.log.$i]
+				while { [gets $did val] != -1 } {
+					switch $val {
+						DEADLOCK { incr dead }
+						1 { incr clean }
+						default { incr other }
+					}
+				}
+				close $did
+			}
+			dead_check $t $n $timeout $dead $clean $other
+		}
+	}
+
+	# No db_deadlock process is started here, so dd.out normally does
+	# not exist; -f makes the removal a harmless no-op in that case.
+	fileremove -f $testdir/dd.out
+	# Remove log files
+	for { set i 0 } { $i < $n } { incr i } {
+		fileremove -f $testdir/dead$tnum.log.$i
+	}
+	error_check_good lock_env:close [$env close] 0
+}
diff --git a/storage/bdb/test/dead003.tcl b/storage/bdb/test/dead003.tcl
new file mode 100644
index 00000000000..48088e1427c
--- /dev/null
+++ b/storage/bdb/test/dead003.tcl
@@ -0,0 +1,98 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead003.tcl,v 1.17 2002/09/05 17:23:05 sandstro Exp $
+#
+# TEST dead003
+# TEST
+# TEST Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+# TEST DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
+proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set detects { oldest youngest }
+ puts "Dead003: Deadlock detector tests: $detects"
+
+ # Create the environment.
+ foreach d $detects {
+ env_cleanup $testdir
+ puts "\tDead003.a: creating environment for $d"
+ set env [berkdb_env \
+ -create -mode 0644 -home $testdir -lock -lock_detect $d]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ set pidlist ""
+ sentinel_init
+ set ret [$env lock_id_set \
+ $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead003: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path\
+ $test_path/ddscript.tcl $testdir \
+ $t $locker $i $n >& \
+ $testdir/dead003.log.$i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl \
+ $testdir/dead003.log.$i $testdir \
+ $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead003.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ dead_check $t $n 0 $dead $clean $other
+ #
+ # If we get here we know we have the
+ # correct number of dead/clean procs, as
+ # checked by dead_check above. Now verify
+ # that the right process was the one.
+ puts "\tDead003: Verify $d locks were aborted"
+ set l ""
+ if { $d == "oldest" } {
+ set l [expr $n - 1]
+ }
+ if { $d == "youngest" } {
+ set l 0
+ }
+ set did [open $testdir/dead003.log.$l]
+ while { [gets $did val] != -1 } {
+ error_check_good check_abort \
+ $val 1
+ }
+ close $did
+ }
+ }
+
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead003.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
diff --git a/storage/bdb/test/dead004.tcl b/storage/bdb/test/dead004.tcl
new file mode 100644
index 00000000000..f5306a0d892
--- /dev/null
+++ b/storage/bdb/test/dead004.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead004.tcl,v 11.11 2002/09/05 17:23:05 sandstro Exp $
+#
+# Deadlock Test 4.
+# This test is designed to make sure that we handle youngest and oldest
+# deadlock detection even when the youngest and oldest transactions in the
+# system are not involved in the deadlock (that is, we want to abort the
+# youngest/oldest which is actually involved in the deadlock, not simply
+# the youngest/oldest in the system).
+# Since this is used for transaction systems, the locker ID is what we
+# use to identify age (smaller number is older).
+#
+# The set up is that we have a total of 6 processes. The oldest (locker 0)
+# and the youngest (locker 5) simply acquire a lock, hold it for a long time
+# and then release it. The rest form a ring, obtaining lock N and requesting
+# a lock on (N+1) mod 4. The deadlock detector ought to pick locker 1 or 4
+# to abort and not 0 or 5.
+
+proc dead004 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ foreach a { o y } {
+ puts "Dead004: Deadlock detector test -a $a"
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead004.a: creating environment"
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ set dpid [exec $util_path/db_deadlock -v -t 5 -a $a \
+ -h $testdir >& $testdir/dd.out &]
+
+ set procs 6
+
+ foreach n $procs {
+
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead004: $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead004.log.$i \
+ ddoyscript.tcl $testdir $locker $n $a $i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddoyscript.tcl $testdir/dead004.log.$i \
+ $testdir $locker $n $a $i &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ }
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead004.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+
+ puts "dead check..."
+ dead_check oldyoung $n 0 $dead $clean $other
+
+ # Now verify that neither the oldest nor the
+ # youngest were the deadlock.
+ set did [open $testdir/dead004.log.0]
+ error_check_bad file:old [gets $did val] -1
+ error_check_good read:old $val 1
+ close $did
+
+ set did [open $testdir/dead004.log.[expr $procs - 1]]
+ error_check_bad file:young [gets $did val] -1
+ error_check_good read:young $val 1
+ close $did
+
+ # Windows needs files closed before deleting files,
+ # so pause a little
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead004.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
diff --git a/storage/bdb/test/dead005.tcl b/storage/bdb/test/dead005.tcl
new file mode 100644
index 00000000000..71be8b1713f
--- /dev/null
+++ b/storage/bdb/test/dead005.tcl
@@ -0,0 +1,87 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead005.tcl,v 11.10 2002/09/05 17:23:05 sandstro Exp $
+#
+# Deadlock Test 5.
+# Test out the minlocks, maxlocks, and minwrites options
+# to the deadlock detector.
+proc dead005 { { procs "4 6 10" } {tests "maxlocks minwrites minlocks" } } {
+ source ./include.tcl
+
+ puts "Dead005: minlocks, maxlocks, and minwrites deadlock detection tests"
+ foreach t $tests {
+ puts "Dead005.$t: creating environment"
+ env_cleanup $testdir
+
+ # Create the environment.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+ case $t {
+ minlocks { set to n }
+ maxlocks { set to m }
+ minwrites { set to w }
+ }
+ foreach n $procs {
+ set dpid [exec $util_path/db_deadlock -vw -h $testdir \
+ -a $to >& $testdir/dd.out &]
+ sentinel_init
+ set pidlist ""
+
+ # Fire off the tests
+ puts "\tDead005: $t test with $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead005.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead005.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead005.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+ puts "dead check..."
+ dead_check $t $n 0 $dead $clean $other
+ # Now verify that the correct participant
+ # got deadlocked.
+ switch $t {
+ minlocks {set f 0}
+ minwrites {set f 1}
+ maxlocks {set f [expr $n - 1]}
+ }
+ set did [open $testdir/dead005.log.$f]
+ error_check_bad file:$t [gets $did val] -1
+ error_check_good read($f):$t $val DEADLOCK
+ close $did
+ }
+ error_check_good lock_env:close [$env close] 0
+ # Windows needs files closed before deleting them, so pause
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead005.log.$i
+ }
+ }
+}
diff --git a/storage/bdb/test/dead006.tcl b/storage/bdb/test/dead006.tcl
new file mode 100644
index 00000000000..b70e011fb74
--- /dev/null
+++ b/storage/bdb/test/dead006.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead006.tcl,v 1.4 2002/01/11 15:53:21 bostic Exp $
+#
+# TEST dead006
+# TEST use timeouts rather than the normal dd algorithm.
+proc dead006 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 1000} {tnum 006} } {
+ source ./include.tcl
+
+ dead001 $procs $tests $timeout $tnum
+ dead002 $procs $tests $timeout $tnum
+}
diff --git a/storage/bdb/test/dead007.tcl b/storage/bdb/test/dead007.tcl
new file mode 100644
index 00000000000..2b6a78cb4b9
--- /dev/null
+++ b/storage/bdb/test/dead007.tcl
@@ -0,0 +1,34 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead007.tcl,v 1.3 2002/01/11 15:53:22 bostic Exp $
+#
+# TEST dead007
+# TEST use timeouts rather than the normal dd algorithm.
+proc dead007 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+ puts "Dead007.a -- wrap around"
+ set lock_curid [expr $lock_maxid - 2]
+ dead001 "2 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "4 10"
+ dead004
+
+ puts "Dead007.b -- extend space"
+ set lock_maxid [expr $lock_maxid - 3]
+ set lock_curid [expr $lock_maxid - 1]
+ dead001 "4 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "10"
+ dead004
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
diff --git a/storage/bdb/test/env001.tcl b/storage/bdb/test/env001.tcl
new file mode 100644
index 00000000000..781029f6a5c
--- /dev/null
+++ b/storage/bdb/test/env001.tcl
@@ -0,0 +1,154 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env001.tcl,v 11.26 2002/05/08 19:01:43 margo Exp $
+#
+# TEST env001
+# TEST Test of env remove interface (formerly env_remove).
+proc env001 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile $testdir/env.db
+ set t1 $testdir/t1
+
+ puts "Env001: Test of environment remove interface."
+ env_cleanup $testdir
+
+ # Try opening without Create flag should error
+ puts "\tEnv001.a: Open without create (should fail)."
+ catch {set env [berkdb_env_noerr -home $testdir]} ret
+ error_check_good env:fail [is_substr $ret "no such file"] 1
+
+ # Now try opening with create
+ puts "\tEnv001.b: Open with create."
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # Make sure that close works.
+ puts "\tEnv001.c: Verify close."
+ error_check_good env:close:$env [$env close] 0
+
+ # Make sure we can reopen -- this doesn't work on Windows
+ # because if there is only one opener, the region disappears
+ # when it is closed. We can't do a second opener, because
+ # that will fail on HP-UX.
+ puts "\tEnv001.d: Remove on closed environments."
+ if { $is_windows_test != 1 } {
+ puts "\t\tEnv001.d.1: Verify re-open."
+ set env [berkdb_env -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # remove environment
+ puts "\t\tEnv001.d.2: Close environment."
+ error_check_good env:close [$env close] 0
+ puts "\t\tEnv001.d.3: Try remove with force (should succeed)."
+ error_check_good \
+ envremove [berkdb envremove -force -home $testdir] 0
+ }
+
+ if { $is_windows_test != 1 && $is_hp_test != 1 } {
+ puts "\tEnv001.e: Remove on open environments."
+ puts "\t\tEnv001.e.1: Env is open by single proc,\
+ remove no force."
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -home $testdir} ret]
+ error_check_good env:remove $stat 1
+ error_check_good env:close [$env close] 0
+ }
+
+ puts \
+ "\t\tEnv001.e.2: Env is open by single proc, remove with force."
+ # Now that envremove doesn't do a close, this won't work on Windows.
+ if { $is_windows_test != 1 && $is_hp_test != 1} {
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -force -home $testdir} ret]
+ error_check_good env:remove(force) $ret 0
+ #
+ # Even though the underlying env is gone, we need to close
+ # the handle.
+ #
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_remove $stat 0
+ error_check_good env:close_after_remove \
+ [is_substr $ret "recovery"] 1
+ }
+
+ puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force."
+ # should fail
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb_env_noerr -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+ # First close our env, but leave remote open
+ error_check_good env:close [$env close] 0
+ catch {berkdb envremove -home $testdir} ret
+ error_check_good envremove:2procs:noforce [is_substr $errorCode EBUSY] 1
+ #
+ # even though it failed, $env is no longer valid, so remove it in
+ # the remote process
+ set remote_close [send_cmd $f1 "$remote_env close"]
+ error_check_good remote_close $remote_close 0
+
+ # exit remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+
+ puts "\t\tEnv001.e.4: Env is open by 2 procs, remove with force."
+ # You cannot do this on windows because you can't remove files that
+ # are open, so we skip this test for Windows. On UNIX, it should
+ # succeed
+ if { $is_windows_test != 1 && $is_hp_test != 1 } {
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb_env -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ catch {berkdb envremove -force -home $testdir} ret
+ error_check_good envremove:2procs:force $ret 0
+ #
+ # We still need to close our handle.
+ #
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_error $stat 0
+ error_check_good env:close_after_error \
+ [is_substr $ret recovery] 1
+
+ # Close down remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+ }
+
+ # Try opening in a different dir
+ puts "\tEnv001.f: Try opening env in another directory."
+ if { [file exists $testdir/NEWDIR] != 1 } {
+ file mkdir $testdir/NEWDIR
+ }
+ set eflags "-create -home $testdir/NEWDIR -mode 0644"
+ set env [eval {berkdb_env} $eflags]
+ error_check_bad env:open $env NULL
+ error_check_good env:close [$env close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/NEWDIR] 0
+
+ puts "\tEnv001 complete."
+}
diff --git a/storage/bdb/test/env002.tcl b/storage/bdb/test/env002.tcl
new file mode 100644
index 00000000000..89c44f63a12
--- /dev/null
+++ b/storage/bdb/test/env002.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env002.tcl,v 11.15 2002/02/20 16:35:20 sandstro Exp $
+#
+# TEST env002
+# TEST Test of DB_LOG_DIR and env name resolution.
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the set_lg_dir option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- db_config and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
+proc env002 { } {
+ # env002 is essentially just a small driver that runs
+ # env002_body--formerly the entire test--twice; once, it
+ # supplies a "home" argument to use with environment opens,
+ # and the second time it sets DB_HOME instead.
+ # Note that env002_body itself calls env002_run_test to run
+ # the body of the actual test and check for the presence
+ # of logs. The nesting, I hope, makes this test's structure simpler.
+
+ global env
+ source ./include.tcl
+
+ puts "Env002: set_lg_dir test."
+
+ puts "\tEnv002: Running with -home argument to berkdb_env."
+ env002_body "-home $testdir"
+
+ puts "\tEnv002: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env002_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv002: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env002_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+
+}
+
+proc env002_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set logdir "logs_in_here"
+
+ file mkdir $testdir/$logdir
+
+ # Set up full path to $logdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$logdir
+ set fulllogdir [pwd]
+ cd $curdir
+
+ env002_make_config $logdir
+
+ # Run the meat of the test.
+ env002_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # Run the test again
+ env002_run_test a 2 "absolute path, config file" $home_arg \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ file mkdir $testdir/$logdir
+ env002_run_test b 1 "relative path, db_config" "$home_arg \
+ -log_dir $logdir -data_dir ." \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ # absolute
+ file mkdir $fulllogdir
+ env002_run_test b 2 "absolute path, db_config" "$home_arg \
+ -log_dir $fulllogdir -data_dir ." \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+ # Now, set db_config -and- have a DB_CONFIG file, and make
+ # sure only the latter is honored.
+
+ file mkdir $testdir/$logdir
+ env002_make_config $logdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+ env002_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -log_dir $testdir/bogus \
+ -data_dir ." $testdir/$logdir
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+ env002_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -log_dir $fulllogdir/bogus \
+ -data_dir ." $fulllogdir
+}
+
+proc env002_run_test { major minor msg env_args log_path} {
+ global testdir
+ set testfile "env002.db"
+
+ puts "\t\tEnv002.$major.$minor: $msg"
+
+ # Create an environment, with logging, and scribble some
+ # stuff in a [btree] database in it.
+ # puts [concat {berkdb_env -create -log -private} $env_args]
+ set dbenv [eval {berkdb_env -create -log -private} $env_args]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+ set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set key "some_key"
+ set data "some_data"
+
+ error_check_good db_put \
+ [$db put $key [chop_data btree $data]] 0
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now make sure the log file is where we want it to be.
+ error_check_good db_exists [file exists $testdir/$testfile] 1
+ error_check_good log_exists \
+ [file exists $log_path/log.0000000001] 1
+}
+
+proc env002_make_config { logdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_lg_dir $logdir"
+ close $cid
+}
diff --git a/storage/bdb/test/env003.tcl b/storage/bdb/test/env003.tcl
new file mode 100644
index 00000000000..c16b54dd5e0
--- /dev/null
+++ b/storage/bdb/test/env003.tcl
@@ -0,0 +1,149 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env003.tcl,v 11.21 2002/08/08 15:38:06 bostic Exp $
+#
+# TEST env003
+# TEST Test DB_TMP_DIR and env name resolution
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the DB_TMP_DIR config file option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the -tmp_dir config option is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- -tmp_dir and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
+proc env003 { } {
+ # env003 is essentially just a small driver that runs
+ # env003_body twice. First, it supplies a "home" argument
+ # to use with environment opens, and the second time it sets
+ # DB_HOME instead.
+ # Note that env003_body itself calls env003_run_test to run
+ # the body of the actual test.
+
+ global env
+ source ./include.tcl
+
+ puts "Env003: DB_TMP_DIR test."
+
+ puts "\tEnv003: Running with -home argument to berkdb_env."
+ env003_body "-home $testdir"
+
+ puts "\tEnv003: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env003_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv003: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env003_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+}
+
+proc env003_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set tmpdir "tmpfiles_in_here"
+ file mkdir $testdir/$tmpdir
+
+ # Set up full path to $tmpdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$tmpdir
+ set fulltmpdir [pwd]
+ cd $curdir
+
+ # Create DB_CONFIG
+ env003_make_config $tmpdir
+
+ # Run the meat of the test.
+ env003_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$tmpdir
+
+ env003_make_config $fulltmpdir
+
+ # Run the test again
+ env003_run_test a 2 "absolute path, config file" $home_arg \
+ $fulltmpdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ env003_run_test b 1 "relative path, db_config" "$home_arg \
+ -tmp_dir $tmpdir -data_dir ." \
+ $testdir/$tmpdir
+
+ # absolute paths
+ env003_run_test b 2 "absolute path, db_config" "$home_arg \
+ -tmp_dir $fulltmpdir -data_dir ." \
+ $fulltmpdir
+
+ # Now, set db_config -and- have a DB_CONFIG file, and make
+ # sure only the latter is honored.
+
+ file mkdir $testdir/bogus
+ env003_make_config $tmpdir
+
+ env003_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -tmp_dir $testdir/bogus -data_dir ." \
+ $testdir/$tmpdir
+
+ file mkdir $fulltmpdir/bogus
+ env003_make_config $fulltmpdir
+
+ env003_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \
+ $fulltmpdir
+}
+
+proc env003_run_test { major minor msg env_args tmp_path} {
+ global testdir
+ global alphabet
+ global errorCode
+
+ puts "\t\tEnv003.$major.$minor: $msg"
+
+ # Create an environment and small-cached in-memory database to
+ # use.
+ set dbenv [eval {berkdb_env -create -home $testdir} $env_args \
+ {-cachesize {0 50000 1}}]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ set db [berkdb_open -env $dbenv -create -btree]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Fill the database with more than its cache can fit.
+ #
+ # When CONFIG_TEST is defined, the tempfile is left linked so
+ # we can check for its existence. Size the data to overfill
+ # the cache--the temp file is created lazily, so it is created
+ # when the cache overflows.
+ #
+ set key "key"
+ set data [repeat $alphabet 2000]
+ error_check_good db_put [$db put $key $data] 0
+
+ # Check for exactly one temp file.
+ set ret [glob -nocomplain $tmp_path/BDB*]
+ error_check_good temp_file_exists [llength $ret] 1
+
+ # Can't remove temp file until db is closed on Windows.
+ error_check_good db_close [$db close] 0
+ fileremove -f $ret
+ error_check_good env_close [$dbenv close] 0
+
+}
+
+proc env003_make_config { tmpdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_tmp_dir $tmpdir"
+ close $cid
+}
diff --git a/storage/bdb/test/env004.tcl b/storage/bdb/test/env004.tcl
new file mode 100644
index 00000000000..e93a0d95308
--- /dev/null
+++ b/storage/bdb/test/env004.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env004.tcl,v 11.18 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env004
+# TEST Test multiple data directories. Do a bunch of different opens
+# TEST to make sure that the files are detected in different directories.
+proc env004 { } {
+ source ./include.tcl
+
+ set method "hash"
+ set omethod [convert_method $method]
+ set args [convert_args $method ""]
+
+ puts "Env004: Multiple data directory test."
+
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ puts "\tEnv004.a: Multiple data directories in DB_CONFIG file"
+
+ # Create a config file
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_data_dir data1"
+ puts $cid "set_data_dir data2"
+ puts $cid "set_data_dir data3"
+ close $cid
+
+ # Now get pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+
+ set e [berkdb_env -create -private -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $fulldir $method $e $args
+ error_check_good env_close [$e close] 0
+
+ puts "\tEnv004.b: Multiple data directories in berkdb_env call."
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ # Now call dbenv with config specified
+ set e [berkdb_env -create -private \
+ -data_dir . -data_dir data1 -data_dir data2 \
+ -data_dir data3 -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $fulldir $method $e $args
+ error_check_good env_close [$e close] 0
+
+ env_cleanup $testdir
+}
+
+proc ddir_test { fulldir method e args } {
+ source ./include.tcl
+
+ set args [convert_args $args]
+ set omethod [convert_method $method]
+
+ # Now create one file in each directory
+ set db1 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data1/datafile1.db}]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data2/datafile2.db}]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data3/datafile3.db}]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Close the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+
+ # Now, reopen the files without complete pathnames and make
+ # sure that we find them.
+
+ set db1 [berkdb_open -env $e $fulldir/data1/datafile1.db]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [berkdb_open -env $e $fulldir/data2/datafile2.db]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [berkdb_open -env $e $fulldir/data3/datafile3.db]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Finally close all the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+}
diff --git a/storage/bdb/test/env005.tcl b/storage/bdb/test/env005.tcl
new file mode 100644
index 00000000000..03bb1b40b34
--- /dev/null
+++ b/storage/bdb/test/env005.tcl
@@ -0,0 +1,53 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env005.tcl,v 11.15 2002/02/22 14:28:37 sandstro Exp $
+#
+# TEST env005
+# TEST Test that using subsystems without initializing them correctly
+# TEST returns an error. Cannot test mpool, because it is assumed in
+# TEST the Tcl code.
+proc env005 { } {
+ source ./include.tcl
+
+ puts "Env005: Uninitialized env subsystems test."
+
+ env_cleanup $testdir
+ puts "\tEnv005.a: Creating env with no subsystems."
+
+ set e [berkdb_env_noerr -create -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [berkdb_open -create -btree $testdir/env005.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rlist {
+ { "lock_detect" "Env005.b0"}
+ { "lock_get read 1 1" "Env005.b1"}
+ { "lock_id" "Env005.b2"}
+ { "lock_stat" "Env005.b3"}
+ { "lock_timeout 100" "Env005.b4"}
+ { "log_archive" "Env005.c0"}
+ { "log_cursor" "Env005.c1"}
+ { "log_file {1 1}" "Env005.c2"}
+ { "log_flush" "Env005.c3"}
+ { "log_put record" "Env005.c4"}
+ { "log_stat" "Env005.c5"}
+ { "txn" "Env005.d0"}
+ { "txn_checkpoint" "Env005.d1"}
+ { "txn_stat" "Env005.d2"}
+ { "txn_timeout 100" "Env005.d3"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err [is_substr $ret invalid] 1
+ }
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/storage/bdb/test/env006.tcl b/storage/bdb/test/env006.tcl
new file mode 100644
index 00000000000..48fc6982772
--- /dev/null
+++ b/storage/bdb/test/env006.tcl
@@ -0,0 +1,42 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env006.tcl,v 11.8 2002/01/11 15:53:23 bostic Exp $
+#
+# TEST env006
+# TEST Make sure that all the utilities exist and run.
+proc env006 { } {
+ source ./include.tcl
+
+ puts "Env006: Run underlying utilities."
+
+ set rlist {
+ { "db_archive" "Env006.a"}
+ { "db_checkpoint" "Env006.b"}
+ { "db_deadlock" "Env006.c"}
+ { "db_dump" "Env006.d"}
+ { "db_load" "Env006.e"}
+ { "db_printlog" "Env006.f"}
+ { "db_recover" "Env006.g"}
+ { "db_stat" "Env006.h"}
+ { "db_upgrade" "Env006.i"}
+ { "db_verify" "Env006.j"}
+ }
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+
+ puts "\t$msg: $cmd"
+
+ set stat [catch {exec $util_path/$cmd -?} ret]
+ error_check_good $cmd $stat 1
+
+ #
+ # Check for "usage", but only check "sage" so that
+ # we can handle either Usage or usage.
+ #
+ error_check_good $cmd.err [is_substr $ret sage] 1
+ }
+}
diff --git a/storage/bdb/test/env007.tcl b/storage/bdb/test/env007.tcl
new file mode 100644
index 00000000000..5748d2dbc89
--- /dev/null
+++ b/storage/bdb/test/env007.tcl
@@ -0,0 +1,223 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env007.tcl,v 11.21 2002/08/12 20:49:36 sandstro Exp $
+#
+# TEST env007
+# TEST Test various DB_CONFIG config file options.
+# TEST 1) Make sure command line option is respected
+# TEST 2) Make sure that config file option is respected
+# TEST 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+# TEST method is used, only the file is respected.
+# TEST Then test all known config options.
+proc env007 { } {
+ global errorInfo
+
+ # env007 is essentially just a small driver that runs
+ # env007_body twice. First, it supplies a "set" argument
+ # to use with environment opens, and the second time it sets
+ # DB_CONFIG instead.
+ # Note that env007_body itself calls env007_run_test to run
+ # the body of the actual test.
+
+ source ./include.tcl
+
+ puts "Env007: DB_CONFIG test."
+
+ #
+ # Test only those options we can easily check via stat
+ #
+ set rlist {
+ { " -txn_max " "set_tx_max" "19" "31" "Env007.a: Txn Max"
+ "txn_stat" "Max Txns"}
+ { " -lock_max_locks " "set_lk_max_locks" "17" "29" "Env007.b: Lock Max"
+ "lock_stat" "Maximum locks"}
+ { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000"
+ "Env007.c: Max Lockers" "lock_stat" "Maximum lockers"}
+ { " -lock_max_objects " "set_lk_max_objects" "1500" "2000"
+ "Env007.d: Max Objects" "lock_stat" "Maximum objects"}
+ { " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.e: Log Bsize"
+ "log_stat" "Log record cache size"}
+ { " -log_max " "set_lg_max" "8388608" "9437184" "Env007.f: Log Max"
+ "log_stat" "Current log file size"}
+ }
+
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
+ foreach item $rlist {
+ set envarg [lindex $item 0]
+ set configarg [lindex $item 1]
+ set envval [lindex $item 2]
+ set configval [lindex $item 3]
+ set msg [lindex $item 4]
+ set statcmd [lindex $item 5]
+ set statstr [lindex $item 6]
+
+ env_cleanup $testdir
+ # First verify using just env args
+ puts "\t$msg Environment argument only"
+ set env [eval $e $envarg $envval]
+ error_check_good envopen:0 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $envval
+ error_check_good envclose:0 [$env close] 0
+
+ env_cleanup $testdir
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t$msg Config file only"
+ set env [eval $e]
+ error_check_good envopen:1 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $configval
+ error_check_good envclose:1 [$env close] 0
+
+ # First verify using just env args
+ puts "\t$msg Environment arg and config file"
+ set env [eval $e $envarg $envval]
+ error_check_good envopen:2 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $configval
+ error_check_good envclose:2 [$env close] 0
+ }
+
+ #
+ # Test all options. For all config options, write it out
+ # to the file and make sure we can open the env. We cannot
+ # necessarily check via stat that it worked but this execs
+ # the config file code itself.
+ #
+ set cfglist {
+ { "set_cachesize" "0 1048576 0" }
+ { "set_data_dir" "." }
+ { "set_flags" "db_cdb_alldb" }
+ { "set_flags" "db_direct_db" }
+ { "set_flags" "db_direct_log" }
+ { "set_flags" "db_nolocking" }
+ { "set_flags" "db_nommap" }
+ { "set_flags" "db_nopanic" }
+ { "set_flags" "db_overwrite" }
+ { "set_flags" "db_region_init" }
+ { "set_flags" "db_txn_nosync" }
+ { "set_flags" "db_txn_write_nosync" }
+ { "set_flags" "db_yieldcpu" }
+ { "set_lg_bsize" "65536" }
+ { "set_lg_dir" "." }
+ { "set_lg_max" "8388608" }
+ { "set_lg_regionmax" "65536" }
+ { "set_lk_detect" "db_lock_default" }
+ { "set_lk_detect" "db_lock_expire" }
+ { "set_lk_detect" "db_lock_maxlocks" }
+ { "set_lk_detect" "db_lock_minlocks" }
+ { "set_lk_detect" "db_lock_minwrite" }
+ { "set_lk_detect" "db_lock_oldest" }
+ { "set_lk_detect" "db_lock_random" }
+ { "set_lk_detect" "db_lock_youngest" }
+ { "set_lk_max" "50" }
+ { "set_lk_max_lockers" "1500" }
+ { "set_lk_max_locks" "29" }
+ { "set_lk_max_objects" "1500" }
+ { "set_lock_timeout" "100" }
+ { "set_mp_mmapsize" "12582912" }
+ { "set_region_init" "1" }
+ { "set_shm_key" "15" }
+ { "set_tas_spins" "15" }
+ { "set_tmp_dir" "." }
+ { "set_tx_max" "31" }
+ { "set_txn_timeout" "100" }
+ { "set_verbose" "db_verb_chkpoint" }
+ { "set_verbose" "db_verb_deadlock" }
+ { "set_verbose" "db_verb_recovery" }
+ { "set_verbose" "db_verb_waitsfor" }
+ }
+
+ puts "\tEnv007.g: Config file settings"
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ env_cleanup $testdir
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set env [eval $e]
+ error_check_good envvalid:1 [is_valid_env $env] TRUE
+ error_check_good envclose:1 [$env close] 0
+ }
+
+ set cfglist {
+ { "set_cachesize" "1048576" }
+ { "set_flags" "db_xxx" }
+ { "set_flags" "1" }
+ { "set_flags" "db_txn_nosync x" }
+ { "set_lg_bsize" "db_xxx" }
+ { "set_lg_max" "db_xxx" }
+ { "set_lg_regionmax" "db_xxx" }
+ { "set_lk_detect" "db_xxx" }
+ { "set_lk_detect" "1" }
+ { "set_lk_detect" "db_lock_youngest x" }
+ { "set_lk_max" "db_xxx" }
+ { "set_lk_max_locks" "db_xxx" }
+ { "set_lk_max_lockers" "db_xxx" }
+ { "set_lk_max_objects" "db_xxx" }
+ { "set_mp_mmapsize" "db_xxx" }
+ { "set_region_init" "db_xxx" }
+ { "set_shm_key" "db_xxx" }
+ { "set_tas_spins" "db_xxx" }
+ { "set_tx_max" "db_xxx" }
+ { "set_verbose" "db_xxx" }
+ { "set_verbose" "1" }
+ { "set_verbose" "db_verb_recovery x" }
+ }
+ puts "\tEnv007.h: Config value errors"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "incorrect arguments for name-value pair"] 1
+ }
+
+ puts "\tEnv007.i: Config name error set_xxx"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ env007_make_config "set_xxx" 1
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "unrecognized name-value pair"] 1
+}
+
+proc env007_check { env statcmd statstr testval } {
+ set stat [$env $statcmd]
+ set checked 0
+ foreach statpair $stat {
+ if {$checked == 1} {
+ break
+ }
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $statstr] != 0} {
+ set checked 1
+ error_check_good $statstr:ck $statval $testval
+ }
+ }
+ error_check_good $statstr:test $checked 1
+}
+
+proc env007_make_config { carg cval } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "$carg $cval"
+ close $cid
+}
diff --git a/storage/bdb/test/env008.tcl b/storage/bdb/test/env008.tcl
new file mode 100644
index 00000000000..dccdb41f612
--- /dev/null
+++ b/storage/bdb/test/env008.tcl
@@ -0,0 +1,73 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env008.tcl,v 11.6 2002/02/22 14:29:34 sandstro Exp $
+#
+# TEST env008
+# TEST Test environments and subdirectories.
+proc env008 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ env_cleanup $testdir
+
+ set subdir 1/1
+ set subdir1 1/2
+ file mkdir $testdir/$subdir $testdir/$subdir1
+ set testfile $subdir/env.db
+
+ puts "Env008: Test of environments and subdirectories."
+
+ puts "\tEnv008.a: Create env and db."
+ set env [berkdb_env -create -mode 0644 -home $testdir -txn]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tEnv008.b: Remove db in subdir."
+ env008_db $env $testfile
+ error_check_good dbremove:$testfile \
+ [berkdb dbremove -env $env $testfile] 0
+
+ #
+ # Rather than remaking the db every time for the renames
+ # just move around the new file name to another new file
+ # name.
+ #
+ puts "\tEnv008.c: Rename db in subdir."
+ env008_db $env $testfile
+ set newfile $subdir/new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.d: Rename db to parent dir."
+ set newfile $subdir/../new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.e: Rename db to child dir."
+ set newfile $subdir/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.f: Rename db to another dir."
+ set newfile $subdir1/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+
+ error_check_good envclose [$env close] 0
+ puts "\tEnv008 complete."
+}
+
+proc env008_db { env testfile } {
+ set db [berkdb_open -env $env -create -btree $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db put key data]
+ error_check_good dbput $ret 0
+ error_check_good dbclose [$db close] 0
+}
diff --git a/storage/bdb/test/env009.tcl b/storage/bdb/test/env009.tcl
new file mode 100644
index 00000000000..264d5e2dfec
--- /dev/null
+++ b/storage/bdb/test/env009.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env009.tcl,v 11.5 2002/08/12 20:40:36 sandstro Exp $
+#
+# TEST env009
+# TEST Test calls to all the various stat functions. We have several
+# TEST sprinkled throughout the test suite, but this will ensure that
+# TEST we run all of them at least once.
+proc env009 { } {
+ source ./include.tcl
+
+ puts "Env009: Various stat function test."
+
+ env_cleanup $testdir
+ puts "\tEnv009.a: Setting up env and a database."
+
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set dbbt [berkdb_open -create -btree $testdir/env009bt.db]
+ error_check_good dbopen [is_valid_db $dbbt] TRUE
+ set dbh [berkdb_open -create -hash $testdir/env009h.db]
+ error_check_good dbopen [is_valid_db $dbh] TRUE
+ set dbq [berkdb_open -create -btree $testdir/env009q.db]
+ error_check_good dbopen [is_valid_db $dbq] TRUE
+
+ set rlist {
+ { "lock_stat" "Maximum locks" "Env009.b"}
+ { "log_stat" "Magic" "Env009.c"}
+ { "mpool_stat" "Number of caches" "Env009.d"}
+ { "txn_stat" "Max Txns" "Env009.e"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set str [lindex $pair 1]
+ set msg [lindex $pair 2]
+ puts "\t$msg: $cmd"
+ set ret [$e $cmd]
+ error_check_good $cmd [is_substr $ret $str] 1
+ }
+ puts "\tEnv009.f: btree stats"
+ set ret [$dbbt stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ puts "\tEnv009.g: hash stats"
+ set ret [$dbh stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ puts "\tEnv009.f: queue stats"
+ set ret [$dbq stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ error_check_good dbclose [$dbbt close] 0
+ error_check_good dbclose [$dbh close] 0
+ error_check_good dbclose [$dbq close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/storage/bdb/test/env010.tcl b/storage/bdb/test/env010.tcl
new file mode 100644
index 00000000000..4444e34e439
--- /dev/null
+++ b/storage/bdb/test/env010.tcl
@@ -0,0 +1,49 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env010.tcl,v 1.4 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env010
+# TEST Run recovery in an empty directory, and then make sure we can still
+# TEST create a database in that directory.
+proc env010 { } {
+ source ./include.tcl
+
+ puts "Env010: Test of recovery in an empty directory."
+
+ # Create a new directory used only for this test
+
+ if { [file exists $testdir/EMPTYDIR] != 1 } {
+ file mkdir $testdir/EMPTYDIR
+ } else {
+ puts "\nDirectory already exists."
+ }
+
+ # Do the test twice, for regular recovery and catastrophic
+ # Open environment and recover, but don't create a database
+
+ foreach rmethod {recover recover_fatal} {
+
+ puts "\tEnv010: Creating env for $rmethod test."
+ env_cleanup $testdir/EMPTYDIR
+ set e [berkdb_env -create -home $testdir/EMPTYDIR -$rmethod]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ # Open and close a database
+ # The method doesn't matter, so picked btree arbitrarily
+
+ set db [eval {berkdb_open -env $e \
+ -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ # Close environment
+
+ error_check_good envclose [$e close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/EMPTYDIR] 0
+ }
+ puts "\tEnv010 complete."
+}
diff --git a/storage/bdb/test/env011.tcl b/storage/bdb/test/env011.tcl
new file mode 100644
index 00000000000..4061bb3fe51
--- /dev/null
+++ b/storage/bdb/test/env011.tcl
@@ -0,0 +1,39 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env011.tcl,v 1.2 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env011
+# TEST Run with region overwrite flag.
+proc env011 { } {
+ source ./include.tcl
+
+ puts "Env011: Test of region overwriting."
+ env_cleanup $testdir
+
+ puts "\tEnv011: Creating/closing env for open test."
+ set e [berkdb_env -create -overwrite -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [eval \
+ {berkdb_open -auto_commit -env $e -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [eval {$db put} -auto_commit "aaa" "data"]
+ error_check_good put $ret 0
+ set ret [eval {$db put} -auto_commit "bbb" "data"]
+ error_check_good put $ret 0
+ error_check_good db_close [$db close] 0
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Opening the environment with overwrite set."
+ set e [berkdb_env -create -overwrite -home $testdir -txn -recover]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Removing the environment with overwrite set."
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir -overwrite] 0
+
+ puts "\tEnv011 complete."
+}
diff --git a/storage/bdb/test/hsearch.tcl b/storage/bdb/test/hsearch.tcl
new file mode 100644
index 00000000000..afeed93f74e
--- /dev/null
+++ b/storage/bdb/test/hsearch.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: hsearch.tcl,v 11.9 2002/01/11 15:53:24 bostic Exp $
+#
+# Historic Hsearch interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
+proc hsearch { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "HSEARCH interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ error_check_good hcreate [berkdb hcreate $nentries] 0
+ set did [open $dict]
+ set count 0
+
+ puts "\tHSEARCH.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ret [berkdb hsearch $str $str enter]
+ error_check_good hsearch:enter $ret 0
+
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+
+ puts "\tHSEARCH.b: re-get loop"
+ set did [open $dict]
+ # Here is the loop where we retrieve each key
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+ error_check_good hdestroy [berkdb hdestroy] 0
+}
diff --git a/storage/bdb/test/join.tcl b/storage/bdb/test/join.tcl
new file mode 100644
index 00000000000..87b0d1fae58
--- /dev/null
+++ b/storage/bdb/test/join.tcl
@@ -0,0 +1,455 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: join.tcl,v 11.21 2002/02/20 17:08:22 sandstro Exp $
+#
+# TEST jointest
+# TEST Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+# TEST with differing index orders and selectivity.
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those
+# TEST work, everything else does as well. We'll create test databases
+# TEST called join1.db, join2.db, join3.db, and join4.db. The number on
+# TEST the database describes the duplication -- duplicates are of the
+# TEST form 0, N, 2N, 3N, ... where N is the number of the database.
+# TEST Primary.db is the primary database, and null.db is the database
+# TEST that has no matching duplicates.
+# TEST
+# TEST We should test this on all btrees, all hash, and a combination thereof
+proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
+ global testdir
+ global rand_init
+ source ./include.tcl
+
+ env_cleanup $testdir
+ berkdb srand $rand_init
+
+ # Use one environment for all database opens so we don't
+ # need oodles of regions.
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # With the new offpage duplicate code, we don't support
+ # duplicate duplicates in sorted dup sets. Thus, if with_dup_dups
+ # is greater than one, run only with "-dup".
+ if { $with_dup_dups > 1 } {
+ set doptarray {"-dup"}
+ } else {
+ set doptarray {"-dup -dupsort" "-dup" RANDOMMIX RANDOMMIX }
+ }
+
+ # NB: these flags are internal only, ok
+ foreach m "DB_BTREE DB_HASH DB_BOTH" {
+ # run with two different random mixes.
+ foreach dopt $doptarray {
+ set opt [list "-env" $env $dopt]
+
+ puts "Join test: ($m $dopt) psize $psize,\
+ $with_dup_dups dup\
+ dups, flags $flags."
+
+ build_all $m $psize $opt oa $with_dup_dups
+
+ # null.db is db_built fifth but is referenced by
+ # zero; set up the option array appropriately.
+ set oa(0) $oa(5)
+
+ # Build the primary
+ puts "\tBuilding the primary database $m"
+ set oflags "-create -truncate -mode 0644 -env $env\
+ [conv $m [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ set key [format "%04d" $i]
+ set ret [$db put $key stub]
+ error_check_good "primary put" $ret 0
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join primary.db "1 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "0 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 0 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 2 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups "3" $str2
+
+ # You really don't want to run this section
+ # with $with_dup_dups > 2.
+ if { $with_dup_dups <= 2 } {
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2 3" $str\
+ oa $flags $with_dup_dups "3 3 1" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "4 0 2" $str\
+ oa $flags $with_dup_dups "4 3 3" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "3 2 1" $str\
+ oa $flags $with_dup_dups "0 2" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "1 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "0 0 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "2 4 4" $str
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "0 0 4 4" $str
+ }
+ close $did
+ }
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc build_all { method psize opt oaname with_dup_dups {nentries 100} } {
+ global testdir
+ db_build join1.db $nentries 50 1 [conv $method 1]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join2.db $nentries 25 2 [conv $method 2]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join3.db $nentries 16 3 [conv $method 3]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join4.db $nentries 12 4 [conv $method 4]\
+ $psize $opt $oaname $with_dup_dups
+ db_build null.db $nentries 0 5 [conv $method 5]\
+ $psize $opt $oaname $with_dup_dups
+}
+
+proc conv { m i } {
+ switch -- $m {
+ DB_HASH { return "-hash"}
+ "-hash" { return "-hash"}
+ DB_BTREE { return "-btree"}
+ "-btree" { return "-btree"}
+ DB_BOTH {
+ if { [expr $i % 2] == 0 } {
+ return "-hash";
+ } else {
+ return "-btree";
+ }
+ }
+ }
+}
+
+proc random_opts { } {
+ set j [berkdb random_int 0 1]
+ if { $j == 0 } {
+ return " -dup"
+ } else {
+ return " -dup -dupsort"
+ }
+}
+
+proc db_build { name nkeys ndups dup_interval method psize lopt oaname \
+ with_dup_dups } {
+ source ./include.tcl
+
+ # Get array of arg names (from two levels up the call stack)
+ upvar 2 $oaname oa
+
+ # Search for "RANDOMMIX" in $opt, and if present, replace
+ # with " -dup" or " -dup -dupsort" at random.
+ set i [lsearch $lopt RANDOMMIX]
+ if { $i != -1 } {
+ set lopt [lreplace $lopt $i $i [random_opts]]
+ }
+
+ # Save off db_open arguments for this database.
+ set opt [eval concat $lopt]
+ set oa($dup_interval) $opt
+
+ # Create the database and open the dictionary
+ set oflags "-create -truncate -mode 0644 $method\
+ -pagesize $psize"
+ set db [eval {berkdb_open} $oflags $opt $name]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ puts -nonewline "\tBuilding $name: $nkeys keys "
+ puts -nonewline "with $ndups duplicates at interval of $dup_interval"
+ if { $with_dup_dups > 0 } {
+ puts ""
+ puts "\t\tand $with_dup_dups duplicate duplicates."
+ } else {
+ puts "."
+ }
+ for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+ incr count} {
+ set str $str$name
+ # We need to make sure that the dups are inserted in a
+ # random, or near random, order. Do this by generating
+ # them and putting each in a list, then sorting the list
+ # at random.
+ set duplist {}
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set data [format "%04d" [expr $i * $dup_interval]]
+ lappend duplist $data
+ }
+ # randomize the list
+ for { set i 0 } { $i < $ndups } {incr i } {
+ # set j [berkdb random_int $i [expr $ndups - 1]]
+ set j [expr ($i % 2) + $i]
+ if { $j >= $ndups } { set j $i }
+ set dupi [lindex $duplist $i]
+ set dupj [lindex $duplist $j]
+ set duplist [lreplace $duplist $i $i $dupj]
+ set duplist [lreplace $duplist $j $j $dupi]
+ }
+ foreach data $duplist {
+ if { $with_dup_dups != 0 } {
+ for { set j 0 }\
+ { $j < $with_dup_dups }\
+ {incr j} {
+ set ret [$db put $str $data]
+ error_check_good put$j $ret 0
+ }
+ } else {
+ set ret [$db put $str $data]
+ error_check_good put $ret 0
+ }
+ }
+
+ if { $ndups == 0 } {
+ set ret [$db put $str NODUP]
+ error_check_good put $ret 0
+ }
+ }
+ close $did
+ error_check_good close:$name [$db close] 0
+}
+
+proc do_join { primary dbs key oanm flags with_dup_dups {dbs2 ""} {key2 ""} } {
+ global testdir
+ source ./include.tcl
+
+ upvar $oanm oa
+
+ puts -nonewline "\tJoining: $dbs on $key"
+ if { $dbs2 == "" } {
+ puts ""
+ } else {
+ puts " with $dbs2 on $key2"
+ }
+
+ # Open all the databases
+ set p [berkdb_open -unknown $testdir/$primary]
+ error_check_good "primary open" [is_valid_db $p] TRUE
+
+ set dblist ""
+ set curslist ""
+
+ set ndx [llength $dbs]
+
+ foreach i [concat $dbs $dbs2] {
+ set opt $oa($i)
+ set db [eval {berkdb_open -unknown} $opt [n_to_name $i]]
+ error_check_good "[n_to_name $i] open" [is_valid_db $db] TRUE
+ set curs [$db cursor]
+ error_check_good "$db cursor" \
+ [is_substr $curs "$db.c"] 1
+ lappend dblist $db
+ lappend curslist $curs
+
+ if { $ndx > 0 } {
+ set realkey [concat $key[n_to_name $i]]
+ } else {
+ set realkey [concat $key2[n_to_name $i]]
+ }
+
+ set pair [$curs get -set $realkey]
+ error_check_good cursor_set:$realkey:$pair \
+ [llength [lindex $pair 0]] 2
+
+ incr ndx -1
+ }
+
+ set join_curs [eval {$p join} $curslist]
+ error_check_good join_cursor \
+ [is_substr $join_curs "$p.c"] 1
+
+ # Calculate how many dups we expect.
+ # We go through the list of indices. If we find a 0, then we
+ # expect 0 dups. For everything else, we look at pairs of numbers,
+ # if the are relatively prime, multiply them and figure out how
+ # many times that goes into 50. If they aren't relatively prime,
+ # take the number of times the larger goes into 50.
+ set expected 50
+ set last 1
+ foreach n [concat $dbs $dbs2] {
+ if { $n == 0 } {
+ set expected 0
+ break
+ }
+ if { $last == $n } {
+ continue
+ }
+
+ if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+ if { $n > $last } {
+ set last $n
+ set expected [expr 50 / $last]
+ }
+ } else {
+ set last [expr $n * $last / [gcd $n $last]]
+ set expected [expr 50 / $last]
+ }
+ }
+
+ # If $with_dup_dups is greater than zero, each datum has
+ # been inserted $with_dup_dups times. So we expect the number
+ # of dups to go up by a factor of ($with_dup_dups)^(number of databases)
+
+ if { $with_dup_dups > 0 } {
+ foreach n [concat $dbs $dbs2] {
+ set expected [expr $expected * $with_dup_dups]
+ }
+ }
+
+ set ndups 0
+ if { $flags == " -join_item"} {
+ set l 1
+ } else {
+ set flags ""
+ set l 2
+ }
+ for { set pair [eval {$join_curs get} $flags] } { \
+ [llength [lindex $pair 0]] == $l } {
+ set pair [eval {$join_curs get} $flags] } {
+ set k [lindex [lindex $pair 0] 0]
+ foreach i $dbs {
+ error_check_bad valid_dup:$i:$dbs $i 0
+ set kval [string trimleft $k 0]
+ if { [string length $kval] == 0 } {
+ set kval 0
+ }
+ error_check_good valid_dup:$i:$dbs [expr $kval % $i] 0
+ }
+ incr ndups
+ }
+ error_check_good number_of_dups:$dbs $ndups $expected
+
+ error_check_good close_primary [$p close] 0
+ foreach i $curslist {
+ error_check_good close_cursor:$i [$i close] 0
+ }
+ foreach i $dblist {
+ error_check_good close_index:$i [$i close] 0
+ }
+}
+
+proc n_to_name { n } {
+global testdir
+ if { $n == 0 } {
+ return null.db;
+ } else {
+ return join$n.db;
+ }
+}
+
+proc gcd { a b } {
+ set g 1
+
+ for { set i 2 } { $i <= $a } { incr i } {
+ if { [expr $a % $i] == 0 && [expr $b % $i] == 0 } {
+ set g $i
+ }
+ }
+ return $g
+}
diff --git a/storage/bdb/test/lock001.tcl b/storage/bdb/test/lock001.tcl
new file mode 100644
index 00000000000..1afcc471fc1
--- /dev/null
+++ b/storage/bdb/test/lock001.tcl
@@ -0,0 +1,122 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock001.tcl,v 11.19 2002/04/25 19:30:28 sue Exp $
+#
+
+# TEST lock001
+# TEST Make sure that the basic lock tests work. Do some simple gets
+# TEST and puts for a single locker.
+proc lock001 { {iterations 1000} {maxlocks 1000} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ # Set defaults
+ # Adjusted to make exact match of isqrt
+ #set conflicts { 3 0 0 0 0 0 1 0 1 1}
+ #set conflicts { 3 0 0 0 0 1 0 1 1}
+
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set nmodes [isqrt [llength $conflicts]]
+
+ # Cleanup
+ env_cleanup $testdir
+
+ # Open the region we'll use for testing.
+ set eflags "-create -lock -home $testdir -mode 0644 \
+ -lock_max $maxlocks -lock_conflict {$nmodes {$conflicts}}"
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+ error_check_good lock_id_set \
+ [$env lock_id_set $lock_curid $lock_maxid] 0
+
+ puts "Lock001: test basic lock operations"
+ set locker [$env lock_id]
+ # Get and release each type of lock
+ puts "\tLock001.a: get and release each type of lock"
+ foreach m {ng write read} {
+ set obj obj$m
+ set lockp [$env lock_get $m $locker $obj]
+ error_check_good lock_get:a [is_blocked $lockp] 0
+ error_check_good lock_get:a [is_substr $lockp $env] 1
+ set ret [ $lockp put ]
+ error_check_good lock_put $ret 0
+ }
+
+ # Get a bunch of locks for the same locker; these should work
+ set obj OBJECT
+ puts "\tLock001.b: Get a bunch of locks for the same locker"
+ foreach m {ng write read} {
+ set lockp [$env lock_get $m $locker $obj ]
+ lappend locklist $lockp
+ error_check_good lock_get:b [is_blocked $lockp] 0
+ error_check_good lock_get:b [is_substr $lockp $env] 1
+ }
+ release_list $locklist
+
+ set locklist {}
+ # Check that reference counted locks work
+ puts "\tLock001.c: reference counted locks."
+ for {set i 0} { $i < 10 } {incr i} {
+ set lockp [$env lock_get -nowait write $locker $obj]
+ error_check_good lock_get:c [is_blocked $lockp] 0
+ error_check_good lock_get:c [is_substr $lockp $env] 1
+ lappend locklist $lockp
+ }
+ release_list $locklist
+
+ # Finally try some failing locks
+ set locklist {}
+ foreach i {ng write read} {
+ set lockp [$env lock_get $i $locker $obj]
+ lappend locklist $lockp
+ error_check_good lock_get:d [is_blocked $lockp] 0
+ error_check_good lock_get:d [is_substr $lockp $env] 1
+ }
+
+ # Change the locker
+ set locker [$env lock_id]
+ set blocklist {}
+ # Skip NO_LOCK lock.
+ puts "\tLock001.d: Change the locker, acquire read and write."
+ foreach i {write read} {
+ catch {$env lock_get -nowait $i $locker $obj} ret
+ error_check_good lock_get:e [is_substr $ret "not granted"] 1
+ #error_check_good lock_get:e [is_substr $lockp $env] 1
+ #error_check_good lock_get:e [is_blocked $lockp] 0
+ }
+ # Now release original locks
+ release_list $locklist
+
+ # Now re-acquire blocking locks
+ set locklist {}
+ puts "\tLock001.e: Re-acquire blocking locks."
+ foreach i {write read} {
+ set lockp [$env lock_get -nowait $i $locker $obj ]
+ error_check_good lock_get:f [is_substr $lockp $env] 1
+ error_check_good lock_get:f [is_blocked $lockp] 0
+ lappend locklist $lockp
+ }
+
+ # Now release new locks
+ release_list $locklist
+ error_check_good free_id [$env lock_id_free $locker] 0
+
+ error_check_good envclose [$env close] 0
+
+}
+
+# Blocked locks appear as lockmgrN.lockM\nBLOCKED
+proc is_blocked { l } {
+ if { [string compare $l BLOCKED ] == 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
diff --git a/storage/bdb/test/lock002.tcl b/storage/bdb/test/lock002.tcl
new file mode 100644
index 00000000000..a1ad8760c9d
--- /dev/null
+++ b/storage/bdb/test/lock002.tcl
@@ -0,0 +1,157 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock002.tcl,v 11.19 2002/04/25 19:30:29 sue Exp $
+#
+# TEST lock002
+# TEST Exercise basic multi-process aspects of lock.
+proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
+ # maxlocks:  maximum number of locks for the lock region.
+ # conflicts: flattened square conflict matrix; its side length
+ #            (number of lock modes) is recovered with isqrt below.
+ source ./include.tcl
+
+ puts "Lock002: Basic multi-process lock tests."
+
+ env_cleanup $testdir
+
+ set nmodes [isqrt [llength $conflicts]]
+
+ # Open the lock
+ mlock_open $maxlocks $nmodes $conflicts
+ mlock_wait
+}
+
+# Make sure that we can create a region; destroy it, attach to it,
+# detach from it, etc.
+proc mlock_open { maxl nmodes conflicts } {
+ # maxl:      -lock_max value for the environment.
+ # nmodes:    number of lock modes (side of the conflict matrix).
+ # conflicts: flattened nmodes x nmodes conflict matrix.
+ # Exercises create/open/close of the lock region from this process
+ # and from a child tclsh spawned through a pipe ($f1).
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ puts "\tLock002.a multi-process open/close test"
+
+ # Open/Create region here. Then close it and try to open from
+ # other test process.
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
+ -lock -lock_max $maxl -lock_conflict" \
+ [list [list $nmodes $conflicts]] "-home $testdir"]
+ set local_env [eval $env_cmd]
+ $local_env lock_id_set $lock_curid $lock_maxid
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Open from other test process
+ set env_cmd "berkdb_env -mode 0644 -home $testdir"
+
+ # Spawn a child tclsh and drive it through the pipe with send_cmd.
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Now make sure that we can reopen the region.
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Try closing the remote region
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:lock_close $ret 0
+
+ # Try opening for create. Will succeed because region exists.
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
+ -lock -lock_max $maxl -lock_conflict" \
+ [list [list $nmodes $conflicts]] "-home $testdir"]
+ set local_env [eval $env_cmd]
+ error_check_good remote:env_open [is_valid_env $local_env] TRUE
+
+ # close locally
+ reset_env $local_env
+
+ # Close and exit remote
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ # Child may already have exited; ignore errors closing the pipe.
+ catch { close $f1 } result
+}
+
+proc mlock_wait { } {
+ # Verify blocking behavior across processes: a write lock held in
+ # one process must make the other process's conflicting request
+ # wait.  Waiting is detected via elapsed-time measurements, so the
+ # thresholds (>4, >2 seconds) assume the sleeps below actually ran.
+ source ./include.tcl
+
+ puts "\tLock002.b multi-process get/put wait test"
+
+ # Open region locally
+ set env_cmd "berkdb_env -lock -home $testdir"
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ # Open region remotely
+ set f1 [open |$tclsh_path r+]
+
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Get a write lock locally; try for the read lock
+ # remotely. We hold the locks for several seconds
+ # so that we can use timestamps to figure out if the
+ # other process waited.
+ set locker1 [$local_env lock_id]
+ set local_lock [$local_env lock_get write $locker1 object1]
+ error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now request a lock that we expect to hang; generate
+ # timestamps so we can tell if it actually hangs.
+ set locker2 [send_cmd $f1 "$remote_env lock_id"]
+ set remote_lock [send_timed_cmd $f1 1 \
+ "set lock \[$remote_env lock_get write $locker2 object1\]"]
+
+ # Now sleep before releasing lock
+ tclsleep 5
+ set result [$local_lock put]
+ error_check_good lock_put $result 0
+
+ # Now get the result from the other script
+ set result [rcv_result $f1]
+ # Remote must have blocked for roughly the 5 s we held the lock.
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Now get the remote lock
+ set remote_lock [send_cmd $f1 "puts \$lock"]
+ error_check_good remote:lock_get \
+ [is_valid_lock $remote_lock $remote_env] TRUE
+
+ # Now make the other guy wait 5 second and then release his
+ # lock while we try to get a write lock on it
+ set start [timestamp -r]
+
+ set ret [send_cmd $f1 "tclsleep 5"]
+
+ set ret [send_cmd $f1 "$remote_lock put"]
+
+ set local_lock [$local_env lock_get write $locker1 object1]
+ error_check_good lock_get:time \
+ [expr [expr [timestamp -r] - $start] > 2] 1
+ error_check_good lock_get:local \
+ [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now check remote's result
+ set result [rcv_result $f1]
+ error_check_good lock_put:remote $result 0
+
+ # Clean up remote
+ set result [send_cmd $f1 "$remote_env lock_id_free $locker2" ]
+ error_check_good remote_free_id $result 0
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ close $f1
+
+ # Now close up locally
+ set ret [$local_lock put]
+ error_check_good lock_put $ret 0
+ error_check_good lock_id_free [$local_env lock_id_free $locker1] 0
+
+ reset_env $local_env
+}
diff --git a/storage/bdb/test/lock003.tcl b/storage/bdb/test/lock003.tcl
new file mode 100644
index 00000000000..91a8a2e90f6
--- /dev/null
+++ b/storage/bdb/test/lock003.tcl
@@ -0,0 +1,99 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock003.tcl,v 11.25 2002/09/05 17:23:06 sandstro Exp $
+#
+# TEST lock003
+# TEST Exercise multi-process aspects of lock. Generate a bunch of parallel
+# TEST testers that try to randomly obtain locks; make sure that the locks
+# TEST correctly protect corresponding objects.
+proc lock003 { {iter 500} {max 1000} {procs 5} } {
+ # iter:  iterations each child process runs.
+ # max:   declared maximum locks (not passed to the env below).
+ # procs: number of parallel lockscript.tcl child processes.
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set ldegree 5
+ set objs 75
+ set reads 65
+ set wait 1
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ # seeds is empty by default; only used if exactly $procs seeds given.
+ set seeds {}
+
+ puts "Lock003: Multi-process random lock test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ puts "\tLock003.a: Create environment"
+ set e [berkdb_env -create -lock -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+ $e lock_id_set $lock_curid $lock_maxid
+
+ error_check_good env_close [$e close] 0
+
+ # Now spawn off processes
+ set pidlist {}
+
+ for { set i 0 } {$i < $procs} {incr i} {
+ # NOTE(review): $s is computed but never passed to the child.
+ if { [llength $seeds] == $procs } {
+ set s [lindex $seeds $i]
+ }
+# puts "$tclsh_path\
+# $test_path/wrap.tcl \
+# lockscript.tcl $testdir/$i.lockout\
+# $testdir $iter $objs $wait $ldegree $reads &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ lockscript.tcl $testdir/lock003.$i.out \
+ $testdir $iter $objs $wait $ldegree $reads &]
+ lappend pidlist $p
+ }
+
+ puts "\tLock003.b: $procs independent processes now running"
+ watch_procs $pidlist 30 10800
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/lock003.*.out]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Remove log files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $testdir/lock003.$i.out
+ }
+}
+
+# Create and destroy flag files to show we have an object locked, and
+# verify that the correct files exist or don't exist given that we've
+# just read or write locked a file.
+proc lock003_create { rw obj } {
+ source ./include.tcl
+
+ # Create an empty flag file named L3FLAG.<rw>.<pid>.<obj> to
+ # advertise that this process holds an $rw lock on $obj.
+ set pref $testdir/L3FLAG
+ set f [open $pref.$rw.[pid].$obj w]
+ close $f
+}
+
+proc lock003_destroy { obj } {
+ source ./include.tcl
+
+ # Remove this process's flag file for $obj; exactly one must exist
+ # (we never hold two locks on the same object at once).
+ set pref $testdir/L3FLAG
+ set f [glob -nocomplain $pref.*.[pid].$obj]
+ error_check_good l3_destroy [llength $f] 1
+ fileremove $f
+}
+
+proc lock003_vrfy { rw obj } {
+ source ./include.tcl
+
+ # Verify flag-file invariants for the lock we just acquired:
+ # write lock -> no other flag files at all for $obj;
+ # read lock  -> no write flag files for $obj (other reads are OK).
+ set pref $testdir/L3FLAG
+ if { [string compare $rw "write"] == 0 } {
+ set fs [glob -nocomplain $pref.*.*.$obj]
+ error_check_good "number of other locks on $obj" [llength $fs] 0
+ } else {
+ set fs [glob -nocomplain $pref.write.*.$obj]
+ error_check_good "number of write locks on $obj" [llength $fs] 0
+ }
+}
+
diff --git a/storage/bdb/test/lock004.tcl b/storage/bdb/test/lock004.tcl
new file mode 100644
index 00000000000..7fd51ee42f2
--- /dev/null
+++ b/storage/bdb/test/lock004.tcl
@@ -0,0 +1,29 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock004.tcl,v 11.5 2002/04/25 19:30:30 sue Exp $
+#
+# TEST lock004
+# TEST Test locker ids wrapping around.
+
+proc lock004 {} {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ # Save the globals so we can restore them after the wraparound runs.
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ # Start the id space one below the maximum so the very first
+ # allocations force locker-id wraparound in lock001-lock003.
+ set lock_curid [expr $lock_maxid - 1]
+ puts "Lock004: Locker id wraparound test"
+ puts "\tLock004.a: repeat lock001-lock003 with wraparound lockids"
+
+ lock001
+ lock002
+ lock003
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
diff --git a/storage/bdb/test/lock005.tcl b/storage/bdb/test/lock005.tcl
new file mode 100644
index 00000000000..5afe7344d36
--- /dev/null
+++ b/storage/bdb/test/lock005.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock005.tcl,v 1.7 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST lock005
+# TEST Check that page locks are being released properly.
+
+proc lock005 { } {
+ source ./include.tcl
+
+ puts "Lock005: Page lock release test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ set e [berkdb_env -create -lock -home $testdir -txn -log]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create the database
+ set db [berkdb open -create -auto_commit -env $e -len 10 -queue q.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Check that records are locked by trying to
+ # fetch a record under a different (wrong) transaction.
+ puts "\tLock005.a: Verify that we are locking"
+
+ # Start the first transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Start second txn while the first is still running ...
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+
+ # ... and try to get a record from the first txn (should fail)
+ set ret [catch {$db get -txn $txn2 $recno1} res]
+ error_check_good dbget_wrong_record \
+ [is_substr $res "Lock not granted"] 1
+
+ # End transactions
+ error_check_good txn1commit [$txn1 commit] 0
+ how_many_locks 1 $e
+ error_check_good txn2commit [$txn2 commit] 0
+ # The number of locks stays the same here because the first
+ # lock is released and the second lock was never granted.
+ how_many_locks 1 $e
+
+ # Test lock behavior for both abort and commit
+ puts "\tLock005.b: Verify locks after abort or commit"
+ # All four commit/abort combinations, ended in both orders.
+ foreach endorder {forward reverse} {
+ end_order_test $db $e commit abort $endorder
+ end_order_test $db $e abort commit $endorder
+ end_order_test $db $e commit commit $endorder
+ end_order_test $db $e abort abort $endorder
+ }
+
+ # Clean up
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
+
+proc end_order_test { db e txn1end txn2end endorder } {
+ # db/e:      open database and environment handles.
+ # txn1end:   "commit" or "abort" for the first transaction.
+ # txn2end:   "commit" or "abort" for the second transaction.
+ # endorder:  "forward" (end txn1 first) or "reverse" (txn2 first).
+ # Verifies lock counts and record visibility at each step.
+ # Start one transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Check number of locks
+ how_many_locks 2 $e
+
+ # Start a second transaction while first is still running
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+ set ret [catch {$db put -txn $txn2 -append record2} recno2]
+ error_check_good dbput_txn2 $ret 0
+ how_many_locks 3 $e
+
+ # Now commit or abort one txn and make sure the other is okay
+ if {$endorder == "forward"} {
+ # End transaction 1 first
+ puts "\tLock005.b.1: $txn1end txn1 then $txn2end txn2"
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 2 $e
+
+ # txn1 is now ended, but txn2 is still running
+ set ret1 [catch {$db get -txn $txn2 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn2 $recno2} res2]
+ if { $txn1end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret1 0
+ error_check_good txn2_sees_txn2 $ret2 0
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn2_cantsee_txn1 [llength $res1] 0
+ }
+
+ # End transaction 2 second
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn2end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret3 0
+ error_check_good txn2_sees_txn2 $ret4 0
+ error_check_good txn2_has_record2 \
+ [is_substr $res4 "record2"] 1
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn2_cantsee_txn1 $ret3 0
+ error_check_good txn2_aborted [llength $res4] 0
+ }
+
+ } elseif { $endorder == "reverse" } {
+ # End transaction 2 first
+ puts "\tLock005.b.2: $txn2end txn2 then $txn1end txn1"
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 2 $e
+
+ # txn2 is ended, but txn1 is still running
+ set ret1 [catch {$db get -txn $txn1 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn1 $recno2} res2]
+ if { $txn2end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret1 0
+ error_check_good txn1_sees_txn2 $ret2 0
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn1_cantsee_txn2 [llength $res2] 0
+ }
+
+ # End transaction 1 second
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn1end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret3 0
+ error_check_good txn1_sees_txn2 $ret4 0
+ error_check_good txn1_has_record1 \
+ [is_substr $res3 "record1"] 1
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn1_cantsee_txn2 $ret4 0
+ error_check_good txn1_aborted [llength $res3] 0
+ }
+ }
+}
+
+proc how_many_locks { expected env } {
+ # Assert that lock_stat's "Current number of locks" equals
+ # $expected; also fails if the stat field cannot be found.
+ set stat [$env lock_stat]
+ set str "Current number of locks"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ set nlocks [lindex $statpair 1]
+ error_check_good expected_nlocks $nlocks $expected
+ }
+ }
+ error_check_good checked $checked 1
+}
diff --git a/storage/bdb/test/lockscript.tcl b/storage/bdb/test/lockscript.tcl
new file mode 100644
index 00000000000..812339a4a70
--- /dev/null
+++ b/storage/bdb/test/lockscript.tcl
@@ -0,0 +1,117 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lockscript.tcl,v 11.17 2002/02/20 17:08:23 sandstro Exp $
+#
+# Random lock tester.
+# Usage: lockscript dir numiters numobjs sleepint degree readratio
+# dir: lock directory.
+# numiters: Total number of iterations.
+# numobjs: Number of objects on which to lock.
+# sleepint: Maximum sleep interval.
+# degree: Maximum number of locks to acquire at once
+# readratio: Percent of locks that should be reads.
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "lockscript dir numiters numobjs sleepint degree readratio"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set numiters [ lindex $argv 1 ]
+set numobjs [ lindex $argv 2 ]
+set sleepint [ lindex $argv 3 ]
+set degree [ lindex $argv 4 ]
+set readratio [ lindex $argv 5 ]
+
+# Initialize random number generator
+global rand_init
+berkdb srand $rand_init
+
+
+# Join (or create) the lock region and allocate our locker id.
+catch { berkdb_env -create -lock -home $dir } e
+error_check_good env_open [is_substr $e env] 1
+catch { $e lock_id } locker
+error_check_good locker [is_valid_locker $locker] TRUE
+
+puts -nonewline "Beginning execution for $locker: $numiters $numobjs "
+puts "$sleepint $degree $readratio"
+flush stdout
+
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ set nlocks [berkdb random_int 1 $degree]
+ # We will always lock objects in ascending order to avoid
+ # deadlocks.
+ set lastobj 1
+ set locklist {}
+ set objlist {}
+ for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
+ # Pick lock parameters
+ set obj [berkdb random_int $lastobj $numobjs]
+ set lastobj [expr $obj + 1]
+ set x [berkdb random_int 1 100 ]
+ if { $x <= $readratio } {
+ set rw read
+ } else {
+ set rw write
+ }
+ puts "[timestamp -c] $locker $lnum: $rw $obj"
+
+ # Do get; add to list
+ catch {$e lock_get $rw $locker $obj} lockp
+ error_check_good lock_get [is_valid_lock $lockp $e] TRUE
+
+ # Create a file to flag that we've a lock of the given
+ # type, after making sure only other read locks exist
+ # (if we're read locking) or no other locks exist (if
+ # we're writing).
+ lock003_vrfy $rw $obj
+ lock003_create $rw $obj
+ lappend objlist [list $obj $rw]
+
+ lappend locklist $lockp
+ # Stop early once the ascending object ids are exhausted.
+ if {$lastobj > $numobjs} {
+ break
+ }
+ }
+ # Pick sleep interval
+ puts "[timestamp -c] $locker sleeping"
+ # We used to sleep 1 to $sleepint seconds. This makes the test
+ # run for hours. Instead, make it sleep for 10 to $sleepint * 100
+ # milliseconds, for a maximum sleep time of 0.5 s.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
+ puts "[timestamp -c] $locker awake"
+
+ # Now release locks
+ # NOTE(review): this message is printed before release_list below
+ # actually releases the locks.
+ puts "[timestamp -c] $locker released locks"
+
+ # Delete our locking flag files, then reverify. (Note that the
+ # locking flag verification function assumes that our own lock
+ # is not currently flagged.)
+ foreach pair $objlist {
+ set obj [lindex $pair 0]
+ set rw [lindex $pair 1]
+ lock003_destroy $obj
+ lock003_vrfy $rw $obj
+ }
+
+ release_list $locklist
+ flush stdout
+}
+
+set ret [$e close]
+error_check_good env_close $ret 0
+
+puts "[timestamp -c] $locker Complete"
+flush stdout
+
+exit
diff --git a/storage/bdb/test/log001.tcl b/storage/bdb/test/log001.tcl
new file mode 100644
index 00000000000..87df780cb5a
--- /dev/null
+++ b/storage/bdb/test/log001.tcl
@@ -0,0 +1,120 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log001.tcl,v 11.29 2002/04/30 20:27:56 sue Exp $
+#
+
+# TEST log001
+# TEST Read/write log records.
+proc log001 { } {
+ global passwd
+ global rand_init
+
+ berkdb srand $rand_init
+ set iter 1000
+ set max [expr 1024 * 128]
+ # Run every combination: fixed/variable-length records,
+ # plain/encrypted env, and 1x / 15x record counts.
+ log001_body $max $iter 1
+ log001_body $max $iter 0
+ log001_body $max $iter 1 "-encryptaes $passwd"
+ log001_body $max $iter 0 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 1
+ log001_body $max [expr $iter * 15] 0
+ log001_body $max [expr $iter * 15] 1 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 0 "-encryptaes $passwd"
+}
+
+proc log001_body { max nrecs fixedlength {encargs ""} } {
+ # max:         -log_max for the environment.
+ # nrecs:       number of log records to write.
+ # fixedlength: 1 => deterministic records; 0 => append random data.
+ # encargs:     optional encryption args, e.g. "-encryptaes <pw>".
+ source ./include.tcl
+
+ puts -nonewline "Log001: Basic put/get log records "
+ if { $fixedlength == 1 } {
+ puts "(fixed-length $encargs)"
+ } else {
+ puts "(variable-length $encargs)"
+ }
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \
+ $encargs -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We will write records to the log and make sure we can
+ # read them back correctly. We'll use a standard pattern
+ # repeated some number of times for each record.
+ set lsn_list {}
+ set rec_list {}
+ puts "\tLog001.a: Writing $nrecs log records"
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set rec ""
+ for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
+ set rec $rec$i:logrec:$i
+ }
+ if { $fixedlength != 1 } {
+ set rec $rec:[random_data 237 0 0]
+ }
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [is_substr $lsn log_cmd] 1
+ lappend lsn_list $lsn
+ lappend rec_list $rec
+ }
+
+ # Open a log cursor.
+ set logc [$env log_cursor]
+ error_check_good logc [is_valid_logc $logc $env] TRUE
+
+ puts "\tLog001.b: Retrieving log records sequentially (forward)"
+ set i 0
+ for { set grec [$logc get -first] } { [llength $grec] != 0 } {
+ set grec [$logc get -next]} {
+ error_check_good log_get:seq [lindex $grec 1] \
+ [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.c: Retrieving log records sequentially (backward)"
+ set i [llength $rec_list]
+ for { set grec [$logc get -last] } { [llength $grec] != 0 } {
+ set grec [$logc get -prev] } {
+ incr i -1
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ }
+
+ puts "\tLog001.d: Retrieving log records sequentially by LSN"
+ set i 0
+ foreach lsn $lsn_list {
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.e: Retrieving log records randomly by LSN"
+ set m [expr [llength $lsn_list] - 1]
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set recno [berkdb random_int 0 $m ]
+ set lsn [lindex $lsn_list $recno]
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
+ }
+
+ puts "\tLog001.f: Retrieving first/current, last/current log record"
+ set grec [$logc get -first]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set i [expr [llength $rec_list] - 1]
+ set grec [$logc get -last]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
diff --git a/storage/bdb/test/log002.tcl b/storage/bdb/test/log002.tcl
new file mode 100644
index 00000000000..6e91f55398f
--- /dev/null
+++ b/storage/bdb/test/log002.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log002.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log002
+# TEST Tests multiple logs
+# TEST Log truncation
+# TEST LSN comparison and file functionality.
+proc log002 { } {
+ source ./include.tcl
+
+ puts "Log002: Multiple log test w/trunc, file, compare functionality"
+
+ env_cleanup $testdir
+
+ set max [expr 1024 * 128]
+ set env [berkdb_env -create -home $testdir -mode 0644 \
+ -log -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We'll record every hundredth record for later use
+ set info_list {}
+
+ puts "\tLog002.a: Writing log records"
+ set i 0
+ # Write ~3 log files' worth of data; $len is set in the body
+ # before the first "incr s $len" increment runs.
+ for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
+ set rec [random_data 120 0 0]
+ set len [string length $rec]
+ set lsn [$env log_put $rec]
+
+ if { [expr $i % 100 ] == 0 } {
+ lappend info_list [list $lsn $rec]
+ }
+ incr i
+ }
+
+ puts "\tLog002.b: Checking log_compare"
+ set last {0 0}
+ foreach p $info_list {
+ set l [lindex $p 0]
+ if { [llength $last] != 0 } {
+ error_check_good \
+ log_compare [$env log_compare $l $last] 1
+ error_check_good \
+ log_compare [$env log_compare $last $l] -1
+ error_check_good \
+ log_compare [$env log_compare $l $l] 0
+ }
+ set last $l
+ }
+
+ puts "\tLog002.c: Checking log_file"
+ set flist [glob $testdir/log*]
+ foreach p $info_list {
+
+ set lsn [lindex $p 0]
+ set f [$env log_file $lsn]
+
+ # Change all backslash separators on Windows to forward slash
+ # separators, which is what the rest of the test suite expects.
+ regsub -all {\\} $f {/} f
+
+ error_check_bad log_file:$f [lsearch $flist $f] -1
+ }
+
+ puts "\tLog002.d: Verifying records"
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
+ set p [lindex $info_list $i]
+ set grec [$logc get -set [lindex $p 0]]
+ error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
diff --git a/storage/bdb/test/log003.tcl b/storage/bdb/test/log003.tcl
new file mode 100644
index 00000000000..11297b59d50
--- /dev/null
+++ b/storage/bdb/test/log003.tcl
@@ -0,0 +1,118 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log003.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log003
+# TEST Verify that log_flush is flushing records correctly.
+proc log003 { } {
+ source ./include.tcl
+
+ puts "Log003: Verify log_flush behavior"
+
+ set max [expr 1024 * 128]
+ env_cleanup $testdir
+ set short_rec "abcdefghijklmnopqrstuvwxyz"
+ set long_rec [repeat $short_rec 200]
+ set very_long_rec [repeat $long_rec 4]
+
+ # Part a: flush the last (only) record, "crash", and re-read it.
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.a: Verify flush on [string length $rec] byte rec"
+
+ set env [berkdb_env -log -home $testdir \
+ -create -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ set ret [$env log_flush $lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+ set ret [$env close]
+ error_check_good log_env:close $ret 0
+
+ # Now, remove the log region
+ #set ret [berkdb envremove -home $testdir]
+ #error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env -create -home $testdir \
+ -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -first]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+
+ # Part b: flush a record that is NOT the last one written.
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.b: \
+ Verify flush on non-last record [string length $rec]"
+ set env [berkdb_env \
+ -create -log -home $testdir -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # Put 10 random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Put the record we are interested in
+ set save_lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
+
+ # Put 10 more random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Now check the flush
+ set ret [$env log_flush $save_lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+
+ #
+ # Now, close and remove the log region
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env \
+ -home $testdir -create -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -set $save_lsn]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+}
diff --git a/storage/bdb/test/log004.tcl b/storage/bdb/test/log004.tcl
new file mode 100644
index 00000000000..66968a8c1b4
--- /dev/null
+++ b/storage/bdb/test/log004.tcl
@@ -0,0 +1,46 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log004.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log004
+# TEST Make sure that if we do PREVs on a log, but the beginning of the
+# TEST log has been truncated, we do the right thing.
+proc log004 { } {
+ source ./include.tcl
+
+ puts "Log004: Prev on log when beginning of log has been truncated."
+ # Use archive test to populate log
+ env_cleanup $testdir
+ puts "\tLog004.a: Call archive to populate log."
+ archive
+
+ # Delete all log files under 100
+ puts "\tLog004.b: Delete all log files under 100."
+ # glob raises an error if nothing matches, hence the catch.
+ set ret [catch { glob $testdir/log.00000000* } result]
+ if { $ret == 0 } {
+ eval fileremove -f $result
+ }
+
+ # Now open the log and get the first record and try a prev
+ puts "\tLog004.c: Open truncated log, attempt to access missing portion."
+ set env [berkdb_env -create -log -home $testdir]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set ret [$logc get -first]
+ error_check_bad log_get [llength $ret] 0
+
+ # This should give DB_NOTFOUND which is a ret of length 0
+ catch {$logc get -prev} ret
+ error_check_good log_get_prev [string length $ret] 0
+
+ puts "\tLog004.d: Close log and environment."
+ error_check_good log_cursor_close [$logc close] 0
+ error_check_good log_close [$env close] 0
+}
diff --git a/storage/bdb/test/log005.tcl b/storage/bdb/test/log005.tcl
new file mode 100644
index 00000000000..ab2ad703c55
--- /dev/null
+++ b/storage/bdb/test/log005.tcl
@@ -0,0 +1,89 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log005.tcl,v 11.1 2002/05/30 22:16:49 bostic Exp $
+#
+# TEST log005
+# TEST Check that log file sizes can change on the fly.
+proc log005 { } {
+ source ./include.tcl
+
+ puts "Log005: Check that log file sizes can change."
+ env_cleanup $testdir
+
+ # Open the environment, set and check the log file size.
+ puts "\tLog005.a: open, set and check the log file size."
+ set env [berkdb_env \
+ -create -home $testdir -log_buffer 10000 -log_max 1000000 -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set db [berkdb_open \
+ -env $env -create -mode 0644 -btree -auto_commit a.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Get the current log file maximum.
+ set max [log005_stat $env "Current log file size"]
+ error_check_good max_set $max 1000000
+
+ # Reset the log file size using a second open, and make sure
+ # it changes.
+ puts "\tLog005.b: reset during open, check the log file size."
+ set envtmp [berkdb_env -home $testdir -log_max 900000 -txn]
+ error_check_good envtmp_open [is_valid_env $envtmp] TRUE
+ error_check_good envtmp_close [$envtmp close] 0
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good max_changed 900000 $tmp
+
+ puts "\tLog005.c: fill in the current log file size."
+ # Fill in the current log file.
+ # Commit 1 KB puts until the log rolls over to file number 2.
+ set new_lsn 0
+ set data [repeat "a" 1024]
+ for { set i 1 } \
+ { [log005_stat $env "Current log file number"] != 2 } \
+ { incr i } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set ret [$db put -txn $t $i $data]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set last_lsn $new_lsn
+ set new_lsn [log005_stat $env "Current log file offset"]
+ }
+
+ # The last LSN in the first file should be more than our new
+ # file size.
+ error_check_good "lsn check < 900000" [expr 900000 < $last_lsn] 1
+
+ # Close down the environment.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ puts "\tLog005.d: check the log file size is unchanged after recovery."
+ # Open again, running recovery. Verify the log file size is as we
+ # left it.
+ set env [berkdb_env -create -home $testdir -recover -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good after_recovery 900000 $tmp
+
+ error_check_good env_close [$env close] 0
+}
+
+# log005_stat --
+# Return the current log statistics.
+proc log005_stat { env s } {
+ # Return the value of the first log_stat field whose name
+ # contains $s; print a FAIL message and return 0 if not found.
+ set stat [$env log_stat]
+ foreach statpair $stat {
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $s] != 0} {
+ return $statval
+ }
+ }
+ puts "FAIL: log005: stat string $s not found"
+ return 0
+}
diff --git a/storage/bdb/test/logtrack.tcl b/storage/bdb/test/logtrack.tcl
new file mode 100644
index 00000000000..ad6b480b4e3
--- /dev/null
+++ b/storage/bdb/test/logtrack.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: logtrack.tcl,v 11.11 2002/09/03 16:44:37 sue Exp $
+#
+# logtrack.tcl: A collection of routines, formerly implemented in Perl
+# as log.pl, to track which log record types the test suite hits.
+
+set ltsname "logtrack_seen.db"
+set ltlist $test_path/logtrack.list
+set tmpname "logtrack_tmp"
+
+# logtrack_clean --
+#	Remove the log-record tracking database, if it exists.
+proc logtrack_clean { } {
+	global ltsname
+	file delete -force $ltsname
+	return
+}
+
+# logtrack_init --
+#	Begin a fresh tracking run: discard any previous tracking
+#	database and create a new, empty btree in which the log record
+#	types that the test suite hits will be recorded.
+proc logtrack_init { } {
+	global ltsname
+
+	logtrack_clean
+
+	# Create an empty tracking database.
+	set trackdb [berkdb_open -create -truncate -btree $ltsname]
+	$trackdb close
+
+	return
+}
+
+# Dump the logs for directory dirname and record which log
+# records were seen.
+proc logtrack_read { dirname } {
+ global ltsname tmpname util_path
+ global encrypt passwd
+
+ # Open the tracking database created by logtrack_init; each log
+ # record type we encounter is stored as a key in it.
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+
+ file delete -force $tmpname
+ # db_printlog args: -N = no shared-region locking, -h = env home.
+ set pargs " -N -h $dirname "
+ if { $encrypt > 0 } {
+ append pargs " -P $passwd "
+ }
+ # Dump the environment's log into a temporary text file.
+ set ret [catch {eval exec $util_path/db_printlog $pargs > $tmpname} res]
+ error_check_good printlog $ret 0
+ error_check_good tmpfile_exists [file exists $tmpname] 1
+
+ set f [open $tmpname r]
+ while { [gets $f record] >= 0 } {
+ # A record header looks like "[file][offset]type: ..."; capture
+ # the record-type name between the second "]" and the ":".
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ # Store the type as a key; repeats simply overwrite.
+ error_check_good seendb_put [$seendb put $name ""] 0
+ }
+ }
+ close $f
+ file delete -force $tmpname
+
+ error_check_good seendb_close [$seendb close] 0
+}
+
+# Print the log record types that were seen but should not have been
+# seen and the log record types that were not seen but should have been seen.
+proc logtrack_summary { } {
+ global ltsname ltlist testdir
+
+ # seendb: record types actually hit during the run.
+ # existdb: types that MUST be hit (from the BEGIN lines of ltlist).
+ # deprecdb: types that must NOT be hit (DEPRECATED/IGNORED lines).
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+ set existdb [berkdb_open -create -btree]
+ error_check_good existdb_open [is_valid_db $existdb] TRUE
+ set deprecdb [berkdb_open -create -btree]
+ error_check_good deprecdb_open [is_valid_db $deprecdb] TRUE
+
+ error_check_good ltlist_exists [file exists $ltlist] 1
+ set f [open $ltlist r]
+ set pref ""
+ while { [gets $f line] >= 0 } {
+ # Get the keyword, the first thing on the line:
+ # BEGIN/DEPRECATED/IGNORED/PREFIX
+ set keyword [lindex $line 0]
+
+ if { [string compare $keyword PREFIX] == 0 } {
+ # New prefix.
+ set pref [lindex $line 1]
+ } elseif { [string compare $keyword BEGIN] == 0 } {
+ # A log type we care about; put it on our list.
+
+ # Skip noop and debug.
+ if { [string compare [lindex $line 1] noop] == 0 } {
+ continue
+ }
+ if { [string compare [lindex $line 1] debug] == 0 } {
+ continue
+ }
+
+ error_check_good exist_put [$existdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ } elseif { [string compare $keyword DEPRECATED] == 0 ||
+ [string compare $keyword IGNORED] == 0 } {
+ error_check_good deprec_put [$deprecdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ }
+ }
+ # BUGFIX: the list file handle was never closed (descriptor leak).
+ close $f
+
+ # Every expected record type must appear in the seen database.
+ error_check_good exist_curs \
+ [is_valid_cursor [set ec [$existdb cursor]] $existdb] TRUE
+ while { [llength [set dbt [$ec get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$seendb count $rec] == 0 } {
+ puts "FAIL: log record type $rec not seen"
+ }
+ }
+ error_check_good exist_curs_close [$ec close] 0
+
+ # Every seen record type must be either expected or deprecated.
+ # BUGFIX: this cursor must walk seendb, not existdb.  Walking
+ # existdb made the "[$existdb count $rec] == 0" test below always
+ # false, so unknown/deprecated record types were never reported.
+ error_check_good seen_curs \
+ [is_valid_cursor [set sc [$seendb cursor]] $seendb] TRUE
+ while { [llength [set dbt [$sc get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$existdb count $rec] == 0 } {
+ if { [$deprecdb count $rec] == 0 } {
+ puts "FAIL: unknown log record type $rec seen"
+ } else {
+ puts "FAIL: deprecated log record type $rec seen"
+ }
+ }
+ }
+ error_check_good seen_curs_close [$sc close] 0
+
+ error_check_good seendb_close [$seendb close] 0
+ error_check_good existdb_close [$existdb close] 0
+ error_check_good deprecdb_close [$deprecdb close] 0
+
+ logtrack_clean
+}
diff --git a/storage/bdb/test/mdbscript.tcl b/storage/bdb/test/mdbscript.tcl
new file mode 100644
index 00000000000..9f3c971ee3c
--- /dev/null
+++ b/storage/bdb/test/mdbscript.tcl
@@ -0,0 +1,384 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mdbscript.tcl,v 11.29 2002/03/22 21:43:06 krinsky Exp $
+#
+# Process script for the multi-process db tester.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+global dbenv
+global klock
+global l_keys
+global procid
+global alphabet
+
+# In Tcl, when there are multiple catch handlers, *all* handlers
+# are called, so we have to resort to this hack.
+#
+global exception_handled
+
+set exception_handled 0
+
+set datastr $alphabet$alphabet
+
+# Usage: mdbscript dir file nentries iter procid procs seed
+# dir: DBHOME directory
+# file: db file on which to operate
+# nentries: number of entries taken from dictionary
+# iter: number of operations to run
+# procid: this processes' id number
+# procs: total number of processes running
+set usage "mdbscript method dir file nentries iter procid procs"
+
+# Verify usage
+if { $argc != 7 } {
+ puts "FAIL:[timestamp] test042: Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set method [lindex $argv 0]
+set dir [lindex $argv 1]
+set file [lindex $argv 2]
+set nentries [ lindex $argv 3 ]
+set iter [ lindex $argv 4 ]
+set procid [ lindex $argv 5 ]
+set procs [ lindex $argv 6 ]
+
+set pflags ""
+set gflags ""
+set txn ""
+
+set renum [is_rrecno $method]
+set omethod [convert_method $method]
+
+if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+}
+
+# Initialize seed
+global rand_init
+
+# We want repeatable results, but we also want each instance of mdbscript
+# to do something different. So we add the procid to the fixed seed.
+# (Note that this is a serial number given by the caller, not a pid.)
+berkdb srand [expr $rand_init + $procid]
+
+puts "Beginning execution for [pid] $method"
+puts "$dir db_home"
+puts "$file database"
+puts "$nentries data elements"
+puts "$iter iterations"
+puts "$procid process id"
+puts "$procs processes"
+
+set klock NOLOCK
+
+# Note: all I/O operations, and especially flush, are expensive
+# on Win2000 at least with Tcl version 8.3.2. So we'll avoid
+# flushes in the main part of the loop below.
+flush stdout
+
+set dbenv [berkdb_env -create -cdb -home $dir]
+#set dbenv [berkdb_env -create -cdb -log -home $dir]
+error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+set locker [ $dbenv lock_id ]
+
+set db [berkdb_open -env $dbenv -create -mode 0644 $omethod $file]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+# Init globals (no data)
+set nkeys [db_init $db 0]
+puts "Initial number of keys: $nkeys"
+error_check_good db_init $nkeys $nentries
+tclsleep 5
+
+# get_lock --
+# Attempt a non-blocking write lock on key k (guards against reading a
+# key that another process is mid-delete on).  On success the lock
+# handle is left in the global klock and 0 is returned.  If the lock is
+# busy (DB_LOCK_NOTGRANTED), klock is set to NOLOCK and 1 is returned
+# so the caller can retry with a different key.
+proc get_lock { k } {
+ global dbenv
+ global procid
+ global locker
+ global klock
+ global DB_LOCK_WRITE
+ global DB_LOCK_NOWAIT
+ global errorInfo
+ global exception_handled
+ # Make sure that the key isn't in the middle of
+ # a delete operation
+ if {[catch {$dbenv lock_get -nowait write $locker $k} klock] != 0 } {
+ set exception_handled 1
+
+ # The only acceptable failure is "not granted"; anything else
+ # is a real error.
+ error_check_good \
+ get_lock [is_substr $errorInfo "DB_LOCK_NOTGRANTED"] 1
+ puts "Warning: key $k locked"
+ set klock NOLOCK
+ return 1
+ } else {
+ error_check_good get_lock [is_valid_lock $klock $dbenv] TRUE
+ }
+ return 0
+}
+
+# On each iteration we're going to randomly pick a key.
+# 1. We'll either get it (verifying that its contents are reasonable).
+# 2. Put it (using an overwrite to make the data be datastr:ID).
+# 3. Get it and do a put through the cursor, tacking our ID on to
+# 4. Get it, read forward some random number of keys.
+# 5. Get it, read forward some random number of keys and do a put (replace).
+# 6. Get it, read forward some random number of keys and do a del. And then
+# do a put of the key.
+set gets 0
+set getput 0
+set overwrite 0
+set seqread 0
+set seqput 0
+set seqdel 0
+set dlen [string length $datastr]
+
+for { set i 0 } { $i < $iter } { incr i } {
+ set op [berkdb random_int 0 5]
+ puts "iteration $i operation $op"
+ set close_cursor 0
+ if {[catch {
+ switch $op {
+ 0 {
+ incr gets
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ continue;
+ }
+
+ set rec [eval {$db get} $txn $gflags {$key}]
+ error_check_bad "$db get $key" [llength $rec] 0
+ set partial [string range \
+ [lindex [lindex $rec 0] 1] 0 [expr $dlen - 1]]
+ error_check_good \
+ "$db get $key" $partial [pad_data $method $datastr]
+ }
+ 1 {
+ incr overwrite
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ set data $datastr:$procid
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good "$db put $key" $ret 0
+ }
+ 2 {
+ incr getput
+ set dbc [$db cursor -update]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 [expr $dlen - 1]]
+ error_check_good \
+ "$dbc get $key" $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put \
+ -current [chop_data $method $rec]]
+ error_check_good "$dbc put $key" $ret 0
+ error_check_good "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ 3 -
+ 4 -
+ 5 {
+ if { $op == 3 } {
+ set flags ""
+ } else {
+ set flags -update
+ }
+ set dbc [eval {$db cursor} $flags]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+
+ # Now read a few keys sequentially
+ set nloop [berkdb random_int 0 10]
+ if { [berkdb random_int 0 1] == 0 } {
+ set flags -next
+ } else {
+ set flags -prev
+ }
+ while { $nloop > 0 } {
+ set lastret $ret
+ set ret [eval {$dbc get} $flags]
+ # Might read beginning/end of file
+ if { [llength $ret] == 0} {
+ set ret $lastret
+ break
+ }
+ incr nloop -1
+ }
+ switch $op {
+ 3 {
+ incr seqread
+ }
+ 4 {
+ incr seqput
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 \
+ [expr $dlen - 1]]
+ error_check_good "$dbc get $key" \
+ $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put -current \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $key" $ret 0
+ }
+ 5 {
+ incr seqdel
+ set k [lindex [lindex $ret 0] 0]
+ # We need to lock the item we're
+ # deleting so that someone else can't
+ # try to do a get while we're
+ # deleting
+ error_check_good "$klock put" \
+ [$klock put] 0
+ set klock NOLOCK
+ set cur [$dbc get -current]
+ error_check_bad get_current \
+ [llength $cur] 0
+ set key [lindex [lindex $cur 0] 0]
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue
+ }
+ set ret [$dbc del]
+ error_check_good "$dbc del" $ret 0
+ set rec $datastr
+ append rec ":$procid"
+ if { $renum == 1 } {
+ set ret [$dbc put -before \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret $k
+ } elseif { \
+ [is_record_based $method] == 1 } {
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ set ret [$db put $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$db put $k" $ret 0
+ } else {
+ set ret [$dbc put -keylast $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret 0
+ }
+ }
+ }
+ if { $close_cursor == 1 } {
+ error_check_good \
+ "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ }
+ }
+ } res] != 0} {
+ global errorInfo;
+ global exception_handled;
+
+ puts $errorInfo
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+
+ if { [string compare $klock NOLOCK] != 0 } {
+ catch {$klock put}
+ }
+ if {$close_cursor == 1} {
+ catch {$dbc close}
+ set close_cursor 0
+ }
+
+ if {[string first FAIL $theError] == 0 && \
+ $exception_handled != 1} {
+ flush stdout
+ error "FAIL:[timestamp] test042: key $k: $theError"
+ }
+ set exception_handled 0
+ } else {
+ if { [string compare $klock NOLOCK] != 0 } {
+ error_check_good "$klock put" [$klock put] 0
+ set klock NOLOCK
+ }
+ }
+}
+
+error_check_good db_close_catch [catch {$db close} ret] 0
+error_check_good db_close $ret 0
+error_check_good dbenv_close [$dbenv close] 0
+
+flush stdout
+# NOTE(review): the script exits here, so everything below this line is
+# unreachable dead code -- the per-operation statistics summary is
+# never printed.  Presumably intentional (silences the summary); move
+# the exit below the puts block if the summary is wanted.
+exit
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: "
+puts "\t$gets gets"
+puts "\t$overwrite overwrites"
+puts "\t$getput getputs"
+puts "\t$seqread seqread"
+puts "\t$seqput seqput"
+puts "\t$seqdel seqdel"
+flush stdout
diff --git a/storage/bdb/test/memp001.tcl b/storage/bdb/test/memp001.tcl
new file mode 100644
index 00000000000..c4bbf99b9b2
--- /dev/null
+++ b/storage/bdb/test/memp001.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp001.tcl,v 11.50 2002/08/07 16:46:28 bostic Exp $
+#
+
+# TEST memp001
+# TEST Randomly updates pages.
+proc memp001 { } {
+	# Run the body with 1 and 3 caches under each supported memory
+	# configuration: default, private, and system (shmget) memory.
+	foreach memflags [list "" "-private" "-system_mem -shm_key 1"] {
+		memp001_body 1 $memflags
+		memp001_body 3 $memflags
+	}
+}
+
+# memp001_body --
+# One memp001 run: create an env with ncache caches under the given
+# memory flags, open nfiles mpool files, then repeatedly pin, stamp,
+# dirty, and unpin random pages, verifying nothing stays pinned.
+proc memp001_body { ncache flags } {
+ source ./include.tcl
+ global rand_init
+
+ set nfiles 5
+ set iter 500
+ set psize 512
+ set cachearg "-cachesize {0 400000 $ncache}"
+
+ puts \
+"Memp001: { $flags } random update $iter iterations on $nfiles files."
+ #
+ # Check if this platform supports this set of flags
+ #
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ puts "\tMemp001.a: Create env with $ncache caches"
+ set env [eval {berkdb_env -create -mode 0644} \
+ $cachearg {-home $testdir} $flags]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ #
+ # Do a simple mpool_stat call to verify the number of caches
+ # just to exercise the stat code.
+ set stat [$env mpool_stat]
+ set str "Number of caches"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ error_check_good ncache [lindex $statpair 1] $ncache
+ }
+ }
+ error_check_good checked $checked 1
+
+ # Open N memp files
+ puts "\tMemp001.b: Create $nfiles mpool files"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ set fname "data_file.$i"
+ file_create $testdir/$fname 50 $psize
+
+ set mpools($i) \
+ [$env mpool -create -pagesize $psize -mode 0644 $fname]
+ error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
+ }
+
+ # Now, loop, picking files at random
+ berkdb srand $rand_init
+ puts "\tMemp001.c: Random page replacement loop"
+ for {set i 0} {$i < $iter} {incr i} {
+ # Pin pages from random ranges (get_range) and cycle them
+ # through dirty put/re-get (replace) in a scattered order.
+ set mpool $mpools([berkdb random_int 1 $nfiles])
+ set p(1) [get_range $mpool 10]
+ set p(2) [get_range $mpool 10]
+ set p(3) [get_range $mpool 10]
+ set p(1) [replace $mpool $p(1)]
+ set p(3) [replace $mpool $p(3)]
+ set p(4) [get_range $mpool 20]
+ set p(4) [replace $mpool $p(4)]
+ set p(5) [get_range $mpool 10]
+ set p(6) [get_range $mpool 20]
+ set p(7) [get_range $mpool 10]
+ set p(8) [get_range $mpool 20]
+ set p(5) [replace $mpool $p(5)]
+ set p(6) [replace $mpool $p(6)]
+ set p(9) [get_range $mpool 40]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [get_range $mpool 40]
+ set p(7) [replace $mpool $p(7)]
+ set p(8) [replace $mpool $p(8)]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [replace $mpool $p(10)]
+ #
+ # We now need to put all the pages we have here or
+ # else they end up pinned.
+ #
+ for {set x 1} { $x <= 10} {incr x} {
+ error_check_good pgput [$p($x) put] 0
+ }
+ }
+
+ # Close N memp files, close the environment.
+ puts "\tMemp001.d: Close mpools"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
+ }
+ error_check_good envclose [$env close] 0
+
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ fileremove -f $testdir/data_file.$i
+ }
+}
+
+# file_create --
+#	Build fname as nblocks blocks of blocksize bytes: each block
+#	begins with its block number, and the file's final byte is "Z"
+#	so the file reaches its full size without a trailing newline.
+proc file_create { fname nblocks blocksize } {
+	set fid [open $fname w]
+	for {set blk 0} {$blk < $nblocks} {incr blk} {
+		seek $fid [expr $blk * $blocksize] start
+		puts -nonewline $fid $blk
+	}
+	seek $fid [expr $nblocks * $blocksize - 1]
+
+	# We don't end the file with a newline, because some platforms (like
+	# Windows) emit CR/NL. There does not appear to be a BINARY open flag
+	# that prevents this.
+	puts -nonewline $fid "Z"
+	close $fid
+
+	# Make sure it worked
+	if { [file size $fname] != $nblocks * $blocksize } {
+		error "FAIL: file_create could not create correct file size"
+	}
+}
+
+# get_range --
+#	Pin a random page in [0, max] from mpool, warn (without failing)
+#	if the returned page number differs from the one requested, stamp
+#	the page as pinned, and return the page handle.
+proc get_range { mpool max } {
+	set pno [berkdb random_int 0 $max]
+	set pg [$mpool get $pno]
+	error_check_good page [is_valid_page $pg $mpool] TRUE
+	set got [$pg pgnum]
+	if { $got != $pno } {
+		puts "Get_range: Page mismatch page |$pno| val |$got|"
+	}
+	error_check_good page_init [$pg init "Page is pinned by [pid]"] 0
+
+	return $pg
+}
+
+# replace --
+#	Stamp page p as unpinned, release it dirty, then immediately
+#	re-fetch the same page number from mpool and return the fresh
+#	page handle.
+proc replace { mpool p } {
+	set keep [$p pgnum]
+
+	error_check_good page_init [$p init "Page is unpinned by [pid]"] 0
+	error_check_good page_put [$p put -dirty] 0
+
+	set fresh [$mpool get $keep]
+	error_check_good page [is_valid_page $fresh $mpool] TRUE
+
+	return $fresh
+}
+
+# mem_chk --
+# Probe whether this platform supports an environment opened with the
+# given memory flags.  Returns 0 if supported, 1 if the test should be
+# skipped (e.g. shmget memory without mutex support).
+proc mem_chk { flags } {
+ source ./include.tcl
+ global errorCode
+
+ # Open the memp with region init specified
+ env_cleanup $testdir
+
+ set cachearg " -cachesize {0 400000 3}"
+ set ret [catch {eval {berkdb_env -create -mode 0644}\
+ $cachearg {-region_init -home $testdir} $flags} env]
+ if { $ret != 0 } {
+ # If the env open failed, it may be because we're on a platform
+ # such as HP-UX 10 that won't support mutexes in shmget memory.
+ # Or QNX, which doesn't support system memory at all.
+ # Verify that the return value was EINVAL or EOPNOTSUPP
+ # and bail gracefully.
+ error_check_good is_shm_test [is_substr $flags -system_mem] 1
+ error_check_good returned_error [expr \
+ [is_substr $errorCode EINVAL] || \
+ [is_substr $errorCode EOPNOTSUPP]] 1
+ puts "Warning:\
+ platform does not support mutexes in shmget memory."
+ puts "Skipping shared memory mpool test."
+ return 1
+ }
+ # Probe succeeded: tear the environment back down before the real
+ # test creates its own.
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ return 0
+}
diff --git a/storage/bdb/test/memp002.tcl b/storage/bdb/test/memp002.tcl
new file mode 100644
index 00000000000..d55f2987f06
--- /dev/null
+++ b/storage/bdb/test/memp002.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp002.tcl,v 11.47 2002/09/05 17:23:06 sandstro Exp $
+#
+
+# TEST memp002
+# TEST Tests multiple processes accessing and modifying the same files.
+proc memp002 { } {
+	#
+	# Multiple processes not supported by private memory so don't
+	# run memp002_body with -private.
+	#
+	foreach memflags [list "" "-system_mem -shm_key 1"] {
+		memp002_body $memflags
+	}
+}
+
+# memp002_body --
+# Spawn $procs mpoolscript.tcl child processes that share one
+# environment and hammer the same set of mpool files concurrently.
+proc memp002_body { flags } {
+ source ./include.tcl
+
+ puts "Memp002: {$flags} Multiprocess mpool tester"
+
+ set procs 4
+ set psizes "512 1024 2048 4096 8192"
+ set iterations 500
+ set npages 100
+
+ # Check if this combination of flags is supported by this arch.
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ # Split the iteration budget evenly across the child processes.
+ set iter [expr $iterations / $procs]
+
+ # Clean up old stuff and create new.
+ env_cleanup $testdir
+
+ for { set i 0 } { $i < [llength $psizes] } { incr i } {
+ fileremove -f $testdir/file$i
+ }
+ set e [eval {berkdb_env -create -lock -home $testdir} $flags]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ set pidlist {}
+ for { set i 0 } { $i < $procs } {incr i} {
+
+ # Echo the command line, then launch the child via wrap.tcl
+ # (which redirects its output to the per-process .out file).
+ puts "$tclsh_path\
+ $test_path/mpoolscript.tcl $testdir $i $procs \
+ $iter $psizes $npages 3 $flags > \
+ $testdir/memp002.$i.out &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mpoolscript.tcl $testdir/memp002.$i.out $testdir $i $procs \
+ $iter $psizes $npages 3 $flags &]
+ lappend pidlist $p
+ }
+ puts "Memp002: $procs independent processes now running"
+ watch_procs $pidlist
+
+ reset_env $e
+}
diff --git a/storage/bdb/test/memp003.tcl b/storage/bdb/test/memp003.tcl
new file mode 100644
index 00000000000..31eb55b757c
--- /dev/null
+++ b/storage/bdb/test/memp003.tcl
@@ -0,0 +1,153 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp003.tcl,v 11.46 2002/04/30 17:26:06 sue Exp $
+#
+
+# TEST memp003
+# TEST Test reader-only/writer process combinations; we use the access methods
+# TEST for testing.
+proc memp003 { } {
+	#
+	# Multiple processes not supported by private memory so don't
+	# run memp003_body with -private.
+	#
+	foreach memflags [list "" "-system_mem -shm_key 1"] {
+		memp003_body $memflags
+	}
+}
+
+# memp003_body --
+# Reader/writer mpool test: this process holds the database read-only
+# while a spawned child process modifies it read-write; verify the
+# local read-only handle observes the remote changes.
+proc memp003_body { flags } {
+ global alphabet
+ source ./include.tcl
+
+ puts "Memp003: {$flags} Reader/Writer tests"
+
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ set psize 1024
+ set nentries 500
+ set testfile mpool.db
+ set t1 $testdir/t1
+
+ # Create an environment that the two processes can share, with
+ # 20 pages per cache.
+ set c [list 0 [expr $psize * 20 * 3] 3]
+ set dbenv [eval {berkdb_env \
+ -create -lock -home $testdir -cachesize $c} $flags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # First open and create the file.
+ set db [berkdb_open -env $dbenv -create -truncate \
+ -mode 0644 -pagesize $psize -btree $testfile]
+ error_check_good dbopen/RW [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set txn ""
+ set count 0
+
+ puts "\tMemp003.a: create database"
+ set keys ""
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys $str
+
+ set ret [eval {$db put} $txn {$str $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn {$str}]
+ error_check_good get $ret [list [list $str $str]]
+
+ incr count
+ }
+ close $did
+ error_check_good close [$db close] 0
+
+ # Now open the file for read-only
+ set db [berkdb_open -env $dbenv -rdonly $testfile]
+ error_check_good dbopen/RO [is_substr $db db] 1
+
+ puts "\tMemp003.b: verify a few keys"
+ # Read and verify a couple of keys; saving them to check later
+ set testset ""
+ for { set i 0 } { $i < 10 } { incr i } {
+ set ndx [berkdb random_int 0 [expr $nentries - 1]]
+ set key [lindex $keys $ndx]
+ # Pick 10 distinct keys; retry on duplicates.
+ if { [lsearch $testset $key] != -1 } {
+ incr i -1
+ continue;
+ }
+
+ # The remote process stuff is unhappy with
+ # zero-length keys; make sure we don't pick one.
+ if { [llength $key] == 0 } {
+ incr i -1
+ continue
+ }
+
+ lappend testset $key
+
+ set ret [eval {$db get} $txn {$key}]
+ error_check_good get/RO $ret [list [list $key $key]]
+ }
+
+ puts "\tMemp003.c: retrieve and modify keys in remote process"
+ # Now open remote process where we will open the file RW
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ puts $f1 "flush stdout"
+ flush $f1
+
+ set c [concat "{" [list 0 [expr $psize * 20 * 3] 3] "}" ]
+ set remote_env [send_cmd $f1 \
+ "berkdb_env -create -lock -home $testdir -cachesize $c $flags"]
+ error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
+
+ set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
+ error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
+
+ foreach k $testset {
+ # Get the key
+ set ret [send_cmd $f1 "$remote_db get $k"]
+ error_check_good remote_get $ret [list [list $k $k]]
+
+ # Now replace the key
+ set ret [send_cmd $f1 "$remote_db put $k $k$k"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.d: verify changes in local process"
+ # BUGFIX: use the loop variable $k, not the stale $key left over
+ # from the Memp003.b loop above.  The old code re-verified the same
+ # single key on every iteration, so the remote modifications were
+ # never actually checked.
+ foreach k $testset {
+ set ret [eval {$db get} $txn {$k}]
+ error_check_good get_verify/RO $ret [list [list $k $k$k]]
+ }
+
+ puts "\tMemp003.e: Fill up the cache with dirty buffers"
+ foreach k $testset {
+ # Now rewrite the keys with BIG data
+ set data [replicate $alphabet 32]
+ set ret [send_cmd $f1 "$remote_db put $k $data"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.f: Get more pages for the read-only file"
+ dump_file $db $txn $t1 nop
+
+ puts "\tMemp003.g: Sync from the read-only file"
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_close [$db close] 0
+
+ set ret [send_cmd $f1 "$remote_db close"]
+ error_check_good remote_get $ret 0
+
+ # Close the environment both remotely and locally.
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+ close $f1
+
+ reset_env $dbenv
+}
diff --git a/storage/bdb/test/mpoolscript.tcl b/storage/bdb/test/mpoolscript.tcl
new file mode 100644
index 00000000000..c13f70eb945
--- /dev/null
+++ b/storage/bdb/test/mpoolscript.tcl
@@ -0,0 +1,171 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mpoolscript.tcl,v 11.16 2002/04/29 14:47:16 sandstro Exp $
+#
+# Random multiple process mpool tester.
+# Usage: mpoolscript dir id numiters numfiles numpages sleepint
+# dir: lock directory.
+# id: Unique identifier for this process.
+# maxprocs: Number of procs in this test.
+# numiters: Total number of iterations.
+# pgsizes: Pagesizes for the different files. Length of this item indicates
+# how many files to use.
+# numpages: Number of pages per file.
+# sleepint: Maximum sleep interval.
+# flags: Flags for env open
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage \
+ "mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags"
+
+# Verify usage
+if { $argc != 8 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ puts $argc
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set id [lindex $argv 1]
+set maxprocs [lindex $argv 2]
+set numiters [ lindex $argv 3 ]
+set pgsizes [ lindex $argv 4 ]
+set numpages [ lindex $argv 5 ]
+set sleepint [ lindex $argv 6 ]
+set flags [ lindex $argv 7]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+# Give time for all processes to start up.
+tclsleep 10
+
+puts -nonewline "Beginning execution for $id: $maxprocs $dir $numiters"
+puts " $pgsizes $numpages $sleepint"
+flush stdout
+
+# Figure out how small/large to make the cache
+set max 0
+foreach i $pgsizes {
+ if { $i > $max } {
+ set max $i
+ }
+}
+
+set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1]
+set env_cmd {berkdb_env -lock -cachesize $cache -home $dir}
+set e [eval $env_cmd $flags]
+error_check_good env_open [is_valid_env $e] TRUE
+
+# Now open files
+set mpools {}
+set nfiles 0
+foreach psize $pgsizes {
+ set mp [$e mpool -create -mode 0644 -pagesize $psize file$nfiles]
+ error_check_good memp_fopen:$nfiles [is_valid_mpool $mp $e] TRUE
+ lappend mpools $mp
+ incr nfiles
+}
+
+puts "Establishing long-term pin on file 0 page $id for process $id"
+
+# Set up the long-pin page
+set locker [$e lock_id]
+set lock [$e lock_get write $locker 0:$id]
+error_check_good lock_get [is_valid_lock $lock $e] TRUE
+
+set mp [lindex $mpools 0]
+set master_page [$mp get -create $id]
+error_check_good mp_get:$master_page [is_valid_page $master_page $mp] TRUE
+
+set r [$master_page init MASTER$id]
+error_check_good page_init $r 0
+
+# Release the lock but keep the page pinned
+set r [$lock put]
+error_check_good lock_put $r 0
+
+# Main loop. On each iteration, we'll check every page in each of
+# of the files. On any file, if we see the appropriate tag in the
+# field, we'll rewrite the page, else we won't. Keep track of
+# how many pages we actually process.
+set pages 0
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ puts "[timestamp]: iteration $iter, $pages pages set so far"
+ flush stdout
+ for { set fnum 1 } { $fnum < $nfiles } { incr fnum } {
+ if { [expr $fnum % 2 ] == 0 } {
+ set pred [expr ($id + $maxprocs - 1) % $maxprocs]
+ } else {
+ set pred [expr ($id + $maxprocs + 1) % $maxprocs]
+ }
+
+ set mpf [lindex $mpools $fnum]
+ for { set p 0 } { $p < $numpages } { incr p } {
+ set lock [$e lock_get write $locker $fnum:$p]
+ error_check_good lock_get:$fnum:$p \
+ [is_valid_lock $lock $e] TRUE
+
+ # Now, get the page
+ set pp [$mpf get -create $p]
+ error_check_good page_get:$fnum:$p \
+ [is_valid_page $pp $mpf] TRUE
+
+ if { [$pp is_setto $pred] == 0 || [$pp is_setto 0] == 0 } {
+ # Set page to self.
+ set r [$pp init $id]
+ error_check_good page_init:$fnum:$p $r 0
+ incr pages
+ set r [$pp put -dirty]
+ error_check_good page_put:$fnum:$p $r 0
+ } else {
+ error_check_good page_put:$fnum:$p [$pp put] 0
+ }
+ error_check_good lock_put:$fnum:$p [$lock put] 0
+ }
+ }
+ tclsleep [berkdb random_int 1 $sleepint]
+}
+
+# Now verify your master page, release its pin, then verify everyone else's
+puts "$id: End of run verification of master page"
+set r [$master_page is_setto MASTER$id]
+error_check_good page_check $r 1
+set r [$master_page put -dirty]
+error_check_good page_put $r 0
+
+set i [expr ($id + 1) % $maxprocs]
+set mpf [lindex $mpools 0]
+
+while { $i != $id } {
+ set p [$mpf get -create $i]
+ error_check_good mp_get [is_valid_page $p $mpf] TRUE
+
+ if { [$p is_setto MASTER$i] != 1 } {
+ puts "Warning: Master page $i not set."
+ }
+ error_check_good page_put:$p [$p put] 0
+
+ set i [expr ($i + 1) % $maxprocs]
+}
+
+# Close files
+foreach i $mpools {
+ set r [$i close]
+ error_check_good mpf_close $r 0
+}
+
+# Close environment system
+set r [$e close]
+error_check_good env_close $r 0
+
+puts "[timestamp] $id Complete"
+flush stdout
diff --git a/storage/bdb/test/mutex001.tcl b/storage/bdb/test/mutex001.tcl
new file mode 100644
index 00000000000..93f858993a5
--- /dev/null
+++ b/storage/bdb/test/mutex001.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex001.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
+#
+
+# TEST mutex001
+# TEST Test basic mutex functionality
+proc mutex001 { } {
+ source ./include.tcl
+
+ puts "Mutex001: Basic functionality"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Test open w/out create; should fail
+ error_check_bad \
+ env_open [catch {berkdb_env -lock -home $testdir} env] 0
+
+ puts "\tMutex001.a: Create lock env"
+ # Now open for real
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ puts "\tMutex001.b: Create $nlocks mutexes"
+ set m [$env mutex 0644 $nlocks]
+ error_check_good mutex_init [is_valid_mutex $m $env] TRUE
+
+ # Get, set each mutex; sleep, then get Release
+ puts "\tMutex001.c: Get/set loop"
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m get $i ]
+ error_check_good mutex_get $r 0
+
+ # Stamp each mutex with its own index so we can verify the
+ # values persisted across the sleep below.
+ set r [$m setval $i $i]
+ error_check_good mutex_setval $r 0
+ }
+ tclsleep 5
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m getval $i]
+ error_check_good mutex_getval $r $i
+
+ set r [$m release $i ]
+ error_check_good mutex_get $r 0
+ }
+
+ error_check_good mutex_close [$m close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/mutex002.tcl b/storage/bdb/test/mutex002.tcl
new file mode 100644
index 00000000000..193e600fe8b
--- /dev/null
+++ b/storage/bdb/test/mutex002.tcl
@@ -0,0 +1,94 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex002.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
+#
+
+# TEST mutex002
+# TEST Test basic mutex synchronization
+proc mutex002 { } {
+ source ./include.tcl
+
+ puts "Mutex002: Basic synchronization"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Fork off child before we open any files.
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ flush $f1
+
+ # Open the environment and the mutex locally
+ puts "\tMutex002.a: Open local and remote env"
+ set local_env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set local_mutex [$local_env mutex 0644 $nlocks]
+ error_check_good \
+ mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
+
+ # Open the environment and the mutex remotely
+ set remote_env [send_cmd $f1 "berkdb_env -lock -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
+ error_check_good \
+ mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
+
+ # Do a get here, then set the value to be pid.
+ # On the remote side fire off a get and getval.
+ puts "\tMutex002.b: Local and remote get/set"
+ set r [$local_mutex get 1]
+ error_check_good lock_get $r 0
+
+ set r [$local_mutex setval 1 [pid]]
+ error_check_good lock_get $r 0
+
+ # Now have the remote side request the lock and check its
+ # value. Then wait 5 seconds, release the mutex and see
+ # what the remote side returned.
+ send_timed_cmd $f1 1 "$remote_mutex get 1"
+ send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
+
+ # Now sleep before resetting and releasing lock
+ tclsleep 5
+ set newv [expr [pid] - 1]
+ set r [$local_mutex setval 1 $newv]
+ error_check_good mutex_setval $r 0
+
+ set r [$local_mutex release 1]
+ error_check_good mutex_release $r 0
+
+ # Now get the result from the other script
+ # Timestamp
+ set result [rcv_result $f1]
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Timestamp
+ set result [rcv_result $f1]
+
+ # Mutex value
+ set result [send_cmd $f1 "puts \$ret"]
+ error_check_good lock_get:remote_getval $result $newv
+
+ # Close down the remote
+ puts "\tMutex002.c: Close remote"
+ set ret [send_cmd $f1 "$remote_mutex close" 5]
+ # Not sure why we need this, but we do... an extra blank line
+ # someone gets output somewhere
+ gets $f1 ret
+ error_check_good remote:mutex_close $ret 0
+
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+
+ catch { close $f1 } result
+
+ set ret [$local_mutex close]
+ error_check_good local:mutex_close $ret 0
+
+ set ret [$local_env close]
+ error_check_good local:env_close $ret 0
+}
diff --git a/storage/bdb/test/mutex003.tcl b/storage/bdb/test/mutex003.tcl
new file mode 100644
index 00000000000..da35ac0d115
--- /dev/null
+++ b/storage/bdb/test/mutex003.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex003.tcl,v 11.24 2002/09/05 17:23:06 sandstro Exp $
+#
+
+# TEST mutex003
+# TEST Generate a bunch of parallel testers that try to randomly obtain locks.
+proc mutex003 { } {
+ source ./include.tcl
+
+ set nmutex 20
+ set iter 500
+ set procs 5
+ set mdegree 3
+ set wait 2
+ puts "Mutex003: Multi-process random mutex test"
+
+ env_cleanup $testdir
+
+ puts "\tMutex003.a: Create environment"
+ # Now open the region we'll use for multiprocess testing.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set mutex [$env mutex 0644 $nmutex]
+ error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
+
+ error_check_good mutex_close [$mutex close] 0
+
+ # Now spawn off processes
+ puts "\tMutex003.b: Create $procs processes"
+ set pidlist {}
+ for { set i 0 } {$i < $procs} {incr i} {
+ puts "$tclsh_path\
+ $test_path/mutexscript.tcl $testdir\
+ $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mutexscript.tcl $testdir/$i.mutexout $testdir\
+ $iter $nmutex $wait $mdegree &]
+ lappend pidlist $p
+ }
+ puts "\tMutex003.c: $procs independent processes now running"
+ watch_procs $pidlist
+ error_check_good env_close [$env close] 0
+ # Remove output files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $testdir/$i.mutexout
+ }
+}
diff --git a/storage/bdb/test/mutexscript.tcl b/storage/bdb/test/mutexscript.tcl
new file mode 100644
index 00000000000..bc410f2716d
--- /dev/null
+++ b/storage/bdb/test/mutexscript.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutexscript.tcl,v 11.16 2002/04/29 14:58:16 sandstro Exp $
+#
+# Random mutex tester.
+# Usage: mutexscript dir numiters mlocks sleepint degree
+# dir: dir in which all the mutexes live.
+# numiters: Total number of iterations.
+# nmutex: Total number of mutexes.
+# sleepint: Maximum sleep interval.
+# degree: Maximum number of locks to acquire at once
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "mutexscript dir numiters nmutex sleepint degree"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set numiters [ lindex $argv 1 ]
+set nmutex [ lindex $argv 2 ]
+set sleepint [ lindex $argv 3 ]
+set degree [ lindex $argv 4 ]
+set locker [pid]
+set mypid [sanitized_pid]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+puts -nonewline "Mutexscript: Beginning execution for $locker:"
+puts " $numiters $nmutex $sleepint $degree"
+flush stdout
+
+# Open the environment and the mutex
+set e [berkdb_env -create -mode 0644 -lock -home $dir]
+error_check_good evn_open [is_valid_env $e] TRUE
+
+set mutex [$e mutex 0644 $nmutex]
+error_check_good mutex_init [is_valid_mutex $mutex $e] TRUE
+
+# Sleep for awhile to make sure that everyone has gotten in
+tclsleep 5
+
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ set nlocks [berkdb random_int 1 $degree]
+ # We will always lock objects in ascending order to avoid
+ # deadlocks.
+ set lastobj 1
+ set mlist {}
+ for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
+ # Pick lock parameters
+ set obj [berkdb random_int $lastobj [expr $nmutex - 1]]
+ set lastobj [expr $obj + 1]
+ puts "[timestamp] $locker $lnum: $obj"
+
+ # Do get, set its val to own pid, and then add to list
+ error_check_good mutex_get:$obj [$mutex get $obj] 0
+ error_check_good mutex_setval:$obj [$mutex setval $obj $mypid] 0
+ lappend mlist $obj
+ if {$lastobj >= $nmutex} {
+ break
+ }
+ }
+
+ # Sleep for 10 to (100*$sleepint) ms.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
+
+ # Now release locks
+ foreach i $mlist {
+ error_check_good mutex_getval:$i [$mutex getval $i] $mypid
+ error_check_good mutex_setval:$i \
+ [$mutex setval $i [expr 0 - $mypid]] 0
+ error_check_good mutex_release:$i [$mutex release $i] 0
+ }
+ puts "[timestamp] $locker released mutexes"
+ flush stdout
+}
+
+puts "[timestamp] $locker Complete"
+flush stdout
diff --git a/storage/bdb/test/ndbm.tcl b/storage/bdb/test/ndbm.tcl
new file mode 100644
index 00000000000..0bf8e0cc87c
--- /dev/null
+++ b/storage/bdb/test/ndbm.tcl
@@ -0,0 +1,144 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ndbm.tcl,v 11.16 2002/07/08 13:11:30 mjc Exp $
+#
+# Historic NDBM interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
+proc ndbm { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "NDBM interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/ndbmtest
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ set db [berkdb ndbm_open -create -truncate -mode 0644 $testfile]
+ error_check_good ndbm_open [is_substr $db ndbm] 1
+ set did [open $dict]
+
+ error_check_good rdonly_false [$db rdonly] 0
+
+ set flags 0
+ set txn 0
+ set count 0
+ set skippednullkey 0
+
+ puts "\tNDBM.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # NDBM can't handle zero-length keys
+ if { [string length $str] == 0 } {
+ set skippednullkey 1
+ continue
+ }
+
+ set ret [$db store $str $str insert]
+ error_check_good ndbm_store $ret 0
+
+ set d [$db fetch $str]
+ error_check_good ndbm_fetch $d $str
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tNDBM.b: dump file"
+ set oid [open $t1 w]
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch $d $key
+ }
+
+ # If we had to skip a zero-length key, juggle things to cover up
+ # this fact in the dump.
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ incr nentries 1
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # File descriptors tests won't work under Windows.
+ if { $is_windows_test != 1 } {
+ puts "\tNDBM.c: pagf/dirf test"
+ set fd [$db pagfno]
+ error_check_bad pagf $fd -1
+ set fd [$db dirfno]
+ error_check_bad dirf $fd -1
+ }
+
+ puts "\tNDBM.d: close, open, and dump file"
+
+ # Now, reopen the file and run the last test again.
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open -rdonly $testfile]
+ error_check_good ndbm_open2 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ error_check_good rdonly_true [$db rdonly] "rdonly:not owner"
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch2 $d $key
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and delete each entry
+ puts "\tNDBM.e: sequential scan and delete"
+
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open $testfile]
+ error_check_good ndbm_open3 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set ret [$db delete $key]
+ error_check_good ndbm_delete $ret 0
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+ error_check_good ndbm_close [$db close] 0
+}
diff --git a/storage/bdb/test/parallel.tcl b/storage/bdb/test/parallel.tcl
new file mode 100644
index 00000000000..4e101c088cb
--- /dev/null
+++ b/storage/bdb/test/parallel.tcl
@@ -0,0 +1,295 @@
+# Code to load up the tests in to the Queue database
+# $Id: parallel.tcl,v 11.28 2002/09/05 17:23:06 sandstro Exp $
+proc load_queue { file {dbdir RUNQUEUE} nitems } {
+
+ puts -nonewline "Loading run queue with $nitems items..."
+ flush stdout
+
+ set env [berkdb_env -create -lock -home $dbdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create -truncate \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set fid [open $file]
+
+ set count 0
+
+ while { [gets $fid str] != -1 } {
+ set testarr($count) $str
+ incr count
+ }
+
+ # Randomize array of tests.
+ set rseed [pid]
+ berkdb srand $rseed
+ puts -nonewline "randomizing..."
+ flush stdout
+ for { set i 0 } { $i < $count } { incr i } {
+ set j [berkdb random_int $i [expr $count - 1]]
+
+ set tmp $testarr($i)
+ set testarr($i) $testarr($j)
+ set testarr($j) $tmp
+ }
+
+ if { [string compare ALL $nitems] != 0 } {
+ set maxload $nitems
+ } else {
+ set maxload $count
+ }
+
+ puts "loading..."
+ flush stdout
+ for { set i 0 } { $i < $maxload } { incr i } {
+ set str $testarr($i)
+ set ret [eval {$db put -append $str} ]
+ error_check_good put:$db $ret [expr $i + 1]
+ }
+
+ puts "Loaded $maxload records (out of $count)."
+ close $fid
+ $db close
+ $env close
+}
+
+proc init_runqueue { {dbdir RUNQUEUE} nitems list} {
+
+ if { [file exists $dbdir] != 1 } {
+ file mkdir $dbdir
+ }
+ puts "Creating test list..."
+ $list -n
+ load_queue ALL.OUT $dbdir $nitems
+ file delete TEST.LIST
+ file rename ALL.OUT TEST.LIST
+# file delete ALL.OUT
+}
+
+proc run_parallel { nprocs {list run_all} {nitems ALL} } {
+ set basename ./PARALLEL_TESTDIR
+ set queuedir ./RUNQUEUE
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ init_runqueue $queuedir $nitems $list
+
+ set basedir [pwd]
+ set pidlist {}
+ set queuedir ../../[string range $basedir \
+ [string last "/" $basedir] end]/$queuedir
+
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ fileremove -f ALL.OUT.$i
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "source $test_path/test.tcl;\
+ run_queue $i $basename.$i $queuedir $nitems" &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 300 360000
+
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Regression tests failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Regression tests succeeded."
+ }
+}
+
+proc run_queue { i rundir queuedir nitems } {
+ set builddir [pwd]
+ file delete $builddir/ALL.OUT.$i
+ cd $rundir
+
+ puts "Parallel run_queue process $i (pid [pid]) starting."
+
+ source ./include.tcl
+ global env
+
+ set dbenv [berkdb_env -create -lock -home $queuedir]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open -env $dbenv \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set dbc [eval $db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ set waitcnt 0
+
+ while { $waitcnt < 5 } {
+ set line [$db get -consume]
+ if { [ llength $line ] > 0 } {
+ set cmd [lindex [lindex $line 0] 1]
+ set num [lindex [lindex $line 0] 0]
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nExecuting record $num ([timestamp -w]):\n"
+ set tdir "TESTDIR.$i"
+ regsub {TESTDIR} $cmd $tdir cmd
+ puts $o $cmd
+ close $o
+ if { [expr {$num % 10} == 0] } {
+ puts "Starting test $num of $nitems"
+ }
+ #puts "Process $i, record $num:\n$cmd"
+ set env(PURIFYOPTIONS) \
+ "-log-file=./test$num.%p -follow-child-processes -messages=first"
+ set env(PURECOVOPTIONS) \
+ "-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; $cmd" \
+ >>& $builddir/ALL.OUT.$i } res] {
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "FAIL: '$cmd': $res"
+ close $o
+ }
+ env_cleanup $testdir
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nEnding record $num ([timestamp])\n"
+ close $o
+ incr count
+ } else {
+ incr waitcnt
+ tclsleep 1
+ }
+ }
+
+ puts "Process $i: $count commands executed"
+
+ $dbc close
+ $db close
+ $dbenv close
+
+ #
+ # We need to put the pid file in the builddir's idea
+ # of testdir, not this child process' local testdir.
+ # Therefore source builddir's include.tcl to get its
+ # testdir.
+ # !!! This resets testdir, so don't do anything else
+ # local to the child after this.
+ source $builddir/include.tcl
+
+ set f [open $builddir/$testdir/end.[pid] w]
+ close $f
+}
+
+proc mkparalleldirs { nprocs basename queuedir } {
+ source ./include.tcl
+ set dir [pwd]
+
+ if { $is_windows_test != 1 } {
+ set EXE ""
+ } else {
+ set EXE ".exe"
+ }
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set destdir $basename.$i
+ catch {file mkdir $destdir}
+ puts "Created $destdir"
+ if { $is_windows_test == 1 } {
+ catch {file mkdir $destdir/Debug}
+ catch {eval file copy \
+ [eval glob {$dir/Debug/*.dll}] $destdir/Debug}
+ }
+ catch {eval file copy \
+ [eval glob {$dir/{.libs,include.tcl}}] $destdir}
+ # catch {eval file copy $dir/$queuedir $destdir}
+ catch {eval file copy \
+ [eval glob {$dir/db_{checkpoint,deadlock}$EXE} \
+ {$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
+ {$dir/db_{archive,verify}$EXE}] \
+ $destdir}
+
+ # Create modified copies of include.tcl in parallel
+ # directories so paths still work.
+
+ set infile [open ./include.tcl r]
+ set d [read $infile]
+ close $infile
+
+ regsub {test_path } $d {test_path ../} d
+ regsub {src_root } $d {src_root ../} d
+ set tdir "TESTDIR.$i"
+ regsub -all {TESTDIR} $d $tdir d
+ regsub {KILL \.} $d {KILL ..} d
+ set outfile [open $destdir/include.tcl w]
+ puts $outfile $d
+ close $outfile
+
+ global svc_list
+ foreach svc_exe $svc_list {
+ if { [file exists $dir/$svc_exe] } {
+ catch {eval file copy $dir/$svc_exe $destdir}
+ }
+ }
+ }
+}
+
+proc run_ptest { nprocs test args } {
+ global parms
+ set basename ./PARALLEL_TESTDIR
+ set queuedir NULL
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ if { [info exists parms($test)] } {
+ foreach method \
+ "hash queue queueext recno rbtree frecno rrecno btree" {
+ if { [eval exec_ptest $nprocs $basename \
+ $test $method $args] != 0 } {
+ break
+ }
+ }
+ } else {
+ eval exec_ptest $nprocs $basename $test $args
+ }
+}
+
+proc exec_ptest { nprocs basename test args } {
+ source ./include.tcl
+
+ set basedir [pwd]
+ set pidlist {}
+ puts "Running $nprocs parallel runs of $test"
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set outf ALL.OUT.$i
+ fileremove -f $outf
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "cd $basename.$i;\
+ source ../$test_path/test.tcl;\
+ $test $args" >& $outf &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 30 36000
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Test $test failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Test $test succeeded all processes"
+ return 0
+ } else {
+ puts "Test failed: stopping"
+ return 1
+ }
+}
diff --git a/storage/bdb/test/recd001.tcl b/storage/bdb/test/recd001.tcl
new file mode 100644
index 00000000000..bc7ac6d896a
--- /dev/null
+++ b/storage/bdb/test/recd001.tcl
@@ -0,0 +1,242 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd001.tcl,v 11.40 2002/05/08 19:36:18 sandstro Exp $
+#
+# TEST recd001
+# TEST Per-operation recovery tests for non-duplicate, non-split
+# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
+# TEST condition. Any test that appears with the message (change state)
+# TEST indicates that we've already run the particular test, but we are
+# TEST running it again so that we can change the state of the data base
+# TEST to prepare for the next test (this applies to all other recovery
+# TEST tests as well).
+# TEST
+# TEST These are the most basic recovery tests. We do individual recovery
+# TEST tests for each operation in the access method interface. First we
+# TEST create a file and capture the state of the database (i.e., we copy
+# TEST it. Then we run a transaction containing a single operation. In
+# TEST one test, we abort the transaction and compare the outcome to the
+# TEST original copy of the file. In the second test, we restore the
+# TEST original copy of the database and then run recovery and compare
+# TEST this against the actual database.
+proc recd001 { method {select 0} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd001: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+ # other does prepare, prepare-abort, and prepare-commit for each
+ # op. If all goes well, this allows each command to depend
+ # exactly one successful iteration of the previous command.
+ set testfile recd001.db
+ set testfile2 recd001-2.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd001.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd001.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd001.b: put"}
+ { {DB del -txn TXNID $key} "Recd001.c: delete"}
+ { {DB put -txn TXNID $bigkey $data} "Recd001.d: big key put"}
+ { {DB del -txn TXNID $bigkey} "Recd001.e: big key delete"}
+ { {DB put -txn TXNID $key $bigdata} "Recd001.f: big data put"}
+ { {DB del -txn TXNID $key} "Recd001.g: big data delete"}
+ { {DB put -txn TXNID $key $data} "Recd001.h: put (change state)"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.i: overwrite"}
+ { {DB put -txn TXNID -partial {$off $len} $key $partial_grow}
+ "Recd001.j: partial put growing"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.k: overwrite (fix)"}
+ { {DB put -txn TXNID -partial {$off $len} $key $partial_shrink}
+ "Recd001.l: partial put shrinking"}
+ { {DB put -txn TXNID -append $data} "Recd001.m: put -append"}
+ { {DB get -txn TXNID -consume} "Recd001.n: db get -consume"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd001_key
+ }
+ set data recd001_data
+ set newdata NEWrecd001_dataNEW
+ set off 3
+ set len 12
+
+ set partial_grow replacement_record_grow
+ set partial_shrink xxx
+ if { [is_fixed_length $method] == 1 } {
+ set len [string length $partial_grow]
+ set partial_shrink $partial_grow
+ }
+ set bigdata [replicate $key $fixed_len]
+ if { [is_record_based $method] == 1 } {
+ set bigkey $fixed_len
+ } else {
+ set bigkey [replicate $key $fixed_len]
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+ set fixed_len $orig_fixed_len
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Skipping remainder of test for fixed length methods"
+ return
+ }
+
+ #
+ # Check partial extensions. If we add a key/data to the database
+ # and then expand it using -partial, then recover, recovery was
+ # failing in #3944. Check that scenario here.
+ #
+ # !!!
+ # We loop here because on each iteration, we need to clean up
+ # the old env (i.e. this test does not depend on earlier runs).
+ # If we run it without cleaning up the env inbetween, we do not
+ # test the scenario of #3944.
+ #
+ set len [string length $data]
+ set len2 256
+ set part_data [replicate "abcdefgh" 32]
+ set p [list 0 $len]
+ set cmd [subst \
+ {DB put -txn TXNID -partial {$len $len2} $key $part_data}]
+ set msg "Recd001.o: partial put prepopulated/expanding"
+ foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
+ env_cleanup $testdir
+
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set t [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile2"
+ set db2 [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db2] TRUE
+
+ set ret [$db put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+
+ set ret [$db2 put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+ error_check_good txncommit [$t commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbclose [$db2 close] 0
+ error_check_good dbenvclose [$dbenv close] 0
+
+ op_recover $op $testdir $env_cmd $testfile $cmd $msg
+ }
+ return
+}
diff --git a/storage/bdb/test/recd002.tcl b/storage/bdb/test/recd002.tcl
new file mode 100644
index 00000000000..ed579291283
--- /dev/null
+++ b/storage/bdb/test/recd002.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd002.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
+#
+# TEST recd002
+# TEST Split recovery tests. For every known split log message, makes sure
+# TEST that we exercise redo, undo, and do-nothing condition.
+proc recd002 { method {select 0} args} {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd002: skipping for specific pagesizes"
+ return
+ }
+ berkdb srand $rand_init
+
+ # Queues don't do splits, so we don't really need the small page
+ # size and the small page size is smaller than the record, so it's
+ # a problem.
+ if { [string compare $omethod "-queue"] == 0 } {
+ set pagesize 4096
+ } else {
+ set pagesize 512
+ }
+ puts "Recd002: $method split recovery tests"
+
+ env_cleanup $testdir
+ set testfile recd002.db
+ set testfile2 recd002-2.db
+ set eflags \
+ "-create -txn -lock_max 2000 -home $testdir"
+
+ puts "\tRecd002.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that splits
+ # happen fairly quickly.
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {populate DB $omethod TXNID $n 0 0} "Recd002.b: splits"}
+ { {unpopulate DB TXNID $r} "Recd002.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ set r [expr $n / 2 ]
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd002.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/storage/bdb/test/recd003.tcl b/storage/bdb/test/recd003.tcl
new file mode 100644
index 00000000000..0fd054832ce
--- /dev/null
+++ b/storage/bdb/test/recd003.tcl
@@ -0,0 +1,119 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd003.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
+#
+# TEST recd003
+# TEST Duplicate recovery tests. For every known duplicate log message,
+# TEST makes sure that we exercise redo, undo, and do-nothing condition.
+# TEST
+# TEST Test all the duplicate log messages and recovery operations. We make
+# TEST sure that we exercise all possible recovery actions: redo, undo, undo
+# TEST but no fix necessary and redo but no fix necessary.
+proc recd003 { method {select 0} args } {
+ source ./include.tcl
+ global rand_init
+
+ set largs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd003 skipping for method $method"
+ return
+ }
+ puts "Recd003: $method duplicate recovery tests"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ # See comment in recd001.tcl for why there are two database files...
+ set testfile recd003.db
+ set testfile2 recd003-2.db
+ set eflags "-create -txn -home $testdir"
+
+ puts "\tRecd003.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases.
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+ set n 10
+ set dupn 2000
+ set bign 500
+
+ # List of recovery tests: {CMD MSG} pairs
+ set dlist {
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.b: add dups"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.c: remove dups all at once"}
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.d: add dups (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.e: remove dups 1 at a time"}
+ { {populate DB $omethod TXNID $dupn 1 0}
+ "Recd003.f: dup split"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.g: remove dups (change state)"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.h: add big dup"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.i: remove big dup all at once"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.j: add big dup (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.k: remove big dup 1 at a time"}
+ { {populate DB $omethod TXNID $bign 1 1}
+ "Recd003.l: split big dup"}
+ }
+
+ foreach pair $dlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd003.m: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/storage/bdb/test/recd004.tcl b/storage/bdb/test/recd004.tcl
new file mode 100644
index 00000000000..74504ac3cd7
--- /dev/null
+++ b/storage/bdb/test/recd004.tcl
@@ -0,0 +1,95 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd004.tcl,v 11.29 2002/02/25 16:44:25 sandstro Exp $
+#
+# TEST recd004
+# TEST Big key test where big key gets elevated to internal page.
+proc recd004 { method {select 0} args} {
+ source ./include.tcl
+ global rand_init
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd004: skipping for specific pagesizes"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ puts "Recd004 skipping for method $method"
+ return
+ }
+ puts "Recd004: $method big-key on internal page recovery tests"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ set testfile recd004.db
+ set testfile2 recd004-2.db
+ set eflags "-create -txn -home $testdir"
+ puts "\tRecd004.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that we
+ # elevate quickly
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {big_populate DB TXNID $n} "Recd004.b: big key elevation"}
+ { {unpopulate DB TXNID 0} "Recd004.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd004.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/storage/bdb/test/recd005.tcl b/storage/bdb/test/recd005.tcl
new file mode 100644
index 00000000000..7668c9e3be3
--- /dev/null
+++ b/storage/bdb/test/recd005.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd005.tcl,v 11.34 2002/05/22 15:42:39 sue Exp $
+#
+# TEST recd005
+# TEST Verify reuse of file ids works on catastrophic recovery.
+# TEST
+# TEST Make sure that we can do catastrophic recovery even if we open
+# TEST files using the same log file id.
+proc recd005 { method args} {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd005: $method catastrophic recovery"
+
+ berkdb srand $rand_init
+
+ set testfile1 recd005.1.db
+ set testfile2 recd005.2.db
+ set eflags \
+ "-create -txn -lock_max 2000 -lock_max_objects 2000 -home $testdir"
+
+ set tnum 0
+ foreach sizes "{1000 10} {10 1000}" {
+ foreach ops "{abort abort} {abort commit} {commit abort} \
+ {commit commit}" {
+ env_cleanup $testdir
+ incr tnum
+
+ set s1 [lindex $sizes 0]
+ set s2 [lindex $sizes 1]
+ set op1 [lindex $ops 0]
+ set op2 [lindex $ops 1]
+ puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
+
+ puts "\tRecd005.$tnum.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the two databases.
+ set oflags \
+ "-create -mode 0644 -env $dbenv $args $omethod"
+ set db1 [eval {berkdb_open} $oflags $testfile1]
+ error_check_bad db_open $db1 NULL
+ error_check_good db_open [is_substr $db1 db] 1
+ error_check_good db_close [$db1 close] 0
+
+ set db2 [eval {berkdb_open} $oflags $testfile2]
+ error_check_bad db_open $db2 NULL
+ error_check_good db_open [is_substr $db2 db] 1
+ error_check_good db_close [$db2 close] 0
+ $dbenv close
+
+ set dbenv [eval $env_cmd]
+ puts "\tRecd005.$tnum.b: Populating databases"
+ do_one_file \
+ $testdir $method $dbenv $env_cmd $testfile1 $s1 $op1
+ do_one_file \
+ $testdir $method $dbenv $env_cmd $testfile2 $s2 $op2
+
+ puts "\tRecd005.$tnum.c: Verifying initial population"
+ check_file $testdir $env_cmd $testfile1 $op1
+ check_file $testdir $env_cmd $testfile2 $op2
+
+ # Now, close the environment (so that recovery will work
+ # on NT which won't allow delete of an open file).
+ reset_env $dbenv
+
+ berkdb debug_check
+ puts -nonewline \
+ "\tRecd005.$tnum.d: About to run recovery ... "
+ flush stdout
+
+ set stat [catch \
+ {exec $util_path/db_recover -h $testdir -c} \
+ result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ # Substitute a file that will need recovery and try
+ # running recovery again.
+ if { $op1 == "abort" } {
+ file copy -force $testdir/$testfile1.afterop \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile1.init \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 init copy
+ }
+ if { $op2 == "abort" } {
+ file copy -force $testdir/$testfile2.afterop \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile2.init \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 init copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\tRecd005.$tnum.e:\
+ About to run recovery on pre-op database ... "
+ flush stdout
+
+ set stat \
+ [catch {exec $util_path/db_recover \
+ -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ set dbenv [eval $env_cmd]
+ check_file $testdir $env_cmd $testfile1 $op1
+ check_file $testdir $env_cmd $testfile2 $op2
+ reset_env $dbenv
+
+ puts "\tRecd005.$tnum.f:\
+ Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch \
+ {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+ }
+ }
+}
+
+proc do_one_file { dir method env env_cmd filename num op } {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ # Save the initial file and open the environment and the first file
+ file copy -force $dir/$filename $dir/$filename.init
+ copy_extent_file $dir $filename init
+ set oflags "-auto_commit -unknown -env $env"
+ set db [eval {berkdb_open} $oflags $filename]
+
+ # Dump out file contents for initial case
+ open_and_dump_file $filename $env $init_file nop \
+ dump_file_direction "-first" "-next"
+
+ set txn [$env txn]
+ error_check_bad txn_begin $txn NULL
+ error_check_good txn_begin [is_substr $txn $env] 1
+
+ # Now fill in the db and the txnid in the command
+ populate $db $method $txn $num 0 0
+
+ # Sync the file so that we can capture a snapshot to test
+ # recovery.
+ error_check_good sync:$db [$db sync] 0
+ file copy -force $dir/$filename $dir/$filename.afterop
+ copy_extent_file $dir $filename afterop
+ open_and_dump_file $testdir/$filename.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next"
+ error_check_good txn_$op:$txn [$txn $op] 0
+
+ if { $op == "commit" } {
+ puts "\t\tFile $filename executed and committed."
+ } else {
+ puts "\t\tFile $filename executed and aborted."
+ }
+
+ # Dump out file and save a copy.
+ error_check_good sync:$db [$db sync] 0
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ file copy -force $dir/$filename $dir/$filename.final
+ copy_extent_file $dir $filename final
+
+ # If this is an abort, it should match the original file.
+ # If this was a commit, then this file should match the
+ # afterop file.
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+
+ error_check_good close:$db [$db close] 0
+}
+
+proc check_file { dir env_cmd filename op } {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+}
diff --git a/storage/bdb/test/recd006.tcl b/storage/bdb/test/recd006.tcl
new file mode 100644
index 00000000000..fc35e755b08
--- /dev/null
+++ b/storage/bdb/test/recd006.tcl
@@ -0,0 +1,262 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd006.tcl,v 11.26 2002/03/15 16:30:53 sue Exp $
+#
+# TEST recd006
+# TEST Nested transactions.
+proc recd006 { method {select 0} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd006 skipping for method $method"
+ return
+ }
+ puts "Recd006: $method nested transactions"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd006.db
+ set testfile $testdir/$dbfile
+
+ puts "\tRecd006.a: create database"
+ set oflags "-create $args $omethod $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [string compare $omethod "-recno"] == 0 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set ret [$db put -nooverwrite $key $str]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+
+ # Variables used below:
+ # p1: a pair of keys that are likely to be on the same page.
+ # p2: a pair of keys that are likely to be on the same page,
+ # but on a page different than those in p1.
+ set dbc [$db cursor]
+ error_check_good dbc [is_substr $dbc $db] 1
+
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:DB_FIRST [llength $ret] 0
+ set p1 [lindex [lindex $ret 0] 0]
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:DB_NEXT [llength $ret] 0
+ lappend p1 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -last]
+ error_check_bad dbc_get:DB_LAST [llength $ret] 0
+ set p2 [lindex [lindex $ret 0] 0]
+ set kvals($p2) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:DB_PREV [llength $ret] 0
+ lappend p2 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ # Now create the full transaction environment.
+ set eflags "-create -txn -home $testdir"
+
+ puts "\tRecd006.b: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Reset the environment.
+ reset_env $dbenv
+
+ set p1 [list $p1]
+ set p2 [list $p2]
+
+ # List of recovery tests: {CMD MSG} pairs
+ set rlist {
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit commit}
+ "Recd006.c: children (commit commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit commit}
+ "Recd006.d: children (commit commit)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit abort}
+ "Recd006.e: children (commit abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit abort}
+ "Recd006.f: children (commit abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort abort}
+ "Recd006.g: children (abort abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort abort}
+ "Recd006.h: children (abort abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort commit}
+ "Recd006.i: children (abort commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort commit}
+ "Recd006.j: children (abort commit)"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg
+ }
+
+ puts "\tRecd006.k: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+# Do the nested transaction test.
+# We want to make sure that children inherit properly from their
+# parents, that locks are properly handed back to parents, and
+# that the right thing happens on commit/abort.
+# In particular:
+# Write lock on parent, properly acquired by child.
+# Committed operation on child gives lock to parent so that
+# other child can also get the lock.
+# Aborted op by child releases lock so other child can get it.
+# Correct database state if child commits
+# Correct database state if child aborts
+proc nesttest { db parent env do p1 p2 child1 child2} {
+ global kvals
+ source ./include.tcl
+
+ if { $do == 1 } {
+ set func toupper
+ } else {
+ set func tolower
+ }
+
+ # Do an RMW on the parent to get a write lock.
+ set p10 [lindex $p1 0]
+ set p11 [lindex $p1 1]
+ set p20 [lindex $p2 0]
+ set p21 [lindex $p2 1]
+
+ set ret [$db get -rmw -txn $parent $p10]
+ set res $ret
+ set Dret [lindex [lindex $ret 0] 1]
+ if { [string compare $Dret $kvals($p10)] == 0 ||
+ [string compare $Dret [string toupper $kvals($p10)]] == 0 } {
+ set val 0
+ } else {
+ set val $Dret
+ }
+ error_check_good get_parent_RMW $val 0
+
+ # OK, do child 1
+ set kid1 [$env txn -parent $parent]
+ error_check_good kid1 [is_valid_txn $kid1 $env] TRUE
+
+ # Reading write-locked parent object should be OK
+ #puts "\tRead write-locked parent object for kid1."
+ set ret [$db get -txn $kid1 $p10]
+ error_check_good kid1_get10 $ret $res
+
+ # Now update this child
+ set data [lindex [lindex [string $func $ret] 0] 1]
+ set ret [$db put -txn $kid1 $p10 $data]
+ error_check_good kid1_put10 $ret 0
+
+ #puts "\tKid1 successful put."
+
+ # Now start child2
+ #puts "\tBegin txn for kid2."
+ set kid2 [$env txn -parent $parent]
+ error_check_good kid2 [is_valid_txn $kid2 $env] TRUE
+
+ # Getting anything in the p1 set should deadlock, so let's
+ # work on the p2 set.
+ set data [string $func $kvals($p20)]
+ #puts "\tPut data for kid2."
+ set ret [$db put -txn $kid2 $p20 $data]
+ error_check_good kid2_put20 $ret 0
+
+ #puts "\tKid2 data put successful."
+
+ # Now let's do the right thing to kid1
+ puts -nonewline "\tKid1 $child1..."
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good kid1_commit [$kid1 commit] 0
+ } else {
+ error_check_good kid1_abort [$kid1 abort] 0
+ }
+ puts "complete"
+
+ # In either case, child2 should now be able to get the
+ # lock, either because it is inherited by the parent
+ # (commit) or because it was released (abort).
+ set data [string $func $kvals($p11)]
+ set ret [$db put -txn $kid2 $p11 $data]
+ error_check_good kid2_put11 $ret 0
+
+ # Now let's do the right thing to kid2
+ puts -nonewline "\tKid2 $child2..."
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good kid2_commit [$kid2 commit] 0
+ } else {
+ error_check_good kid2_abort [$kid2 abort] 0
+ }
+ puts "complete"
+
+ # Now, let parent check that the right things happened.
+ # First get all four values
+ set p10_check [lindex [lindex [$db get -txn $parent $p10] 0] 0]
+ set p11_check [lindex [lindex [$db get -txn $parent $p11] 0] 0]
+ set p20_check [lindex [lindex [$db get -txn $parent $p20] 0] 0]
+ set p21_check [lindex [lindex [$db get -txn $parent $p21] 0] 0]
+
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good parent_kid1 $p10_check \
+ [string tolower [string $func $kvals($p10)]]
+ } else {
+ error_check_good \
+ parent_kid1 $p10_check [string tolower $kvals($p10)]
+ }
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good parent_kid2 $p11_check \
+ [string tolower [string $func $kvals($p11)]]
+ error_check_good parent_kid2 $p20_check \
+ [string tolower [string $func $kvals($p20)]]
+ } else {
+ error_check_good parent_kid2 $p11_check $kvals($p11)
+ error_check_good parent_kid2 $p20_check $kvals($p20)
+ }
+
+	# Now do a write on the parent for p21, whose lock the parent
+	# should either already hold or be able to acquire.
+ set ret [$db put -txn $parent $p21 [string $func $kvals($p21)]]
+ error_check_good parent_put21 $ret 0
+
+ return 0
+}
diff --git a/storage/bdb/test/recd007.tcl b/storage/bdb/test/recd007.tcl
new file mode 100644
index 00000000000..aeac3bea2c1
--- /dev/null
+++ b/storage/bdb/test/recd007.tcl
@@ -0,0 +1,886 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd007.tcl,v 11.60 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST recd007
+# TEST File create/delete tests.
+# TEST
+# TEST This is a recovery test for create/delete of databases. We have
+# TEST hooks in the database so that we can abort the process at various
+# TEST points and make sure that the transaction doesn't commit. We
+# TEST then need to recover and make sure the file is correctly existing
+# TEST or not, as the case may be.
+proc recd007 { method args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd007: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd007.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd007.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+
+ set env [eval $env_cmd]
+
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 -env $env $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $env $testfile] 0
+ error_check_good envclose [$env close] 0
+
+ # Convert the args again because fixed_len is now real.
+ set opts [convert_args $method ""]
+
+ # List of recovery tests: {HOOKS MSG} pairs
+ # Where each HOOK is a list of {COPY ABORT}
+ #
+ set rlist {
+ { {"none" "preopen"} "Recd007.b0: none/preopen"}
+ { {"none" "postopen"} "Recd007.b1: none/postopen"}
+ { {"none" "postlogmeta"} "Recd007.b2: none/postlogmeta"}
+ { {"none" "postlog"} "Recd007.b3: none/postlog"}
+ { {"none" "postsync"} "Recd007.b4: none/postsync"}
+ { {"postopen" "none"} "Recd007.c0: postopen/none"}
+ { {"postlogmeta" "none"} "Recd007.c1: postlogmeta/none"}
+ { {"postlog" "none"} "Recd007.c2: postlog/none"}
+ { {"postsync" "none"} "Recd007.c3: postsync/none"}
+ { {"postopen" "postopen"} "Recd007.d: postopen/postopen"}
+ { {"postopen" "postlogmeta"} "Recd007.e: postopen/postlogmeta"}
+ { {"postopen" "postlog"} "Recd007.f: postopen/postlog"}
+ { {"postlog" "postlog"} "Recd007.g: postlog/postlog"}
+ { {"postlogmeta" "postlogmeta"} "Recd007.h: postlogmeta/postlogmeta"}
+ { {"postlogmeta" "postlog"} "Recd007.i: postlogmeta/postlog"}
+ { {"postlog" "postsync"} "Recd007.j: postlog/postsync"}
+ { {"postsync" "postsync"} "Recd007.k: postsync/postsync"}
+ }
+
+	# Run the recovery tests: drive file creation through each
+	# copy/abort hook combination listed above.
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ file_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg
+ }
+
+ set rlist {
+ { {"none" "predestroy"} "Recd007.l0: none/predestroy"}
+ { {"none" "postdestroy"} "Recd007.l1: none/postdestroy"}
+ { {"predestroy" "none"} "Recd007.m0: predestroy/none"}
+ { {"postdestroy" "none"} "Recd007.m1: postdestroy/none"}
+ { {"predestroy" "predestroy"} "Recd007.n: predestroy/predestroy"}
+ { {"predestroy" "postdestroy"} "Recd007.o: predestroy/postdestroy"}
+ { {"postdestroy" "postdestroy"} "Recd007.p: postdestroy/postdestroy"}
+ }
+ foreach op { dbremove dbrename dbtruncate } {
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ file_recover_delete $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg $op
+ }
+ }
+
+ if { $is_windows_test != 1 } {
+ set env_cmd "berkdb_env_noerr $flags"
+ do_file_recover_delmk $testdir $env_cmd $method $opts $testfile
+ }
+
+ puts "\tRecd007.r: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc file_recover_create { dir env_cmd method opts dbfile cmd msg } {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Creating just a database
+ # 2. Creating a database with a subdb
+ # 3. Creating a 2nd subdb in a database
+ puts "\t$msg create with a database"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg
+ if { [is_queue $method] == 1 } {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg create with a database and subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg
+ puts "\t$msg create with a database and 2nd subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg
+
+}
+
+proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ set dflags "-dar"
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_create_loc $copy] 1
+ error_check_good abort_location [is_valid_create_loc $abort] 1
+
+ if {([string first "logmeta" $copy] != -1 || \
+ [string first "logmeta" $abort] != -1) && \
+ [is_btree $method] == 0 } {
+ puts "\tSkipping for method $method"
+ $env test copy none
+ $env test abort none
+ error_check_good env_close [$env close] 0
+ return
+ }
+
+ # Basically non-existence is our initial state. When we
+ # abort, it is also our final state.
+ #
+ switch $sub {
+ 0 {
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile"
+ }
+ 1 {
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second
+ #
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub1"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+ #
+ # Set our locations to copy and abort
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ puts "\t\tExecuting command"
+ set ret [catch {eval {berkdb_open} $oflags} db]
+
+ # Sync the mpool so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ $env mpool_sync
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file created.
+ #
+ if {[string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and aborted."
+ error_check_bad db_open ret 0
+
+ #
+ # Check that the file does not exist. Final state.
+ #
+ if { $sub != 2 } {
+ error_check_good db_open:exists \
+ [file exists $dir/$dbfile] 0
+ } else {
+ error_check_good \
+ diff(init,postcreate):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ #
+ # Check that the file exists.
+ #
+ error_check_good db_open [file exists $dir/$dbfile] 1
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 } {
+ copy_extent_file $dir $dbfile init
+ }
+ }
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on sub)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover1 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 } {
+ copy_extent_file $dir $dbfile init
+ }
+ }
+
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ copy_afterop $dir
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover2 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+
+}
+
+# file_recover_delete --
+#	Driver for the dbremove/dbrename/dbtruncate recovery tests.
+#	Runs do_file_recover_delete over three layouts (plain db,
+#	db + one subdb, db + two subdbs); queue has no subdb support,
+#	so the subdb variants are skipped for it.
+proc file_recover_delete { dir env_cmd method opts dbfile cmd msg op } {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Deleting/Renaming just a database
+ # 2. Deleting/Renaming a database with a subdb
+ # 3. Deleting/Renaming a 2nd subdb in a database
+ puts "\t$msg $op with a database"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg $op
+ if { [is_queue $method] == 1 } {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg $op with a database and subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg $op
+ puts "\t$msg $op with a database and 2nd subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg $op
+
+}
+
+# do_file_recover_delete --
+#	Worker for file_recover_delete.  Primes a database (and optional
+#	subdbs, per 'sub': 0=no subdb, 1=one subdb, 2=second subdb),
+#	performs a dbremove/dbrename/dbtruncate with test-hook copy/abort
+#	locations installed, then runs catastrophic recovery twice and
+#	verifies the file either reflects the committed operation or is
+#	unchanged after an abort (compared via dbdump_diff).
+proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ # cmd is a two-element list: {copy-location abort-location}.
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_delete_loc $copy] 1
+ error_check_good abort_location [is_valid_delete_loc $abort] 1
+
+ if { [is_record_based $method] == 1 } {
+ set key1 1
+ set key2 2
+ } else {
+ set key1 recd007_key1
+ set key2 recd007_key2
+ }
+ set data1 recd007_data0
+ set data2 recd007_data1
+ set data3 NEWrecd007_data2
+
+ #
+ # Depending on what sort of subdb we want, if any, our
+ # args to the open call will be different (and if we
+ # want a 2nd subdb, we create the first here).
+ #
+ # XXX
+ # For dbtruncate, we want oflags to have "$env" in it,
+ # not have the value currently in 'env'. That is why
+ # the '$' is protected below. Later on we use oflags
+ # but with a new $env we just opened.
+ #
+ switch $sub {
+ 0 {
+ set subdb ""
+ set new $dbfile.new
+ set dflags "-dar"
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile"
+ }
+ 1 {
+ set subdb sub0
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second
+ #
+ set subdb sub1
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+
+ #
+ # Set our locations to copy and abort
+ # (internal BDB test hooks fire at the named spot).
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ set ret [$db put -txn $txn $key2 $data2]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ $env mpool_sync
+
+ # Snapshot the primed database; later diffs compare against it.
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1} {
+ copy_extent_file $dir $dbfile init
+ }
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed.
+ #
+ switch $op {
+ "dbrename" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb $new } remret]
+ }
+ "dbremove" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb } remret]
+ }
+ "dbtruncate" {
+ set txn [$env txn]
+ set db [eval {berkdb_open_noerr -env} \
+ $env -auto_commit $dbfile $subdb]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+ set ret [catch {$db truncate -txn $txn} remret]
+ }
+ }
+ $env mpool_sync
+ if { $abort == "none" } {
+ if { $op == "dbtruncate" } {
+ error_check_good txncommit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ }
+ #
+ # Operation was committed, verify it.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good $op $ret 0
+ #
+ # If a dbtruncate, check that truncate returned the number
+ # of items previously in the database.
+ #
+ if { [string compare $op "dbtruncate"] == 0 } {
+ error_check_good remret $remret 2
+ }
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ if { $op == "dbtruncate" } {
+ error_check_good txnabort [$txn abort] 0
+ error_check_good dbclose [$db close] 0
+ }
+ puts "\t\tCommand executed and aborted."
+ # The test-hook abort makes the berkdb call fail: catch ret is 1.
+ error_check_good $op $ret 1
+
+ #
+ # Check that the file exists. Final state.
+ # Compare against initial file.
+ #
+ error_check_good post$op.1 [file exists $dir/$dbfile] 1
+ error_check_good \
+ diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+ $env mpool_sync
+ error_check_good env_close [$env close] 0
+ # Re-snapshot so post-recovery diffs see the current LSNs.
+ catch { file copy -force $dir/$dbfile $init_file } res
+ if { [is_queue $method] == 1} {
+ copy_extent_file $dir $dbfile init
+ }
+
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on abort)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+
+ puts "complete"
+
+ if { $abort == "none" } {
+ #
+ # Operation was committed.
+ #
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ berkdb debug_check
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+
+ #
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now restore the .afterop file(s) to their original name.
+ # Run recovery again.
+ #
+ copy_afterop $dir
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ if { [string first "none" $abort] != -1} {
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+
+}
+
+#
+# This function tests a specific case of recovering after a db removal.
+# This is for SR #2538. Basically we want to test that:
+# - Make an env.
+# - Make/close a db.
+# - Remove the db.
+# - Create another db of same name.
+# - Sync db but leave open.
+# - Run recovery.
+# - Verify no recovery errors and that new db is there.
+# do_file_recover_delmk --
+#	Regression test for SR #2538: remove a database, recreate one of
+#	the same name, sync it but leave it open, then run catastrophic
+#	recovery.  Recovery must succeed, the recreated db must survive,
+#	and the stale open handles must fail with "recovery" errors.
+proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+ set omethod [convert_method $method]
+
+ puts "\tRecd007.q: Delete and recreate a database"
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd007_key
+ }
+ set data1 recd007_data
+ set data2 NEWrecd007_data2
+
+ set oflags \
+ "-create $omethod -auto_commit -mode 0644 $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ set ret \
+ [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]
+
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good dbremove $ret 0
+ error_check_good dbremove.1 [file exists $dir/$dbfile] 0
+
+ #
+ # Now create a new db with the same name.
+ # Sync it (flush pages) but deliberately leave it open.
+ #
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key [chop_data $method $data2]]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ error_check_good db_recover $stat 0
+ error_check_good db_recover.1 [file exists $dir/$dbfile] 1
+ #
+ # Since we ran recovery on the open db/env, we need to
+ # catch these calls. Basically they are there to clean
+ # up the Tcl widgets.
+ #
+ set stat [catch {$db close} ret]
+ error_check_bad dbclose_after_remove $stat 0
+ error_check_good dbclose_after_remove [is_substr $ret recovery] 1
+ set stat [catch {$env close} ret]
+ error_check_bad envclose_after_remove $stat 0
+ error_check_good envclose_after_remove [is_substr $ret recovery] 1
+
+ #
+ # Reopen env and db and verify 2nd database is there.
+ #
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set db [eval {berkdb_open} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good dbget [llength $ret] 1
+ set kd [lindex $ret 0]
+ error_check_good key [lindex $kd 0] $key
+ error_check_good data2 [lindex $kd 1] [pad_data $method $data2]
+
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+}
+
+# is_valid_create_loc --
+#	Return 1 if loc is a recognized create test-hook location,
+#	0 otherwise.
+proc is_valid_create_loc { loc } {
+ set valid {none preopen postopen postlogmeta postlog postsync}
+ if { [lsearch -exact $valid $loc] == -1 } {
+ return 0
+ }
+ return 1
+}
+
+# is_valid_delete_loc --
+#	Return 1 if loc is a recognized delete test-hook location,
+#	0 otherwise.
+proc is_valid_delete_loc { loc } {
+ set valid {none predestroy postdestroy postremcall}
+ if { [lsearch -exact $valid $loc] == -1 } {
+ return 0
+ }
+ return 1
+}
+
+# Do a logical diff on the db dump files. We expect that either
+# the files are identical, or if they differ, that it is exactly
+# just a free/invalid page.
+# Return 1 if they are different, 0 if logically the same (or identical).
+#
+proc dbdump_diff { flags initfile dir dbfile } {
+ source ./include.tcl
+
+ set initdump $initfile.dump
+ set dbdump $dbfile.dump
+
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
+ $initfile} ret]
+ error_check_good dbdump.init $stat 0
+
+ # Do a dump without the freelist which should eliminate any
+ # recovery differences.
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
+ $dir/$dbfile} ret]
+ error_check_good dbdump.db $stat 0
+
+ set stat [filecmp $dir/$dbdump $initdump]
+
+ if {$stat == 0} {
+ return 0
+ }
+ puts "diff: $dbdump $initdump gives:\n$ret"
+ return 1
+}
+
+# recd007_check --
+#	Verify the post-operation state of the database file after a
+#	committed dbremove/dbrename/dbtruncate.  For sub == 0 we check
+#	file existence (or emptiness, for truncate); for subdb layouts
+#	we walk the master db to check that all but the last subdb are
+#	intact and the last one was correctly removed/renamed/truncated.
+proc recd007_check { op sub dir dbfile subdb new env oflags } {
+ #
+ # No matter how many subdbs we have, dbtruncate will always
+ # have a file, and if we open our particular db, it should
+ # have no entries.
+ #
+ if { $sub == 0 } {
+ if { $op == "dbremove" } {
+ error_check_good $op:not-exist \
+ [file exists $dir/$dbfile] 0
+ } elseif { $op == "dbrename"} {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 0
+ error_check_good $op:exist2 \
+ [file exists $dir/$dbfile.new] 1
+ } else {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 1
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget1 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ return
+ } else {
+ set t1 $dir/t1
+ #
+ # If we have subdbs, check that all but the last one
+ # are there, and the last one is correctly operated on.
+ #
+ set db [berkdb_open -rdonly -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set c [eval {$db cursor}]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ set d [$c get -last]
+ if { $op == "dbremove" } {
+ if { $sub == 1 } {
+ error_check_good subdb:rem [llength $d] 0
+ } else {
+ error_check_bad subdb:rem [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_bad subdb:rem1 $sdb $subdb
+ }
+ } elseif { $op == "dbrename"} {
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren $sdb $new
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren1 \
+ [is_substr "new" $sdb] 0
+ }
+ } else {
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$dbt cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $dbt] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget2 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env \
+ $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ # Bug fix: the cursor must be opened on the
+ # subdb handle ($dbt) we just opened, not on
+ # the master handle ($db), or we check the
+ # wrong database for remaining data.
+ set dbc [$dbt cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $dbt] TRUE
+ set ret [$dbc get -first]
+ error_check_bad dbget3 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ }
+ }
+ error_check_good dbcclose [$c close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
+
+# copy_afterop --
+#	Restore every *.afterop snapshot in dir over its original
+#	file name (strip the .afterop extension).  A no-op when no
+#	snapshots exist.
+proc copy_afterop { dir } {
+ if { [catch { glob $dir/*.afterop } filecopy] != 0 } {
+ return
+ }
+ foreach f $filecopy {
+ catch { file rename -force $f [file rootname $f] } res
+ }
+}
diff --git a/storage/bdb/test/recd008.tcl b/storage/bdb/test/recd008.tcl
new file mode 100644
index 00000000000..548813a403b
--- /dev/null
+++ b/storage/bdb/test/recd008.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd008.tcl,v 1.26 2002/02/25 16:44:26 sandstro Exp $
+#
+# TEST recd008
+# TEST Test deeply nested transactions and many-child transactions.
+# recd008 --
+#	Test deeply nested transactions and many-child transactions:
+#	primes a db, sizes txn_max to breadth^depth, then drives
+#	recd008_parent through op_recover for both abort and commit.
+proc recd008 { method {breadth 4} {depth 4} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Recd008 skipping for method $method"
+ return
+ }
+ puts "Recd008: $method $breadth X $depth deeply nested transactions"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd008.db
+
+ puts "\tRecd008.a: create database"
+ set db [eval {berkdb_open -create} $args $omethod $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [string compare $omethod "-recno"] == 0 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ # Remember a key from the middle of the db; the nested
+ # transactions will all operate on this one item.
+ if { $count == 500} {
+ set p1 $key
+ set kvals($p1) $str
+ }
+ set ret [$db put $key $str]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+
+ # Worst case: one txn per node of the breadth^depth tree.
+ set txn_max [expr int([expr pow($breadth,$depth)])]
+ if { $txn_max < 20 } {
+ set txn_max 20
+ }
+ puts "\tRecd008.b: create environment for $txn_max transactions"
+
+ set eflags "-mode 0644 -create -txn_max $txn_max \
+ -txn -home $testdir"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ reset_env $dbenv
+
+ set rlist {
+ { {recd008_parent abort ENV DB $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.c: child abort parent" }
+ { {recd008_parent commit ENV DB $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.d: child commit parent" }
+ }
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg
+ recd008_setkval $dbfile $p1
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg
+ recd008_setkval $dbfile $p1
+ }
+
+ puts "\tRecd008.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+# recd008_setkval --
+#	Refresh the cached value kvals(p1) from the on-disk database,
+#	so the next op_recover run starts from the recovered state.
+proc recd008_setkval { dbfile p1 } {
+ global kvals
+ source ./include.tcl
+
+ set db [berkdb_open $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get $p1]
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+ # Bug fix: close the handle -- the original leaked one open
+ # DB handle per rlist iteration.
+ error_check_good dbclose [$db close] 0
+}
+
+# This is a lot like the op_recover procedure. We cannot use that
+# because it was not meant to be called recursively. This proc
+# knows about depth/breadth and file naming so that recursive calls
+# don't overwrite various initial and afterop files, etc.
+#
+# The basic flow of this is:
+# (Initial file)
+# Parent begin transaction (in op_recover)
+# Parent starts children
+# Recursively call recd008_recover
+# (children modify p1)
+# Parent modifies p1
+# (Afterop file)
+# Parent commit/abort (in op_recover)
+# (Final file)
+# Recovery test (in op_recover)
+# recd008_parent --
+#	Top of the nested-transaction tree (called from op_recover via
+#	the rlist command).  Reads p1key with RMW under the parent txn,
+#	spawns the recursive child txns, then writes its own value and
+#	records the expected final value in kvals for the given op.
+proc recd008_parent { op env db p1key parent b0 d0 breadth depth } {
+ global kvals
+ source ./include.tcl
+
+ #
+ # Save copy of original data
+ # Acquire lock on data
+ #
+ set olddata $kvals($p1key)
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Parent spawns off children
+ #
+ set ret [recd008_txn $op $env $db $p1key $parent \
+ $b0 $d0 $breadth $depth]
+
+ puts "Child runs complete. Parent modifies data."
+
+ #
+ # Parent modifies p1
+ #
+ set newdata $olddata.parent
+ set ret [$db put -txn $parent $p1key $newdata]
+ error_check_good db_put $ret 0
+
+ #
+ # Save value in kvals for later comparison
+ # (op_recover commits or aborts the parent after we return).
+ #
+ switch $op {
+ "commit" {
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ set kvals($p1key) $olddata
+ }
+ }
+ return 0
+}
+
+# recd008_txn --
+#	Recursive worker: for each sibling b0..breadth, begin a child
+#	txn of 'parent', recurse one level deeper (committing and
+#	aborting a subtree each), modify p1key, then commit or abort
+#	the child per 'op' and verify the parent's view of the data.
+proc recd008_txn { op env db p1key parent b0 d0 breadth depth } {
+ global log_log_record_types
+ global kvals
+ source ./include.tcl
+
+ # Indent output one tab per nesting level.
+ for {set d 1} {$d < $d0} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Recd008_txn: $op parent:$parent $breadth $depth ($b0 $d0)"
+
+ # Save the initial file and open the environment and the file
+ for {set b $b0} {$b <= $breadth} {incr b} {
+ #
+ # Begin child transaction
+ #
+ set t [$env txn -parent $parent]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+ set startd [expr $d0 + 1]
+ set child $b:$startd:$t
+ set olddata $kvals($p1key)
+ set newdata $olddata.$child
+ set ret [$db get -rmw -txn $t $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Recursively call to set up nested transactions/children
+ #
+ for {set d $startd} {$d <= $depth} {incr d} {
+ set ret [recd008_txn commit $env $db $p1key $t \
+ $b $d $breadth $depth]
+ set ret [recd008_txn abort $env $db $p1key $t \
+ $b $d $breadth $depth]
+ }
+ #
+ # Modifies p1.
+ #
+ set ret [$db put -txn $t $p1key $newdata]
+ error_check_good db_put $ret 0
+
+ #
+ # Commit or abort
+ #
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Executing txn_$op:$t"
+ error_check_good txn_$op:$t [$t $op] 0
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ # Re-read under the parent: a committed child's write must be
+ # visible to the parent, an aborted child's must not.
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ switch $op {
+ "commit" {
+ puts "Command executed and committed."
+ error_check_good get_parent_RMW $Dret $newdata
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ puts "Command executed and aborted."
+ error_check_good get_parent_RMW $Dret $olddata
+ set kvals($p1key) $olddata
+ }
+ }
+ }
+ return 0
+}
diff --git a/storage/bdb/test/recd009.tcl b/storage/bdb/test/recd009.tcl
new file mode 100644
index 00000000000..5538d2d7652
--- /dev/null
+++ b/storage/bdb/test/recd009.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd009.tcl,v 1.18 2002/04/01 20:11:44 krinsky Exp $
+#
+# TEST recd009
+# TEST Verify record numbering across split/reverse splits and recovery.
+# recd009 --
+#	Verify record numbering across splits/reverse splits and
+#	recovery (record-number-capable methods only: rbtree, rrecno).
+#	Primes a small db, then forces a split (add mkeys) and a
+#	reverse split (delete back down) under op_recover, checking
+#	record numbers and page counts after each abort/commit.
+proc recd009 { method {select 0} args} {
+ global fixed_len
+ source ./include.tcl
+
+ if { [is_rbtree $method] != 1 && [is_rrecno $method] != 1} {
+ puts "Recd009 skipping for method $method."
+ return
+ }
+
+ set opts [convert_args $method $args]
+ set method [convert_method $method]
+
+ puts "\tRecd009: Test record numbers across splits and recovery"
+
+ set testfile recd009.db
+ env_cleanup $testdir
+ set mkeys 1000
+ set nkeys 5
+ set data "data"
+
+ puts "\tRecd009.a: Create $method environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -pagesize 8192 -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd009.b: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { [is_recno $method] == 1 } {
+ set key $i
+ } else {
+ set key key000$i
+ }
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ set newnkeys [expr $nkeys + 1]
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {recd009_split DB TXNID 1 $method $newnkeys $mkeys}
+ "Recd009.c: split"}
+ { {recd009_split DB TXNID 0 $method $newnkeys $mkeys}
+ "Recd009.d: reverse split"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ # 'select' optionally restricts the run to the listed
+ # single-letter test tags (c, d).
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ # Expected key/page counts depend on whether the op being
+ # recovered is the split (add) or the reverse split (delete).
+ set reverse [string first "reverse" $msg]
+ if { $reverse == -1 } {
+ set abortkeys $nkeys
+ set commitkeys $mkeys
+ set abortpg 0
+ set commitpg 1
+ } else {
+ set abortkeys $mkeys
+ set commitkeys $nkeys
+ set abortpg 1
+ set commitpg 0
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ recd009_recnocheck $testdir $testfile $opts $abortkeys $abortpg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ recd009_recnocheck $testdir $testfile $opts \
+ $commitkeys $commitpg
+ }
+ puts "\tRecd009.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database has only numkeys number
+# of keys and that they are in order.
+#
+#
+# This procedure verifies that the database has only numkeys number
+# of keys and that they are in order.
+#
+proc recd009_recnocheck { tdir testfile opts numkeys numpg} {
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRecd009_recnocheck: Verify page count of $numpg on split."
+ # numpg == 1 means the tree must have internal pages (a split
+ # happened); numpg == 0 means it must not.
+ set stat [$db stat]
+ error_check_bad stat:check-split [is_substr $stat \
+ "{{Internal pages} 0}"] $numpg
+
+ set type [$db get_type]
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ set i 1
+ puts "\tRecd009_recnocheck: Checking $numkeys record numbers."
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ # For btree (with record numbers) ask the cursor; for
+ # recno the key itself is the record number.
+ if { [is_btree $type] } {
+ set thisi [$dbc get -get_recno]
+ } else {
+ set thisi [lindex [lindex $d 0] 0]
+ }
+ error_check_good recno_check $i $thisi
+ error_check_good record_count [expr $i <= $numkeys] 1
+ incr i
+ }
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
+
+# recd009_split --
+#	Transactional workload run under op_recover: when split == 1,
+#	add keys nkeys..mkeys to force a page split; otherwise delete
+#	them (downward, since rrecno renumbers) to force a reverse
+#	split.  Keys are zero-padded for lexical order in btree.
+proc recd009_split { db txn split method nkeys mkeys } {
+ global errorCode
+ source ./include.tcl
+
+ set data "data"
+
+ set isrecno [is_recno $method]
+ # if mkeys is above 1000, need to adjust below for lexical order
+ if { $split == 1 } {
+ puts "\tRecd009_split: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i <= $mkeys } { incr i } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ set ret [$db put -txn $txn $key $data$i]
+ error_check_good dbput:more $ret 0
+ }
+ } else {
+ puts "\tRecd009_split: Delete added keys to force reverse split."
+ # Since rrecno renumbers, we delete downward.
+ for {set i $mkeys} { $i >= $nkeys } { set i [expr $i - 1] } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ error_check_good db_del:$i [$db del -txn $txn $key] 0
+ }
+ }
+ return 0
+}
diff --git a/storage/bdb/test/recd010.tcl b/storage/bdb/test/recd010.tcl
new file mode 100644
index 00000000000..2549e03a2c0
--- /dev/null
+++ b/storage/bdb/test/recd010.tcl
@@ -0,0 +1,257 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd010.tcl,v 1.19 2002/03/15 19:05:07 sue Exp $
+#
+# TEST recd010
+# TEST Test stability of btree duplicates across btree off-page dup splits
+# TEST and reverse splits and across recovery.
+# recd010 --
+#	Test stability of btree duplicates across off-page dup splits,
+#	reverse splits, and recovery.  Runs the main body once with
+#	-dup and once with -dup -dupsort (largs accumulates, so the
+#	second run carries both flags).
+proc recd010 { method {select 0} args} {
+ if { [is_btree $method] != 1 } {
+ puts "Recd010 skipping for method $method."
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd010: skipping for specific pagesizes"
+ return
+ }
+ set largs $args
+ append largs " -dup "
+ recd010_main $method $select $largs
+ append largs " -dupsort "
+ recd010_main $method $select $largs
+}
+
+# recd010_main --
+#	Body of recd010: primes one key with a few dups on a small
+#	(512-byte) pagesize, then drives recd010_split through
+#	op_recover for several split/reverse-split dup workloads,
+#	verifying db state after each abort and commit.
+proc recd010_main { method select largs } {
+ global fixed_len
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+
+ set opts [convert_args $method $largs]
+ set method [convert_method $method]
+
+ puts "Recd010 ($opts): Test duplicates across splits and recovery"
+
+ set testfile recd010.db
+ env_cleanup $testdir
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set mkeys 1000
+ set firstkeys 5
+ set data "data"
+ set key "recd010_key"
+
+ puts "\tRecd010.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd010.b: Fill page with $firstkeys small dups."
+ for { set i 1 } { $i <= $firstkeys } { incr i } {
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ # kvals/kvals_dups track the expected key and dup counts for
+ # recd010_check.
+ set kvals 1
+ set kvals_dups $firstkeys
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # List of recovery tests: {CMD MSG} pairs.
+ if { $mkeys < 100 } {
+ puts "Recd010 mkeys of $mkeys too small"
+ return
+ }
+ set rlist {
+ { {recd010_split DB TXNID 1 2 $mkeys}
+ "Recd010.c: btree split 2 large dups"}
+ { {recd010_split DB TXNID 0 2 $mkeys}
+ "Recd010.d: btree reverse split 2 large dups"}
+ { {recd010_split DB TXNID 1 10 $mkeys}
+ "Recd010.e: btree split 10 dups"}
+ { {recd010_split DB TXNID 0 10 $mkeys}
+ "Recd010.f: btree reverse split 10 dups"}
+ { {recd010_split DB TXNID 1 100 $mkeys}
+ "Recd010.g: btree split 100 dups"}
+ { {recd010_split DB TXNID 0 100 $mkeys}
+ "Recd010.h: btree reverse split 100 dups"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ # 'select' optionally restricts the run to the listed
+ # single-letter test tags (c..h).
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ set reverse [string first "reverse" $msg]
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ recd010_check $testdir $testfile $opts abort $reverse $firstkeys
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ recd010_check $testdir $testfile $opts commit $reverse $firstkeys
+ }
+ puts "\tRecd010.i: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database has only numkeys number
+# of keys and that they are in order.
+#
+#
+# This procedure verifies that the database has only numkeys number
+# of keys and that they are in order.
+#
+proc recd010_check { tdir testfile opts op reverse origdups } {
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set data "data"
+
+ if { $reverse == -1 } {
+ puts "\tRecd010_check: Verify split after $op"
+ } else {
+ puts "\tRecd010_check: Verify reverse split after $op"
+ }
+
+ set stat [$db stat]
+ # Aborted split, or committed reverse split, both mean we are
+ # back to the primed state; otherwise the dup workload's keys
+ # (from kvals/kvals_dups, set by recd010_split) should be present.
+ if { [expr ([string compare $op "abort"] == 0 && $reverse == -1) || \
+ ([string compare $op "commit"] == 0 && $reverse != -1)]} {
+ set numkeys 0
+ set allkeys [expr $numkeys + 1]
+ set numdups $origdups
+ #
+ # If we abort the adding of dups, or commit
+ # the removal of dups, either way check that
+ # we are back at the beginning. Check that:
+ # - We have 0 internal pages.
+ # - We have only 1 key (the original we primed the db
+ # with at the beginning of the test).
+ # - We have only the original number of dups we primed
+ # the db with at the beginning of the test.
+ #
+ error_check_good stat:orig0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:orig1 [is_substr $stat \
+ "{{Number of keys} 1}"] 1
+ error_check_good stat:orig2 [is_substr $stat \
+ "{{Number of records} $origdups}"] 1
+ } else {
+ set numkeys $kvals
+ set allkeys [expr $numkeys + 1]
+ set numdups $kvals_dups
+ #
+ # If we abort the removal of dups, or commit the
+ # addition of dups, check that:
+ # - We have > 0 internal pages.
+ # - We have the number of keys.
+ #
+ error_check_bad stat:new0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:new1 [is_substr $stat \
+ "{{Number of keys} $allkeys}"] 1
+ }
+
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ puts "\tRecd010_check: Checking key and duplicate values"
+ set key "recd010_key"
+ #
+ # Check dups are there as they should be.
+ # (Keys written by recd010_split are $key$ki; numkeys is 0
+ # in the back-to-original case, so this loop is skipped then.)
+ #
+ for {set ki 0} {$ki < $numkeys} {incr ki} {
+ set datacnt 0
+ for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } {
+ set d [$dbc get -nextdup]} {
+ set thisdata [lindex [lindex $d 0] 1]
+ # Data values are zero-padded: data.KI.00N etc.
+ if { $datacnt < 10 } {
+ set pdata $data.$ki.00$datacnt
+ } elseif { $datacnt < 100 } {
+ set pdata $data.$ki.0$datacnt
+ } else {
+ set pdata $data.$ki.$datacnt
+ }
+ error_check_good dup_check $thisdata $pdata
+ incr datacnt
+ }
+ error_check_good dup_count $datacnt $numdups
+ }
+ #
+ # Check that the number of expected keys (allkeys) are
+ # all of the ones that exist in the database.
+ #
+ set dupkeys 0
+ set lastkey ""
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ set thiskey [lindex [lindex $d 0] 0]
+ if { [string compare $lastkey $thiskey] != 0 } {
+ incr dupkeys
+ }
+ set lastkey $thiskey
+ }
+ error_check_good key_check $allkeys $dupkeys
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
+
+# recd010_split --
+#	Transactional dup workload run under op_recover: when split
+#	== 1, write nkeys keys with mkeys/nkeys zero-padded dups each
+#	(forcing off-page dup splits); otherwise delete those keys to
+#	force reverse splits.  Records the expected counts in
+#	kvals/kvals_dups for recd010_check.
+proc recd010_split { db txn split nkeys mkeys } {
+ global errorCode
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set data "data"
+ set key "recd010_key"
+
+ set numdups [expr $mkeys / $nkeys]
+
+ set kvals $nkeys
+ set kvals_dups $numdups
+ if { $split == 1 } {
+ puts \
+"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ for {set i 0} { $i < $numdups } { incr i } {
+ if { $i < 10 } {
+ set pdata $data.$k.00$i
+ } elseif { $i < 100 } {
+ set pdata $data.$k.0$i
+ } else {
+ set pdata $data.$k.$i
+ }
+ set ret [$db put -txn $txn $key$k $pdata]
+ error_check_good dbput:more $ret 0
+ }
+ }
+ } else {
+ puts \
+"\tRecd010_split: Delete $nkeys keys to force reverse split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ error_check_good db_del:$k [$db del -txn $txn $key$k] 0
+ }
+ }
+ return 0
+}
diff --git a/storage/bdb/test/recd011.tcl b/storage/bdb/test/recd011.tcl
new file mode 100644
index 00000000000..74108a30650
--- /dev/null
+++ b/storage/bdb/test/recd011.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd011.tcl,v 11.19 2002/02/25 16:44:26 sandstro Exp $
+#
+# TEST recd011
+# TEST Verify that recovery to a specific timestamp works.
+proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
+ source ./include.tcl
+
+ # Test recovery to a specific timestamp (db_recover -t): perform
+ # $niter timestamped, committed increments, then recover to each
+ # recorded timestamp in turn and verify the datum matches.
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 11
+
+ puts "Recd0$tnum ($args): Test recovery to a specific timestamp."
+
+ set testfile recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key KEY
+ }
+
+ puts "\tRecd0$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Main loop: every second or so, increment the db in a txn.
+ puts "\t\tInitial Checkpoint"
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+ puts "\tRecd0$tnum.b ($niter iterations):\
+ Transaction-protected increment loop."
+ for { set i 0 } { $i <= $niter } { incr i } {
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ # Record the commit time; timeof($i) is the point we will
+ # later recover back to and expect to see datum $i.
+ set timeof($i) [timestamp -r]
+
+ # If an appropriate period has elapsed, checkpoint.
+ if { $i % $ckpt_freq == $ckpt_freq - 1 } {
+ puts "\t\tIteration $i: Checkpointing."
+ error_check_good ckpt($i) [$dbenv txn_checkpoint] 0
+ }
+
+ # Sleep so successive iterations get distinct timestamps
+ # (timestamp resolution is one second).
+ tclsleep $sleep_time
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now, loop through and recover to each timestamp, verifying the
+ # expected increment.
+ puts "\tRecd0$tnum.c: Recover to each timestamp and check."
+ for { set i $niter } { $i >= 0 } { incr i -1 } {
+
+ # Run db_recover.
+ set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
+ berkdb debug_check
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover($i,$t) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $testdir/$testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum [lindex [lindex $dbt 0] 1]
+ error_check_good timestamp_recover $datum [pad_data $method $i]
+
+ error_check_good db_close [$db close] 0
+ }
+
+ # Finally, recover to a time well before the first timestamp
+ # and well after the last timestamp. The latter should
+ # be just like the timestamp of the last test performed;
+ # the former should fail.
+ puts "\tRecd0$tnum.d: Recover to before the first timestamp."
+ set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_bad db_recover(before,$t) $ret 0
+
+ puts "\tRecd0$tnum.e: Recover to after the last timestamp."
+ set t [clock format \
+ [expr $timeof($niter) + 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover(after,$t) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $testdir/$testfile]
+ error_check_good db_open(after) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum2 [lindex [lindex $dbt 0] 1]
+
+ # $datum still holds the value from the i == 0 check above, i.e.
+ # the last recovery performed; recovering past the end must match.
+ error_check_good timestamp_recover $datum2 $datum
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/recd012.tcl b/storage/bdb/test/recd012.tcl
new file mode 100644
index 00000000000..8231e648588
--- /dev/null
+++ b/storage/bdb/test/recd012.tcl
@@ -0,0 +1,432 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd012.tcl,v 11.27 2002/05/10 00:48:07 margo Exp $
+#
+# TEST recd012
+# TEST Test of log file ID management. [#2288]
+# TEST Test recovery handling of file opens and closes.
+proc recd012 { method {start 0} \
+ {niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } {
+ source ./include.tcl
+
+ # Driver for the log file ID management test [#2288]: run
+ # recd012_body $niter times, each in a clean test directory.
+ # $start lets a failing iteration be re-run directly.
+ set tnum 12
+ set pagesize 512
+
+ if { $is_qnx_test } {
+ set niter 40
+ }
+
+ puts "Recd0$tnum $method ($args): Test recovery file management."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd012: skipping for specific pagesizes"
+ return
+ }
+
+ for { set i $start } { $i <= $niter } { incr i } {
+ env_cleanup $testdir
+
+ # For repeatability, we pass in the iteration number
+ # as a parameter and use that in recd012_body to seed
+ # the random number generator to randomize our operations.
+ # This lets us re-run a potentially failing iteration
+ # without having to start from the beginning and work
+ # our way to it.
+ #
+ # The number of databases ranges from 4 to 8 and is
+ # a function of $niter
+ # set ndbs [expr ($i % 5) + 4]
+
+ recd012_body \
+ $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
+ }
+}
+
+proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} } {
+ global alphabet rand_init fixed_len recd012_ofkey recd012_ofckptkey
+ source ./include.tcl
+
+ # One iteration of recd012: build an env plus $ndbs databases, then
+ # repeatedly run a child process that does random open/update/close
+ # operations and exits abruptly, interleaved with runs of recovery,
+ # finishing with a data-integrity check and a verify pass.
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ puts "\tRecd0$tnum $method ($largs): Iteration $iter"
+ puts "\t\tRecd0$tnum.a: Create environment and $ndbs databases."
+
+ # We run out of lockers during some of the recovery runs, so
+ # we need to make sure that we specify a DB_CONFIG that will
+ # give us enough lockers.
+ set f [open $testdir/DB_CONFIG w]
+ puts $f "set_lk_max_lockers 5000"
+ close $f
+
+ set flags "-create -txn -home $testdir"
+ set env_cmd "berkdb_env $flags"
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Initialize random number generator based on $iter.
+ berkdb srand [expr $iter + $rand_init]
+
+ # Initialize database that keeps track of number of open files (so
+ # we don't run out of descriptors).
+ set ofname of.db
+ set txn [$dbenv txn]
+ error_check_good open_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn\
+ -create -dup -mode 0644 -btree -pagesize 512 $ofname]
+ error_check_good of_open [is_valid_db $ofdb] TRUE
+ error_check_good open_txn_commit [$txn commit] 0
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ # Seed the bookkeeping db: current open count under $recd012_ofkey,
+ # and two zero dups under $recd012_ofckptkey (the per-checkpoint
+ # history consumed by recd012_nopenfiles_ckpt).
+ error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0
+ error_check_good of_put2 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_put3 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_txn_commit [$oftxn commit] 0
+ error_check_good of_close [$ofdb close] 0
+
+ # Create ndbs databases to work in, and a file listing db names to
+ # pick from.
+ set f [open $testdir/dblist w]
+
+ set oflags "-auto_commit -env $dbenv \
+ -create -mode 0644 -pagesize $psz $largs $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ # 50-50 chance of being a subdb, unless we're a queue.
+ if { [berkdb random_int 0 1] || [is_queue $method] } {
+ # not a subdb
+ set dbname recd0$tnum-$i.db
+ } else {
+ # subdb
+ set dbname "recd0$tnum-subdb.db s$i"
+ }
+ puts $f $dbname
+ set db [eval berkdb_open $oflags $dbname]
+ error_check_good db($i) [is_valid_db $db] TRUE
+ error_check_good db($i)_close [$db close] 0
+ }
+ close $f
+ error_check_good env_close [$dbenv close] 0
+
+ # Now we get to the meat of things. Our goal is to do some number
+ # of opens, closes, updates, and shutdowns (simulated here by a
+ # close of all open handles and a close/reopen of the environment,
+ # with or without an envremove), matching the regular expression
+ #
+ # ((O[OUC]+S)+R+V)
+ #
+ # We'll repeat the inner + a random number up to $niniter times,
+ # and the outer + a random number up to $noutiter times.
+ #
+ # In order to simulate shutdowns, we'll perform the opens, closes,
+ # and updates in a separate process, which we'll exit without closing
+ # all handles properly. The environment will be left lying around
+ # before we run recovery 50% of the time.
+ set out [berkdb random_int 1 $noutiter]
+ puts \
+ "\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter ops."
+ for { set i 0 } { $i < $out } { incr i } {
+ # Pipe commands to a fresh tclsh; closing the pipe ends the
+ # child without a clean env shutdown (the simulated crash).
+ set child [open "|$tclsh_path" w]
+
+ # For performance, don't source everything,
+ # just what we'll need.
+ puts $child "load $tcllib"
+ puts $child "set fixed_len $fixed_len"
+ puts $child "source $src_root/test/testutils.tcl"
+ puts $child "source $src_root/test/recd0$tnum.tcl"
+
+ set rnd [expr $iter * 10000 + $i * 100 + $rand_init]
+
+ # Go.
+ berkdb debug_check
+ puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\
+ $ndbs $tnum $method $ofname $largs"
+ close $child
+
+ # Run recovery 0-3 times.
+ set nrecs [berkdb random_int 0 3]
+ for { set j 0 } { $j < $nrecs } { incr j } {
+ berkdb debug_check
+ set ret [catch {exec $util_path/db_recover \
+ -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ file mkdir /tmp/12out
+ set fd [open /tmp/12out/[pid] w]
+ puts $fd $res
+ close $fd
+ }
+ error_check_good recover($j) $ret 0
+ }
+ }
+
+ # Run recovery one final time; it doesn't make sense to
+ # check integrity if we do not.
+ set ret [catch {exec $util_path/db_recover -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ puts $res
+ }
+
+ # Make sure each datum is the correct filename.
+ puts "\t\tRecd0$tnum.c: Checking data integrity."
+ set dbenv [berkdb_env -create -private -home $testdir]
+ error_check_good env_open_integrity [is_valid_env $dbenv] TRUE
+ set f [open $testdir/dblist r]
+ set i 0
+ while { [gets $f dbinfo] > 0 } {
+ set db [eval berkdb_open -env $dbenv $dbinfo]
+ error_check_good dbopen($dbinfo) [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Every datum was written as its database's name, so walk
+ # the whole db and confirm each record says so.
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good integrity [lindex [lindex $dbt 0] 1] \
+ [pad_data $method $dbinfo]
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ close $f
+ error_check_good env_close_integrity [$dbenv close] 0
+
+ # Verify
+ error_check_good verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.d: " 0 0 1] 0
+}
+
+proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
+ ofname args } {
+ global recd012_ofkey
+ source ./include.tcl
+ # Child-process body for recd012: performs a random sequence of
+ # open/update/close operations against the test databases, then
+ # (deliberately) lets the caller kill it without a clean shutdown.
+ if { [is_record_based $method] } {
+ set keybase ""
+ } else {
+ set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4]
+ }
+
+ # Initialize our random number generator, repeatably based on an arg.
+ berkdb srand $rnd
+
+ # Open our env.
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Find out how many databases appear to be open in the log--we
+ # don't want recovery to run out of filehandles.
+ set txn [$dbenv txn]
+ error_check_good child_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn $ofname]
+ error_check_good child_txn_commit [$txn commit] 0
+
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ set dbt [$ofdb get -txn $oftxn $recd012_ofkey]
+ error_check_good of_get [lindex [lindex $dbt 0] 0] $recd012_ofkey
+ set nopenfiles [lindex [lindex $dbt 0] 1]
+
+ error_check_good of_commit [$oftxn commit] 0
+
+ # Read our dbnames
+ set f [open $testdir/dblist r]
+ set i 0
+ while { [gets $f dbname($i)] > 0 } {
+ incr i
+ }
+ close $f
+
+ # We now have $ndbs extant databases.
+ # Open one of them, just to get us started.
+ set opendbs {}
+ set oflags "-env $dbenv $args"
+
+ # Start a transaction, just to get us started.
+ set curtxn [$dbenv txn]
+ error_check_good txn [is_valid_txn $curtxn $dbenv] TRUE
+
+ # Inner loop. Do $in iterations of a random open, close, or
+ # update, where $in is between 1 and $niniter.
+ set in [berkdb random_int 1 $niniter]
+ for { set j 0 } { $j < $in } { incr j } {
+ set op [berkdb random_int 0 2]
+ switch $op {
+ 0 {
+ # Open.
+ recd012_open
+ }
+ 1 {
+ # Update. Put random-number$keybase as key,
+ # filename as data, into random database.
+ set num_open [llength $opendbs]
+ if { $num_open == 0 } {
+ # If none are open, do an open first.
+ # NOTE(review): $num_open is not refreshed
+ # after this open, so the random_int below
+ # appears to run with bounds 0..-1 — confirm
+ # intended behavior of berkdb random_int here.
+ recd012_open
+ }
+ set n [berkdb random_int 0 [expr $num_open - 1]]
+ set pair [lindex $opendbs $n]
+ set udb [lindex $pair 0]
+ set uname [lindex $pair 1]
+
+ set key [berkdb random_int 1000 1999]$keybase
+ set data [chop_data $method $uname]
+ error_check_good put($uname,$udb,$key,$data) \
+ [$udb put -txn $curtxn $key $data] 0
+
+ # One time in four, commit the transaction.
+ # NOTE(review): "&& 0" disables this branch
+ # entirely — presumably deliberate; confirm.
+ if { [berkdb random_int 0 3] == 0 && 0 } {
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+ }
+ }
+ 2 {
+ # Close.
+ if { [llength $opendbs] == 0 } {
+ # If none are open, open instead of closing.
+ recd012_open
+ continue
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 \
+ [expr [llength $opendbs] - 1]]
+
+ set db [lindex [lindex $opendbs $which] 0]
+ error_check_good db_choice [is_valid_db $db] TRUE
+ global errorCode errorInfo
+
+ error_check_good db_close \
+ [[lindex [lindex $opendbs $which] 0] close] 0
+
+ set opendbs [lreplace $opendbs $which $which]
+ incr nopenfiles -1
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+ }
+ }
+
+ # One time in two hundred, checkpoint.
+ if { [berkdb random_int 0 199] == 0 } {
+ puts "\t\t\tRecd0$tnum:\
+ Random checkpoint after operation $outiter.$j."
+ error_check_good txn_ckpt \
+ [$dbenv txn_checkpoint] 0
+ set nopenfiles \
+ [recd012_nopenfiles_ckpt $dbenv $ofdb $nopenfiles]
+ }
+ }
+
+ # We have to commit curtxn. It'd be kind of nice not to, but
+ # if we start in again without running recovery, we may block
+ # ourselves.
+ error_check_good curtxn_commit [$curtxn commit] 0
+
+ # Put back the new number of open files.
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ error_check_good of_del [$ofdb del -txn $oftxn $recd012_ofkey] 0
+ error_check_good of_put \
+ [$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0
+ error_check_good of_commit [$oftxn commit] 0
+ error_check_good ofdb_close [$ofdb close] 0
+}
+
+proc recd012_open { } {
+ # This is basically an inline and has to modify curtxn,
+ # so use upvars.
+ # Opens one randomly-chosen database and appends {handle name} to
+ # the caller's $opendbs list; commits and restarts the caller's
+ # $curtxn around the open to avoid self-deadlock.
+ upvar curtxn curtxn
+ upvar ndbs ndbs
+ upvar dbname dbname
+ upvar dbenv dbenv
+ upvar oflags oflags
+ upvar opendbs opendbs
+ upvar nopenfiles nopenfiles
+
+ # Return without an open if we've already opened too many files--
+ # we don't want to make recovery run out of filehandles.
+ # (return -code break breaks out of the caller's inner loop.)
+ if { $nopenfiles > 30 } {
+ #puts "skipping--too many open files"
+ return -code break
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 [expr $ndbs - 1]]
+
+ set db [eval berkdb_open -auto_commit $oflags $dbname($which)]
+
+ lappend opendbs [list $db $dbname($which)]
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen [is_valid_txn $curtxn $dbenv] TRUE
+
+ incr nopenfiles
+}
+
+# Update the database containing the number of files that db_recover has
+# to contend with--we want to avoid letting it run out of file descriptors.
+# We do this by keeping track of the number of unclosed opens since the
+# checkpoint before last.
+# $recd012_ofkey stores this current value; the two dups available
+# at $recd012_ofckptkey store the number of opens since the last checkpoint
+# previous.
+# Thus, if the current value is 17 when we do a checkpoint, and the
+# stored values are 3 and 8, the new current value (which we return)
+# is 14, and the new stored values are 8 and 6.
+proc recd012_nopenfiles_ckpt { env db nopenfiles } {
+ global recd012_ofckptkey
+ # Rotate the two-entry history of per-checkpoint open counts kept
+ # as dups under $recd012_ofckptkey, and return the adjusted count
+ # (see the block comment above for a worked example).
+ set txn [$env txn]
+ error_check_good nopenfiles_ckpt_txn [is_valid_txn $txn $env] TRUE
+
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Get the first ckpt value and delete it.
+ set dbt [$dbc get -set $recd012_ofckptkey]
+ error_check_good set [llength $dbt] 1
+
+ set discard [lindex [lindex $dbt 0] 1]
+ error_check_good del [$dbc del] 0
+
+ # Opens recorded before the checkpoint-before-last no longer
+ # burden recovery; drop them from the running count.
+ set nopenfiles [expr $nopenfiles - $discard]
+
+ # Get the next ckpt value
+ set dbt [$dbc get -nextdup]
+ error_check_good set2 [llength $dbt] 1
+
+ # Calculate how many opens we've had since this checkpoint before last.
+ set onlast [lindex [lindex $dbt 0] 1]
+ set sincelast [expr $nopenfiles - $onlast]
+
+ # Put this new number at the end of the dup set.
+ error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0
+
+ # We should never deadlock since we're the only one in this db.
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ return $nopenfiles
+}
+
+# Global constant keys for the open-file-count bookkeeping database,
+# shared by all recd012 procs -- not worth passing around as parameters.
+set recd012_ofkey OPENFILES
+set recd012_ofckptkey CKPTS
diff --git a/storage/bdb/test/recd013.tcl b/storage/bdb/test/recd013.tcl
new file mode 100644
index 00000000000..e08654f34e0
--- /dev/null
+++ b/storage/bdb/test/recd013.tcl
@@ -0,0 +1,287 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd013.tcl,v 11.18 2002/02/25 16:44:27 sandstro Exp $
+#
+# TEST recd013
+# TEST Test of cursor adjustment on child transaction aborts. [#2373]
+#
+# XXX
+# Other tests that cover more specific variants of the same issue
+# are in the access method tests for now. This is probably wrong; we
+# put this one here because they're closely based on and intertwined
+# with other, non-transactional cursor stability tests that are among
+# the access method tests, and because we need at least one test to
+# fit under recd and keep logtrack from complaining. We'll sort out the mess
+# later; the important thing, for now, is that everything that needs to gets
+# tested. (This really shouldn't be under recd at all, since it doesn't
+# run recovery!)
+proc recd013 { method { nitems 100 } args } {
+ source ./include.tcl
+ global alphabet log_log_record_types
+
+ # Test of cursor adjustment on child transaction aborts [#2373]:
+ # build a db of odd-numbered items with a parent-txn cursor parked
+ # on each, then put/delete in child txns and verify every cursor
+ # still reads its original item after each abort/commit.
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 13
+ set pgsz 512
+
+ puts "Recd0$tnum $method ($args): Test of aborted cursor adjustments."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd013: skipping for specific pagesizes"
+ return
+ }
+
+ set testfile recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set keybase ""
+ } else {
+ set keybase "key"
+ }
+
+ puts "\tRecd0$tnum.a:\
+ Create environment, database, and parent transaction."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set oflags \
+ "-auto_commit -env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Create a database containing $nitems items, numbered with odds.
+ # We'll then put the even numbers during the body of the test.
+ set txn [$env txn]
+ error_check_good init_txn [is_valid_txn $txn $env] TRUE
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+
+ # First, try to put the item in a child transaction,
+ # then abort and verify all the cursors we've done up until
+ # now.
+ set ctxn [$env txn -parent $txn]
+ error_check_good child_txn($i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+ for { set j 1 } { $j < $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+
+ # Then put for real.
+ error_check_good init_put($i) [$db put -txn $txn $key $data] 0
+
+ # Set a cursor of the parent txn to each item.
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc_getset($i) \
+ [$dbc($i) get -set $key] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+
+ # And verify all the cursors, including the one we just
+ # created.
+ for { set j 1 } { $j <= $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+ }
+
+ puts "\t\tRecd0$tnum.a.1: Verify cursor stability after init."
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\tRecd0$tnum.b: Put test."
+ puts "\t\tRecd0$tnum.b.1: Put items."
+ set ctxn [$env txn -parent $txn]
+ error_check_good txn [is_valid_txn $ctxn $env] TRUE
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $ctxn $key $data] 0
+
+ # If we're a renumbering recno, this is uninteresting.
+ # Stir things up by putting a few additional records at
+ # the beginning.
+ if { [is_rrecno $method] == 1 } {
+ set curs [$db cursor -txn $ctxn]
+ error_check_bad llength_get_first \
+ [llength [$curs get -first]] 0
+ error_check_good cursor [is_valid_cursor $curs $db] TRUE
+ # expect a recno!
+ error_check_good rrecno_put($i) \
+ [$curs put -before ADDITIONAL.$i] 1
+ error_check_good curs_close [$curs close] 0
+ }
+ }
+
+ puts "\t\tRecd0$tnum.b.2: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # All the child's puts were aborted, so every parent-txn cursor
+ # must still see exactly its original odd item.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.b.3: "] 0
+
+ # Now put back all the even records, this time in the parent.
+ # Commit and re-begin the transaction so we can abort and
+ # get back to a nice full database.
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $txn $key $data] 0
+ }
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ # Delete test. Set a cursor to each record. Delete the even ones
+ # in the parent and check cursor stability. Then open a child
+ # transaction, and delete the odd ones. Verify that the database
+ # is empty.
+ puts "\tRecd0$tnum.c: Delete test."
+ unset dbc
+
+ # Create cursors pointing at each item.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc($i)_create [is_valid_cursor $dbc($i) $db] \
+ TRUE
+ error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd0$tnum.c.1: Delete even items in child txn and abort."
+
+ if { [is_rrecno $method] != 1 } {
+ set init 2
+ set bound [expr 2 * $nitems]
+ set step 2
+ } else {
+ # In rrecno, deletes will renumber the items, so we have
+ # to take that into account when we delete by recno.
+ set init 2
+ set bound [expr $nitems + 1]
+ set step 1
+ }
+
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that no items are deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd0$tnum.c.2: Delete even items in child txn and commit."
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_commit [$ctxn commit] 0
+
+ # Verify that even items are deleted and odd items are not.
+ # (For rrecno, committed deletes renumbered the remaining items,
+ # so compute the post-renumbering recno $j for each original $i.)
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list "" ""]]
+ }
+
+ puts "\t\tRecd0$tnum.c.3: Delete odd items in child txn."
+
+ set ctxn [$env txn -parent $txn]
+
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ # If this is an rrecno, just delete the first
+ # item repeatedly--the renumbering will make
+ # that delete everything.
+ set j 1
+ }
+ error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0
+ }
+
+ # Verify that everyone's deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good get_deleted($i) \
+ [llength [$db get -txn $ctxn $keybase$i]] 0
+ }
+
+ puts "\t\tRecd0$tnum.c.4: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that even items are deleted and odd items are not.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list "" ""]]
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.c.5: "] 0
+
+ puts "\tRecd0$tnum.d: Clean up."
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ error_check_good verify_dir \
+ [verify_dir $testdir "\t\tRecd0$tnum.d.1: "] 0
+
+ if { $log_log_record_types == 1 } {
+ logtrack_read $testdir
+ }
+}
diff --git a/storage/bdb/test/recd014.tcl b/storage/bdb/test/recd014.tcl
new file mode 100644
index 00000000000..6796341dca2
--- /dev/null
+++ b/storage/bdb/test/recd014.tcl
@@ -0,0 +1,445 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd014.tcl,v 1.19 2002/08/15 19:21:24 sandstro Exp $
+#
+# TEST recd014
+# TEST This is a recovery test for create/delete of queue extents. We
+# TEST then need to recover and make sure the file is correctly existing
+# TEST or not, as the case may be.
+proc recd014 { method args} {
+ global fixed_len
+ source ./include.tcl
+
+ if { ![is_queueext $method] == 1 } {
+ puts "Recd014: Skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd014: skipping for specific pagesizes"
+ return
+ }
+
+ set orig_fixed_len $fixed_len
+ #
+ # We will use 512-byte pages, to be able to control
+ # when extents get created/removed.
+ #
+ set fixed_len 300
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ #
+ # We want to set -extent 1 instead of what
+ # convert_args gave us.
+ #
+ set exti [lsearch -exact $opts "-extent"]
+ incr exti
+ set opts [lreplace $opts $exti $exti 1]
+
+ puts "Recd014: $method extent creation/deletion tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd014.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd014.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+
+ puts "\tRecd014.b: Create test commit"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile commit
+ puts "\tRecd014.b: Create test abort"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile abort
+
+ puts "\tRecd014.c: Consume test commit"
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile commit
+ puts "\tRecd014.c: Consume test abort"
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile abort
+
+ set fixed_len $orig_fixed_len
+ puts "\tRecd014.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
+ global log_log_record_types
+ global fixed_len
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set init_file $dir/$dbfile.init
+ set noenvflags "-create $method -mode 0644 -pagesize 512 $opts $dbfile"
+ set oflags "-env $env $noenvflags"
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ set ret [catch {eval {berkdb_open} -txn $t $oflags} db]
+ error_check_good txn_commit [$t commit] 0
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ #
+ # The command to execute to create an extent is a put.
+ # We are just creating the first one, so our extnum is 0.
+ #
+ set extnum 0
+ set data [chop_data $method [replicate $alphabet 512]]
+ puts "\t\tExecuting command"
+ set putrecno [$db put -txn $t -append $data]
+ error_check_good db_put $putrecno 1
+
+ # Sync the db so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ puts "\t\tSyncing"
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file created.
+ #
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ error_check_good extput:exists1 [file exists $dbq] 1
+ set ret [$db get $putrecno]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted. Verify our entry is not there.
+ #
+ puts "\t\tCommand executed and aborted."
+ error_check_good db_get [llength $ret] 0
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_get [llength $ret] 1
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ }
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+ error_check_good db_close [$db close] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still does/n't exist when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ #
+ # Verify it did not change.
+ #
+ error_check_good extput:exists2 [file exists $dbq] 1
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # Undo.
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ file copy -force $dir/$dbfile.afterop $dir/$dbfile
+ move_file_extent $dir $dbfile afterop copy
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # To redo, remove the dbfiles. Run recovery again.
+ #
+ catch { file rename -force $dir/$dbfile $dir/$dbfile.renamed } res
+ copy_extent_file $dir $dbfile renamed rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ #
+ # !!!
+ # Even though db_recover exits with status 0, it should print out
+ # a warning because the file didn't exist. Db_recover writes this
+ # to stderr. Tcl assumes that ANYTHING written to stderr is an
+ # error, so even though we exit with 0 status, we still get an
+ # error back from 'catch'. Look for the warning.
+ #
+ if { $stat == 1 && [is_substr $result "warning"] == 0 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ #
+ # Verify it was redone. However, since we removed the files
+ # to begin with, recovery with abort will not recreate the
+ # extent. Recovery with commit will.
+ #
+ if {$txncmd == "abort"} {
+ error_check_good extput:exists3 [file exists $dbq] 0
+ } else {
+ error_check_good extput:exists3 [file exists $dbq] 1
+ }
+}
+
+proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
+ if { $txncmd == "commit" } {
+ #
+ # Operation was committed. Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation aborted. The file is there, but make
+ # sure the item is not.
+ #
+ set xdb [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $xdb] TRUE
+ set ret [$xdb get $putrecno]
+ error_check_good db_get [llength $ret] 0
+ error_check_good db_close [$xdb close] 0
+ }
+}
+
+proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
+ global log_log_record_types
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \
+ -env $env $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set extnum 0
+ set data [chop_data $method [replicate $alphabet 512]]
+
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set putrecno [$db put -txn $txn -append $data]
+ error_check_good db_put $putrecno 1
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\t\tExecuting command"
+
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed until recovery is run.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ set dbcmd "$db get -txn $t -consume"
+ set ret [eval $dbcmd]
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ error_check_good db_sync [$db sync] 0
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted, verify ext did not change.
+ #
+ puts "\t\tCommand executed and aborted."
+
+ #
+ # Check that the file exists. Final state.
+ # Since we aborted the txn, we should be able
+ # to get to our original entry.
+ #
+ error_check_good postconsume.1 [file exists $dbq] 1
+ error_check_good \
+ diff(init,postconsume.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ #
+ # Check file existence. Consume operations remove
+ # the extent when we move off, which we should have
+ # done.
+ error_check_good consume_exists [file exists $dbq] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here on what we ended up with. Should be a no-op.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover1 [file exists $dbq] 0
+ }
+
+ #
+ # Run recovery here. Re-do the operation.
+ # Verify that the file doesn't exist
+ # (if we committed) or change (if we aborted)
+ # when we are done.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover2 [file exists $dbq] 0
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ set filecopy [glob $dir/*.afterop]
+ set afterop [lindex $filecopy 0]
+ file rename -force $afterop $dir/$dbfile
+ set afterop [string range $afterop \
+ [expr [string last "/" $afterop] + 1] \
+ [string last "." $afterop]]
+ move_file_extent $dir $dbfile afterop rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it still does
+ # not exist.
+ #
+ error_check_good after_recover3 [file exists $dbq] 0
+ }
+}
diff --git a/storage/bdb/test/recd015.tcl b/storage/bdb/test/recd015.tcl
new file mode 100644
index 00000000000..8c3ad612419
--- /dev/null
+++ b/storage/bdb/test/recd015.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd015.tcl,v 1.13 2002/09/05 17:23:06 sandstro Exp $
+#
+# TEST recd015
+# TEST This is a recovery test for testing lots of prepared txns.
+# TEST This test is to force the use of txn_recover to call with the
+# TEST DB_FIRST flag and then DB_NEXT.
+proc recd015 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd015: $method ($args) prepared txns test"
+
+ # Create the database and environment.
+
+ set numtxns 1
+ set testfile NULL
+
+ set env_cmd "berkdb_env -create -txn -home $testdir"
+ set msg "\tRecd015.a"
+ puts "$msg Simple test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ env_cleanup $testdir
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ #
+ # Now test large numbers of prepared txns to test DB_NEXT
+ # on txn_recover.
+ #
+ set numtxns 250
+ set testfile recd015.db
+ set txnmax [expr $numtxns + 5]
+ #
+ # For this test we create our database ahead of time so that we
+ # don't need to send methods and args to the script.
+ #
+ env_cleanup $testdir
+ set env_cmd "berkdb_env -create -txn_max $txnmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} $omethod -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set msg "\tRecd015.b"
+ puts "$msg Large test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+proc recd015_body { env_cmd testfile numtxns msg op } {
+ source ./include.tcl
+
+ sentinel_init
+ set gidf $testdir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl recd15scr.tcl \
+ $testdir/recdout $env_cmd $testfile $gidf $numtxns &]
+
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/recdout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd -recover]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $numtxns
+
+ set gfd [open $gidf r]
+ set i 0
+ while { [gets $gfd gid] != -1 } {
+ set gids($i) $gid
+ incr i
+ }
+ close $gfd
+ #
+ # Make sure we have as many as we expect
+ error_check_good num_gids $i $numtxns
+
+ set i 0
+ puts "$msg.3: comparing GIDs and $op txns"
+ foreach tpair $txnlist {
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ error_check_good gidcompare $gid $gids($i)
+ error_check_good txn:$op [$txn $op] 0
+ incr i
+ }
+ if { $op != "discard" } {
+ error_check_good envclose [$env close] 0
+ return
+ }
+ #
+ # If we discarded, now do it again and randomly resolve some
+ # until all txns are resolved.
+ #
+ puts "$msg.4: resolving/discarding txns"
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ set opval(1) "abort"
+ set opcnt(1) 0
+ set opval(2) "commit"
+ set opcnt(2) 0
+ set opval(3) "discard"
+ set opcnt(3) 0
+ while { $len != 0 } {
+ set opicnt(1) 0
+ set opicnt(2) 0
+ set opicnt(3) 0
+ #
+ # Abort/commit or discard them randomly until
+ # all are resolved.
+ #
+ for { set i 0 } { $i < $len } { incr i } {
+ set t [lindex $txnlist $i]
+ set txn [lindex $t 0]
+ set newop [berkdb random_int 1 3]
+ set ret [$txn $opval($newop)]
+ error_check_good txn_$opval($newop):$i $ret 0
+ incr opcnt($newop)
+ incr opicnt($newop)
+ }
+# puts "$opval(1): $opicnt(1) Total: $opcnt(1)"
+# puts "$opval(2): $opicnt(2) Total: $opcnt(2)"
+# puts "$opval(3): $opicnt(3) Total: $opcnt(3)"
+
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ }
+
+ error_check_good envclose [$env close] 0
+}
diff --git a/storage/bdb/test/recd016.tcl b/storage/bdb/test/recd016.tcl
new file mode 100644
index 00000000000..504aca09617
--- /dev/null
+++ b/storage/bdb/test/recd016.tcl
@@ -0,0 +1,183 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd016.tcl,v 11.8 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST recd016
+# TEST This is a recovery test for testing running recovery while
+# TEST recovery is already running. While bad things may or may not
+# TEST happen, if recovery is then run properly, things should be correct.
+proc recd016 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd016: $method ($args) simultaneous recovery test"
+ puts "Recd016: Skipping; waiting on SR #6277"
+ return
+
+ # Create the database and environment.
+ set testfile recd016.db
+
+ #
+ # For this test we create our database ahead of time so that we
+ # don't need to send methods and args to the script.
+ #
+ cleanup $testdir NULL
+
+ #
+ # Use a smaller log to make more files and slow down recovery.
+ #
+ set gflags ""
+ set pflags ""
+ set log_max [expr 256 * 1024]
+ set nentries 10000
+ set nrec 6
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ # Since we are using txns, we need at least 1 lock per
+ # record (for queue). So set lock_max accordingly.
+ set lkmax [expr $nentries * 2]
+
+ puts "\tRecd016.a: Create environment and database"
+ set env_cmd "berkdb_env -create -log_max $log_max \
+ -lock_max $lkmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} \
+ $omethod -auto_commit -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set abid [open $t4 w]
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc recd016_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc recd016.check
+ }
+ puts "\tRecd016.b: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ #
+ # Start a transaction. Alternately abort and commit them.
+ # This will create a bigger log for recovery to collide.
+ #
+ set txn [$env txn]
+ set ret [eval \
+ {$db put} -txn $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ if {[expr $count % 2] == 0} {
+ set ret [$txn commit]
+ error_check_good txn_commit $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good commit_get \
+ $ret [list [list $key [pad_data $method $str]]]
+ } else {
+ set ret [$txn abort]
+ error_check_good txn_abort $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good abort_get [llength $ret] 0
+ puts $abid $key
+ }
+ incr count
+ }
+ close $did
+ close $abid
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set pidlist {}
+ puts "\tRecd016.c: Start up $nrec recovery processes at once"
+ for {set i 0} {$i < $nrec} {incr i} {
+ set p [exec $util_path/db_recover -h $testdir -c &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+ #
+ # Now that they are all done run recovery correctly
+ puts "\tRecd016.d: Run recovery process"
+ set stat [catch {exec $util_path/db_recover -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tRecd016.e: Open, dump and check database"
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j $i
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ close $oid
+ } else {
+ set q q
+ filehead $nentries $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t4 $t3
+ file rename -force $t3 $t4
+ fileextract $t2 $t4 $t3
+ file rename -force $t3 $t5
+
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ filesort $t1 $t3
+ error_check_good envclose [$env close] 0
+
+ error_check_good Recd016:diff($t5,$t3) \
+ [filecmp $t5 $t3] 0
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+# Check function for recd016; keys and data are identical
+proc recd016.check { key data } {
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc recd016_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/recd017.tcl b/storage/bdb/test/recd017.tcl
new file mode 100644
index 00000000000..9f8208c1b3e
--- /dev/null
+++ b/storage/bdb/test/recd017.tcl
@@ -0,0 +1,151 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd017.tcl,v 11.4 2002/09/03 16:44:37 sue Exp $
+#
+# TEST recd017
+# TEST Test recovery and security. This is basically a watered
+# TEST down version of recd001 just to verify that encrypted environments
+# TEST can be recovered.
+proc recd017 { method {select 0} args} {
+ global fixed_len
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd017: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+ # other does prepare, prepare-abort, and prepare-commit for each
+ # op. If all goes well, this allows each command to depend
+ # exactly one successful iteration of the previous command.
+ set testfile recd017.db
+ set testfile2 recd017-2.db
+
+ set flags "-create -encryptaes $passwd -txn -home $testdir"
+
+ puts "\tRecd017.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ convert_encrypt $env_cmd
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ convert_encrypt $env_cmd
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd017.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd017.b: put"}
+ { {DB del -txn TXNID $key} "Recd017.c: delete"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd017_key
+ }
+ set data recd017_data
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/storage/bdb/test/recd018.tcl b/storage/bdb/test/recd018.tcl
new file mode 100644
index 00000000000..fb5a589d851
--- /dev/null
+++ b/storage/bdb/test/recd018.tcl
@@ -0,0 +1,110 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd018.tcl,v 11.2 2002/03/13 21:04:20 sue Exp $
+#
+# TEST recd018
+# TEST Test recover of closely interspersed checkpoints and commits.
+#
+# This test is from the error case from #4230.
+#
+proc recd018 { method {ndbs 10} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 18
+
+ puts "Recd0$tnum ($args): $method recovery of checkpoints and commits."
+
+ set tname recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ set key2 2
+ } else {
+ set key KEY
+ set key2 KEY2
+ }
+
+ puts "\tRecd0$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set db($i) [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ }
+
+ # Main loop: Write a record or two to each database.
+ # Do a commit immediately followed by a checkpoint after each one.
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+ puts "\tRecd0$tnum.b Put/Commit/Checkpoint to $ndbs databases"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db($i) put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ if { [expr $i % 2] == 0 } {
+ set txn [$dbenv txn]
+ error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put [$db($i) put \
+ -txn $txn $key2 [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ }
+ error_check_good db_close [$db($i) close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ }
+ error_check_good env_close [$dbenv close] 0
+
+ # Now, loop through and recover to each timestamp, verifying the
+ # expected increment.
+ puts "\tRecd0$tnum.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.d: Run recovery (initial file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.e: Run recovery (after file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+}
diff --git a/storage/bdb/test/recd019.tcl b/storage/bdb/test/recd019.tcl
new file mode 100644
index 00000000000..dd67b7dcb2a
--- /dev/null
+++ b/storage/bdb/test/recd019.tcl
@@ -0,0 +1,121 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd019.tcl,v 11.3 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST recd019
+# TEST Test txn id wrap-around and recovery.
+proc recd019 { method {numid 50} args} {
+ global fixed_len
+ global txn_curid
+ global log_log_record_types
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd019: $method txn id wrap-around test"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd019.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd019.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Test txn wrapping. Force a txn_recycle msg.
+ #
+ set new_curid $txn_curid
+ set new_maxid [expr $new_curid + $numid]
+ error_check_good txn_id_set [$dbenv txn_id_set $new_curid $new_maxid] 0
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -auto_commit $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Force txn ids to wrap twice and then some.
+ #
+ set nument [expr $numid * 3 - 2]
+ puts "\tRecd019.b: Wrapping txn ids after $numid"
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ error_check_good env_close [$dbenv close] 0
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ # Now, loop through and recover.
+ puts "\tRecd019.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.d: Run recovery (initial file)"
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.e: Run recovery (after file)"
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+}
diff --git a/storage/bdb/test/recd020.tcl b/storage/bdb/test/recd020.tcl
new file mode 100644
index 00000000000..93a89f32578
--- /dev/null
+++ b/storage/bdb/test/recd020.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd020.tcl,v 11.8 2002/08/08 15:38:08 bostic Exp $
+#
+# TEST recd020
+# TEST Test recovery after checksum error.
+proc recd020 { method args} {
+ global fixed_len
+ global log_log_record_types
+ global datastr
+ source ./include.tcl
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd020: skipping for specific pagesizes"
+ return
+ }
+ if { [is_queueext $method] == 1 } {
+ puts "Recd020: skipping for method $method"
+ return
+ }
+
+ puts "Recd020: $method recovery after checksum error"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd020.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd020.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set pgsize 512
+ set orig_fixed_len $fixed_len
+ set fixed_len [expr $pgsize / 4]
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -auto_commit -chksum -pagesize $pgsize $opts $testfile"
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+
+ #
+ # Put some data.
+ #
+ set nument 50
+ puts "\tRecd020.b: Put some data"
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i$datastr
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+ #
+ # We need to remove the env so that we don't get cached
+ # pages.
+ #
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+
+ puts "\tRecd020.c: Overwrite part of database"
+ #
+ # First just touch some bits in the file. We want to go
+ # through the paging system, so touch some data pages,
+ # like the middle of page 2.
+ # We should get a checksum error for the checksummed file.
+ #
+ set pg 2
+ set fid [open $testdir/$testfile r+]
+ fconfigure $fid -translation binary
+ set seeklen [expr $pgsize * $pg + 200]
+ seek $fid $seeklen start
+ set byte [read $fid 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $fid $seeklen start
+ puts -nonewline $fid $newbyte
+ close $fid
+
+ #
+ # Verify we get the checksum error. When we get it, it should
+ # log the error as well, so when we run recovery we'll need to
+ # do catastrophic recovery. We do this in a sub-process so that
+ # the files are closed after the panic.
+ #
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [send_cmd $f1 $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [send_cmd $f1 "{berkdb_open_noerr} -env $dbenv $oflags"]
+ error_check_good db [is_valid_db $db] TRUE
+
+ # We need to set non-blocking mode so that after each command
+ # we can read all the remaining output from that command and
+ # we can know what the output from one command is.
+ fconfigure $f1 -blocking 0
+ set ret [read $f1]
+ set got_err 0
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [send_cmd $f1 "catch {$db get $i} r"]
+ set getret [send_cmd $f1 "puts \$r"]
+ set ret [read $f1]
+ if { $stat == 1 } {
+ error_check_good dbget:fail [is_substr $getret \
+ "checksum error: catastrophic recovery required"] 1
+ set got_err 1
+ # Now verify that it was an error on the page we set.
+ error_check_good dbget:pg$pg [is_substr $ret \
+ "failed for page $pg"] 1
+ break
+ } else {
+ set key [lindex [lindex $getret 0] 0]
+ set data [lindex [lindex $getret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data \
+ [pad_data $method $i$datastr]
+ }
+ }
+ error_check_good got_chksum $got_err 1
+ set ret [send_cmd $f1 "$db close"]
+ set extra [read $f1]
+ error_check_good db:fail [is_substr $ret "run recovery"] 1
+
+ set ret [send_cmd $f1 "$dbenv close"]
+ error_check_good env_close:fail [is_substr $ret "run recovery"] 1
+ close $f1
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ puts "\tRecd020.d: Run normal recovery"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 1
+ error_check_good dbrec:fail \
+ [is_substr $r "checksum error: catastrophic recovery required"] 1
+
+ catch {fileremove $testdir/$testfile} ret
+ puts "\tRecd020.e: Run catastrophic recovery"
+ set ret [catch {exec $util_path/db_recover -c -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ #
+ # Now verify the data was reconstructed correctly.
+ #
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+ error_check_good db [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [catch {$db get $i} ret]
+ error_check_good stat $stat 0
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data [pad_data $method $i$datastr]
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+}
diff --git a/storage/bdb/test/recd15scr.tcl b/storage/bdb/test/recd15scr.tcl
new file mode 100644
index 00000000000..e1238907a71
--- /dev/null
+++ b/storage/bdb/test/recd15scr.tcl
@@ -0,0 +1,74 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd15scr.tcl,v 1.5 2002/01/30 13:18:04 margo Exp $
+#
+# Recd15 - lots of txns - txn prepare script
+# Usage: recd15script envcmd dbcmd gidf numtxns
+# envcmd: command to open env
+# dbfile: name of database file
+# gidf: name of global id file
+# numtxns: number of txns to start
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "recd15script envcmd dbfile gidfile numtxns"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set numtxns [ lindex $argv 3 ]
+
+set txnmax [expr $numtxns + 5]
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 0
+if { $dbfile != "NULL" } {
+ set usedb 1
+ set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+}
+
+puts "\tRecd015script.a: Begin $numtxns txns"
+for {set i 0} {$i < $numtxns} {incr i} {
+ set t [$dbenv txn]
+ error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE
+ set txns($i) $t
+ if { $usedb } {
+ set dbc [$db cursor -txn $t]
+ error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE
+ set curs($i) $dbc
+ }
+}
+
+puts "\tRecd015script.b: Prepare $numtxns txns"
+set gfd [open $gidfile w+]
+for {set i 0} {$i < $numtxns} {incr i} {
+ if { $usedb } {
+ set dbc $curs($i)
+ error_check_good dbc_close [$dbc close] 0
+ }
+ set t $txns($i)
+ set gid [make_gid recd015script:$t]
+ puts $gfd $gid
+ error_check_good txn_prepare:$t [$t prepare $gid] 0
+}
+close $gfd
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tRecd015script completed successfully"
+flush stdout
diff --git a/storage/bdb/test/recdscript.tcl b/storage/bdb/test/recdscript.tcl
new file mode 100644
index 00000000000..a2afde46e4d
--- /dev/null
+++ b/storage/bdb/test/recdscript.tcl
@@ -0,0 +1,37 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recdscript.tcl,v 11.4 2002/01/11 15:53:32 bostic Exp $
+#
+# Recovery txn prepare script
+# Usage: recdscript op dir envcmd dbfile cmd
+# op: primary txn operation
+# dir: test directory
+# envcmd: command to open env
+# dbfile: name of database file
+# gidf: name of global id file
+# cmd: db command to execute
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "recdscript op dir envcmd dbfile gidfile cmd"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set op [ lindex $argv 0 ]
+set dir [ lindex $argv 1 ]
+set envcmd [ lindex $argv 2 ]
+set dbfile [ lindex $argv 3 ]
+set gidfile [ lindex $argv 4 ]
+set cmd [ lindex $argv 5 ]
+
+op_recover_prep $op $dir $envcmd $dbfile $gidfile $cmd
+flush stdout
diff --git a/storage/bdb/test/rep001.tcl b/storage/bdb/test/rep001.tcl
new file mode 100644
index 00000000000..97a640029f5
--- /dev/null
+++ b/storage/bdb/test/rep001.tcl
@@ -0,0 +1,249 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep001.tcl,v 1.16 2002/08/26 17:52:19 margo Exp $
+#
+# TEST rep001
+# TEST Replication rename and forced-upgrade test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST verify that the database on the client is correct.
+# TEST Next, remove the database, close the master, upgrade the
+# TEST client, reopen the master, and make sure the new master can correctly
+# TEST run test001 and propagate it in the other direction.
+
+proc rep001 { method { niter 1000 } { tnum "01" } args } {
+ global passwd
+
+ puts "Rep0$tnum: Replication sanity test."
+
+ set envargs ""
+ rep001_sub $method $niter $tnum $envargs $args
+
+ puts "Rep0$tnum: Replication and security sanity test."
+ append envargs " -encryptaes $passwd "
+ append args " -encrypt "
+ rep001_sub $method $niter $tnum $envargs $args
+}
+
+proc rep001_sub { method niter tnum envargs largs } {
+ source ./include.tcl
+ global testdir
+ global encrypt
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ # Open a master.
+ repladd 1
+ set masterenv \
+ [eval {berkdb_env -create -lock_max 2500 -log_max 1000000} \
+ $envargs {-home $masterdir -txn -rep_master -rep_transport \
+ [list 1 replsend]}]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open a client
+ repladd 2
+ set clientenv [eval {berkdb_env -create} $envargs -txn -lock_max 2500 \
+ {-home $clientdir -rep_client -rep_transport [list 2 replsend]}]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Bring the client online by processing the startup messages.
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open a test database on the master (so we can test having handles
+ # open across an upgrade).
+ puts "\tRep0$tnum.a:\
+ Opening test database for post-upgrade client logging test."
+ set master_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $masterenv rep0$tnum-upg.db]
+ set puttxn [$masterenv txn]
+ error_check_good master_upg_db_put \
+ [$master_upg_db put -txn $puttxn hello world] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ error_check_good master_upg_db_close [$master_upg_db close] 0
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep0$tnum.b: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 1 -env $masterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open the cross-upgrade database on the client and check its contents.
+ set client_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $clientenv rep0$tnum-upg.db]
+ error_check_good client_upg_db_get [$client_upg_db get hello] \
+ [list [list hello world]]
+ # !!! We use this handle later. Don't close it here.
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.c: Verifying client database contents."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ # Remove the file (and update client).
+ puts "\tRep0$tnum.d: Remove the file on the master and close master."
+ error_check_good remove \
+ [$masterenv dbremove -auto_commit test0$tnum.db] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Don't get confused in Tcl.
+ puts "\tRep0$tnum.e: Upgrade client."
+ set newmasterenv $clientenv
+ error_check_good upgrade_client [$newmasterenv rep_start -master] 0
+
+ # Run test001 in the new master
+ puts "\tRep0$tnum.f: Running test001 in new master."
+ eval test001 $method $niter 0 $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ puts "\tRep0$tnum.g: Reopen old master as client and catch up."
+ # Throttle master so it can't send everything at once
+ $newmasterenv rep_limit 0 [expr 64 * 1024]
+ set newclientenv [eval {berkdb_env -create -recover} $envargs \
+ -txn -lock_max 2500 \
+ {-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
+ error_check_good newclient_env [is_valid_env $newclientenv] TRUE
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+ set stats [$newmasterenv rep_stat]
+ set nthrottles [getstats $stats {Transmission limited}]
+ error_check_bad nthrottles $nthrottles -1
+ error_check_bad nthrottles $nthrottles 0
+
+ # Run a modified test001 in the new master (and update client).
+ puts "\tRep0$tnum.h: Running test001 in new master."
+ eval test001 $method \
+ $niter $niter $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Test put to the database handle we opened back when the new master
+ # was a client.
+ puts "\tRep0$tnum.i: Test put to handle opened before upgrade."
+ set puttxn [$newmasterenv txn]
+ error_check_good client_upg_db_put \
+ [$client_upg_db put -txn $puttxn hello there] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Close the new master's handle for the upgrade-test database; we
+ # don't need it. Then check to make sure the client did in fact
+ # update the database.
+ error_check_good client_upg_db_close [$client_upg_db close] 0
+ set newclient_upg_db [berkdb_open -env $newclientenv rep0$tnum-upg.db]
+ error_check_good newclient_upg_db_get [$newclient_upg_db get hello] \
+ [list [list hello there]]
+ error_check_good newclient_upg_db_close [$newclient_upg_db close] 0
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.j: Verifying new client database contents."
+ set testdir [get_home $newmasterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $newclientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good newclientenv_close [$newclientenv close] 0
+
+ if { [lsearch $envargs "-encrypta*"] !=-1 } {
+ set encrypt 1
+ }
+ error_check_good verify \
+ [verify_dir $clientdir "\tRep0$tnum.k: " 0 0 1] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/storage/bdb/test/rep002.tcl b/storage/bdb/test/rep002.tcl
new file mode 100644
index 00000000000..68666b0d0f0
--- /dev/null
+++ b/storage/bdb/test/rep002.tcl
@@ -0,0 +1,278 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep002.tcl,v 11.11 2002/08/08 18:13:12 sue Exp $
+#
+# TEST rep002
+# TEST Basic replication election test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, in various scenarios.
+
+proc rep002 { method { niter 10 } { nclients 3 } { tnum "02" } args } {
+ source ./include.tcl
+ global elect_timeout
+
+ set elect_timeout 1000000
+
+ if { [is_record_based $method] == 1 } {
+ puts "Rep002: Skipping for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ puts "Rep0$tnum: Replication election test with $nclients clients."
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+ $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ }
+
+ # Run a modified test001 in the master.
+ puts "\tRep0$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ incr nproced [replprocessqueue $clientenv($i) $envid]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep0$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+ }
+
+ # Start an election in the first client.
+ puts "\tRep0$tnum.d: Starting election without dead master."
+
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+ # We want to verify all the clients but the one that declared an
+ # election get the election message.
+ # We also want to verify that the master declares the election
+ # over by fiat, even if everyone uses a lower priority than 20.
+ # Loop and process all messages, keeping track of which
+ # sites got a HOLDELECTION and checking that the returned newmaster,
+ # if any, is 1 (the master's replication ID).
+ set got_hold_elect(M) 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set got_hold_elect($i) 0
+ }
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+
+ incr nproced [replprocessqueue $masterenv 1 0 he nm]
+
+ if { $he == 1 } {
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd(M) [expr $nclients + 1] 0 $elect_timeout]
+ set got_hold_elect(M) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] 0 \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good got_hold_elect(master) $got_hold_elect(M) 0
+ unset got_hold_elect(M)
+ # error_check_good got_hold_elect(0) $got_hold_elect(0) 0
+ unset got_hold_elect(0)
+ for { set i 1 } { $i < $nclients } { incr i } {
+ error_check_good got_hold_elect($i) $got_hold_elect($i) 1
+ unset got_hold_elect($i)
+ }
+
+ cleanup_elections
+
+ # We need multiple clients to proceed from here.
+ if { $nclients < 2 } {
+ puts "\tRep0$tnum: Skipping for less than two clients."
+ error_check_good masterenv_close [$masterenv close] 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) \
+ [$clientenv($i) close] 0
+ }
+ return
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [replprocessqueue $masterenv 1 0]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ incr nproced [replprocessqueue $clientenv($i) \
+ [expr $i + 2] 0]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Now hold another election in the first client, this time with
+ # a dead master.
+ puts "\tRep0$tnum.e: Starting election with dead master."
+ error_check_good masterenv_close [$masterenv close] 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+ # Process messages, and verify that the client with the highest
+ # priority--client #1--wins.
+ set got_newmaster 0
+ set tries 10
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+
+ # Client #1 has priority 100; everyone else
+ # has priority 10.
+ if { $i == 1 } {
+ set pri 100
+ } else {
+ set pri 10
+ }
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] $pri \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm \
+ [expr 1 + 2]
+ set got_newmaster $nm
+
+ # If this env is the new master, it needs to
+ # configure itself as such--this is a different
+ # env handle from the one that performed the
+ # election.
+ if { $nm == $envid } {
+ error_check_good make_master($i) \
+ [$clientenv($i) rep_start -master] \
+ 0
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ }
+ }
+
+ # Verify that client #1 is actually the winner.
+ error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+ cleanup_elections
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc reptwo { args } { eval rep002 $args }
diff --git a/storage/bdb/test/rep003.tcl b/storage/bdb/test/rep003.tcl
new file mode 100644
index 00000000000..7bb7e00ddbf
--- /dev/null
+++ b/storage/bdb/test/rep003.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep003.tcl,v 11.9 2002/08/09 02:23:50 margo Exp $
+#
+# TEST rep003
+# TEST Repeated shutdown/restart replication test
+# TEST
+# TEST Run a quick put test in a replicated master environment; start up,
+# TEST shut down, and restart client processes, with and without recovery.
+# TEST To ensure that environment state is transient, use DB_PRIVATE.
+
+proc rep003 { method { tnum "03" } args } {
+ source ./include.tcl
+ global testdir rep003_dbname rep003_omethod rep003_oargs
+
+ env_cleanup $testdir
+ set niter 10
+ set rep003_dbname rep003.db
+
+ if { [is_record_based $method] } {
+ puts "Rep0$tnum: Skipping for method $method"
+ return
+ }
+
+ set rep003_omethod [convert_method $method]
+ set rep003_oargs [convert_args $method $args]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+
+ puts "Rep0$tnum: Replication repeated-startup test"
+
+ # Open a master.
+ repladd 1
+ set masterenv [berkdb_env_noerr -create -log_max 1000000 \
+ -home $masterdir -txn -rep_master -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ puts "\tRep0$tnum.a: Simple client startup test."
+
+ # Put item one.
+ rep003_put $masterenv A1 a-one
+
+ # Open a client.
+ repladd 2
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Put another quick item.
+ rep003_put $masterenv A2 a-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+ replclear 2
+
+ # Now reopen the client after doing another put.
+ puts "\tRep0$tnum.b: Client restart."
+ rep003_put $masterenv B1 b-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv B2 b-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a recovery.
+ puts "\tRep0$tnum.c: Client restart after recovery."
+ rep003_put $masterenv C1 c-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv C2 c-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a catastrophic recovery.
+ puts "\tRep0$tnum.d: Client restart after catastrophic recovery."
+ rep003_put $masterenv D1 d-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover_fatal -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv D2 d-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv D1 d-one
+ rep003_check $clientenv D2 d-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ error_check_good masterenv_close [$masterenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep003_put { masterenv key data } {
+ global rep003_dbname rep003_omethod rep003_oargs
+
+ set db [eval {berkdb_open_noerr -create -env $masterenv -auto_commit} \
+ $rep003_omethod $rep003_oargs $rep003_dbname]
+ error_check_good rep3_put_open($key,$data) [is_valid_db $db] TRUE
+
+ set txn [$masterenv txn]
+ error_check_good rep3_put($key,$data) [$db put -txn $txn $key $data] 0
+ error_check_good rep3_put_txn_commit($key,$data) [$txn commit] 0
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
+
+proc rep003_check { env key data } {
+ global rep003_dbname
+
+ set db [berkdb_open_noerr -rdonly -env $env $rep003_dbname]
+ error_check_good rep3_check_open($key,$data) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ error_check_good rep3_check($key,$data) \
+ [lindex [lindex $dbt 0] 1] $data
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
diff --git a/storage/bdb/test/rep004.tcl b/storage/bdb/test/rep004.tcl
new file mode 100644
index 00000000000..e1d4d3b65c7
--- /dev/null
+++ b/storage/bdb/test/rep004.tcl
@@ -0,0 +1,198 @@
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep004.tcl,v 1.5 2002/08/08 18:13:12 sue Exp $
+#
+# TEST rep004
+# TEST Test of DB_REP_LOGSONLY.
+# TEST
+# TEST Run a quick put test in a master environment that has one logs-only
+# TEST client. Shut down, then run catastrophic recovery in the logs-only
+# TEST client and check that the database is present and populated.
+
+# rep004 --
+#	Test of DB_REP_LOGSONLY replicas: populate a master with one normal
+#	client and one logs-only client, advance the master and logs-only
+#	client past the normal client, "crash" the master, fail over to the
+#	normal client, then run catastrophic recovery on the logs-only
+#	environment and verify its contents match the new master's.
+proc rep004 { method { nitems 10 } { tnum "04" } args } {
+	source ./include.tcl
+	global testdir
+
+	env_cleanup $testdir
+	set dbname rep0$tnum.db
+
+	set omethod [convert_method $method]
+	set oargs [convert_args $method $args]
+
+	puts "Rep0$tnum: Test of logs-only replication clients"
+
+	# Message-queue environment ids: master=1, logs-only=2, client=3.
+	replsetup $testdir/MSGQUEUEDIR
+	set masterdir $testdir/MASTERDIR
+	file mkdir $masterdir
+	set clientdir $testdir/CLIENTDIR
+	file mkdir $clientdir
+	set logsonlydir $testdir/LOGSONLYDIR
+	file mkdir $logsonlydir
+
+	# Open a master, a logsonly replica, and a normal client.
+	repladd 1
+	set masterenv [berkdb_env -create -home $masterdir -txn -rep_master \
+	    -rep_transport [list 1 replsend]]
+	error_check_good master_env [is_valid_env $masterenv] TRUE
+
+	repladd 2
+	set loenv [berkdb_env -create -home $logsonlydir -txn -rep_logsonly \
+	    -rep_transport [list 2 replsend]]
+	error_check_good logsonly_env [is_valid_env $loenv] TRUE
+
+	repladd 3
+	set clientenv [berkdb_env -create -home $clientdir -txn -rep_client \
+	    -rep_transport [list 3 replsend]]
+	error_check_good client_env [is_valid_env $clientenv] TRUE
+
+
+	puts "\tRep0$tnum.a: Populate database."
+
+	set db [eval {berkdb open -create -mode 0644 -auto_commit} \
+	    -env $masterenv $oargs $omethod $dbname]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	# Load up to $nitems dictionary words, one committed txn per item.
+	set did [open $dict]
+	set count 0
+	while { [gets $did str] != -1 && $count < $nitems } {
+		if { [is_record_based $method] == 1 } {
+			set key [expr $count + 1]
+			set data $str
+		} else {
+			set key $str
+			set data [reverse $str]
+		}
+		set kvals($count) $key
+		set dvals($count) [pad_data $method $data]
+
+		set txn [$masterenv txn]
+		error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+		set ret [eval \
+		    {$db put} -txn $txn {$key [chop_data $method $data]}]
+		error_check_good put($count) $ret 0
+
+		error_check_good commit($count) [$txn commit] 0
+
+		incr count
+	}
+
+	puts "\tRep0$tnum.b: Sync up clients."
+	set donenow 0
+	while { 1 } {
+		set nproced 0
+
+		incr nproced [replprocessqueue $masterenv 1]
+		incr nproced [replprocessqueue $loenv 2]
+		incr nproced [replprocessqueue $clientenv 3]
+
+		if { $nproced == 0 } {
+			break
+		}
+	}
+
+
+	puts "\tRep0$tnum.c: Get master and logs-only client ahead."
+	# Add $nitems more records that the normal client will NOT see
+	# until after failover.
+	set newcount 0
+	while { [gets $did str] != -1 && $newcount < $nitems } {
+		if { [is_record_based $method] == 1 } {
+			set key [expr $count + 1]
+			set data $str
+		} else {
+			set key $str
+			set data [reverse $str]
+		}
+		set kvals($count) $key
+		set dvals($count) [pad_data $method $data]
+
+		set txn [$masterenv txn]
+		error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+		set ret [eval \
+		    {$db put} -txn $txn {$key [chop_data $method $data]}]
+		error_check_good put($count) $ret 0
+
+		error_check_good commit($count) [$txn commit] 0
+
+		incr count
+		incr newcount
+	}
+
+	error_check_good db_close [$db close] 0
+
+	puts "\tRep0$tnum.d: Sync up logs-only client only, then fail over."
+	set donenow 0
+	while { 1 } {
+		set nproced 0
+
+		incr nproced [replprocessqueue $masterenv 1]
+		incr nproced [replprocessqueue $loenv 2]
+
+		if { $nproced == 0 } {
+			break
+		}
+	}
+
+
+	# "Crash" the master, and fail over to the upgradeable client.
+	error_check_good masterenv_close [$masterenv close] 0
+	replclear 3
+
+	error_check_good upgrade_client [$clientenv rep_start -master] 0
+	set donenow 0
+	while { 1 } {
+		set nproced 0
+
+		incr nproced [replprocessqueue $clientenv 3]
+		incr nproced [replprocessqueue $loenv 2]
+
+		if { $nproced == 0 } {
+			break
+		}
+	}
+
+	error_check_good loenv_close [$loenv close] 0
+
+	puts "\tRep0$tnum.e: Run catastrophic recovery on logs-only client."
+	set loenv [berkdb_env -create -home $logsonlydir -txn -recover_fatal]
+
+	puts "\tRep0$tnum.f: Verify logs-only client contents."
+	set lodb [eval {berkdb open} -env $loenv $oargs $omethod $dbname]
+	set loc [$lodb cursor]
+
+	set cdb [eval {berkdb open} -env $clientenv $oargs $omethod $dbname]
+	set cc [$cdb cursor]
+
+	# Make sure new master and recovered logs-only replica match.
+	# NOTE(review): the first "get -next" on the freshly opened $loc
+	# cursor is presumably positioned at the first record -- confirm
+	# against the Berkeley DB Tcl cursor semantics.
+	for { set cdbt [$cc get -first] } \
+	    { [llength $cdbt] > 0 } { set cdbt [$cc get -next] } {
+		set lodbt [$loc get -next]
+
+		error_check_good newmaster_replica_match $cdbt $lodbt
+	}
+
+	# Reset new master cursor.
+	error_check_good cc_close [$cc close] 0
+	set cc [$cdb cursor]
+
+	# Second pass in the other direction: every logs-only record must
+	# also be present on the new master (catches extra records).
+	for { set lodbt [$loc get -first] } \
+	    { [llength $lodbt] > 0 } { set lodbt [$loc get -next] } {
+		set cdbt [$cc get -next]
+
+		error_check_good replica_newmaster_match $lodbt $cdbt
+	}
+
+	error_check_good loc_close [$loc close] 0
+	error_check_good lodb_close [$lodb close] 0
+	error_check_good loenv_close [$loenv close] 0
+
+	error_check_good cc_close [$cc close] 0
+	error_check_good cdb_close [$cdb close] 0
+	error_check_good clientenv_close [$clientenv close] 0
+
+	close $did
+
+	replclose $testdir/MSGQUEUEDIR
+}
diff --git a/storage/bdb/test/rep005.tcl b/storage/bdb/test/rep005.tcl
new file mode 100644
index 00000000000..e0515f1cd62
--- /dev/null
+++ b/storage/bdb/test/rep005.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep005.tcl,v 11.3 2002/08/08 18:13:13 sue Exp $
+#
+# TEST rep005
+# TEST Replication election test with error handling.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, forcing errors at various
+# TEST locations in the election path.
+
+# rep005 --
+#	Replication election test with injected errors: run test001 on a
+#	replicated master with $nclients clients, verify the clients, shut
+#	down the master, then hold elections while forcing failures at
+#	every combination of election code paths via "test abort".
+proc rep005 { method { niter 10 } { tnum "05" } args } {
+	source ./include.tcl
+
+	if { [is_record_based $method] == 1 } {
+		puts "Rep005: Skipping for method $method."
+		return
+	}
+
+	set nclients 3
+	env_cleanup $testdir
+
+	set qdir $testdir/MSGQUEUEDIR
+	replsetup $qdir
+
+	set masterdir $testdir/MASTERDIR
+	file mkdir $masterdir
+
+	for { set i 0 } { $i < $nclients } { incr i } {
+		set clientdir($i) $testdir/CLIENTDIR.$i
+		file mkdir $clientdir($i)
+	}
+
+	puts "Rep0$tnum: Replication election test with $nclients clients."
+
+	# Open a master.  The env command strings are kept (env_cmd array)
+	# because rep005_elect re-evals them in election sub-processes.
+	repladd 1
+	set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+	    $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+	set masterenv [eval $env_cmd(M)]
+	error_check_good master_env [is_valid_env $masterenv] TRUE
+
+	# Open the clients.  Client $i uses message-queue id $i + 2.
+	for { set i 0 } { $i < $nclients } { incr i } {
+		set envid [expr $i + 2]
+		repladd $envid
+		set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+		    -txn -rep_client -rep_transport \[list $envid replsend\]"
+		set clientenv($i) [eval $env_cmd($i)]
+		error_check_good \
+		    client_env($i) [is_valid_env $clientenv($i)] TRUE
+	}
+
+	# Run a modified test001 in the master.
+	puts "\tRep0$tnum.a: Running test001 in replicated env."
+	eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+	# Loop, processing first the master's messages, then the client's,
+	# until both queues are empty.
+	while { 1 } {
+		set nproced 0
+
+		incr nproced [replprocessqueue $masterenv 1]
+
+		for { set i 0 } { $i < $nclients } { incr i } {
+			set envid [expr $i + 2]
+			incr nproced [replprocessqueue $clientenv($i) $envid]
+		}
+
+		if { $nproced == 0 } {
+			break
+		}
+	}
+
+	# Verify the database in the client dir.
+	for { set i 0 } { $i < $nclients } { incr i } {
+		puts "\tRep0$tnum.b: Verifying contents of client database $i."
+		set testdir [get_home $masterenv]
+		set t1 $testdir/t1
+		set t2 $testdir/t2
+		set t3 $testdir/t3
+		open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+		    test001.check dump_file_direction "-first" "-next"
+
+		if { [string compare [convert_method $method] -recno] != 0 } {
+			filesort $t1 $t3
+		}
+		error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+		verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+	}
+
+	# Make sure all the clients are synced up and ready to be good
+	# voting citizens.
+	error_check_good master_flush [$masterenv rep_flush] 0
+	while { 1 } {
+		set nproced 0
+		incr nproced [replprocessqueue $masterenv 1 0]
+		for { set i 0 } { $i < $nclients } { incr i } {
+			incr nproced [replprocessqueue $clientenv($i) \
+			    [expr $i + 2] 0]
+		}
+
+		if { $nproced == 0 } {
+			break
+		}
+	}
+
+	error_check_good masterenv_close [$masterenv close] 0
+
+	for { set i 0 } { $i < $nclients } { incr i } {
+		replclear [expr $i + 2]
+	}
+	#
+	# We set up the error list for each client. We know that the
+	# first client is the one calling the election, therefore, add
+	# the error location on sending the message (electsend) for that one.
+	# The three nested loops generate every error combination; only
+	# client 0's list includes electsend.
+	set m "Rep0$tnum"
+	set count 0
+	foreach c0 { electinit electsend electvote1 electwait1 electvote2 \
+	    electwait2 } {
+		foreach c1 { electinit electvote1 electwait1 electvote2 \
+		    electwait2 } {
+			foreach c2 { electinit electvote1 electwait1 \
+			    electvote2 electwait2 } {
+				set elist [list $c0 $c1 $c2]
+				rep005_elect env_cmd clientenv $qdir $m \
+				    $count $elist
+				incr count
+			}
+		}
+	}
+
+	for { set i 0 } { $i < $nclients } { incr i } {
+		error_check_good clientenv_close($i) [$clientenv($i) close] 0
+	}
+
+	replclose $testdir/MSGQUEUEDIR
+}
+
+# rep005_elect --
+#	Run one election among the clients with the per-client error
+#	injections in $elist.  ecmd/cenv are passed by name (upvar) so we
+#	share the caller's env_cmd and clientenv arrays.  Client #1 runs
+#	with the highest priority and must win; its envid is 1 + 2 = 3.
+proc rep005_elect { ecmd cenv qdir msg count elist } {
+	global elect_timeout
+	upvar $ecmd env_cmd
+	upvar $cenv clientenv
+
+	set elect_timeout 1000000
+	set nclients [llength $elist]
+
+	for { set i 0 } { $i < $nclients } { incr i } {
+		set err_cmd($i) [lindex $elist $i]
+	}
+	puts "\t$msg.d.$count: Starting election with errors $elist"
+	# Client 0 calls the election in a sub-process (priority 20).
+	set elect_pipe(0) [start_election $qdir $env_cmd(0) \
+	    [expr $nclients + 1] 20 $elect_timeout $err_cmd(0)]
+
+	tclsleep 1
+
+	# Process messages, and verify that the client with the highest
+	# priority--client #1--wins.
+	set got_newmaster 0
+	set tries 10
+	while { 1 } {
+		set nproced 0
+		set he 0
+		set nm 0
+
+		for { set i 0 } { $i < $nclients } { incr i } {
+			set he 0
+			set envid [expr $i + 2]
+#			puts "Processing queue for client $i"
+			incr nproced \
+			    [replprocessqueue $clientenv($i) $envid 0 he nm]
+			# he/nm are set by replprocessqueue via upvar when a
+			# HOLDELECTION or NEWMASTER message is seen.
+			if { $he == 1 } {
+				# Client #1 has priority 100; everyone else
+				if { $i == 1 } {
+					set pri 100
+				} else {
+					set pri 10
+				}
+				# error_check_bad client(0)_in_elect $i 0
+#				puts "Starting election on client $i"
+				set elect_pipe($i) [start_election $qdir \
+				    $env_cmd($i) [expr $nclients + 1] $pri \
+				    $elect_timeout $err_cmd($i)]
+				set got_hold_elect($i) 1
+			}
+			if { $nm != 0 } {
+				# [expr 1 + 2] is client #1's envid (3).
+				error_check_good newmaster_is_master $nm \
+				    [expr 1 + 2]
+				set got_newmaster $nm
+
+				# If this env is the new master, it needs to
+				# configure itself as such--this is a different
+				# env handle from the one that performed the
+				# election.
+				if { $nm == $envid } {
+					error_check_good make_master($i) \
+					    [$clientenv($i) rep_start -master] \
+					    0
+				}
+			}
+		}
+
+		# We need to wait around to make doubly sure that the
+		# election has finished...
+		if { $nproced == 0 } {
+			incr tries -1
+			if { $tries == 0 } {
+				break
+			} else {
+				tclsleep 1
+			}
+		}
+	}
+
+	# Verify that client #1 is actually the winner.
+	error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+	cleanup_elections
+
+}
diff --git a/storage/bdb/test/reputils.tcl b/storage/bdb/test/reputils.tcl
new file mode 100644
index 00000000000..340e359f26d
--- /dev/null
+++ b/storage/bdb/test/reputils.tcl
@@ -0,0 +1,659 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: reputils.tcl,v 11.34 2002/08/12 17:54:18 sandstro Exp $
+#
+# Replication testing utilities
+
+# Environment handle for the env containing the replication "communications
+# structure" (really a CDB environment).
+
+# The test environment consists of a queue and a # directory (environment)
+# per replication site. The queue is used to hold messages destined for a
+# particular site and the directory will contain the environment for the
+# site. So the environment looks like:
+# $testdir
+# ___________|______________________________
+# / | \ \
+# MSGQUEUEDIR MASTERDIR CLIENTDIR.0 ... CLIENTDIR.N-1
+# | | ... |
+# 1 2 .. N+1
+#
+# The master is site 1 in the MSGQUEUEDIR and clients 1-N map to message
+# queues 2 - N+1.
+#
+# The globals repenv(1-N) contain the environment handles for the sites
+# with a given id (i.e., repenv(1) is the master's environment.
+
+global queueenv
+
+# Array of DB handles, one per machine ID, for the databases that contain
+# messages.
+global queuedbs
+global machids
+
+global elect_timeout
+set elect_timeout 50000000
+set drop 0
+
+# Create the directory structure for replication testing.
+# Open the master and client environments; store these in the global repenv
+# Return the master's environment: "-env masterenv"
+#
+# repl_envsetup --
+#	Create the replication directory structure and open the master and
+#	$nclients client environments, storing handles in the global repenv
+#	array (repenv(master) plus repenv(0..N-1), NULL-terminated).
+#	droppct != 0 enables message dropping in replsend (1 in
+#	100/droppct messages); oob requests out-of-order queue processing.
+#	Returns $largs with "-env masterenv" appended.
+proc repl_envsetup { envargs largs tnum {nclients 1} {droppct 0} { oob 0 } } {
+	source ./include.tcl
+	global clientdir
+	global drop drop_msg
+	global masterdir
+	global repenv
+	global testdir
+
+	env_cleanup $testdir
+
+	replsetup $testdir/MSGQUEUEDIR
+
+	set masterdir $testdir/MASTERDIR
+	file mkdir $masterdir
+	if { $droppct != 0 } {
+		set drop 1
+		set drop_msg [expr 100 / $droppct]
+	} else {
+		set drop 0
+	}
+
+	for { set i 0 } { $i < $nclients } { incr i } {
+		set clientdir($i) $testdir/CLIENTDIR.$i
+		file mkdir $clientdir($i)
+	}
+
+	# Open a master.
+	repladd 1
+	#
+	# Set log smaller than default to force changing files,
+	# but big enough so that the tests that use binary files
+	# as keys/data can run.
+	#
+	set lmax [expr 3 * 1024 * 1024]
+	set masterenv [eval {berkdb_env -create -log_max $lmax} $envargs \
+	    {-home $masterdir -txn -rep_master -rep_transport \
+	    [list 1 replsend]}]
+	error_check_good master_env [is_valid_env $masterenv] TRUE
+	set repenv(master) $masterenv
+
+	# Open clients
+	for { set i 0 } { $i < $nclients } { incr i } {
+		set envid [expr $i + 2]
+		repladd $envid
+		set clientenv [eval {berkdb_env -create} $envargs -txn \
+		    {-cachesize { 0 10000000 0 }} -lock_max 10000 \
+		    {-home $clientdir($i) -rep_client -rep_transport \
+		    [list $envid replsend]}]
+		error_check_good client_env [is_valid_env $clientenv] TRUE
+		set repenv($i) $clientenv
+	}
+	# NULL sentinel lets repl_envprocq/repl_envclose count clients.
+	set repenv($i) NULL
+	append largs " -env $masterenv "
+
+	# Process startup messages
+	repl_envprocq $tnum $nclients $oob
+
+	return $largs
+}
+
+# Process all incoming messages. Iterate until there are no messages left
+# in anyone's queue so that we capture all message exchanges. We verify that
+# the requested number of clients matches the number of client environments
+# we have. The oob parameter indicates if we should process the queue
+# with out-of-order delivery. The replprocess procedure actually does
+# the real work of processing the queue -- this routine simply iterates
+# over the various queues and does the initial setup.
+
+# repl_envprocq --
+#	Drain all replication message queues (master first, then each
+#	client), optionally with random skips for out-of-order delivery,
+#	until nothing is left; then sanity-check client request statistics
+#	and, if messages were being dropped, flush the master's log once
+#	to let clients catch up.
+proc repl_envprocq { tnum { nclients 1 } { oob 0 }} {
+	global repenv
+	global drop
+
+	set masterenv $repenv(master)
+	# Count clients by walking to the NULL sentinel set by
+	# repl_envsetup, and check it matches the requested nclients.
+	for { set i 0 } { 1 } { incr i } {
+		if { $repenv($i) == "NULL"} {
+			break
+		}
+	}
+	error_check_good i_nclients $nclients $i
+
+	set name [format "Repl%03d" $tnum]
+	berkdb debug_check
+	puts -nonewline "\t$name: Processing master/$i client queues"
+	set rand_skip 0
+	if { $oob } {
+		puts " out-of-order"
+	} else {
+		puts " in order"
+	}
+	set do_check 1
+	set droprestore $drop
+	while { 1 } {
+		set nproced 0
+
+		if { $oob } {
+			set rand_skip [berkdb random_int 2 10]
+		}
+		incr nproced [replprocessqueue $masterenv 1 $rand_skip]
+		for { set i 0 } { $i < $nclients } { incr i } {
+			set envid [expr $i + 2]
+			if { $oob } {
+				set rand_skip [berkdb random_int 2 10]
+			}
+			set n [replprocessqueue $repenv($i) \
+			    $envid $rand_skip]
+			incr nproced $n
+		}
+
+		if { $nproced == 0 } {
+			# Now that we delay requesting records until
+			# we've had a few records go by, we should always
+			# see that the number of requests is lower than the
+			# number of messages that were enqueued.
+			for { set i 0 } { $i < $nclients } { incr i } {
+				set clientenv $repenv($i)
+				set stats [$clientenv rep_stat]
+				set queued [getstats $stats \
+				   {Total log records queued}]
+				error_check_bad queued_stats \
+				    $queued -1
+				set requested [getstats $stats \
+				   {Log records requested}]
+				error_check_bad requested_stats \
+				    $requested -1
+				if { $queued != 0 && $do_check != 0 } {
+					error_check_good num_requested \
+					    [expr $requested < $queued] 1
+				}
+
+				# Make clients request missing records
+				# aggressively for the final catch-up.
+				$clientenv rep_request 1 1
+			}
+
+			# If we were dropping messages, we might need
+			# to flush the log so that we get everything
+			# and end up in the right state.
+			if { $drop != 0 } {
+				set drop 0
+				set do_check 0
+				$masterenv rep_flush
+				berkdb debug_check
+				puts "\t$name: Flushing Master"
+			} else {
+				break
+			}
+		}
+	}
+
+	# Reset the clients back to the default state in case we
+	# have more processing to do.
+	for { set i 0 } { $i < $nclients } { incr i } {
+		set clientenv $repenv($i)
+		$clientenv rep_request 4 128
+	}
+	set drop $droprestore
+}
+
+# Verify that the directories in the master are exactly replicated in
+# each of the client environments.
+
+# repl_envver0 --
+#	Dump every test*.db database in the master environment and verify
+#	each client environment holds byte-identical contents (after
+#	sorting, except for recno methods where order is significant).
+proc repl_envver0 { tnum method { nclients 1 } } {
+	global clientdir
+	global masterdir
+	global repenv
+
+	# Verify the database in the client dir.
+	# First dump the master.
+	set t1 $masterdir/t1
+	set t2 $masterdir/t2
+	set t3 $masterdir/t3
+	set omethod [convert_method $method]
+	set name [format "Repl%03d" $tnum]
+
+	#
+	# We are interested in the keys of whatever databases are present
+	# in the master environment, so we just call a no-op check function
+	# since we have no idea what the contents of this database really is.
+	# We just need to walk the master and the clients and make sure they
+	# have the same contents.
+	#
+	set cwd [pwd]
+	cd $masterdir
+	set stat [catch {glob test*.db} dbs]
+	cd $cwd
+	# glob raises an error when nothing matches; nothing to verify.
+	if { $stat == 1 } {
+		return
+	}
+	foreach testfile $dbs {
+		open_and_dump_file $testfile $repenv(master) $masterdir/t2 \
+		    repl_noop dump_file_direction "-first" "-next"
+
+		if { [string compare [convert_method $method] -recno] != 0 } {
+			filesort $t2 $t3
+			file rename -force $t3 $t2
+		}
+		for { set i 0 } { $i < $nclients } { incr i } {
+			puts "\t$name: Verifying client $i database \
+			    $testfile contents."
+			open_and_dump_file $testfile $repenv($i) \
+			    $t1 repl_noop dump_file_direction "-first" "-next"
+
+			if { [string compare $omethod "-recno"] != 0 } {
+				filesort $t1 $t3
+			} else {
+				catch {file copy -force $t1 $t3} ret
+			}
+			error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+		}
+	}
+}
+
+# Remove all the elements from the master and verify that these
+# deletions properly propagated to the clients.
+
+# repl_verdel --
+#	Delete every record from every test*.db database in the master
+#	within a transaction, propagate the deletions, and verify each
+#	client database is empty afterward.
+proc repl_verdel { tnum method { nclients 1 } } {
+	global clientdir
+	global masterdir
+	global repenv
+
+	# Delete all items in the master.
+	set name [format "Repl%03d" $tnum]
+	set cwd [pwd]
+	cd $masterdir
+	set stat [catch {glob test*.db} dbs]
+	cd $cwd
+	# glob raises an error when nothing matches; nothing to delete.
+	if { $stat == 1 } {
+		return
+	}
+	foreach testfile $dbs {
+		puts "\t$name: Deleting all items from the master."
+		set txn [$repenv(master) txn]
+		error_check_good txn_begin [is_valid_txn $txn \
+		    $repenv(master)] TRUE
+		set db [berkdb_open -txn $txn -env $repenv(master) $testfile]
+		error_check_good reopen_master [is_valid_db $db] TRUE
+		set dbc [$db cursor -txn $txn]
+		error_check_good reopen_master_cursor \
+		    [is_valid_cursor $dbc $db] TRUE
+		for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+		    { set dbt [$dbc get -next] } {
+			error_check_good del_item [$dbc del] 0
+		}
+		error_check_good dbc_close [$dbc close] 0
+		error_check_good txn_commit [$txn commit] 0
+		error_check_good db_close [$db close] 0
+
+		# Push the deletions out to the clients.
+		repl_envprocq $tnum $nclients
+
+		# Check clients.
+		for { set i 0 } { $i < $nclients } { incr i } {
+			puts "\t$name: Verifying emptiness of client database $i."
+
+			set db [berkdb_open -env $repenv($i) $testfile]
+			error_check_good reopen_client($i) \
+			    [is_valid_db $db] TRUE
+			set dbc [$db cursor]
+			error_check_good reopen_client_cursor($i) \
+			    [is_valid_cursor $dbc $db] TRUE
+
+			error_check_good client($i)_empty \
+			    [llength [$dbc get -first]] 0
+
+			error_check_good dbc_close [$dbc close] 0
+			error_check_good db_close [$db close] 0
+		}
+	}
+}
+
+# Replication "check" function for the dump procs that expect to
+# be able to verify the keys and data.
+# No-op key/data check callback for open_and_dump_file: accept anything.
+proc repl_noop { k d } {
+	return
+}
+
+# Close all the master and client environments in a replication test directory.
+# repl_envclose --
+#	Checkpoint the master, drain all queues so the clients are fully
+#	synced, then close master and client environments (verifying each
+#	directory) and tear down the message-queue environment.
+proc repl_envclose { tnum envargs } {
+	source ./include.tcl
+	global clientdir
+	global encrypt
+	global masterdir
+	global repenv
+	global testdir
+
+	if { [lsearch $envargs "-encrypta*"] !=-1 } {
+		set encrypt 1
+	}
+
+	# In order to make sure that we have fully-synced and ready-to-verify
+	# databases on all the clients, do a checkpoint on the master and
+	# process messages in order to flush all the clients.
+	set drop 0
+	set do_check 0
+	set name [format "Repl%03d" $tnum]
+	berkdb debug_check
+	puts "\t$name: Checkpointing master."
+	error_check_good masterenv_ckp [$repenv(master) txn_checkpoint] 0
+
+	# Count clients.  repenv is NULL-terminated by repl_envsetup.
+	for { set ncli 0 } { 1 } { incr ncli } {
+		if { $repenv($ncli) == "NULL" } {
+			break
+		}
+	}
+	repl_envprocq $tnum $ncli
+
+	error_check_good masterenv_close [$repenv(master) close] 0
+	verify_dir $masterdir "\t$name: " 0 0 1
+	for { set i 0 } { $i < $ncli } { incr i } {
+		error_check_good client($i)_close [$repenv($i) close] 0
+		verify_dir $clientdir($i) "\t$name: " 0 0 1
+	}
+	replclose $testdir/MSGQUEUEDIR
+
+}
+
+# Close up a replication group
+# Close up a replication group: close every per-machine message queue
+# database, the shared queue environment, and reset the machid list.
+proc replclose { queuedir } {
+	global queueenv queuedbs machids
+
+	foreach m $machids {
+		set db $queuedbs($m)
+		error_check_good dbr_close [$db close] 0
+	}
+	error_check_good qenv_close [$queueenv close] 0
+	set machids {}
+}
+
+# Create a replication group for testing.
+# Create a replication group for testing: open (creating if needed) the
+# shared transactional queue environment and reset the queuedbs/machids
+# globals.  Returns the queue environment handle.
+proc replsetup { queuedir } {
+	global queueenv queuedbs machids
+
+	file mkdir $queuedir
+	set queueenv \
+	    [berkdb_env -create -txn -lock_max 20000 -home $queuedir]
+	error_check_good queueenv [is_valid_env $queueenv] TRUE
+
+	# Start from a clean slate; repladd repopulates queuedbs.
+	if { [info exists queuedbs] } {
+		unset queuedbs
+	}
+	set machids {}
+
+	return $queueenv
+}
+
+# Send function for replication.
+# Send function for replication: append {control rec fromid} to the
+# message queue of $toid (or of every machine except the sender when
+# toid is -1, DB_BROADCAST_MID).  Honors the global drop counter to
+# simulate lost messages.  Always returns 0 (success) to the library.
+proc replsend { control rec fromid toid } {
+	global queuedbs queueenv machids
+	global drop drop_msg
+
+	#
+	# If we are testing with dropped messages, then we drop every
+	# $drop_msg time. If we do that just return 0 and don't do
+	# anything.
+	#
+	if { $drop != 0 } {
+		incr drop
+		if { $drop == $drop_msg } {
+			set drop 1
+			return 0
+		}
+	}
+	# XXX
+	# -1 is DB_BROADCAST_MID
+	if { $toid == -1 } {
+		set machlist $machids
+	} else {
+		if { [info exists queuedbs($toid)] != 1 } {
+			error "replsend: machid $toid not found"
+		}
+		set machlist [list $toid]
+	}
+
+	foreach m $machlist {
+		# XXX should a broadcast include to "self"?
+		if { $m == $fromid } {
+			continue
+		}
+
+		# Each enqueue is its own committed transaction.
+		set db $queuedbs($m)
+		set txn [$queueenv txn]
+		$db put -txn $txn -append [list $control $rec $fromid]
+		error_check_good replsend_commit [$txn commit] 0
+	}
+
+	return 0
+}
+
+# Nuke all the pending messages for a particular site.
+# Nuke all the pending messages for a particular site by walking its
+# queue with an -rmw cursor inside one transaction and deleting each.
+proc replclear { machid } {
+	global queuedbs queueenv
+
+	if { [info exists queuedbs($machid)] != 1 } {
+		error "FAIL: replclear: machid $machid not found"
+	}
+
+	set db $queuedbs($machid)
+	set txn [$queueenv txn]
+	set dbc [$db cursor -txn $txn]
+	for { set dbt [$dbc get -rmw -first] } { [llength $dbt] > 0 } \
+	    { set dbt [$dbc get -rmw -next] } {
+		error_check_good replclear($machid)_del [$dbc del] 0
+	}
+	error_check_good replclear($machid)_dbc_close [$dbc close] 0
+	error_check_good replclear($machid)_txn_commit [$txn commit] 0
+}
+
+# Add a machine to a replication environment.
+# Add a machine to a replication environment: create its renumbering
+# recno message-queue database and record the id in machids.
+proc repladd { machid } {
+	global queueenv queuedbs machids
+
+	if { [info exists queuedbs($machid)] == 1 } {
+		error "FAIL: repladd: machid $machid already exists"
+	}
+
+	set queuedbs($machid) [berkdb open -auto_commit \
+	    -env $queueenv -create -recno -renumber repqueue$machid.db]
+	error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
+
+	lappend machids $machid
+}
+
+# Process a queue of messages, skipping every "skip_interval" entry.
+# We traverse the entire queue, but since we skip some messages, we
+# may end up leaving things in the queue, which should get picked up
+# on a later run.
+
+# replprocessqueue --
+#	Feed the pending messages for $machid into $dbenv via
+#	rep_process_message, skipping every skip_interval'th entry to
+#	simulate out-of-order delivery.  hold_electp/newmasterp are
+#	optional call-by-reference outputs (set to 1 / the new master's
+#	id respectively); processing stops early on either event.
+#	Returns the number of messages "processed" (skips included).
+proc replprocessqueue { dbenv machid { skip_interval 0 } \
+    { hold_electp NONE } { newmasterp NONE } } {
+	global queuedbs queueenv errorCode
+
+	# hold_electp is a call-by-reference variable which lets our caller
+	# know we need to hold an election.
+	if { [string compare $hold_electp NONE] != 0 } {
+		upvar $hold_electp hold_elect
+	}
+	set hold_elect 0
+
+	# newmasterp is the same idea, only returning the ID of a master
+	# given in a DB_REP_NEWMASTER return.
+	if { [string compare $newmasterp NONE] != 0 } {
+		upvar $newmasterp newmaster
+	}
+	set newmaster 0
+
+	set nproced 0
+
+	set txn [$queueenv txn]
+	set dbc [$queuedbs($machid) cursor -txn $txn]
+
+	error_check_good process_dbc($machid) \
+	    [is_valid_cursor $dbc $queuedbs($machid)] TRUE
+
+	for { set dbt [$dbc get -first] } \
+	    { [llength $dbt] != 0 } \
+	    { set dbt [$dbc get -next] } {
+		set data [lindex [lindex $dbt 0] 1]
+
+		# If skip_interval is nonzero, we want to process messages
+		# out of order. We do this in a simple but slimy way--
+		# continue walking with the cursor without processing the
+		# message or deleting it from the queue, but do increment
+		# "nproced". The way this proc is normally used, the
+		# precise value of nproced doesn't matter--we just don't
+		# assume the queues are empty if it's nonzero. Thus,
+		# if we contrive to make sure it's nonzero, we'll always
+		# come back to records we've skipped on a later call
+		# to replprocessqueue. (If there really are no records,
+		# we'll never get here.)
+		#
+		# Skip every skip_interval'th record (and use a remainder other
+		# than zero so that we're guaranteed to really process at least
+		# one record on every call).
+		if { $skip_interval != 0 } {
+			if { $nproced % $skip_interval == 1 } {
+				incr nproced
+				continue
+			}
+		}
+
+		# We have to play an ugly cursor game here: we currently
+		# hold a lock on the page of messages, but rep_process_message
+		# might need to lock the page with a different cursor in
+		# order to send a response. So save our recno, close
+		# the cursor, and then reopen and reset the cursor.
+		# The message tuple is {control rec fromid}; note the
+		# argument order to rep_process_message is fromid,
+		# control, rec.
+		set recno [lindex [lindex $dbt 0] 0]
+		error_check_good dbc_process_close [$dbc close] 0
+		error_check_good txn_commit [$txn commit] 0
+		set ret [catch {$dbenv rep_process_message \
+		    [lindex $data 2] [lindex $data 0] [lindex $data 1]} res]
+		set txn [$queueenv txn]
+		set dbc [$queuedbs($machid) cursor -txn $txn]
+		set dbt [$dbc get -set $recno]
+
+		if { $ret != 0 } {
+			if { [is_substr $res DB_REP_HOLDELECTION] } {
+				set hold_elect 1
+			} else {
+				error "FAIL:[timestamp]\
+				    rep_process_message returned $res"
+			}
+		}
+
+		incr nproced
+
+		# Remove the processed message from the queue.
+		$dbc del
+
+		if { $ret == 0 && $res != 0 } {
+			if { [is_substr $res DB_REP_NEWSITE] } {
+				# NEWSITE; do nothing.
+			} else {
+				set newmaster $res
+				# Break as soon as we get a NEWMASTER message;
+				# our caller needs to handle it.
+				break
+			}
+		}
+
+		if { $hold_elect == 1 } {
+			# Break also on a HOLDELECTION, for the same reason.
+			break
+		}
+
+	}
+
+	error_check_good dbc_close [$dbc close] 0
+	error_check_good txn_commit [$txn commit] 0
+
+	# Return the number of messages processed.
+	return $nproced
+}
+
+set run_repl_flag "-run_repl"
+
+# Return the value following the -run_repl flag in $args, or "" if the
+# flag is absent.  Scans until the first empty element.
+proc extract_repl_args { args } {
+	global run_repl_flag
+
+	for { set arg [lindex $args [set i 0]] } \
+	    { [string length $arg] > 0 } \
+	    { set arg [lindex $args [incr i]] } {
+		if { [string compare $arg $run_repl_flag] == 0 } {
+			return [lindex $args [expr $i + 1]]
+		}
+	}
+	return ""
+}
+
+# Return $args with the -run_repl flag and its following value removed.
+# Scans until the first empty element.
+proc delete_repl_args { args } {
+	global run_repl_flag
+
+	set ret {}
+
+	for { set arg [lindex $args [set i 0]] } \
+	    { [string length $arg] > 0 } \
+	    { set arg [lindex $args [incr i]] } {
+		if { [string compare $arg $run_repl_flag] != 0 } {
+			lappend ret $arg
+		} else {
+			# Skip the flag's value too.
+			incr i
+		}
+	}
+	return $ret
+}
+
+global elect_serial
+global elections_in_progress
+set elect_serial 0
+
+# Start an election in a sub-process.
+# Start an election in a sub-process.  Spawns a tclsh that joins the
+# message queues, opens the environment from $envstring, arms the
+# error-injection point $err via "test abort", and calls rep_elect; if
+# an error was injected, it disarms and retries the election once.
+# Returns the election's serial number (key into elections_in_progress).
+proc start_election { qdir envstring nsites pri timeout {err "none"}} {
+	source ./include.tcl
+	global elect_serial elect_timeout elections_in_progress machids
+
+	incr elect_serial
+
+	# Sub-process output/errors go to per-election files in testdir.
+	set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w]
+
+	puts $t "source $test_path/test.tcl"
+	puts $t "replsetup $qdir"
+	foreach i $machids { puts $t "repladd $i" }
+	puts $t "set env_cmd \{$envstring\}"
+	puts $t "set dbenv \[eval \$env_cmd -errfile \
+	    $testdir/ELECTION_ERRFILE.$elect_serial -errpfx FAIL: \]"
+#	puts "Start election err $err, env $envstring"
+	puts $t "\$dbenv test abort $err"
+	puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+	    $elect_timeout\} ret\]"
+	if { $err != "none" } {
+		puts $t "\$dbenv test abort none"
+		puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+		    $elect_timeout\} ret\]"
+	}
+	flush $t
+
+	set elections_in_progress($elect_serial) $t
+	return $elect_serial
+}
+
+# Shut down election sub-process $i: tell it to close its environment,
+# close the pipe, and forget it.
+proc close_election { i } {
+	global elections_in_progress
+	set t $elections_in_progress($i)
+	puts $t "\$dbenv close"
+	close $t
+	unset elections_in_progress($i)
+}
+
+# Close every outstanding election sub-process and reset the serial
+# counter for the next round of elections.
+proc cleanup_elections { } {
+	global elect_serial elections_in_progress
+
+	for { set i 0 } { $i <= $elect_serial } { incr i } {
+		if { [info exists elections_in_progress($i)] != 0 } {
+			close_election $i
+		}
+	}
+
+	set elect_serial 0
+}
diff --git a/storage/bdb/test/rpc001.tcl b/storage/bdb/test/rpc001.tcl
new file mode 100644
index 00000000000..1b65639014f
--- /dev/null
+++ b/storage/bdb/test/rpc001.tcl
@@ -0,0 +1,449 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc001.tcl,v 11.33 2002/07/25 22:57:32 mjc Exp $
+#
+# TEST rpc001
+# TEST Test RPC server timeouts for cursor, txn and env handles.
+# TEST Test RPC specifics, primarily that unsupported functions return
+# TEST errors and such.
+proc rpc001 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ #
+ # First test timeouts on server.
+ #
+ set ttime 5
+ set itime 10
+ puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir -t $ttime -I $itime &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir -t $ttime -I $itime&]
+ }
+ puts "\tRpc001.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc001.b: Creating environment"
+
+ set testfile "rpc001.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc001.c: Opening a database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set curs_list {}
+ set txn_list {}
+ puts "\tRpc001.d: Basic timeout test"
+ puts "\tRpc001.d1: Starting a transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ lappend txn_list $txn
+
+ puts "\tRpc001.d2: Open a cursor in that transaction"
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d3: Duplicate that cursor"
+ set dbc [$dbc dup]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d4: Starting a nested transaction"
+ set txn [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+
+ puts "\tRpc001.d5: Create a cursor, no transaction"
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d6: Timeout cursor and transactions"
+ set sleeptime [expr $ttime + 2]
+ tclsleep $sleeptime
+
+ #
+	# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$db stat} ret]
+ error_check_good dbstat $stat 0
+
+ #
+ # Check that every handle we opened above is timed out
+ #
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ set txn_list {}
+ set ntxns 8
+ puts "\tRpc001.e: Nested ($ntxns x $ntxns) transaction activity test"
+ puts "\tRpc001.e1: Starting parent transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ set parent_txn $txn
+
+ #
+ # First set a breadth of 'ntxns'
+ # We need 2 from this set for testing later on. Just set them
+ # up separately first.
+ #
+ puts "\tRpc001.e2: Creating $ntxns child transactions"
+ set child0 [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $child0 $env] TRUE
+ set child1 [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $child1 $env] TRUE
+
+ for {set i 2} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ }
+
+ #
+ # Now make one 'ntxns' deeply nested.
+ # Add one more for testing later on separately.
+ #
+ puts "\tRpc001.e3: Creating $ntxns nested child transactions"
+ for {set i 0} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $last_txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ }
+ set last_parent $last_txn
+ set last_txn [$env txn -parent $last_parent]
+ error_check_good txn_begin [is_valid_txn $last_txn $env] TRUE
+
+ puts "\tRpc001.e4: Open a cursor in deepest transaction"
+ set dbc [$db cursor -txn $last_txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tRpc001.e5: Duplicate that cursor"
+ set dbcdup [$dbc dup]
+ error_check_good db_cursor [is_valid_cursor $dbcdup $db] TRUE
+ lappend curs_list $dbcdup
+
+ puts "\tRpc001.f: Timeout then activate duplicate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbcdup close} ret]
+ error_check_good dup_close:$dbcdup $stat 0
+ error_check_good dup_close:$dbcdup $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We will
+	# try to begin another child txn using the parent.  We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.g: Timeout, then activate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbc close} ret]
+ error_check_good dbc_close:$dbc $stat 0
+ error_check_good dbc_close:$dbc $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We will
+	# try to begin another child txn using the parent.  We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.h: Timeout, then activate child txn"
+ tclsleep $sleeptime
+ set stat [catch {$child0 commit} ret]
+ error_check_good child_commit $stat 0
+ error_check_good child_commit:$child0 $ret 0
+
+ #
+ #
+ # Make sure that our nested txn is not timed out. We will
+	# try to begin another child txn using the parent.  We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $last_parent} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.i: Timeout, then activate nested txn"
+ tclsleep $sleeptime
+ set stat [catch {$last_txn commit} ret]
+ error_check_good lasttxn_commit $stat 0
+ error_check_good lasttxn_commit:$child0 $ret 0
+
+ #
+ # Make sure that our child txn is not timed out. We should
+ # be able to commit it.
+ #
+ set stat [catch {$child1 commit} ret]
+ error_check_good child_commit:$child1 $stat 0
+ error_check_good child_commit:$child1 $ret 0
+
+ #
+ # Clean up. They were inserted in LIFO order, so we should
+ # just be able to commit them all.
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 0
+
+ rpc_timeoutjoin $env "Rpc001.j" $sleeptime 0
+ rpc_timeoutjoin $env "Rpc001.k" $sleeptime 1
+
+ #
+ # We need a 2nd env just to do an op to timeout the env.
+ # Make the flags different so we don't end up sharing a handle.
+ #
+ set env1 [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000}]
+ error_check_good lock_env:open [is_valid_env $env1] TRUE
+
+ puts "\tRpc001.l: Timeout idle env handle"
+ set sleeptime [expr $itime + 2]
+ tclsleep $sleeptime
+
+ set stat [catch {$env1 close} ret]
+ error_check_good env1_close $stat 0
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_timeout \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+
+ tclkill $dpid
+}
+
+proc rpc_timeoutjoin {env msg sleeptime use_txn} {
+ #
+ # Check join cursors now.
+ #
+ puts -nonewline "\t$msg: Test join cursors and timeouts"
+ if { $use_txn } {
+ puts " (using txns)"
+ set txnflag "-auto_commit"
+ } else {
+ puts " (without txns)"
+ set txnflag ""
+ }
+ #
+ # Set up a simple set of join databases
+ #
+ puts "\t${msg}0: Set up join databases"
+ set fruit {
+ {blue blueberry}
+ {red apple} {red cherry} {red raspberry}
+ {yellow lemon} {yellow pear}
+ }
+ set price {
+ {expen blueberry} {expen cherry} {expen raspberry}
+ {inexp apple} {inexp lemon} {inexp pear}
+ }
+ set dessert {
+ {blueberry cobbler} {cherry cobbler} {pear cobbler}
+ {apple pie} {raspberry pie} {lemon pie}
+ }
+ set fdb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup fruit.db]
+ error_check_good dbopen [is_valid_db $fdb] TRUE
+ set pdb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup price.db]
+ error_check_good dbopen [is_valid_db $pdb] TRUE
+ set ddb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup dessert.db]
+ error_check_good dbopen [is_valid_db $ddb] TRUE
+ foreach kd $fruit {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$fdb put} $txnflag {$k $d}]
+ error_check_good fruit_put $ret 0
+ }
+ error_check_good sync [$fdb sync] 0
+ foreach kd $price {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$pdb put} $txnflag {$k $d}]
+ error_check_good price_put $ret 0
+ }
+ error_check_good sync [$pdb sync] 0
+ foreach kd $dessert {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$ddb put} $txnflag {$k $d}]
+ error_check_good dessert_put $ret 0
+ }
+ error_check_good sync [$ddb sync] 0
+
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 0
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 1
+
+ error_check_good ddb:close [$ddb close] 0
+ error_check_good pdb:close [$pdb close] 0
+ error_check_good fdb:close [$fdb close] 0
+}
+
+proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
+ global errorInfo
+
+ #
+ # Start a parent and child transaction. We'll do our join in
+ # the child transaction just to make sure everything gets timed
+ # out correctly.
+ #
+ set curs_list {}
+ set txn_list {}
+ set msgnum [expr $op * 2 + 1]
+ if { $use_txn } {
+ puts "\t$msg$msgnum: Set up txns and join cursor"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set child0 [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $child0 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child0]
+ set child1 [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $child1 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child1]
+ set txncmd "-txn $child0"
+ } else {
+ puts "\t$msg$msgnum: Set up join cursor"
+ set txncmd ""
+ }
+
+ #
+ # Start a cursor, (using txn child0 in the fruit and price dbs, if
+	# needed).  Just pick something simple to join on.
+ # Then call join on the dessert db.
+ #
+ set fkey yellow
+ set pkey inexp
+ set fdbc [eval $fdb cursor $txncmd]
+ error_check_good fdb_cursor [is_valid_cursor $fdbc $fdb] TRUE
+ set ret [$fdbc get -set $fkey]
+ error_check_bad fget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good fget:set:key $k $fkey
+ set curs_list [linsert $curs_list 0 $fdbc]
+
+ set pdbc [eval $pdb cursor $txncmd]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set ret [$pdbc get -set $pkey]
+ error_check_bad pget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good pget:set:key $k $pkey
+ set curs_list [linsert $curs_list 0 $pdbc]
+
+ set jdbc [$ddb join $fdbc $pdbc]
+ error_check_good join_cursor [is_valid_cursor $jdbc $ddb] TRUE
+ set ret [$jdbc get]
+ error_check_bad jget [llength $ret] 0
+
+ set msgnum [expr $op * 2 + 2]
+ if { $op == 1 } {
+ puts -nonewline "\t$msg$msgnum: Timeout all cursors"
+ if { $use_txn } {
+ puts " and txns"
+ } else {
+ puts ""
+ }
+ } else {
+ puts "\t$msg$msgnum: Timeout, then activate join cursor"
+ }
+
+ tclsleep $sleep
+
+ if { $op == 1 } {
+ #
+		# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ #
+ # Check that join cursor is timed out.
+ #
+ set stat [catch {$jdbc close} ret]
+ error_check_good dbc_close:$jdbc $stat 1
+ error_check_good dbc_timeout:$jdbc \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+
+ #
+ # Now the server may or may not timeout constituent
+ # cursors when it times out the join cursor. So, just
+ # sleep again and then they should timeout.
+ #
+ tclsleep $sleep
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ } else {
+ set stat [catch {$jdbc get} ret]
+ error_check_good jget.stat $stat 0
+ error_check_bad jget [llength $ret] 0
+ set curs_list [linsert $curs_list 0 $jdbc]
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 0
+ error_check_good dbc_close:$c $ret 0
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+ }
+}
diff --git a/storage/bdb/test/rpc002.tcl b/storage/bdb/test/rpc002.tcl
new file mode 100644
index 00000000000..4b69265bf3a
--- /dev/null
+++ b/storage/bdb/test/rpc002.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc002.tcl,v 1.17 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc002
+# TEST Test invalid RPC functions and make sure we error them correctly
+proc rpc002 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ set testfile "rpc002.db"
+ set home [file tail $rpc_testdir]
+ #
+ # First start the server.
+ #
+ puts "Rpc002: Unsupported interface test"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRpc002.a: Started server, pid $dpid"
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ puts "\tRpc002.b: Unsupported env options"
+ #
+ # Test each "pre-open" option for env's. These need to be
+ # tested on the 'berkdb_env' line.
+ #
+ set rlist {
+ { "-data_dir $rpc_testdir" "Rpc002.b0"}
+ { "-log_buffer 512" "Rpc002.b1"}
+ { "-log_dir $rpc_testdir" "Rpc002.b2"}
+ { "-log_max 100" "Rpc002.b3"}
+ { "-lock_conflict {3 {0 0 0 0 0 1 0 1 1}}" "Rpc002.b4"}
+ { "-lock_detect default" "Rpc002.b5"}
+ { "-lock_max 100" "Rpc002.b6"}
+ { "-mmapsize 100" "Rpc002.b7"}
+ { "-shm_key 100" "Rpc002.b9"}
+ { "-tmp_dir $rpc_testdir" "Rpc002.b10"}
+ { "-txn_max 100" "Rpc002.b11"}
+ { "-txn_timestamp 100" "Rpc002.b12"}
+ { "-verbose {recovery on}" "Rpc002.b13"}
+ }
+
+ set e "berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn"
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+ }
+
+ #
+ # Open an env with all the subsystems (-txn implies all
+ # the rest)
+ #
+ puts "\tRpc002.c: Unsupported env related interfaces"
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
+ $testfile"
+ set db [eval $dbcmd]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ #
+ # Test each "post-open" option relating to envs, txns, locks,
+ # logs and mpools.
+ #
+ set rlist {
+ { " lock_detect default" "Rpc002.c0"}
+ { " lock_get read 1 $env" "Rpc002.c1"}
+ { " lock_id" "Rpc002.c2"}
+ { " lock_stat" "Rpc002.c3"}
+ { " lock_vec 1 {get $env read}" "Rpc002.c4"}
+ { " log_archive" "Rpc002.c5"}
+ { " log_file {0 0}" "Rpc002.c6"}
+ { " log_flush" "Rpc002.c7"}
+ { " log_cursor" "Rpc002.c8"}
+ { " log_stat" "Rpc002.c9"}
+ { " mpool -create -pagesize 512" "Rpc002.c10"}
+ { " mpool_stat" "Rpc002.c11"}
+ { " mpool_sync {0 0}" "Rpc002.c12"}
+ { " mpool_trickle 50" "Rpc002.c13"}
+ { " txn_checkpoint -min 1" "Rpc002.c14"}
+ { " txn_stat" "Rpc002.c15"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
+ set stat [catch {eval $env $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+ }
+ error_check_good dbclose [$db close] 0
+
+ #
+ # The database operations that aren't supported are few
+ # because mostly they are the ones Tcl doesn't support
+ # either so we have no way to get at them. Test what we can.
+ #
+ puts "\tRpc002.d: Unsupported database related interfaces"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ #
+ puts "\tRpc002.d0: -cachesize"
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
+ -cachesize {0 65536 0} $testfile"
+ set stat [catch {eval $dbcmd} ret]
+ error_check_good dbopen_cache $stat 1
+ error_check_good dbopen_cache_err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+
+ puts "\tRpc002.d1: Try to upgrade a database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret]
+ error_check_good dbupgrade $stat 1
+ error_check_good dbupgrade_err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+
+ error_check_good envclose [$env close] 0
+
+ tclkill $dpid
+}
diff --git a/storage/bdb/test/rpc003.tcl b/storage/bdb/test/rpc003.tcl
new file mode 100644
index 00000000000..76f0dca6c07
--- /dev/null
+++ b/storage/bdb/test/rpc003.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc003.tcl,v 11.9 2002/07/16 20:53:03 bostic Exp $
+#
+# Test RPC and secondary indices.
+proc rpc003 { } {
+ source ./include.tcl
+ global dict nsecondaries
+ global rpc_svc
+
+ #
+ # First set up the files. Secondary indices only work readonly
+ # over RPC. So we need to create the databases first without
+ # RPC. Then run checking over RPC.
+ #
+ puts "Rpc003: Secondary indices over RPC"
+ if { [string compare $rpc_server "localhost"] != 0 } {
+ puts "Cannot run to non-local RPC server. Skipping."
+ return
+ }
+ cleanup $testdir NULL
+ puts "\tRpc003.a: Creating local secondary index databases"
+
+ # Primary method/args.
+ set pmethod btree
+ set pomethod [convert_method $pmethod]
+ set pargs ""
+ set methods {dbtree dbtree}
+ set argses [convert_argses $methods ""]
+ set omethods [convert_methods $methods]
+
+ set nentries 500
+
+ puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs"
+ set pname "primary003.db"
+ set snamebase "secondary003"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # We have set up our databases, so now start the server and
+ # read them over RPC.
+ #
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ puts "\tRpc003.c: Started server, pid $dpid"
+ tclsleep 2
+
+ set home [file tail $rpc_testdir]
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ #
+ # Attempt to send in a NULL callback to associate. It will fail
+ # if the primary and secondary are not both read-only.
+ #
+ set msg "\tRpc003.d"
+ puts "$msg: Using r/w primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.e"
+ puts "$msg: Using r/w primary and read-only secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -env $env -rdonly \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.f"
+ puts "$msg: Using read-only primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod -rdonly $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ # Open and associate the secondaries
+ puts "\tRpc003.g: Checking secondaries, both read-only"
+ set pdb [eval {berkdb_open_noerr -env} $env \
+ -rdonly $pomethod $pargs $pname]
+ error_check_good primary_open2 [is_valid_db $pdb] TRUE
+
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -env} $env -rdonly \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open2($i) [is_valid_db $sdb] TRUE
+ error_check_good db_associate2($i) \
+ [eval {$pdb associate} "" $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Rpc003.h"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ tclkill $dpid
+}
+
+proc rpc003_assoc_err { popen sopen msg } {
+ set pdb [eval $popen]
+ error_check_good assoc_err_popen [is_valid_db $pdb] TRUE
+
+ puts "$msg.0: NULL callback"
+ set sdb [eval $sopen]
+ error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE
+ set stat [catch {eval {$pdb associate} "" $sdb} ret]
+ error_check_good db_associate:rdonly $stat 1
+ error_check_good db_associate:inval [is_substr $ret invalid] 1
+
+ puts "$msg.1: non-NULL callback"
+ set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret]
+ error_check_good db_associate:callback $stat 1
+ error_check_good db_associate:rpc \
+ [is_substr $ret "not supported in RPC"] 1
+ error_check_good assoc_sclose [$sdb close] 0
+ error_check_good assoc_pclose [$pdb close] 0
+}
diff --git a/storage/bdb/test/rpc004.tcl b/storage/bdb/test/rpc004.tcl
new file mode 100644
index 00000000000..ca1462f3a89
--- /dev/null
+++ b/storage/bdb/test/rpc004.tcl
@@ -0,0 +1,76 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc004.tcl,v 11.6 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc004
+# TEST Test RPC server and security
+proc rpc004 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global passwd
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc004: RPC server + security"
+ cleanup $testdir NULL
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ }
+ puts "\tRpc004.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc004.b: Creating environment"
+
+ set testfile "rpc004.db"
+ set testfile1 "rpc004a.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -encryptaes $passwd -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc004.c: Opening a non-encrypted database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc004.d: Opening an encrypted database"
+ set db1 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env -encrypt $testfile1]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ puts "\tRpc004.e: Put/get on both databases"
+ set key "key"
+ set data "data"
+
+ set ret [$db put -txn $txn $key $data]
+ error_check_good db_put $ret 0
+ set ret [$db get -txn $txn $key]
+ error_check_good db_get $ret [list [list $key $data]]
+ set ret [$db1 put -txn $txn $key $data]
+ error_check_good db1_put $ret 0
+ set ret [$db1 get -txn $txn $key]
+ error_check_good db1_get $ret [list [list $key $data]]
+
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ # Cleanup our environment because it's encrypted
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ tclkill $dpid
+}
diff --git a/storage/bdb/test/rpc005.tcl b/storage/bdb/test/rpc005.tcl
new file mode 100644
index 00000000000..f46e7355e5a
--- /dev/null
+++ b/storage/bdb/test/rpc005.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc005.tcl,v 11.4 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc005
+# TEST Test RPC server handle ID sharing
+proc rpc005 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc005: RPC server handle sharing"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRpc005.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc005.b: Creating environment"
+
+ set testfile "rpc005.db"
+ set testfile1 "rpc005a.db"
+ set subdb1 "subdb1"
+ set subdb2 "subdb2"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc005.c: Compare identical and different configured envs"
+ set env_ident [eval {berkdb_env -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env_ident] TRUE
+
+ set env_diff [eval {berkdb_env -home $home \
+ -server $rpc_server -txn nosync}]
+ error_check_good lock_env:open [is_valid_env $env_diff] TRUE
+
+ error_check_good ident:id [$env rpcid] [$env_ident rpcid]
+ error_check_bad diff:id [$env rpcid] [$env_diff rpcid]
+
+ error_check_good envclose [$env_diff close] 0
+ error_check_good envclose [$env_ident close] 0
+
+ puts "\tRpc005.d: Opening a database"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc005.e: Compare identical and different configured dbs"
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+
+ set db_diff [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+
+ error_check_good ident:id [$db rpcid] [$db_ident rpcid]
+ error_check_bad diff:id [$db rpcid] [$db_diff rpcid]
+ error_check_good ident2:id [$db_diff rpcid] [$db_diff2 rpcid]
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tRpc005.f: Compare with a database and subdatabases"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbid [$db rpcid]
+
+ set db2 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db2] TRUE
+ set db2id [$db2 rpcid]
+ error_check_bad 2subdb:id $dbid $db2id
+
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+ set identid [$db_ident rpcid]
+
+ set db_ident2 [eval {berkdb_open -btree} -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_ident2] TRUE
+ set ident2id [$db_ident2 rpcid]
+
+ set db_diff1 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_diff1] TRUE
+ set diff1id [$db_diff1 rpcid]
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+ set diff2id [$db_diff2 rpcid]
+
+ set db_diff [eval {berkdb_open -unknown} -env $env -rdonly $testfile1]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+ set diffid [$db_diff rpcid]
+
+ set db_diff2a [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2a] TRUE
+ set diff2aid [$db_diff2a rpcid]
+
+ error_check_good ident:id $dbid $identid
+ error_check_good ident2:id $db2id $ident2id
+ error_check_bad diff:id $dbid $diffid
+ error_check_bad diff2:id $db2id $diffid
+ error_check_bad diff3:id $diff2id $diffid
+ error_check_bad diff4:id $diff1id $diffid
+ error_check_good diff2a:id $diff2id $diff2aid
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_ident2 close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff1 close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db_diff2a close] 0
+ error_check_good db_close [$db2 close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ tclkill $dpid
+}
diff --git a/storage/bdb/test/rsrc001.tcl b/storage/bdb/test/rsrc001.tcl
new file mode 100644
index 00000000000..1d57769fda2
--- /dev/null
+++ b/storage/bdb/test/rsrc001.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc001.tcl,v 11.23 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc001
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
+proc rsrc001 { } {
+ source ./include.tcl
+
+ puts "Rsrc001: Basic recno backing file writeback tests"
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ set rec1 "This is record 1"
+ set rec2 "This is record 2 This is record 2"
+ set rec3 "This is record 3 This is record 3 This is record 3"
+ set rec4 [replicate "This is record 4 " 512]
+
+ foreach testfile { "$testdir/rsrc001.db" "" } {
+
+ cleanup $testdir NULL
+
+ if { $testfile == "" } {
+ puts "Rsrc001: Testing with in-memory database."
+ } else {
+ puts "Rsrc001: Testing with disk-backed database."
+ }
+
+ # Create backing file for the empty-file test.
+ set oid1 [open $testdir/rsrc.txt w]
+ close $oid1
+
+ puts "\tRsrc001.a: Put to empty file."
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set txn ""
+
+ set ret [eval {$db put} $txn {1 $rec1}]
+ error_check_good put_to_empty $ret 0
+ error_check_good db_close [$db close] 0
+
+ # Now fill out the backing file and create the check file.
+ set oid1 [open $testdir/rsrc.txt a]
+ set oid2 [open $testdir/check.txt w]
+
+ # This one was already put into rsrc.txt.
+ puts $oid2 $rec1
+
+ # These weren't.
+ puts $oid1 $rec2
+ puts $oid2 $rec2
+ puts $oid1 $rec3
+ puts $oid2 $rec3
+ puts $oid1 $rec4
+ puts $oid2 $rec4
+ close $oid1
+ close $oid2
+
+ puts -nonewline "\tRsrc001.b: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (but we won't change it).
+ # Then close the file and diff the two files.
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ set laststr $str
+ }
+ close $oid
+ set data [sanitize_record $data]
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ Rsrc001:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc001.c: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.d: Append by record number"
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record (set 2) $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.e: Put beyond end of file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ puts $oid ""
+ incr key
+ }
+ set rec "Last Record"
+ puts $oid $rec
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+
+ puts "\tRsrc001.f: Put beyond end of file, after reopen."
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Last record with reopen"
+ puts $oid $rec
+
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ puts "\tRsrc001.g:\
+ Put several beyond end of file, after reopen with snapshot."
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -snapshot -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Really really last record with reopen"
+ puts $oid ""
+ puts $oid ""
+ puts $oid ""
+ puts $oid $rec
+
+ incr key
+ incr key
+ incr key
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.h: Verify proper syncing of changes on close."
+ error_check_good Rsrc001:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ set ret [eval {$db put} $txn {-append $rec}]
+ # Don't bother checking return; we don't know what
+ # the key number is, and we'll pick up a failure
+ # when we compare.
+ }
+ error_check_good Rsrc001:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good Rsrc001:diff($testdir/{rsrc,check}.txt) $ret 0
+ }
+}
+
+# Strip CRs from a record.
+# Needed on Windows when a file is created as text (with CR/LF)
+# but read as binary (where CR is read as a separate character)
+proc sanitize_record { rec } {
+ source ./include.tcl
+
+ if { $is_windows_test != 1 } {
+ return $rec
+ }
+ regsub -all \15 $rec "" data
+ return $data
+}
diff --git a/storage/bdb/test/rsrc002.tcl b/storage/bdb/test/rsrc002.tcl
new file mode 100644
index 00000000000..0cb3cf752e6
--- /dev/null
+++ b/storage/bdb/test/rsrc002.tcl
@@ -0,0 +1,66 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc002.tcl,v 11.14 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc002
+# TEST Recno backing file test #2: test of set_re_delim. Specify a backing
+# TEST file with colon-delimited records, and make sure they are correctly
+# TEST interpreted.
+proc rsrc002 { } {
+ source ./include.tcl
+
+ puts "Rsrc002: Alternate variable-length record delimiters."
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ foreach testfile { "$testdir/rsrc002.db" "" } {
+
+ cleanup $testdir NULL
+
+ # Create the starting files
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ puts -nonewline $oid1 "ostrich:emu:kiwi:moa:cassowary:rhea:"
+ puts -nonewline $oid2 "ostrich:emu:kiwi:penguin:cassowary:rhea:"
+ close $oid1
+ close $oid2
+
+ if { $testfile == "" } {
+ puts "Rsrc002: Testing with in-memory database."
+ } else {
+ puts "Rsrc002: Testing with disk-backed database."
+ }
+
+ puts "\tRsrc002.a: Read file, verify correctness."
+ set db [eval {berkdb_open -create -mode 0644 -delim 58 \
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (but we won't change it).
+ # Then close the file and diff the two files.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -first]
+ error_check_good get_first $rec [list [list 1 "ostrich"]]
+ set rec [$dbc get -next]
+ error_check_good get_next $rec [list [list 2 "emu"]]
+
+ puts "\tRsrc002.b: Write record, verify correctness."
+
+ eval {$dbc get -set 4}
+ set ret [$dbc put -current "penguin"]
+ error_check_good dbc_put $ret 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ error_check_good \
+ Rsrc002:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+ }
+}
diff --git a/storage/bdb/test/rsrc003.tcl b/storage/bdb/test/rsrc003.tcl
new file mode 100644
index 00000000000..f357a1e7f80
--- /dev/null
+++ b/storage/bdb/test/rsrc003.tcl
@@ -0,0 +1,173 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc003.tcl,v 11.5 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc003
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
+proc rsrc003 { } {
+ source ./include.tcl
+ global fixed_len
+
+ puts "Rsrc003: Basic recno backing file writeback tests fixed length"
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ #
+ # Then run with big fixed-length records
+ set rec1 "This is record 1"
+ set rec2 "This is record 2"
+ set rec3 "This is record 3"
+ set bigrec1 [replicate "This is record 1 " 512]
+ set bigrec2 [replicate "This is record 2 " 512]
+ set bigrec3 [replicate "This is record 3 " 512]
+
+ set orig_fixed_len $fixed_len
+ set rlist {
+ {{$rec1 $rec2 $rec3} "small records" }
+ {{$bigrec1 $bigrec2 $bigrec3} "large records" }}
+
+ foreach testfile { "$testdir/rsrc003.db" "" } {
+
+ foreach rec $rlist {
+ cleanup $testdir NULL
+
+ set recs [lindex $rec 0]
+ set msg [lindex $rec 1]
+ # Create the starting files
+ # Note that for the rest of the test, we are going
+ # to append a LF when we 'put' via DB to maintain
+ # file structure and allow us to use 'gets'.
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ foreach record $recs {
+ set r [subst $record]
+ set fixed_len [string length $r]
+ puts $oid1 $r
+ puts $oid2 $r
+ }
+ close $oid1
+ close $oid2
+
+ set reclen [expr $fixed_len + 1]
+ if { $reclen > [string length $rec1] } {
+ set repl 512
+ } else {
+ set repl 2
+ }
+ if { $testfile == "" } {
+ puts \
+"Rsrc003: Testing with in-memory database with $msg."
+ } else {
+ puts \
+"Rsrc003: Testing with disk-backed database with $msg."
+ }
+
+ puts -nonewline \
+ "\tRsrc003.a: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (don't change it).
+ # Then close the file and diff the two files.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ append str \12
+ set laststr $str
+ }
+ close $oid
+ set data [sanitize_record $data]
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ diff1($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc003.b: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "This is New Record $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff2($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc003.c: Append by record number"
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 2) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff3($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts \
+"\tRsrc003.d: Verify proper syncing of changes on close."
+ error_check_good Rsrc003:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 3) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ set ret [eval {$db put} $txn {-append $rec}]
+ # Don't bother checking return;
+ # we don't know what
+ # the key number is, and we'll pick up a failure
+ # when we compare.
+ }
+ error_check_good Rsrc003:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff5($testdir/{rsrc,check}.txt) $ret 0
+ }
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/storage/bdb/test/rsrc004.tcl b/storage/bdb/test/rsrc004.tcl
new file mode 100644
index 00000000000..f6c2f997eb8
--- /dev/null
+++ b/storage/bdb/test/rsrc004.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc004.tcl,v 11.3 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc004
+# TEST Recno backing file test for EOF-terminated records.
+proc rsrc004 { } {
+ source ./include.tcl
+
+ foreach isfixed { 0 1 } {
+ cleanup $testdir NULL
+
+ # Create the backing text file.
+ set oid1 [open $testdir/rsrc.txt w]
+ if { $isfixed == 1 } {
+ puts -nonewline $oid1 "record 1xxx"
+ puts -nonewline $oid1 "record 2xxx"
+ } else {
+ puts $oid1 "record 1xxx"
+ puts $oid1 "record 2xxx"
+ }
+ puts -nonewline $oid1 "record 3"
+ close $oid1
+
+ set args "-create -mode 0644 -recno -source $testdir/rsrc.txt"
+ if { $isfixed == 1 } {
+ append args " -len [string length "record 1xxx"]"
+ set match "record 3 "
+ puts "Rsrc004: EOF-terminated recs: fixed length"
+ } else {
+ puts "Rsrc004: EOF-terminated recs: variable length"
+ set match "record 3"
+ }
+
+ puts "\tRsrc004.a: Read file, verify correctness."
+ set db [eval berkdb_open $args "$testdir/rsrc004.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record
+ set dbc [eval {$db cursor} ""]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last $rec [list [list 3 $match]]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/scr001/chk.code b/storage/bdb/test/scr001/chk.code
new file mode 100644
index 00000000000..eb01d8614b3
--- /dev/null
+++ b/storage/bdb/test/scr001/chk.code
@@ -0,0 +1,37 @@
+#!/bin/sh -
+#
+# $Id: chk.code,v 1.10 2002/02/04 16:03:26 bostic Exp $
+#
+# Check to make sure that the code samples in the documents build.
+
+d=../..
+
+[ -d $d/docs_src ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+for i in `find $d/docs_src -name '*.cs'`; do
+ echo " compiling $i"
+ sed -e 's/m4_include(\(.*\))/#include <\1>/g' \
+ -e 's/m4_[a-z]*[(\[)]*//g' \
+ -e 's/(\[//g' \
+ -e '/argv/!s/])//g' \
+ -e 's/dnl//g' \
+ -e 's/__GT__/>/g' \
+ -e 's/__LB__/[/g' \
+ -e 's/__LT__/</g' \
+ -e 's/__RB__/]/g' < $i > t.c
+ if cc -Wall -Werror -I.. t.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+done
+
+exit 0
diff --git a/storage/bdb/test/scr002/chk.def b/storage/bdb/test/scr002/chk.def
new file mode 100644
index 00000000000..7d5e6670f63
--- /dev/null
+++ b/storage/bdb/test/scr002/chk.def
@@ -0,0 +1,64 @@
+#!/bin/sh -
+#
+# $Id: chk.def,v 1.9 2002/03/27 04:32:57 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any interfaces
+# to the Win32 libdb.def file.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/build_win32/libdb.def
+t1=__1
+t2=__2
+
+exitv=0
+
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/db_xa_switch/d' \
+ -e '/^__/d' -e '/^;/d' |
+ sort > $t1
+
+egrep __P $d/dbinc_auto/ext_prot.in |
+ sed '/^[a-z]/!d' |
+ awk '{print $2}' |
+ sed 's/^\*//' |
+ sed '/^__/d' | sort > $t2
+
+if cmp -s $t1 $t2 ; then
+ :
+else
+ echo "<<< libdb.def >>> DB include files"
+ diff $t1 $t2
+ echo "FAIL: missing items in libdb.def file."
+ exitv=1
+fi
+
+# Check to make sure we don't have any extras in the libdb.def file.
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/__db_global_values/d' > $t1
+
+for i in `cat $t1`; do
+ if egrep $i $d/*/*.c > /dev/null; then
+ :
+ else
+ echo "$f: $i not found in DB sources"
+ fi
+done > $t2
+
+test -s $t2 && {
+ cat $t2
+ echo "FAIL: found unnecessary items in libdb.def file."
+ exitv=1
+}
+
+exit $exitv
diff --git a/storage/bdb/test/scr003/chk.define b/storage/bdb/test/scr003/chk.define
new file mode 100644
index 00000000000..f73355eddf6
--- /dev/null
+++ b/storage/bdb/test/scr003/chk.define
@@ -0,0 +1,77 @@
+#!/bin/sh -
+#
+# $Id: chk.define,v 1.21 2002/03/27 04:32:58 bostic Exp $
+#
+# Check to make sure that all #defines are actually used.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t1=__1
+t2=__2
+t3=__3
+
+egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in |
+ sed -e '/db_185.in/d' -e '/xa.h/d' |
+ awk '{print $2}' |
+ sed -e '/^B_DELETE/d' \
+ -e '/^B_MAX/d' \
+ -e '/^CIRCLEQ_/d' \
+ -e '/^DB_BTREEOLDVER/d' \
+ -e '/^DB_HASHOLDVER/d' \
+ -e '/^DB_LOCKVERSION/d' \
+ -e '/^DB_MAX_PAGES/d' \
+ -e '/^DB_QAMOLDVER/d' \
+ -e '/^DB_TXNVERSION/d' \
+ -e '/^DB_UNUSED/d' \
+ -e '/^DEFINE_DB_CLASS/d' \
+ -e '/^HASH_UNUSED/d' \
+ -e '/^LIST_/d' \
+ -e '/^LOG_OP/d' \
+ -e '/^MINFILL/d' \
+ -e '/^MUTEX_FIELDS/d' \
+ -e '/^NCACHED2X/d' \
+ -e '/^NCACHED30/d' \
+ -e '/^PAIR_MASK/d' \
+ -e '/^P_16_COPY/d' \
+ -e '/^P_32_COPY/d' \
+ -e '/^P_32_SWAP/d' \
+ -e '/^P_TO_UINT16/d' \
+ -e '/^QPAGE_CHKSUM/d' \
+ -e '/^QPAGE_NORMAL/d' \
+ -e '/^QPAGE_SEC/d' \
+ -e '/^SH_CIRCLEQ_/d' \
+ -e '/^SH_LIST_/d' \
+ -e '/^SH_TAILQ_/d' \
+ -e '/^SIZEOF_PAGE/d' \
+ -e '/^TAILQ_/d' \
+ -e '/^WRAPPED_CLASS/d' \
+ -e '/^__BIT_TYPES_DEFINED__/d' \
+ -e '/^__DBC_INTERNAL/d' \
+ -e '/^i_/d' \
+ -e '/_H_/d' \
+ -e 's/(.*//' | sort > $t1
+
+find $d -name '*.c' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ if egrep -w $i `cat $t2` > /dev/null; then
+ :;
+ else
+ f=`egrep -l "#define.*$i" $d/dbinc/*.h $d/dbinc/*.in |
+ sed 's;\.\.\/\.\.\/dbinc/;;' | tr -s "[:space:]" " "`
+ echo "FAIL: $i: $f"
+ fi
+done | sort -k 2 > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unused #defines"
+ exit 1
+}
+
+exit $exitv
diff --git a/storage/bdb/test/scr004/chk.javafiles b/storage/bdb/test/scr004/chk.javafiles
new file mode 100644
index 00000000000..d30c5e3e779
--- /dev/null
+++ b/storage/bdb/test/scr004/chk.javafiles
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id: chk.javafiles,v 1.5 2002/01/30 19:50:52 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any Java files to the list
+# of source files in the Makefile.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/Makefile.in
+j=$d/java/src/com/sleepycat
+
+t1=__1
+t2=__2
+
+find $j/db/ $j/examples $d/rpc_server/java -name \*.java -print |
+ sed -e 's/^.*\///' | sort > $t1
+tr ' \t' '\n' < $f | sed -e '/\.java$/!d' -e 's/^.*\///' | sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< java source files >>> Makefile"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr005/chk.nl b/storage/bdb/test/scr005/chk.nl
new file mode 100644
index 00000000000..47c7ff74d4b
--- /dev/null
+++ b/storage/bdb/test/scr005/chk.nl
@@ -0,0 +1,112 @@
+#!/bin/sh -
+#
+# $Id: chk.nl,v 1.6 2002/01/07 15:12:12 bostic Exp $
+#
+# Check to make sure that there are no trailing newlines in __db_err calls.
+
+d=../..
+
+[ -f $d/README ] || {
+ echo "FAIL: chk.nl can't find the source directory."
+ exit 1
+}
+
+cat << END_OF_CODE > t.c
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdio.h>
+
+int chk(FILE *, char *);
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ FILE *fp;
+ int exitv;
+
+ for (exitv = 0; *++argv != NULL;) {
+ if ((fp = fopen(*argv, "r")) == NULL) {
+ fprintf(stderr, "%s: %s\n", *argv, strerror(errno));
+ return (1);
+ }
+ if (chk(fp, *argv))
+ exitv = 1;
+ (void)fclose(fp);
+ }
+ return (exitv);
+}
+
+int
+chk(fp, name)
+ FILE *fp;
+ char *name;
+{
+ int ch, exitv, line, q;
+
+ exitv = 0;
+ for (ch = 'a', line = 1;;) {
+ if ((ch = getc(fp)) == EOF)
+ return (exitv);
+ if (ch == '\n') {
+ ++line;
+ continue;
+ }
+ if (ch != '_') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'd') continue;
+ if ((ch = getc(fp)) != 'b') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'e') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ while ((ch = getc(fp)) != '"') {
+ if (ch == EOF)
+ return (exitv);
+ if (ch == '\n')
+ ++line;
+ }
+ while ((ch = getc(fp)) != '"')
+ switch (ch) {
+ case EOF:
+ return (exitv);
+ case '\\n':
+ ++line;
+ break;
+ case '.':
+ if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <period> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ case '\\\\':
+ if ((ch = getc(fp)) != 'n')
+ ungetc(ch, fp);
+ else if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <newline> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ }
+ }
+ return (exitv);
+}
+END_OF_CODE
+
+cc t.c -o t
+if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then
+ :
+else
+ echo "FAIL: found __db_err calls ending with periods/newlines."
+ exit 1
+fi
+
+exit 0
diff --git a/storage/bdb/test/scr006/chk.offt b/storage/bdb/test/scr006/chk.offt
new file mode 100644
index 00000000000..6800268d2a2
--- /dev/null
+++ b/storage/bdb/test/scr006/chk.offt
@@ -0,0 +1,36 @@
+#!/bin/sh -
+#
+# $Id: chk.offt,v 1.9 2001/10/26 13:40:15 bostic Exp $
+#
+# Make sure that no off_t's have snuck into the release.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t=__1
+
+egrep -w off_t $d/*/*.[ch] $d/*/*.in |
+sed -e "/#undef off_t/d" \
+ -e "/mp_fopen.c:.*can't use off_t's here/d" \
+ -e "/mp_fopen.c:.*size or type off_t's or/d" \
+ -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
+ -e "/mutex\/tm.c:/d" \
+ -e "/os_map.c:.*(off_t)0))/d" \
+ -e "/os_rw.c:.*(off_t)db_iop->pgno/d" \
+ -e "/os_seek.c:.*off_t offset;/d" \
+ -e "/os_seek.c:.*offset = /d" \
+ -e "/test_perf\/perf_misc.c:/d" \
+ -e "/test_server\/dbs.c:/d" \
+ -e "/test_vxworks\/vx_mutex.c:/d" > $t
+
+test -s $t && {
+ cat $t
+ echo "FAIL: found questionable off_t usage"
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr007/chk.proto b/storage/bdb/test/scr007/chk.proto
new file mode 100644
index 00000000000..ae406fa23fe
--- /dev/null
+++ b/storage/bdb/test/scr007/chk.proto
@@ -0,0 +1,45 @@
+#!/bin/sh -
+#
+# $Id: chk.proto,v 1.8 2002/03/27 04:32:59 bostic Exp $
+#
+# Check to make sure that prototypes are actually needed.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+t3=__3
+
+egrep '__P' $d/dbinc_auto/*.h |
+ sed -e 's/[ ][ ]*__P.*//' \
+ -e 's/^.*[ *]//' \
+ -e '/__db_cprint/d' \
+ -e '/__db_lprint/d' \
+ -e '/__db_noop_log/d' \
+ -e '/__db_prnpage/d' \
+ -e '/__db_txnlist_print/d' \
+ -e '/__db_util_arg/d' \
+ -e '/__ham_func2/d' \
+ -e '/__ham_func3/d' \
+ -e '/_getpgnos/d' \
+ -e '/_print$/d' \
+ -e '/_read$/d' > $t1
+
+find $d -name '*.in' -o -name '*.[ch]' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ c=$(egrep -low $i $(cat $t2) | wc -l)
+ echo "$i: $c"
+done | egrep ' 1$' > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unnecessary prototypes."
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr008/chk.pubdef b/storage/bdb/test/scr008/chk.pubdef
new file mode 100644
index 00000000000..4f59e831b25
--- /dev/null
+++ b/storage/bdb/test/scr008/chk.pubdef
@@ -0,0 +1,179 @@
+#!/bin/sh -
+#
+# Reconcile the list of public defines with the man pages and the Java files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+p=$d/dist/pubdef.in
+
+exitv=0
+
+# Check that pubdef.in has everything listed in m4.links.
+f=$d/docs_src/m4/m4.links
+sed -n \
+ -e 's/^\$1, \(DB_[^,]*\).*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that m4.links has everything listed in pubdef.in.
+f=$d/docs_src/m4/m4.links
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "^.1, $name" $f > /dev/null`; then
+ [ "X$isdoc" != "XD" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isdoc" = "XD" ] && {
+ echo "$name does not appear in $f"
+ exitv=1;
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in db.in.
+f=$d/dbinc/db.in
+sed -n \
+ -e 's/^#define[ ]*\(DB_[A-Z_0-9]*\).*/\1/p' \
+ -e 's/^[ ]*\(DB_[A-Z_]*\)=[0-9].*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that db.in has everything listed in pubdef.in.
+f=$d/dbinc/db.in
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "#define[ ]$name|[ ][ ]*$name=[0-9][0-9]*" \
+ $f > /dev/null`; then
+ [ "X$isinc" != "XI" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isinc" = "XI" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in DbConstants.java.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed -n -e 's/.*static final int[ ]*\([^ ]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that DbConstants.java has everything listed in pubdef.in.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name =" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in Db.java.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed -n -e 's/.*static final int[ ]*\([^ ;]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1;
+ fi
+done
+sed -n -e 's/^[ ]*\([^ ]*\) = DbConstants\..*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that Db.java has all of the Java case values listed in pubdef.in.
+# Any J entries should appear twice -- once as a static final int, with
+# no initialization value, and once assigned to the DbConstants value. Any
+# C entries should appear once as a static final int, with an initialization
+# value.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name;$" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "= DbConstants.$name;" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep "static final int[ ]$name =.*;" $f > /dev/null`; then
+ [ "X$isjava" != "XC" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XC" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+exit $exitv
diff --git a/storage/bdb/test/scr009/chk.srcfiles b/storage/bdb/test/scr009/chk.srcfiles
new file mode 100644
index 00000000000..4f09a2890f6
--- /dev/null
+++ b/storage/bdb/test/scr009/chk.srcfiles
@@ -0,0 +1,39 @@
+#!/bin/sh -
+#
+# $Id: chk.srcfiles,v 1.10 2002/02/04 22:25:33 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any files to the list
+# of source files Win32 uses to build its dsp files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/srcfiles.in
+t1=__1
+t2=__2
+
+sed -e '/^[ #]/d' \
+ -e '/^$/d' < $f |
+ awk '{print $1}' > $t1
+find $d -type f |
+ sed -e 's/^\.\.\/\.\.\///' \
+ -e '/^build[^_]/d' \
+ -e '/^test\//d' \
+ -e '/^test_server/d' \
+ -e '/^test_thread/d' \
+ -e '/^test_vxworks/d' |
+ egrep '\.c$|\.cpp$|\.def$|\.rc$' |
+ sed -e '/perl.DB_File\/version.c/d' |
+ sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< srcfiles.in >>> existing files"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr010/chk.str b/storage/bdb/test/scr010/chk.str
new file mode 100644
index 00000000000..2b5698c0ff2
--- /dev/null
+++ b/storage/bdb/test/scr010/chk.str
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id: chk.str,v 1.5 2001/10/12 17:55:36 bostic Exp $
+#
+# Check spelling in quoted strings.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__t1
+
+sed -e '/^#include/d' \
+ -e '/revid/d' \
+ -e '/"/!d' \
+ -e 's/^[^"]*//' \
+ -e 's/%s/ /g' \
+ -e 's/[^"]*$//' \
+ -e 's/\\[nt]/ /g' $d/*/*.c $d/*/*.cpp |
+spell | sort | comm -23 /dev/stdin spell.ok > $t1
+
+test -s $t1 && {
+ cat $t1
+ echo "FAIL: found questionable spelling in strings."
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr010/spell.ok b/storage/bdb/test/scr010/spell.ok
new file mode 100644
index 00000000000..18af8d1306d
--- /dev/null
+++ b/storage/bdb/test/scr010/spell.ok
@@ -0,0 +1,825 @@
+AES
+AJVX
+ALLDB
+API
+APP
+AccessExample
+Acflmo
+Aclmop
+Ahlm
+Ahm
+BCFILprRsvVxX
+BCc
+BDBXXXXXX
+BH
+BI
+BII
+BINTERNAL
+BTREE
+Bc
+BerkeleyDB
+BtRecExample
+Btree
+CD
+CDB
+CDS
+CDdFILTVvX
+CFILpRsv
+CFLprsvVxX
+CFh
+CHKSUM
+CLpsvxX
+CONFIG
+CdFILTvX
+ClassNotFoundException
+Config
+DBC
+DBENV
+DBP
+DBS
+DBSDIR
+DBT
+DBTYPE
+DBcursor
+DONOTINDEX
+DS
+DUP
+DUPMASTER
+DUPSORT
+Db
+DbAppendRecno
+DbAttachImpl
+DbBtreeCompare
+DbBtreePrefix
+DbBtreeStat
+DbDeadlockException
+DbDupCompare
+DbEnv
+DbEnvFeedback
+DbErrcall
+DbException
+DbFeedback
+DbHash
+DbHashStat
+DbKeyRange
+DbLock
+DbLockNotGrantedException
+DbLockRequest
+DbLockStat
+DbLogStat
+DbLogc
+DbLsn
+DbMemoryException
+DbMpoolFStat
+DbMpoolFile
+DbMpoolStat
+DbPreplist
+DbQueueStat
+DbRecoveryInit
+DbRepStat
+DbRepTransport
+DbRunRecoveryException
+DbSecondaryKeyCreate
+DbTxn
+DbTxnRecover
+DbTxnStat
+DbUtil
+DbXAResource
+DbXid
+Dbc
+Dbt
+Dde
+Deref'ing
+EIO
+EIRT
+EIi
+ENV
+EnvExample
+EnvInfoDelete
+Exp
+FIXEDLEN
+Fd
+Ff
+Fh
+FileNotFoundException
+GetFileInformationByHandle
+GetJavaVM
+GetJoin
+HOFFSET
+HOLDELECTION
+Hashtable
+ILo
+ILprR
+INDX
+INIT
+IREAD
+ISSET
+IWR
+IWRITE
+Ik
+KEYEMPTY
+KEYEXIST
+KeyRange
+LBTREE
+LOCKDOWN
+LOGC
+LRECNO
+LRU
+LSN
+Lcom
+Ljava
+Ll
+LockExample
+LogRegister
+LpRsS
+LprRsS
+MEM
+MMDDhhmm
+MPOOL
+MPOOLFILE
+MapViewOfFile
+Maxid
+Mb
+Mbytes
+Metadata
+Metapage
+Mpool
+MpoolExample
+Mutex
+NEWMASTER
+NEWSITE
+NG
+NODUP
+NODUPDATA
+NOLOCKING
+NOMMAP
+NOMORE
+NOORDERCHK
+NOPANIC
+NOSERVER
+NOSYNC
+NOTFOUND
+NOTGRANTED
+NOTYPE
+NOWAIT
+NP
+NoP
+NoqV
+NqV
+NrV
+NsV
+OLDVERSION
+ORDERCHKONLY
+Offpage
+OpenFileMapping
+OutputStream
+PGNO
+PID
+PREV
+Pgno
+RECNO
+RECNOSYNC
+RECNUM
+RINTERNAL
+RMW
+RPC
+RT
+RUNRECOVERY
+Recno
+RepElectResult
+RepProcessMessage
+SERVERPROG
+SERVERVERS
+SETFD
+SHA
+SS
+Shm
+Sleepycat
+Subdatabase
+TDS
+TESTDIR
+TID
+TMP
+TMPDIR
+TODO
+TPS
+TXN
+TXNID
+TXNs
+Tcl
+TempFolder
+TestKeyRange
+TestLogc
+TpcbExample
+Tt
+Txn
+Txnid
+Txns
+UID
+UNAVAIL
+USERMEM
+Unencrypted
+UnmapViewOfFile
+VM
+VX
+Vv
+VvW
+VvXxZ
+Vvw
+Vx
+VxWorks
+Waitsfor
+XA
+XAException
+Xid
+XxZ
+YIELDCPU
+YY
+abc
+abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq
+abcdef
+abs
+addpage
+addr
+addrem
+adj
+afterop
+ahr
+alldb
+alloc
+alsVv
+amx
+anum
+appl
+appname
+archivedir
+arg
+args
+ata
+badkey
+berkdb
+berkeley
+bfname
+bfree
+bigpages
+bnum
+bostic
+bqual
+bsize
+bt
+btcompare
+btrec
+btree
+buf
+bylsn
+bypage
+byteswap
+byteswapped
+bytevalue
+cachesize
+cadjust
+callpgin
+cd
+cdb
+cdel
+ceVv
+ceh
+celmNrtVZ
+celmNtV
+celmNtVZ
+cget
+charkey
+charset
+chgpg
+chkpoint
+chkpt
+chksum
+ckp
+cksum
+clearerr
+clientrun
+cmdargs
+cnt
+compareproc
+compat
+conf
+config
+copypage
+cp
+crdel
+creat
+curadj
+curlsn
+datalen
+db
+dbc
+dbclient
+dbclose
+dbe
+dbenv
+dbkill
+dbm
+dbmclose
+dbminit
+dbobj
+dbopen
+dbp
+dbreg
+dbremove
+dbrename
+dbs
+dbt
+dbtruncate
+dbverify
+dd
+def
+del
+delext
+delim
+dev
+df
+dh
+dir
+dirfno
+dist
+dists
+dlen
+ds
+dsize
+dup
+dup'ed
+dupcompare
+dups
+dupset
+dupsort
+efh
+eid
+electinit
+electsend
+electvote
+electwait
+encryptaes
+encryptany
+endian
+env
+envid
+envremove
+eof
+errcall
+errfile
+errno
+errpfx
+excl
+extentsize
+faststat
+fclose
+fcntl
+fcreate
+fd
+ff
+ffactor
+fget
+fh
+fid
+fileid
+fileopen
+firstkey
+fiv
+flushcommit
+foo
+fopen
+formatID
+fput
+freelist
+fset
+fstat
+fsync
+ftype
+func
+fv
+gbytes
+gc'ed
+gen
+getBranchQualifier
+getFormatId
+getGlobalTransactionId
+gettime
+gettimeofday
+gettype
+getval
+gid
+groupalloc
+gtrid
+hashproc
+hcreate
+hdestroy
+hdr
+hostname
+hsearch
+icursor
+idletimeout
+ids
+idup
+iitem
+inc
+incfirst
+indx
+init
+inlen
+inp
+insdel
+int
+intValue
+io
+iread
+isdeleted
+itemorder
+iter
+iwr
+iwrite
+javax
+kb
+kbyte
+kbytes
+keyfirst
+keygroup
+keygroups
+keygrp
+keylast
+keyrange
+killinterval
+killiteration
+killtest
+klNpP
+klNprRV
+klNprRs
+krinsky
+lM
+lP
+lang
+lastid
+ld
+len
+lf
+lg
+libdb
+lk
+llsn
+localhost
+localtime
+lockid
+logc
+logclean
+logfile
+logflush
+logsonly
+lorder
+lpgno
+lsVv
+lsn
+lsynch
+lt
+lu
+luB
+luGB
+luKB
+luKb
+luM
+luMB
+luMb
+lx
+mNP
+mNs
+machid
+makedup
+malloc
+margo
+maxcommitperflush
+maxkey
+maxlockers
+maxlocks
+maxnactive
+maxnlockers
+maxnlocks
+maxnobjects
+maxobjects
+maxops
+maxtimeout
+maxtxns
+mbytes
+mem
+memp
+metadata
+metaflags
+metagroup
+metalsn
+metapage
+metasub
+methodID
+mincommitperflush
+minkey
+minlocks
+minwrite
+minwrites
+mis
+mjc
+mkdir
+mlock
+mmap
+mmapped
+mmapsize
+mmetalsn
+mmpgno
+mp
+mpf
+mpgno
+mpool
+msg
+munmap
+mutex
+mutexes
+mutexlocks
+mv
+mvptr
+mydrive
+mydrivexxx
+nO
+nP
+nTV
+nTt
+naborts
+nactive
+nbegins
+nbytes
+ncaches
+ncommits
+nconflicts
+ndata
+ndbm
+ndeadlocks
+ndx
+needswap
+nelem
+nevict
+newalloc
+newclient
+newfile
+newitem
+newmaster
+newname
+newpage
+newpgno
+newsite
+nextdup
+nextkey
+nextlsn
+nextnodup
+nextpgno
+ng
+nitems
+nkeys
+nlockers
+nlocks
+nlsn
+nmodes
+nnext
+nnextlsn
+nnowaits
+nobjects
+nodup
+nodupdata
+nogrant
+nolocking
+nommap
+noop
+nooverwrite
+nopanic
+nosort
+nosync
+notfound
+notgranted
+nowait
+nowaits
+npages
+npgno
+nrec
+nrecords
+nreleases
+nrequests
+nrestores
+nsites
+ntasks
+nthreads
+num
+numdup
+obj
+offpage
+ok
+olddata
+olditem
+oldname
+opd
+opflags
+opmods
+orig
+os
+osynch
+outlen
+ovfl
+ovflpoint
+ovflsize
+ovref
+pageimage
+pagelsn
+pageno
+pagesize
+pagesizes
+pagfno
+panic'ing
+paniccall
+panicstate
+parentid
+passwd
+perf
+perfdb
+pflag
+pg
+pgcookie
+pgdbt
+pget
+pgfree
+pgin
+pgno
+pgnum
+pgout
+pgsize
+pid
+pkey
+plist
+pn
+postdestroy
+postlog
+postlogmeta
+postopen
+postsync
+prR
+prec
+predestroy
+preopen
+prev
+prevlsn
+prevnodup
+prheader
+pri
+printlog
+proc
+procs
+pthread
+pthreads
+ptype
+pv
+qV
+qam
+qs
+qtest
+rRV
+rRs
+rV
+rand
+rcuradj
+rdonly
+readd
+readonly
+realloc
+rec
+reclength
+recno
+recnum
+recnums
+recs
+refcount
+regionmax
+regop
+regsize
+relink
+repl
+revsplitoff
+rf
+rkey
+rlsn
+rm
+rmid
+rmw
+ro
+rootent
+rootlsn
+rpc
+rpcid
+rs
+rsplit
+runlog
+rw
+rwrw
+rwrwrw
+sS
+sV
+sVv
+scount
+secon
+secs
+sendproc
+seq
+setto
+setval
+sh
+shalloc
+shm
+shmat
+shmctl
+shmdt
+shmem
+shmget
+shr
+sleepycat
+splitdata
+splitmeta
+srand
+stat
+str
+strcmp
+strdup
+strerror
+strlen
+subdatabase
+subdb
+sv
+svc
+tV
+tVZ
+tas
+tcl
+tcp
+thr
+threadID
+tid
+tiebreaker
+timestamp
+tlen
+tm
+tmp
+tmpdir
+tmutex
+tnum
+tp
+tpcb
+treeorder
+ttpcbddlk
+ttpcbi
+ttpcbr
+ttype
+tx
+txn
+txnarray
+txnid
+txns
+txt
+ubell
+ud
+uid
+ulen
+uncorrect
+undeleting
+unmap
+unpinned
+upd
+upi
+usec
+usecs
+usr
+util
+vVxXZ
+vZ
+val
+var
+vec
+ver
+vflag
+vrfy
+vw
+vx
+vxmutex
+vxtmp
+waitsfor
+walkdupint
+walkpages
+wb
+wc
+wcount
+wordlist
+writeable
+wrnosync
+wt
+xa
+xid
+xxx
+yieldcpu
diff --git a/storage/bdb/test/scr011/chk.tags b/storage/bdb/test/scr011/chk.tags
new file mode 100644
index 00000000000..14a3c4e011d
--- /dev/null
+++ b/storage/bdb/test/scr011/chk.tags
@@ -0,0 +1,41 @@
+#!/bin/sh -
+#
+# $Id: chk.tags,v 1.10 2001/10/12 17:55:36 bostic Exp $
+#
+# Check to make sure we don't need any more symbolic links to tags files.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && ls -F | egrep / | sort |
+ sed -e 's/\///' \
+ -e '/^CVS$/d' \
+ -e '/^build_vxworks$/d' \
+ -e '/^build_win32$/d' \
+ -e '/^docs$/d' \
+ -e '/^docs_book$/d' \
+ -e '/^docs_src$/d' \
+ -e '/^java$/d' \
+ -e '/^perl$/d' \
+ -e '/^test$/d' \
+ -e '/^test_cxx$/d' \
+ -e '/^test_purify$/d' \
+ -e '/^test_thread$/d' \
+ -e '/^test_vxworks$/d') > $t1
+
+(cd $d && ls */tags | sed 's/\/tags$//' | sort) > $t2
+if diff $t1 $t2 > /dev/null; then
+ exit 0
+else
+ echo "<<< source tree >>> tags files"
+ diff $t1 $t2
+ exit 1
+fi
diff --git a/storage/bdb/test/scr012/chk.vx_code b/storage/bdb/test/scr012/chk.vx_code
new file mode 100644
index 00000000000..8d7ca608f93
--- /dev/null
+++ b/storage/bdb/test/scr012/chk.vx_code
@@ -0,0 +1,68 @@
+#!/bin/sh -
+#
+# $Id: chk.vx_code,v 1.6 2002/03/27 20:20:25 bostic Exp $
+#
+# Check to make sure the auto-generated utility code in the VxWorks build
+# directory compiles.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+rm -f t.c t1.c t2.c
+
+header()
+{
+ echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{return ($1(argv[1]));}"
+}
+
+(echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{"
+ echo "int i;") > t1.c
+
+for i in db_archive db_checkpoint db_deadlock db_dump db_load \
+ db_printlog db_recover db_stat db_upgrade db_verify dbdemo; do
+ echo " compiling build_vxworks/$i"
+ (cat $d/build_vxworks/$i/$i.c; header $i) > t.c
+ if cc -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+
+ cat $d/build_vxworks/$i/$i.c >> t2.c
+ echo "i = $i(argv[1]);" >> t1.c
+done
+
+(cat t2.c t1.c; echo "return (0); }") > t.c
+
+echo " compiling build_vxworks utility composite"
+if cc -Dlint -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile utility composite"
+ exit 1
+fi
+
+exit 0
diff --git a/storage/bdb/test/scr013/chk.stats b/storage/bdb/test/scr013/chk.stats
new file mode 100644
index 00000000000..3a404699668
--- /dev/null
+++ b/storage/bdb/test/scr013/chk.stats
@@ -0,0 +1,114 @@
+#!/bin/sh -
+#
+# $Id: chk.stats,v 1.6 2002/08/19 18:35:18 bostic Exp $
+#
+# Check to make sure all of the stat structure members are included in
+# all of the possible formats.
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t=__tmp
+
+# Extract the field names for a structure from the db.h file.
+inc_fields()
+{
+ sed -e "/struct $1 {/,/^};$/p" \
+ -e d < $d/dbinc/db.in |
+ sed -e 1d \
+ -e '$d' \
+ -e '/;/!d' \
+ -e 's/;.*//' \
+ -e 's/^[ ].*[ \*]//'
+}
+
+cat << END_OF_IGNORE > IGNORE
+bt_maxkey
+bt_metaflags
+hash_metaflags
+qs_metaflags
+qs_ndata
+END_OF_IGNORE
+
+# Check to make sure the elements of a structure from db.h appear in
+# the other files.
+inc()
+{
+ for i in `inc_fields $1`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ for j in $2; do
+ if egrep -w $i $d/$j > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in $j."
+ exitv=1
+ fi
+ done
+ done
+}
+
+inc "__db_bt_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_h_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_qam_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc __db_lock_stat \
+ "tcl/tcl_lock.c db_stat/db_stat.c docs_src/lock/lock_stat.so"
+inc __db_log_stat \
+ "tcl/tcl_log.c db_stat/db_stat.c docs_src/log/log_stat.so"
+inc __db_mpool_stat \
+ "tcl/tcl_mp.c db_stat/db_stat.c docs_src/memp/memp_stat.so"
+inc __db_txn_stat \
+ "tcl/tcl_txn.c db_stat/db_stat.c docs_src/txn/txn_stat.so"
+
+# Check to make sure the elements from a man page appears in db.in.
+man()
+{
+ for i in `cat $t`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ if egrep -w $i $d/dbinc/db.in > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in db.h."
+ exitv=1
+ fi
+ done
+}
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' < $d/docs_src/db/db_stat.so > $t
+man "checking db_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' \
+ -e 's/.* //' < $d/docs_src/lock/lock_stat.so > $t
+man "checking lock_stat.so against db.h"
+
+sed -e '/m4_stat[12](/!d' \
+ -e 's/.*m4_stat[12](\([^)]*\)).*/\1/' < $d/docs_src/log/log_stat.so > $t
+man "checking log_stat.so against db.h"
+
+sed -e '/m4_stat[123](/!d' \
+ -e 's/.*m4_stat[123](\([^)]*\)).*/\1/' < $d/docs_src/memp/memp_stat.so > $t
+man "checking memp_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(.*, \([^)]*\)).*/\1/' \
+ -e 's/__[LR]B__//g' < $d/docs_src/txn/txn_stat.so > $t
+man "checking txn_stat.so against db.h"
+
+exit $exitv
diff --git a/storage/bdb/test/scr014/chk.err b/storage/bdb/test/scr014/chk.err
new file mode 100644
index 00000000000..72b4a62719f
--- /dev/null
+++ b/storage/bdb/test/scr014/chk.err
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# $Id: chk.err,v 1.3 2002/03/27 04:33:05 bostic Exp $
+#
+# Check to make sure all of the error values have corresponding error
+# message strings in db_strerror().
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__tmp1
+t2=__tmp2
+
+egrep -- "define.*DB_.*-309" $d/dbinc/db.in | awk '{print $2}' > $t1
+sed -e '/^db_strerror/,/^}/{' \
+ -e '/ case DB_/{' \
+ -e 's/:.*//' \
+ -e 's/.* //' \
+ -e p \
+ -e '}' \
+ -e '}' \
+ -e d \
+ < $d/common/db_err.c > $t2
+
+cmp $t1 $t2 > /dev/null ||
+(echo "<<< db.h >>> db_strerror" && diff $t1 $t2 && exit 1)
+
+exit 0
diff --git a/storage/bdb/test/scr015/README b/storage/bdb/test/scr015/README
new file mode 100644
index 00000000000..75a356eea06
--- /dev/null
+++ b/storage/bdb/test/scr015/README
@@ -0,0 +1,36 @@
+# $Id: README,v 1.1 2001/05/31 23:09:11 dda Exp $
+
+Use the scripts testall or testone to run all, or just one of the C++
+tests. You must be in this directory to run them. For example,
+
+ $ export LIBS="-L/usr/include/BerkeleyDB/lib"
+ $ export CXXFLAGS="-I/usr/include/BerkeleyDB/include"
+ $ export LD_LIBRARY_PATH="/usr/include/BerkeleyDB/lib"
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use c++ in your path. Set environment variables $CXX
+to override this. It will also honor any $CXXFLAGS and $LIBS
+variables that are set, except that -c are silently removed from
+$CXXFLAGS (since we do the compilation in one step).
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_cxx-X.Y.so
+
+As an alternative, use the --prefix=<DIR> option, a la configure
+to set the top of the BerkeleyDB install directory. This forces
+the proper options to be added to $LIBS, $CXXFLAGS $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.cpp file in this
+directory that is not mentioned in the 'ignore' file represents a
+test. If the test is not compiled successfully, the compiler output
+is left in <name>.compileout . Otherwise, the java program is run in
+a clean subdirectory using as input <name>.testin, or if that doesn't
+exist, /dev/null. Output and error from the test run are put into
+<name>.out, <name>.err . If <name>.testout, <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
diff --git a/storage/bdb/test/scr015/TestConstruct01.cpp b/storage/bdb/test/scr015/TestConstruct01.cpp
new file mode 100644
index 00000000000..7ae328d458c
--- /dev/null
+++ b/storage/bdb/test/scr015/TestConstruct01.cpp
@@ -0,0 +1,330 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct01.cpp,v 1.5 2002/01/23 14:26:40 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+#define ERR(a) \
+ do { \
+ cout << "FAIL: " << (a) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR2(a1,a2) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR3(a1,a2,a3) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << ": " << (a3) << "\n"; sysexit(1); \
+ } while (0)
+
+#define CHK(a) \
+ do { \
+ int _ret; \
+ if ((_ret = (a)) != 0) { \
+ ERR3("DB function " #a " has bad return", _ret, DbEnv::strerror(_ret)); \
+ } \
+ } while (0)
+
+#ifdef VERBOSE
+#define DEBUGOUT(a) cout << a << "\n"
+#else
+#define DEBUGOUT(a)
+#endif
+
+#define CONSTRUCT01_DBNAME "construct01.db"
+#define CONSTRUCT01_DBDIR "."
+#define CONSTRUCT01_DBFULLPATH (CONSTRUCT01_DBDIR "/" CONSTRUCT01_DBNAME)
+
+int itemcount; // count the number of items in the database
+
+// A good place to put a breakpoint...
+//
+void sysexit(int status)
+{
+ exit(status);
+}
+
+void check_file_removed(const char *name, int fatal)
+{
+ unlink(name);
+#if 0
+ if (access(name, 0) == 0) {
+ if (fatal)
+ cout << "FAIL: ";
+ cout << "File \"" << name << "\" still exists after run\n";
+ if (fatal)
+ sysexit(1);
+ }
+#endif
+}
+
+// Check that key/data for 0 - count-1 are already present,
+// and write a key/data for count. The key and data are
+// both "0123...N" where N == count-1.
+//
+// For some reason on Windows, we need to open using the full pathname
+// of the file when there is no environment, thus the 'has_env'
+// variable.
+//
+void rundb(Db *db, int count, int has_env)
+{
+ const char *name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db->set_error_stream(&cerr);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ CHK(db->set_pagesize(1024));
+ CHK(db->open(NULL, name, NULL, DB_BTREE, count ? 0 : DB_CREATE, 0664));
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ char outbuf[10];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = '0' + i;
+ }
+ outbuf[i++] = '\0';
+ Dbt key(outbuf, i);
+ Dbt data(outbuf, i);
+
+ DEBUGOUT("Put: " << outbuf);
+ CHK(db->put(0, &key, &data, DB_NOOVERWRITE));
+
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ CHK(db->cursor(NULL, &dbcp, 0));
+
+ // Walk through the table, checking
+ Dbt readkey;
+ Dbt readdata;
+ while (dbcp->get(&readkey, &readdata, DB_NEXT) == 0) {
+ char *key_string = (char *)readkey.get_data();
+ char *data_string = (char *)readdata.get_data();
+ DEBUGOUT("Got: " << key_string << ": " << data_string);
+ int len = strlen(key_string);
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad");
+ }
+ else if (strcmp(data_string, key_string) != 0) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string[i] != ('0' + i)) {
+ cout << " got " << key_string
+ << " (" << (int)key_string[i] << ")"
+ << ", wanted " << i
+ << " (" << (int)('0' + i) << ")"
+ << " at position " << i << "\n";
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ cout << " expected more keys, bitmap is: " << expected << "\n";
+ ERR("missing keys in database");
+ }
+ CHK(dbcp->close());
+ CHK(db->close(0));
+}
+
+void t1(int except_flag)
+{
+ cout << " Running test 1:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t2(int except_flag)
+{
+ cout << " Running test 2:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t3(int except_flag)
+{
+ cout << " Running test 3:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t4(int except_flag)
+{
+ cout << " Running test 4:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ CHK(db.close(0));
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t5(int except_flag)
+{
+ cout << " Running test 5:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ rundb(&db, itemcount++, 1);
+ // Note we cannot reuse the old Db!
+ Db anotherdb(&env, 0);
+
+ anotherdb.set_errpfx("test5");
+ rundb(&anotherdb, itemcount++, 1);
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t6(int except_flag)
+{
+ cout << " Running test 6:\n";
+
+ /* From user [#2939] */
+ int err;
+
+ DbEnv* penv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ penv->set_cachesize(0, 32 * 1024, 0);
+ penv->open(CONSTRUCT01_DBDIR, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0);
+
+ //LEAK: remove this block and leak disappears
+ Db* pdb = new Db(penv,0);
+ if ((err = pdb->close(0)) != 0) {
+ fprintf(stderr, "Error closing Db: %s\n", db_strerror(err));
+ }
+ delete pdb;
+ //LEAK: remove this block and leak disappears
+
+ if ((err = penv->close(0)) != 0) {
+ fprintf(stderr, "Error closing DbEnv: %s\n", db_strerror(err));
+ }
+ delete penv;
+
+ // Make sure we get a message from C++ layer reminding us to close.
+ cerr << "expected error: ";
+ {
+ DbEnv foo(DB_CXX_NO_EXCEPTIONS);
+ foo.open(CONSTRUCT01_DBDIR, DB_CREATE, 0);
+ }
+ cerr << "should have received error.\n";
+ cout << " finished.\n";
+}
+
+// remove any existing environment or database
+void removeall()
+{
+ {
+ DbEnv tmpenv(DB_CXX_NO_EXCEPTIONS);
+ (void)tmpenv.remove(CONSTRUCT01_DBDIR, DB_FORCE);
+ }
+
+ check_file_removed(CONSTRUCT01_DBFULLPATH, 1);
+ for (int i=0; i<8; i++) {
+ char buf[20];
+ sprintf(buf, "__db.00%d", i);
+ check_file_removed(buf, 1);
+ }
+}
+
+int doall(int except_flag)
+{
+ itemcount = 0;
+ try {
+ // before and after the run, removing any
+ // old environment/database.
+ //
+ removeall();
+ t1(except_flag);
+ t2(except_flag);
+ t3(except_flag);
+ t4(except_flag);
+ t5(except_flag);
+ t6(except_flag);
+
+ removeall();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ ERR2("EXCEPTION RECEIVED", dbe.what());
+ }
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ int iterations = 1;
+ if (argc > 1) {
+ iterations = atoi(argv[1]);
+ if (iterations < 0) {
+ ERR("Usage: construct01 count");
+ }
+ }
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ cout << "(" << i << "/" << iterations << ") ";
+ }
+ cout << "construct01 running:\n";
+ if (doall(DB_CXX_NO_EXCEPTIONS) != 0) {
+ ERR("SOME TEST FAILED FOR NO-EXCEPTION TEST");
+ }
+ else if (doall(0) != 0) {
+ ERR("SOME TEST FAILED FOR EXCEPTION TEST");
+ }
+ else {
+ cout << "\nALL TESTS SUCCESSFUL\n";
+ }
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestConstruct01.testerr b/storage/bdb/test/scr015/TestConstruct01.testerr
new file mode 100644
index 00000000000..1ba627d103b
--- /dev/null
+++ b/storage/bdb/test/scr015/TestConstruct01.testerr
@@ -0,0 +1,4 @@
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
diff --git a/storage/bdb/test/scr015/TestConstruct01.testout b/storage/bdb/test/scr015/TestConstruct01.testout
new file mode 100644
index 00000000000..9b840f9fcf4
--- /dev/null
+++ b/storage/bdb/test/scr015/TestConstruct01.testout
@@ -0,0 +1,27 @@
+(0/1) construct01 running:
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+
+ALL TESTS SUCCESSFUL
diff --git a/storage/bdb/test/scr015/TestExceptInclude.cpp b/storage/bdb/test/scr015/TestExceptInclude.cpp
new file mode 100644
index 00000000000..28bc498222f
--- /dev/null
+++ b/storage/bdb/test/scr015/TestExceptInclude.cpp
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestExceptInclude.cpp,v 1.4 2002/07/05 22:17:59 dda Exp $
+ */
+
+/* We should be able to include cxx_except.h without db_cxx.h,
+ * and use the DbException class. We do need db.h to get a few
+ * typedefs defined that the DbException classes use.
+ *
+ * This program does nothing, it's just here to make sure
+ * the compilation works.
+ */
+#include <db.h>
+#include <cxx_except.h>
+
+int main(int argc, char *argv[])
+{
+ DbException *dbe = new DbException("something");
+ DbMemoryException *dbme = new DbMemoryException("anything");
+
+ dbe = dbme;
+}
+
diff --git a/storage/bdb/test/scr015/TestGetSetMethods.cpp b/storage/bdb/test/scr015/TestGetSetMethods.cpp
new file mode 100644
index 00000000000..81ef914eac3
--- /dev/null
+++ b/storage/bdb/test/scr015/TestGetSetMethods.cpp
@@ -0,0 +1,91 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestGetSetMethods.cpp,v 1.4 2002/01/11 15:53:59 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *dbenv = new DbEnv(0);
+ DbTxn *dbtxn;
+ u_int8_t conflicts[10];
+
+ dbenv->set_error_stream(&cerr);
+ dbenv->set_timeout(0x90000000,
+ DB_SET_LOCK_TIMEOUT);
+ dbenv->set_lg_bsize(0x1000);
+ dbenv->set_lg_dir(".");
+ dbenv->set_lg_max(0x10000000);
+ dbenv->set_lg_regionmax(0x100000);
+ dbenv->set_lk_conflicts(conflicts, sizeof(conflicts));
+ dbenv->set_lk_detect(DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv->set_lk_max(0);
+ dbenv->set_lk_max_lockers(100);
+ dbenv->set_lk_max_locks(10);
+ dbenv->set_lk_max_objects(1000);
+ dbenv->set_mp_mmapsize(0x10000);
+ dbenv->set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv->open(".", DB_CREATE | DB_INIT_TXN |
+ DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL,
+ 0644);
+
+ dbenv->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
+ dbtxn->set_timeout(0xA0000000, DB_SET_TXN_TIMEOUT);
+ dbtxn->abort();
+
+ dbenv->close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db *db_bt = new Db(NULL, 0);
+ db_bt->set_bt_maxkey(10000);
+ db_bt->set_bt_minkey(100);
+ db_bt->set_cachesize(0, 0x100000, 0);
+ db_bt->close(0);
+
+ Db *db_h = new Db(NULL, 0);
+ db_h->set_h_ffactor(0x10);
+ db_h->set_h_nelem(100);
+ db_h->set_lorder(0);
+ db_h->set_pagesize(0x10000);
+ db_h->close(0);
+
+ Db *db_re = new Db(NULL, 0);
+ db_re->set_re_delim('@');
+ db_re->set_re_pad(10);
+ db_re->set_re_source("re.in");
+ db_re->close(0);
+
+ Db *db_q = new Db(NULL, 0);
+ db_q->set_q_extentsize(200);
+ db_q->close(0);
+
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what() << "\n";
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestKeyRange.cpp b/storage/bdb/test/scr015/TestKeyRange.cpp
new file mode 100644
index 00000000000..980d2f518e0
--- /dev/null
+++ b/storage/bdb/test/scr015/TestKeyRange.cpp
@@ -0,0 +1,171 @@
+/*NOTE: AccessExample changed to test Db.key_range.
+ * We made a global change of /AccessExample/TestKeyRange/,
+ * the only other changes are marked with comments that
+ * are notated as 'ADDED'.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestKeyRange.cpp,v 1.4 2002/01/23 14:26:41 bostic Exp $
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+class TestKeyRange
+{
+public:
+ TestKeyRange();
+ void run();
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ TestKeyRange(const TestKeyRange &);
+ void operator = (const TestKeyRange &);
+};
+
+static void usage(); // forward
+
+int main(int argc, char *argv[])
+{
+ if (argc > 1) {
+ usage();
+ }
+
+ // Use a try block just to report any errors.
+ // An alternate approach to using exceptions is to
+ // use error models (see DbEnv::set_error_model()) so
+ // that error codes are returned for all Berkeley DB methods.
+ //
+ try {
+ TestKeyRange app;
+ app.run();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+static void usage()
+{
+ cerr << "usage: TestKeyRange\n";
+ exit(1);
+}
+
+const char TestKeyRange::FileName[] = "access.db";
+
+TestKeyRange::TestKeyRange()
+{
+}
+
+void TestKeyRange::run()
+{
+ // Remove the previous database.
+ (void)unlink(FileName);
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db db(0, 0);
+
+ db.set_error_stream(&cerr);
+ db.set_errpfx("TestKeyRange");
+ db.set_pagesize(1024); /* Page size: 1K. */
+ db.set_cachesize(0, 32 * 1024, 0);
+ db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ char buf[1024];
+ char rbuf[1024];
+ char *t;
+ char *p;
+ int ret;
+ int len;
+ Dbt *firstkey = NULL;
+ char firstbuf[1024];
+
+ for (;;) {
+ cout << "input>";
+ cout.flush();
+
+ cin.getline(buf, sizeof(buf));
+ if (cin.eof())
+ break;
+
+ if ((len = strlen(buf)) <= 0)
+ continue;
+ for (t = rbuf, p = buf + (len - 1); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ Dbt key(buf, len + 1);
+ Dbt data(rbuf, len + 1);
+ if (firstkey == NULL) {
+ strcpy(firstbuf, buf);
+ firstkey = new Dbt(firstbuf, len + 1);
+ }
+
+ ret = db.put(0, &key, &data, DB_NOOVERWRITE);
+ if (ret == DB_KEYEXIST) {
+ cout << "Key " << buf << " already exists.\n";
+ }
+ cout << "\n";
+ }
+
+ // We put a try block around this section of code
+ // to ensure that our database is properly closed
+ // in the event of an error.
+ //
+ try {
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ db.cursor(NULL, &dbcp, 0);
+
+ /*ADDED...*/
+ DB_KEY_RANGE range;
+ memset(&range, 0, sizeof(range));
+
+ db.key_range(NULL, firstkey, &range, 0);
+ printf("less: %f\n", range.less);
+ printf("equal: %f\n", range.equal);
+ printf("greater: %f\n", range.greater);
+ /*end ADDED*/
+
+ Dbt key;
+ Dbt data;
+
+ // Walk through the table, printing the key/data pairs.
+ while (dbcp->get(&key, &data, DB_NEXT) == 0) {
+ char *key_string = (char *)key.get_data();
+ char *data_string = (char *)data.get_data();
+ cout << key_string << " : " << data_string << "\n";
+ }
+ dbcp->close();
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ }
+
+ db.close(0);
+}
diff --git a/storage/bdb/test/scr015/TestKeyRange.testin b/storage/bdb/test/scr015/TestKeyRange.testin
new file mode 100644
index 00000000000..a2b6bd74e7b
--- /dev/null
+++ b/storage/bdb/test/scr015/TestKeyRange.testin
@@ -0,0 +1,8 @@
+first line is alphabetically somewhere in the middle.
+Blah blah
+let's have exactly eight lines of input.
+stuff
+more stuff
+and even more stuff
+lastly
+but not leastly.
diff --git a/storage/bdb/test/scr015/TestKeyRange.testout b/storage/bdb/test/scr015/TestKeyRange.testout
new file mode 100644
index 00000000000..25b2e1a835c
--- /dev/null
+++ b/storage/bdb/test/scr015/TestKeyRange.testout
@@ -0,0 +1,19 @@
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>less: 0.375000
+equal: 0.125000
+greater: 0.500000
+Blah blah : halb halB
+and even more stuff : ffuts erom neve dna
+but not leastly. : .yltsael ton tub
+first line is alphabetically somewhere in the middle. : .elddim eht ni erehwemos yllacitebahpla si enil tsrif
+lastly : yltsal
+let's have exactly eight lines of input. : .tupni fo senil thgie yltcaxe evah s'tel
+more stuff : ffuts erom
+stuff : ffuts
diff --git a/storage/bdb/test/scr015/TestLogc.cpp b/storage/bdb/test/scr015/TestLogc.cpp
new file mode 100644
index 00000000000..94fcfa0b3ec
--- /dev/null
+++ b/storage/bdb/test/scr015/TestLogc.cpp
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLogc.cpp,v 1.6 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+static void show_dbt(ostream &os, Dbt *dbt)
+{
+ int i;
+ int size = dbt->get_size();
+ unsigned char *data = (unsigned char *)dbt->get_data();
+
+ os << "size: " << size << " data: ";
+ for (i=0; i<size && i<10; i++) {
+ os << (int)data[i] << " ";
+ }
+ if (i<size)
+ os << "...";
+}
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *env = new DbEnv(0);
+ env->open(".", DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db *db1 = new Db(env, 0);
+ db1->open(NULL, "first.db", NULL, DB_BTREE, DB_CREATE, 0);
+ Dbt *key = new Dbt((char *)"a", 1);
+ Dbt *data = new Dbt((char *)"b", 1);
+ db1->put(NULL, key, data, 0);
+ key->set_data((char *)"c");
+ data->set_data((char *)"d");
+ db1->put(NULL, key, data, 0);
+ db1->close(0);
+
+ Db *db2 = new Db(env, 0);
+ db2->open(NULL, "second.db", NULL, DB_BTREE, DB_CREATE, 0);
+ key->set_data((char *)"w");
+ data->set_data((char *)"x");
+ db2->put(NULL, key, data, 0);
+ key->set_data((char *)"y");
+ data->set_data((char *)"z");
+ db2->put(NULL, key, data, 0);
+ db2->close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc *logc;
+
+ env->log_cursor(&logc, 0);
+ int ret = 0;
+ DbLsn lsn;
+ Dbt *dbt = new Dbt();
+ u_int32_t flags = DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc->get(&lsn, dbt, flags)) == 0) {
+
+ // We ignore the contents of the log record,
+ // it's not portable. Even the exact count
+ // is may change when the underlying implementation
+ // changes, we'll just make sure at the end we saw
+ // 'enough'.
+ //
+ // cout << "logc.get: " << count;
+ // show_dbt(cout, dbt);
+ // cout << "\n";
+ //
+ count++;
+ flags = DB_NEXT;
+ }
+ if (ret != DB_NOTFOUND) {
+ cerr << "*** FAIL: logc.get returned: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ logc->close(0);
+
+ // There has to be at *least* four log records,
+ // since we did four separate database operations.
+ //
+ if (count < 4)
+ cerr << "*** FAIL: not enough log records\n";
+
+ cout << "TestLogc done.\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "*** FAIL: " << dbe.what() <<"\n";
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestLogc.testout b/storage/bdb/test/scr015/TestLogc.testout
new file mode 100644
index 00000000000..afac3af7eda
--- /dev/null
+++ b/storage/bdb/test/scr015/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/storage/bdb/test/scr015/TestSimpleAccess.cpp b/storage/bdb/test/scr015/TestSimpleAccess.cpp
new file mode 100644
index 00000000000..2450b9b3030
--- /dev/null
+++ b/storage/bdb/test/scr015/TestSimpleAccess.cpp
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSimpleAccess.cpp,v 1.5 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char *)"key", 4);
+ Dbt *datadbt = new Dbt((char *)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char *)"key", 4);
+ Dbt *badkeydbt = new Dbt((char *)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestSimpleAccess.testout b/storage/bdb/test/scr015/TestSimpleAccess.testout
new file mode 100644
index 00000000000..dc88d4788e4
--- /dev/null
+++ b/storage/bdb/test/scr015/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/storage/bdb/test/scr015/TestTruncate.cpp b/storage/bdb/test/scr015/TestTruncate.cpp
new file mode 100644
index 00000000000..d5c0dc6de29
--- /dev/null
+++ b/storage/bdb/test/scr015/TestTruncate.cpp
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestTruncate.cpp,v 1.5 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char*)"key", 4);
+ Dbt *datadbt = new Dbt((char*)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char*)"key", 4);
+ Dbt *badkeydbt = new Dbt((char*)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ cout << "truncating data...\n";
+ u_int32_t nrecords;
+ db->truncate(NULL, &nrecords, 0);
+ cout << "truncate returns " << nrecords << "\n";
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "after truncate get: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ db->close(0);
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestTruncate.testout b/storage/bdb/test/scr015/TestTruncate.testout
new file mode 100644
index 00000000000..0a4bc98165d
--- /dev/null
+++ b/storage/bdb/test/scr015/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/storage/bdb/test/scr015/chk.cxxtests b/storage/bdb/test/scr015/chk.cxxtests
new file mode 100644
index 00000000000..5c21e27208c
--- /dev/null
+++ b/storage/bdb/test/scr015/chk.cxxtests
@@ -0,0 +1,71 @@
+#!/bin/sh -
+#
+# $Id: chk.cxxtests,v 1.5 2002/07/05 22:17:59 dda Exp $
+#
+# Check to make sure that regression tests for C++ run.
+
+TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -f libdb.a ] || make libdb.a || {
+ echo 'FAIL: unable to build libdb.a'
+ exit 1
+}
+[ -f libdb_cxx.a ] || make libdb_cxx.a || {
+ echo 'FAIL: unable to build libdb_cxx.a'
+ exit 1
+}
+CXX=`sed -e '/^CXX=/!d' -e 's/^CXX=//' -e 's/.*mode=compile *//' Makefile`
+echo " ====== cxx tests using $CXX"
+testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then
+ echo " **** cxx test $testname ignored"
+ continue
+ fi
+
+ echo " ==== cxx test $testname"
+ rm -rf TESTCXX; mkdir TESTCXX
+ cd ./TESTCXX
+ testprefix=../$TEST_CXX_SRCDIR/$testname
+
+ ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ./$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTCXX
+exit 0
diff --git a/storage/bdb/test/scr015/ignore b/storage/bdb/test/scr015/ignore
new file mode 100644
index 00000000000..55ce82ae372
--- /dev/null
+++ b/storage/bdb/test/scr015/ignore
@@ -0,0 +1,4 @@
+#
+# $Id: ignore,v 1.3 2001/10/12 13:02:32 dda Exp $
+#
+# A list of tests to ignore
diff --git a/storage/bdb/test/scr015/testall b/storage/bdb/test/scr015/testall
new file mode 100644
index 00000000000..a2d493a8b22
--- /dev/null
+++ b/storage/bdb/test/scr015/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id: testall,v 1.3 2001/09/13 14:49:36 dda Exp $
+#
+# Run all the C++ regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.cpp -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.cpp$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** cxx test $name ignored"
+ else
+ echo " ==== cxx test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/storage/bdb/test/scr015/testone b/storage/bdb/test/scr015/testone
new file mode 100644
index 00000000000..3bbba3f90f0
--- /dev/null
+++ b/storage/bdb/test/scr015/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id: testone,v 1.5 2002/07/05 22:17:59 dda Exp $
+#
+# Run just one C++ regression test, the single argument
+# is the basename of the test, e.g. TestRpcServer
+
+error()
+{
+ echo '' >&2
+ echo "C++ regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+gdbflag=n
+CXX=${CXX:-c++}
+LIBS=${LIBS:-}
+
+# remove any -c option in the CXXFLAGS
+CXXFLAGS="`echo " ${CXXFLAGS} " | sed -e 's/ -c //g'`"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ LIBS="-L$prefix/lib -ldb_cxx $LIBS"
+ CXXFLAGS="-I$prefix/include $CXXFLAGS"
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ --gdb )
+ CXXFLAGS="-g $CXXFLAGS"
+ gdbflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+
+${CXX} ${CXXFLAGS} -o $name ../$name.cpp ${LIBS} > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$gdbflag" = y ]; then
+ if [ -s $infile ]; then
+ echo "Input file is $infile"
+ fi
+ gdb ./$name
+ exit 0
+elif [ "$stdinflag" = y ]; then
+ ./$name >../$name.out 2>../$name.err
+else
+ ./$name <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/storage/bdb/test/scr016/CallbackTest.java b/storage/bdb/test/scr016/CallbackTest.java
new file mode 100644
index 00000000000..eede964a027
--- /dev/null
+++ b/storage/bdb/test/scr016/CallbackTest.java
@@ -0,0 +1,83 @@
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+
+public class CallbackTest
+{
+ public static void main(String args[])
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.set_bt_compare(new BtreeCompare());
+ db.open(null, "test.db", "", Db.DB_BTREE, Db.DB_CREATE, 0666);
+ StringDbt[] keys = new StringDbt[10];
+ StringDbt[] datas = new StringDbt[10];
+ for (int i = 0; i<10; i++) {
+ int val = (i * 3) % 10;
+ keys[i] = new StringDbt("key" + val);
+ datas[i] = new StringDbt("data" + val);
+ System.out.println("put " + val);
+ db.put(null, keys[i], datas[i], 0);
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("FAIL: " + dbe);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("FAIL: " + fnfe);
+ }
+
+ }
+
+
+}
+
+class BtreeCompare
+ implements DbBtreeCompare
+{
+ /* A weird comparator, for example.
+ * In fact, it may not be legal, since it's not monotonically increasing.
+ */
+ public int bt_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare function called");
+ byte b1[] = dbt1.get_data();
+ byte b2[] = dbt2.get_data();
+ System.out.println(" " + (new String(b1)) + ", " + (new String(b2)));
+ int len1 = b1.length;
+ int len2 = b2.length;
+ if (len1 != len2)
+ return (len1 < len2) ? 1 : -1;
+ int value = 1;
+ for (int i=0; i<len1; i++) {
+ if (b1[i] != b2[i])
+ return (b1[i] < b2[i]) ? value : -value;
+ value *= -1;
+ }
+ return 0;
+ }
+}
+
+class StringDbt extends Dbt
+{
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+}
diff --git a/storage/bdb/test/scr016/CallbackTest.testout b/storage/bdb/test/scr016/CallbackTest.testout
new file mode 100644
index 00000000000..68797d4a2de
--- /dev/null
+++ b/storage/bdb/test/scr016/CallbackTest.testout
@@ -0,0 +1,60 @@
+put 0
+put 3
+compare function called
+ key3, key0
+put 6
+compare function called
+ key6, key3
+put 9
+compare function called
+ key9, key6
+put 2
+compare function called
+ key2, key9
+compare function called
+ key2, key0
+compare function called
+ key2, key6
+compare function called
+ key2, key3
+compare function called
+ key2, key0
+put 5
+compare function called
+ key5, key3
+compare function called
+ key5, key9
+compare function called
+ key5, key6
+put 8
+compare function called
+ key8, key5
+compare function called
+ key8, key9
+compare function called
+ key8, key6
+put 1
+compare function called
+ key1, key9
+compare function called
+ key1, key0
+compare function called
+ key1, key5
+compare function called
+ key1, key2
+compare function called
+ key1, key0
+put 4
+compare function called
+ key4, key5
+compare function called
+ key4, key2
+compare function called
+ key4, key3
+put 7
+compare function called
+ key7, key4
+compare function called
+ key7, key8
+compare function called
+ key7, key6
diff --git a/storage/bdb/test/scr016/README b/storage/bdb/test/scr016/README
new file mode 100644
index 00000000000..226a8aa3b77
--- /dev/null
+++ b/storage/bdb/test/scr016/README
@@ -0,0 +1,37 @@
+# $Id: README,v 1.2 2001/05/31 23:09:10 dda Exp $
+
+Use the scripts testall or testone to run all, or just one of the Java
+tests. You must be in this directory to run them. For example,
+
+ $ export LD_LIBRARY_PATH=/usr/local/Berkeley3.3/lib
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use javac and java in your path. Set environment
+variables $JAVAC and $JAVA to override this. It will also and honor
+any $CLASSPATH that is already set, prepending ../../../../classes to
+it, which is where the test .class files are put, and where the DB
+.class files can normally be found after a build on Unix and Windows.
+If none of these variables are set, everything will probably work
+with whatever java/javac is in your path.
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_java-X.Y.so
+
+As an alternative, use the --prefix=<DIR> option, a la configure
+to set the top of the BerkeleyDB install directory. This forces
+the proper options to be added to $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.java file in this
+directory that is not mentioned in the 'ignore' file represents a
+test. If the test is not compiled successfully, the compiler output
+is left in <name>.compileout . Otherwise, the java program is run in
+a clean subdirectory using as input <name>.testin, or if that doesn't
+exist, /dev/null. Output and error from the test run are put into
+<name>.out, <name>.err . If <name>.testout, <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
diff --git a/storage/bdb/test/scr016/TestAppendRecno.java b/storage/bdb/test/scr016/TestAppendRecno.java
new file mode 100644
index 00000000000..f4ea70ca084
--- /dev/null
+++ b/storage/bdb/test/scr016/TestAppendRecno.java
@@ -0,0 +1,258 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestAppendRecno.java,v 1.4 2002/08/16 19:35:53 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestAppendRecno
+ implements DbAppendRecno
+{
+ private static final String FileName = "access.db";
+ int callback_count = 0;
+ Db table = null;
+
+ public TestAppendRecno()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAppendRecno\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAppendRecno app = new TestAppendRecno();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAppendRecno: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAppendRecno: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestAppendRecno");
+ table.set_append_recno(this);
+
+ table.open(null, FileName, null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+ for (int i=0; i<10; i++) {
+ System.out.println("\n*** Iteration " + i );
+ try {
+ RecnoDbt key = new RecnoDbt(77+i);
+ StringDbt data = new StringDbt("data" + i + "_xyz");
+ table.put(null, key, data, Db.DB_APPEND);
+ }
+ catch (DbException dbe) {
+ System.out.println("dbe: " + dbe);
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ RecnoDbt key = new RecnoDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getRecno() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ System.out.println("Test finished.");
+ }
+
+ public void db_append_recno(Db db, Dbt dbt, int recno)
+ throws DbException
+ {
+ int count = callback_count++;
+
+ System.out.println("====\ncallback #" + count);
+ System.out.println("db is table: " + (db == table));
+ System.out.println("recno = " + recno);
+
+ // This gives variable output.
+ //System.out.println("dbt = " + dbt);
+ if (dbt instanceof RecnoDbt) {
+ System.out.println("dbt = " +
+ ((RecnoDbt)dbt).getRecno());
+ }
+ else if (dbt instanceof StringDbt) {
+ System.out.println("dbt = " +
+ ((StringDbt)dbt).getString());
+ }
+ else {
+ // Note: the dbts are created out of whole
+ // cloth by Berkeley DB, not us!
+ System.out.println("internally created dbt: " +
+ new StringDbt(dbt) + ", size " +
+ dbt.get_size());
+ }
+
+ switch (count) {
+ case 0:
+ // nothing
+ break;
+
+ case 1:
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 2:
+ System.out.println("throwing...");
+ throw new DbException("append_recno thrown");
+ //not reached
+
+ case 3:
+ // Should result in an error (size unchanged).
+ dbt.set_offset(1);
+ break;
+
+ case 4:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 5:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 2);
+ break;
+
+ case 6:
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(3);
+ break;
+
+ case 7:
+ // Should result in an error.
+ dbt.set_data(null);
+ break;
+
+ case 8:
+ // Should result in an error.
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(4);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+
+ // Here's an example of how you can extend a Dbt to store recno's.
+ //
+ static /*inner*/
+ class RecnoDbt extends Dbt
+ {
+ RecnoDbt()
+ {
+ this(0); // let other constructor do most of the work
+ }
+
+ RecnoDbt(int value)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[4];
+ set_data(arr); // use our local array for data
+ set_ulen(4); // size of return storage
+ setRecno(value);
+ }
+
+ public String toString() /*override*/
+ {
+ return String.valueOf(getRecno());
+ }
+
+ void setRecno(int value)
+ {
+ set_recno_key_data(value);
+ set_size(arr.length);
+ }
+
+ int getRecno()
+ {
+ return get_recno_key_data();
+ }
+
+ byte arr[];
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt(Dbt dbt)
+ {
+ set_data(dbt.get_data());
+ set_size(dbt.get_size());
+ }
+
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString() /*override*/
+ {
+ return getString();
+ }
+ }
+}
+
diff --git a/storage/bdb/test/scr016/TestAppendRecno.testout b/storage/bdb/test/scr016/TestAppendRecno.testout
new file mode 100644
index 00000000000..970174e7a96
--- /dev/null
+++ b/storage/bdb/test/scr016/TestAppendRecno.testout
@@ -0,0 +1,82 @@
+
+*** Iteration 0
+====
+callback #0
+db is table: true
+recno = 1
+internally created dbt: data0_xyz, size 9
+
+*** Iteration 1
+====
+callback #1
+db is table: true
+recno = 2
+internally created dbt: data1_xyz, size 9
+
+*** Iteration 2
+====
+callback #2
+db is table: true
+recno = 3
+internally created dbt: data2_xyz, size 9
+throwing...
+dbe: com.sleepycat.db.DbException: append_recno thrown
+
+*** Iteration 3
+====
+callback #3
+db is table: true
+recno = 3
+internally created dbt: data3_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 4
+====
+callback #4
+db is table: true
+recno = 3
+internally created dbt: data4_xyz, size 9
+
+*** Iteration 5
+====
+callback #5
+db is table: true
+recno = 4
+internally created dbt: data5_xyz, size 9
+
+*** Iteration 6
+====
+callback #6
+db is table: true
+recno = 5
+internally created dbt: data6_xyz, size 9
+
+*** Iteration 7
+====
+callback #7
+db is table: true
+recno = 6
+internally created dbt: data7_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.data is null
+
+*** Iteration 8
+====
+callback #8
+db is table: true
+recno = 6
+internally created dbt: data8_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 9
+====
+callback #9
+db is table: true
+recno = 6
+internally created dbt: data9_xyz, size 9
+1 : data0_xyz
+2 : data1_xy
+3 : ata4_xyz
+4 : ata5_xy
+5 : abc
+6 : data9_xyz
+Test finished.
diff --git a/storage/bdb/test/scr016/TestAssociate.java b/storage/bdb/test/scr016/TestAssociate.java
new file mode 100644
index 00000000000..4105b9cb0a1
--- /dev/null
+++ b/storage/bdb/test/scr016/TestAssociate.java
@@ -0,0 +1,333 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestAssociate.java,v 1.4 2002/08/16 19:35:54 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Hashtable;
+
+public class TestAssociate
+ implements DbDupCompare
+{
+ private static final String FileName = "access.db";
+ public static Db saveddb1 = null;
+ public static Db saveddb2 = null;
+
+ public TestAssociate()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAssociate\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAssociate app = new TestAssociate();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAssociate: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAssociate: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public static int counter = 0;
+ public static String results[] = { "abc", "def", "ghi", "JKL", "MNO", null };
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ /*
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ */
+ return results[counter++];
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ static public String shownull(Object o)
+ {
+ if (o == null)
+ return "null";
+ else
+ return "not null";
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open("./", Db.DB_CREATE|Db.DB_INIT_MPOOL, 0644);
+ (new java.io.File(FileName)).delete();
+ Db table = new Db(dbenv, 0);
+ Db table2 = new Db(dbenv, 0);
+ table2.set_dup_compare(this);
+ table2.set_flags(Db.DB_DUPSORT);
+ table.set_error_stream(System.err);
+ table2.set_error_stream(System.err);
+ table.set_errpfx("TestAssociate");
+ table2.set_errpfx("TestAssociate(table2)");
+ System.out.println("Primary database is " + shownull(table));
+ System.out.println("Secondary database is " + shownull(table2));
+ saveddb1 = table;
+ saveddb2 = table2;
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table2.open(null, FileName + "2", null,
+ Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table.associate(null, table2, new Capitalize(), 0);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\ndef\njhi");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table2.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ StringDbt pkey = new StringDbt();
+
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+
+ key.setString("BC");
+ System.out.println("get BC returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget BC returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+ key.setString("KL");
+ System.out.println("get KL returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget KL returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString()
+ {
+ return "StringDbt=" + getString();
+ }
+ }
+
+ /* creates a stupid secondary index as follows:
+ For an N letter key, we use N-1 letters starting at
+ position 1. If the new letters are already capitalized,
+ we return the old array, but with offset set to 1.
+ If the letters are not capitalized, we create a new,
+ capitalized array. This is pretty stupid for
+ an application, but it tests all the paths in the runtime.
+ */
+ public static class Capitalize implements DbSecondaryKeyCreate
+ {
+ public int secondary_key_create(Db secondary, Dbt key, Dbt value,
+ Dbt result)
+ throws DbException
+ {
+ String which = "unknown db";
+ if (saveddb1.equals(secondary)) {
+ which = "primary";
+ }
+ else if (saveddb2.equals(secondary)) {
+ which = "secondary";
+ }
+ System.out.println("secondary_key_create, Db: " + shownull(secondary) + "(" + which + "), key: " + show_dbt(key) + ", data: " + show_dbt(value));
+ int len = key.get_size();
+ byte[] arr = key.get_data();
+ boolean capped = true;
+
+ if (len < 1)
+ throw new DbException("bad key");
+
+ if (len < 2)
+ return Db.DB_DONOTINDEX;
+
+ result.set_size(len - 1);
+ for (int i=1; capped && i<len; i++) {
+ if (!Character.isUpperCase((char)arr[i]))
+ capped = false;
+ }
+ if (capped) {
+ System.out.println(" creating key(1): " + new String(arr, 1, len-1));
+ result.set_data(arr);
+ result.set_offset(1);
+ }
+ else {
+ System.out.println(" creating key(2): " + (new String(arr)).substring(1).
+ toUpperCase());
+ result.set_data((new String(arr)).substring(1).
+ toUpperCase().getBytes());
+ }
+ return 0;
+ }
+ }
+
+ public int dup_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare");
+ int sz1 = dbt1.get_size();
+ int sz2 = dbt2.get_size();
+ if (sz1 < sz2)
+ return -1;
+ if (sz1 > sz2)
+ return 1;
+ byte[] data1 = dbt1.get_data();
+ byte[] data2 = dbt2.get_data();
+ for (int i=0; i<sz1; i++)
+ if (data1[i] != data2[i])
+ return (data1[i] < data2[i] ? -1 : 1);
+ return 0;
+ }
+
+ public static int nseen = 0;
+ public static Hashtable ht = new Hashtable();
+
+ public static String show_dbt(Dbt dbt)
+ {
+ String name;
+
+ if (dbt == null)
+ return "null dbt";
+
+ name = (String)ht.get(dbt);
+ if (name == null) {
+ name = "Dbt" + (nseen++);
+ ht.put(dbt, name);
+ }
+
+ byte[] value = dbt.get_data();
+ if (value == null)
+ return name + "(null)";
+ else
+ return name + "(\"" + new String(value) + "\")";
+ }
+}
+
+
diff --git a/storage/bdb/test/scr016/TestAssociate.testout b/storage/bdb/test/scr016/TestAssociate.testout
new file mode 100644
index 00000000000..34414b660d1
--- /dev/null
+++ b/storage/bdb/test/scr016/TestAssociate.testout
@@ -0,0 +1,30 @@
+Primary database is not null
+Secondary database is not null
+secondary_key_create, Db: not null(secondary), key: Dbt0("abc"), data: Dbt1("cba")
+ creating key(2): BC
+
+secondary_key_create, Db: not null(secondary), key: Dbt2("def"), data: Dbt3("fed")
+ creating key(2): EF
+
+secondary_key_create, Db: not null(secondary), key: Dbt4("ghi"), data: Dbt5("ihg")
+ creating key(2): HI
+
+secondary_key_create, Db: not null(secondary), key: Dbt6("JKL"), data: Dbt7("LKJ")
+ creating key(1): KL
+
+secondary_key_create, Db: not null(secondary), key: Dbt8("MNO"), data: Dbt9("ONM")
+ creating key(1): NO
+
+BC : cba
+EF : fed
+HI : ihg
+KL : LKJ
+NO : ONM
+get BC returns 0
+ values: BC : cba
+pget BC returns 0
+ values: BC : abc : cba
+get KL returns 0
+ values: KL : LKJ
+pget KL returns 0
+ values: KL : JKL : LKJ
diff --git a/storage/bdb/test/scr016/TestClosedDb.java b/storage/bdb/test/scr016/TestClosedDb.java
new file mode 100644
index 00000000000..3bd6e5380f8
--- /dev/null
+++ b/storage/bdb/test/scr016/TestClosedDb.java
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestClosedDb.java,v 1.4 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Close the Db, and make sure operations after that fail gracefully.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestClosedDb
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ // Close the db - subsequent operations should fail
+ // by throwing an exception.
+ db.close(0);
+ try {
+ db.get(null, goodkeydbt, resultdbt, 0);
+ System.out.println("Error - did not expect to get this far.");
+ }
+ catch (DbException dbe) {
+ System.out.println("Got expected Db Exception: " + dbe);
+ }
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/storage/bdb/test/scr016/TestClosedDb.testout b/storage/bdb/test/scr016/TestClosedDb.testout
new file mode 100644
index 00000000000..ce13883f63a
--- /dev/null
+++ b/storage/bdb/test/scr016/TestClosedDb.testout
@@ -0,0 +1,2 @@
+Got expected Db Exception: com.sleepycat.db.DbException: null object: Invalid argument
+finished test
diff --git a/storage/bdb/test/scr016/TestConstruct01.java b/storage/bdb/test/scr016/TestConstruct01.java
new file mode 100644
index 00000000000..b60073ebc0d
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct01.java
@@ -0,0 +1,474 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct01.java,v 1.6 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct01
+{
+ public static final String CONSTRUCT01_DBNAME = "construct01.db";
+ public static final String CONSTRUCT01_DBDIR = "/tmp";
+ public static final String CONSTRUCT01_DBFULLPATH =
+ CONSTRUCT01_DBDIR + "/" + CONSTRUCT01_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ System.err.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ // For some reason on Windows, we need to open using the full pathname
+ // of the file when there is no environment, thus the 'has_env'
+ // variable.
+ //
+ void rundb(Db db, int count, boolean has_env, TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ String name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db.set_error_stream(System.err);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ db.set_pagesize(1024);
+ db.open(null, name, null, Db.DB_BTREE,
+ (count != 0) ? 0 : Db.DB_CREATE, 0664);
+
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ //outbuf[i] = System.out.println((byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ /*
+ System.out.println("byte: " + ('0' + 0) + ", after: " +
+ (int)'0' + "=" + (int)('0' + 0) +
+ "," + (byte)outbuf[0]);
+ */
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ //DEBUGOUT("Put: " + (char)outbuf[0] + ": " + new String(outbuf));
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(options.dbt_alloc_flags);
+ readdata.set_flags(options.dbt_alloc_flags);
+
+ //DEBUGOUT("Dbc.get");
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ String key_string = new String(readkey.get_data());
+ String data_string = new String(readdata.get_data());
+ //DEBUGOUT("Got: " + key_string + ": " + data_string);
+ int len = key_string.length();
+ if (len <= 0 || key_string.charAt(len-1) != 'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" );
+ }
+ else if (!data_string.equals(key_string)) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string.charAt(i) != ('0' + i)) {
+ System.out.print(" got " + key_string
+ + " (" + (int)key_string.charAt(i)
+ + "), wanted " + i
+ + " (" + (int)('0' + i)
+ + ") at position " + i + "\n");
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " + expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ db.close(0);
+ }
+
+ void t1(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t2(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ }
+
+ void t3(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ // rundb(db, itemcount++, false, options);
+ db.set_errpfx("test3");
+ for (int i=0; i<100; i++)
+ db.set_errpfx("str" + i);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t4(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ /**/
+ //rundb(db, itemcount++, true, options);
+ db.set_errpfx("test4");
+ rundb(db, itemcount++, true, options);
+ /**/
+ env.close(0);
+ }
+
+ void t5(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ // rundb(db, itemcount++, true, options);
+ db.set_errpfx("test5");
+ rundb(db, itemcount++, true, options);
+ /*
+ env.close(0);
+
+ // reopen the environment, don't recreate
+ env.open(CONSTRUCT01_DBDIR, Db.DB_INIT_MPOOL, 0);
+ // Note we cannot reuse the old Db!
+ */
+ Db anotherdb = new Db(env, 0);
+
+ // rundb(anotherdb, itemcount++, true, options);
+ anotherdb.set_errpfx("test5");
+ rundb(anotherdb, itemcount++, true, options);
+ env.close(0);
+ }
+
+ void t6(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+ db.close(0);
+ dbenv.close(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // By design, t7 leaves a db and dbenv open; it should be detected.
+ void t7(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db)
+ {
+ {
+ if (use_db) {
+ try {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT01_DBFULLPATH, null, 0);
+ /**/
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT01_DBDIR, Db.DB_FORCE);
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ //expected error:
+ // System.err.println("error during remove: " + fnfe);
+ }
+ }
+ }
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, !use_db);
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+
+ boolean doall(TestOptions options)
+ {
+ itemcount = 0;
+ try {
+ removeall((options.testmask & 1) != 0);
+ for (int item=1; item<32; item++) {
+ if ((options.testmask & (1 << item)) != 0) {
+ VERBOSEOUT(" Running test " + item + ":");
+ switch (item) {
+ case 1:
+ t1(options);
+ break;
+ case 2:
+ t2(options);
+ break;
+ case 3:
+ t3(options);
+ break;
+ case 4:
+ t4(options);
+ break;
+ case 5:
+ t5(options);
+ break;
+ case 6:
+ t6(options);
+ break;
+ case 7:
+ t7(options);
+ break;
+ default:
+ ERR("unknown test case: " + item);
+ break;
+ }
+ VERBOSEOUT(" finished.\n");
+ }
+ }
+ removeall((options.testmask & 1) != 0);
+ options.successcounter++;
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+ int mask = 0x7f;
+
+ // Make sure the database file is removed before we start.
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, true);
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ if (arg.charAt(0) == '-') {
+ // keep on lower bit, which means to remove db between tests.
+ mask = 1;
+ for (int pos=1; pos<arg.length(); pos++) {
+ char ch = arg.charAt(pos);
+ if (ch >= '0' && ch <= '9') {
+ mask |= (1 << (ch - '0'));
+ }
+ else if (ch == 'v') {
+ verbose_flag = true;
+ }
+ else {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ VERBOSEOUT("mask = " + mask);
+
+ }
+ else {
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+ }
+
+ // Run GC before and after the test to give
+ // a baseline for any Java memory used.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+
+ TestConstruct01 con = new TestConstruct01();
+ int[] dbt_flags = { 0, Db.DB_DBT_MALLOC, Db.DB_DBT_REALLOC };
+ String[] dbt_flags_name = { "default", "malloc", "realloc" };
+
+ TestOptions options = new TestOptions();
+ options.testmask = mask;
+
+ for (int flagiter = 0; flagiter < dbt_flags.length; flagiter++) {
+ options.dbt_alloc_flags = dbt_flags[flagiter];
+
+ VERBOSEOUT("Running with DBT alloc flags: " +
+ dbt_flags_name[flagiter]);
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct01 running:");
+ if (!con.doall(options)) {
+ ERR("SOME TEST FAILED");
+ }
+ else {
+ VERBOSEOUT("\nTESTS SUCCESSFUL");
+ }
+
+ // We continually run GC during the test to keep
+ // the Java memory usage low. That way we can
+ // monitor the total memory usage externally
+ // (e.g. via ps) and verify that we aren't leaking
+ // memory in the JNI or DB layer.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ }
+ }
+
+ if (options.successcounter == 600) {
+ System.out.println("ALL TESTS SUCCESSFUL");
+ }
+ else {
+ System.out.println("***FAIL: " + (600 - options.successcounter) +
+ " tests did not complete");
+ }
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+
+}
+
+class TestOptions
+{
+ int testmask = 0; // which tests to run
+ int dbt_alloc_flags = 0; // DB_DBT_* flags to use
+ int successcounter =0;
+}
+
diff --git a/storage/bdb/test/scr016/TestConstruct01.testerr b/storage/bdb/test/scr016/TestConstruct01.testerr
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct01.testerr
diff --git a/storage/bdb/test/scr016/TestConstruct01.testout b/storage/bdb/test/scr016/TestConstruct01.testout
new file mode 100644
index 00000000000..5d2041cd197
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct01.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/storage/bdb/test/scr016/TestConstruct02.java b/storage/bdb/test/scr016/TestConstruct02.java
new file mode 100644
index 00000000000..5bbb55ccd56
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct02.java
@@ -0,0 +1,326 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct02.java,v 1.6 2002/08/16 19:35:54 dda Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct02
+{
+ public static final String CONSTRUCT02_DBNAME = "construct02.db";
+ public static final String CONSTRUCT02_DBDIR = "./";
+ public static final String CONSTRUCT02_DBFULLPATH =
+ CONSTRUCT02_DBDIR + "/" + CONSTRUCT02_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ private DbEnv dbenv = new DbEnv(0);
+
+ public TestConstruct02()
+ throws DbException, FileNotFoundException
+ {
+ dbenv.open(CONSTRUCT02_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0666);
+ }
+
+ public void close()
+ {
+ try {
+ dbenv.close(0);
+ removeall(true, true);
+ }
+ catch (DbException dbe) {
+ ERR("DbException: " + dbe);
+ }
+ }
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ void rundb(Db db, int count)
+ throws DbException, FileNotFoundException
+ {
+ if (count >= 64)
+ throw new IllegalArgumentException("rundb count arg >= 64");
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(Db.DB_DBT_MALLOC);
+ readdata.set_flags(Db.DB_DBT_MALLOC);
+
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ byte[] key_bytes = readkey.get_data();
+ byte[] data_bytes = readdata.get_data();
+
+ int len = key_bytes.length;
+ if (len != data_bytes.length) {
+ ERR("key and data are different");
+ }
+ for (i=0; i<len-1; i++) {
+ byte want = (byte)('0' + i);
+ if (key_bytes[i] != want || data_bytes[i] != want) {
+ System.out.println(" got " + new String(key_bytes) +
+ "/" + new String(data_bytes));
+ ERR("key or data is corrupt");
+ }
+ }
+ if (len <= 0 ||
+ key_bytes[len-1] != (byte)'x' ||
+ data_bytes[len-1] != (byte)'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len);
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ bitmap |= bit;
+ expected &= ~(bit);
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " +
+ expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ }
+
+ void t1()
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+
+ // Reopen no longer allowed, so we create a new db.
+ db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db, boolean remove_env)
+ {
+ {
+ try {
+ if (remove_env) {
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT02_DBDIR, Db.DB_FORCE);
+ }
+ else if (use_db) {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT02_DBFULLPATH, null, 0);
+ /**/
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ }
+ check_file_removed(CONSTRUCT02_DBFULLPATH, true, !use_db);
+ if (remove_env) {
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+ }
+
+ boolean doall()
+ {
+ itemcount = 0;
+ try {
+ VERBOSEOUT(" Running test 1:\n");
+ t1();
+ VERBOSEOUT(" finished.\n");
+ removeall(true, false);
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct02 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+ TestConstruct02 con = null;
+
+ try {
+ con = new TestConstruct02();
+ }
+ catch (DbException dbe) {
+ System.err.println("Exception: " + dbe);
+ System.exit(1);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("Exception: " + fnfe);
+ System.exit(1);
+ }
+
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct02 running:\n");
+ if (!con.doall()) {
+ ERR("SOME TEST FAILED");
+ }
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+
+ }
+ con.close();
+
+ System.out.print("ALL TESTS SUCCESSFUL\n");
+
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+}
diff --git a/storage/bdb/test/scr016/TestConstruct02.testout b/storage/bdb/test/scr016/TestConstruct02.testout
new file mode 100644
index 00000000000..5d2041cd197
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct02.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/storage/bdb/test/scr016/TestDbtFlags.java b/storage/bdb/test/scr016/TestDbtFlags.java
new file mode 100644
index 00000000000..98527e6b3e7
--- /dev/null
+++ b/storage/bdb/test/scr016/TestDbtFlags.java
@@ -0,0 +1,241 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestDbtFlags.java,v 1.4 2002/08/16 19:35:54 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestDbtFlags
+{
+ private static final String FileName = "access.db";
+ private int flag_value;
+ private int buf_size;
+ private int cur_input_line = 0;
+
+ /*zippy quotes for test input*/
+ static final String[] input_lines = {
+ "If we shadows have offended",
+ "Think but this, and all is mended",
+ "That you have but slumber'd here",
+ "While these visions did appear",
+ "And this weak and idle theme",
+ "No more yielding but a dream",
+ "Gentles, do not reprehend",
+ "if you pardon, we will mend",
+ "And, as I am an honest Puck, if we have unearned luck",
+ "Now to 'scape the serpent's tongue, we will make amends ere long;",
+ "Else the Puck a liar call; so, good night unto you all.",
+ "Give me your hands, if we be friends, and Robin shall restore amends."
+ };
+
+ public TestDbtFlags(int flag_value, int buf_size)
+ {
+ this.flag_value = flag_value;
+ this.buf_size = buf_size;
+ }
+
+ public static void runWithFlags(int flag_value, int size)
+ {
+ String msg = "=-=-=-= Test with DBT flags " + flag_value +
+ " bufsize " + size;
+ System.out.println(msg);
+ System.err.println(msg);
+
+ try
+ {
+ TestDbtFlags app = new TestDbtFlags(flag_value, size);
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestDbtFlags: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestDbtFlags: " + fnfe.toString());
+ System.exit(1);
+ }
+ }
+
+ public static void main(String argv[])
+ {
+ runWithFlags(Db.DB_DBT_MALLOC, -1);
+ runWithFlags(Db.DB_DBT_REALLOC, -1);
+ runWithFlags(Db.DB_DBT_USERMEM, 20);
+ runWithFlags(Db.DB_DBT_USERMEM, 50);
+ runWithFlags(Db.DB_DBT_USERMEM, 200);
+ runWithFlags(0, -1);
+
+ System.exit(0);
+ }
+
+ String get_input_line()
+ {
+ if (cur_input_line >= input_lines.length)
+ return null;
+ return input_lines[cur_input_line++];
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestDbtFlags");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ for (;;) {
+ //System.err.println("input line " + cur_input_line);
+ String line = get_input_line();
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line, flag_value);
+ StringDbt data = new StringDbt(reversed, flag_value);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ key.check_flags();
+ data.check_flags();
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt(flag_value, buf_size);
+ StringDbt data = new StringDbt(flag_value, buf_size);
+
+ int iteration_count = 0;
+ int dbreturn = 0;
+
+ while (dbreturn == 0) {
+ //System.err.println("iteration " + iteration_count);
+ try {
+ if ((dbreturn = iterator.get(key, data, Db.DB_NEXT)) == 0) {
+ System.out.println(key.get_string() + " : " + data.get_string());
+ }
+ }
+ catch (DbMemoryException dme) {
+ /* In a real application, we'd normally increase
+ * the size of the buffer. Since we've created
+ * this error condition for testing, we'll just report it.
+ * We still need to skip over this record, and we don't
+ * want to mess with our original Dbt's, since we want
+ * to see more errors. So create some temporary
+ * mallocing Dbts to get this record.
+ */
+ System.err.println("exception, iteration " + iteration_count +
+ ": " + dme);
+ System.err.println(" key size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+ System.err.println(" data size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+
+ dme.get_dbt().set_size(buf_size);
+ StringDbt tempkey = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ StringDbt tempdata = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ if ((dbreturn = iterator.get(tempkey, tempdata, Db.DB_NEXT)) != 0) {
+ System.err.println("cannot get expected next record");
+ return;
+ }
+ System.out.println(tempkey.get_string() + " : " +
+ tempdata.get_string());
+ }
+ iteration_count++;
+ }
+ key.check_flags();
+ data.check_flags();
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ int saved_flags;
+
+ StringDbt(int flags, int buf_size)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ if (buf_size != -1) {
+ set_data(new byte[buf_size]);
+ set_ulen(buf_size);
+ }
+ }
+
+ StringDbt(String value, int flags)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ set_string(value);
+ }
+
+ void set_string(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ check_flags();
+ }
+
+ String get_string()
+ {
+ check_flags();
+ return new String(get_data(), 0, get_size());
+ }
+
+ void check_flags()
+ {
+ int actual_flags = get_flags();
+ if (actual_flags != saved_flags) {
+ System.err.println("flags botch: expected " + saved_flags +
+ ", got " + actual_flags);
+ }
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestDbtFlags.testerr b/storage/bdb/test/scr016/TestDbtFlags.testerr
new file mode 100644
index 00000000000..7666868ebd4
--- /dev/null
+++ b/storage/bdb/test/scr016/TestDbtFlags.testerr
@@ -0,0 +1,54 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+=-=-=-= Test with DBT flags 16 bufsize -1
+=-=-=-= Test with DBT flags 32 bufsize 20
+exception, iteration 0: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 20
+ data size: 53 ulen: 20
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 20
+ data size: 55 ulen: 20
+exception, iteration 3: Dbt not large enough for available data
+ key size: 25 ulen: 20
+ data size: 25 ulen: 20
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 20
+ data size: 69 ulen: 20
+exception, iteration 5: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+exception, iteration 6: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 20
+ data size: 65 ulen: 20
+exception, iteration 8: Dbt not large enough for available data
+ key size: 32 ulen: 20
+ data size: 32 ulen: 20
+exception, iteration 9: Dbt not large enough for available data
+ key size: 33 ulen: 20
+ data size: 33 ulen: 20
+exception, iteration 10: Dbt not large enough for available data
+ key size: 30 ulen: 20
+ data size: 30 ulen: 20
+exception, iteration 11: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+=-=-=-= Test with DBT flags 32 bufsize 50
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 50
+ data size: 53 ulen: 50
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 50
+ data size: 55 ulen: 50
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 50
+ data size: 69 ulen: 50
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 50
+ data size: 65 ulen: 50
+=-=-=-= Test with DBT flags 32 bufsize 200
+=-=-=-= Test with DBT flags 0 bufsize -1
diff --git a/storage/bdb/test/scr016/TestDbtFlags.testout b/storage/bdb/test/scr016/TestDbtFlags.testout
new file mode 100644
index 00000000000..b8deb1bcc16
--- /dev/null
+++ b/storage/bdb/test/scr016/TestDbtFlags.testout
@@ -0,0 +1,78 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 16 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 20
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 50
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 200
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 0 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
diff --git a/storage/bdb/test/scr016/TestGetSetMethods.java b/storage/bdb/test/scr016/TestGetSetMethods.java
new file mode 100644
index 00000000000..a1b2722d8fd
--- /dev/null
+++ b/storage/bdb/test/scr016/TestGetSetMethods.java
@@ -0,0 +1,99 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestGetSetMethods.java,v 1.3 2002/01/11 15:54:02 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestGetSetMethods
+{
+ public void testMethods()
+ throws DbException, FileNotFoundException
+ {
+ DbEnv dbenv = new DbEnv(0);
+ DbTxn dbtxn;
+ byte[][] conflicts = new byte[10][10];
+
+ dbenv.set_timeout(0x90000000,
+ Db.DB_SET_LOCK_TIMEOUT);
+ dbenv.set_lg_bsize(0x1000);
+ dbenv.set_lg_dir(".");
+ dbenv.set_lg_max(0x10000000);
+ dbenv.set_lg_regionmax(0x100000);
+ dbenv.set_lk_conflicts(conflicts);
+ dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv.set_lk_max(0);
+ dbenv.set_lk_max_lockers(100);
+ dbenv.set_lk_max_locks(10);
+ dbenv.set_lk_max_objects(1000);
+ dbenv.set_mp_mmapsize(0x10000);
+ dbenv.set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv.open(".", Db.DB_CREATE | Db.DB_INIT_TXN |
+ Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+ Db.DB_INIT_MPOOL,
+ 0644);
+
+ dbtxn = dbenv.txn_begin(null, Db.DB_TXN_NOWAIT);
+ dbtxn.set_timeout(0xA0000000, Db.DB_SET_TXN_TIMEOUT);
+ dbtxn.abort();
+
+ dbenv.close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db db_bt = new Db(null, 0);
+ db_bt.set_bt_maxkey(10000);
+ db_bt.set_bt_minkey(100);
+ db_bt.set_cachesize(0, 0x100000, 0);
+ db_bt.close(0);
+
+ Db db_h = new Db(null, 0);
+ db_h.set_h_ffactor(0x10);
+ db_h.set_h_nelem(100);
+ db_h.set_lorder(0);
+ db_h.set_pagesize(0x10000);
+ db_h.close(0);
+
+ Db db_re = new Db(null, 0);
+ db_re.set_re_delim('@');
+ db_re.set_re_pad(10);
+ db_re.set_re_source("re.in");
+ db_re.close(0);
+
+ Db db_q = new Db(null, 0);
+ db_q.set_q_extentsize(200);
+ db_q.close(0);
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ TestGetSetMethods tester = new TestGetSetMethods();
+ tester.testMethods();
+ }
+ catch (Exception e) {
+ System.err.println("TestGetSetMethods: Exception: " + e);
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestKeyRange.java b/storage/bdb/test/scr016/TestKeyRange.java
new file mode 100644
index 00000000000..8eda2de426f
--- /dev/null
+++ b/storage/bdb/test/scr016/TestKeyRange.java
@@ -0,0 +1,203 @@
+/*NOTE: TestKeyRange is AccessExample changed to test Db.key_range.
+ * See comments with ADDED for specific areas of change.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestKeyRange.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.StringReader;
+import java.io.Reader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestKeyRange
+{
+ private static final String FileName = "access.db";
+
+ public TestKeyRange()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestKeyRange\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestKeyRange app = new TestKeyRange();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestKeyRange: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestKeyRange: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestKeyRange");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\nmiddle\nzend\nmoremiddle\nZED\nMAMAMIA");
+
+ int count= 0;/*ADDED*/
+ for (;;) {
+ String line = askForLine(reader, System.out, "input>");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null, key, data, 0)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+
+ /*START ADDED*/
+ {
+ if (count++ > 0) {
+ DbKeyRange range = new DbKeyRange();
+ table.key_range(null, key, range, 0);
+ System.out.println("less: " + range.less);
+ System.out.println("equal: " + range.equal);
+ System.out.println("greater: " + range.greater);
+ }
+ }
+ /*END ADDED*/
+
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestKeyRange.testout b/storage/bdb/test/scr016/TestKeyRange.testout
new file mode 100644
index 00000000000..c265f3289fb
--- /dev/null
+++ b/storage/bdb/test/scr016/TestKeyRange.testout
@@ -0,0 +1,27 @@
+input>
+input>
+less: 0.5
+equal: 0.5
+greater: 0.0
+input>
+less: 0.6666666666666666
+equal: 0.3333333333333333
+greater: 0.0
+input>
+less: 0.5
+equal: 0.25
+greater: 0.25
+input>
+less: 0.0
+equal: 0.2
+greater: 0.8
+input>
+less: 0.0
+equal: 0.16666666666666666
+greater: 0.8333333333333334
+input>MAMAMIA : AIMAMAM
+ZED : DEZ
+abc : cba
+middle : elddim
+moremiddle : elddimerom
+zend : dnez
diff --git a/storage/bdb/test/scr016/TestLockVec.java b/storage/bdb/test/scr016/TestLockVec.java
new file mode 100644
index 00000000000..ad48e9f2f9a
--- /dev/null
+++ b/storage/bdb/test/scr016/TestLockVec.java
@@ -0,0 +1,249 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLockVec.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * test of DbEnv.lock_vec()
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLockVec
+{
+ public static int locker1;
+ public static int locker2;
+
+ public static void gdb_pause()
+ {
+ try {
+ System.err.println("attach gdb and type return...");
+ System.in.read(new byte[10]);
+ }
+ catch (java.io.IOException ie) {
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv dbenv1 = new DbEnv(0);
+ DbEnv dbenv2 = new DbEnv(0);
+ dbenv1.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ dbenv2.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ locker1 = dbenv1.lock_id();
+ locker2 = dbenv1.lock_id();
+ Db db1 = new Db(dbenv1, 0);
+ db1.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ Db db2 = new Db(dbenv2, 0);
+ db2.open(null, "my.db", null, Db.DB_BTREE, 0, 0);
+
+ // populate our database, just two elements.
+ Dbt Akey = new Dbt("A".getBytes());
+ Dbt Adata = new Dbt("Adata".getBytes());
+ Dbt Bkey = new Dbt("B".getBytes());
+ Dbt Bdata = new Dbt("Bdata".getBytes());
+
+ // We don't allow Dbts to be reused within the
+ // same method call, so we need some duplicates.
+ Dbt Akeyagain = new Dbt("A".getBytes());
+ Dbt Bkeyagain = new Dbt("B".getBytes());
+
+ db1.put(null, Akey, Adata, 0);
+ db1.put(null, Bkey, Bdata, 0);
+
+ Dbt notInDatabase = new Dbt("C".getBytes());
+
+ /* make sure our check mechanisms work */
+ int expectedErrs = 0;
+
+ lock_check_free(dbenv2, Akey);
+ try {
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ }
+ catch (DbException dbe1) {
+ expectedErrs += 1;
+ }
+ DbLock tmplock = dbenv1.lock_get(locker1, Db.DB_LOCK_NOWAIT,
+ Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ try {
+ lock_check_free(dbenv2, Akey);
+ }
+ catch (DbException dbe2) {
+ expectedErrs += 2;
+ }
+ if (expectedErrs != 1+2) {
+ System.err.println("lock check mechanism is broken");
+ System.exit(1);
+ }
+ dbenv1.lock_put(tmplock);
+
+ /* Now on with the test, a series of lock_vec requests,
+ * with checks between each call.
+ */
+
+ System.out.println("get a few");
+ /* Request: get A(W), B(R), B(R) */
+ DbLockRequest[] reqs = new DbLockRequest[3];
+
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkeyagain, null);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+
+ System.out.println("put a couple");
+ /* Request: put A, B(first) */
+ reqs[0].set_op(Db.DB_LOCK_PUT);
+ reqs[1].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 2);
+
+ /* Locks held: B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("put one more, test index offset");
+ /* Request: put B(second) */
+ reqs[2].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 2, 1);
+
+ /* Locks held: <none> */
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+
+ System.out.println("get a few");
+ /* Request: get A(R), A(R), B(R) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akeyagain, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(R), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("try putobj");
+ /* Request: get B(R), putobj A */
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_PUT_OBJ, 0,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 1, 2);
+
+ /* Locks held: B(R), B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("get one more");
+ /* Request: get A(W) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("putall");
+ /* Request: putall */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0,
+ null, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+ db1.close(0);
+ dbenv1.close(0);
+ db2.close(0);
+ dbenv2.close(0);
+ System.out.println("done");
+ }
+ catch (DbLockNotGrantedException nge) {
+ System.err.println("Db Exception: " + nge);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ /* Verify that the lock is free, throw an exception if not.
+ * We do this by trying to grab a write lock (no wait).
+ */
+ static void lock_check_free(DbEnv dbenv, Dbt dbt)
+ throws DbException
+ {
+ DbLock tmplock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ dbenv.lock_put(tmplock);
+ }
+
+ /* Verify that the lock is held with the mode, throw an exception if not.
+ * If we have a write lock, we should not be able to get the lock
+ * for reading. If we have a read lock, we should be able to get
+ * it for reading, but not writing.
+ */
+ static void lock_check_held(DbEnv dbenv, Dbt dbt, int mode)
+ throws DbException
+ {
+ DbLock never = null;
+
+ try {
+ if (mode == Db.DB_LOCK_WRITE) {
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ }
+ else if (mode == Db.DB_LOCK_READ) {
+ DbLock rlock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ dbenv.lock_put(rlock);
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ }
+ else {
+ throw new DbException("lock_check_held bad mode");
+ }
+ }
+ catch (DbLockNotGrantedException nge) {
+ /* We expect this on our last lock_get call */
+ }
+
+ /* make sure we failed */
+ if (never != null) {
+ try {
+ dbenv.lock_put(never);
+ }
+ catch (DbException dbe2) {
+ System.err.println("Got some real troubles now");
+ System.exit(1);
+ }
+ throw new DbException("lock_check_held: lock was not held");
+ }
+ }
+
+}
diff --git a/storage/bdb/test/scr016/TestLockVec.testout b/storage/bdb/test/scr016/TestLockVec.testout
new file mode 100644
index 00000000000..1cf16c6ac4e
--- /dev/null
+++ b/storage/bdb/test/scr016/TestLockVec.testout
@@ -0,0 +1,8 @@
+get a few
+put a couple
+put one more, test index offset
+get a few
+try putobj
+get one more
+putall
+done
diff --git a/storage/bdb/test/scr016/TestLogc.java b/storage/bdb/test/scr016/TestLogc.java
new file mode 100644
index 00000000000..ec9c373a93b
--- /dev/null
+++ b/storage/bdb/test/scr016/TestLogc.java
@@ -0,0 +1,100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLogc.java,v 1.7 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLogc
+{
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv env = new DbEnv(0);
+ env.open(".", Db.DB_CREATE | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db db1 = new Db(env, 0);
+ db1.open(null, "first.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ db1.put(null, new Dbt("a".getBytes()), new Dbt("b".getBytes()), 0);
+ db1.put(null, new Dbt("c".getBytes()), new Dbt("d".getBytes()), 0);
+ db1.close(0);
+
+ Db db2 = new Db(env, 0);
+ db2.open(null, "second.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ db2.put(null, new Dbt("w".getBytes()), new Dbt("x".getBytes()), 0);
+ db2.put(null, new Dbt("y".getBytes()), new Dbt("z".getBytes()), 0);
+ db2.close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc logc = env.log_cursor(0);
+
+ int ret = 0;
+ DbLsn lsn = new DbLsn();
+ Dbt dbt = new Dbt();
+ int flags = Db.DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc.get(lsn, dbt, flags)) == 0) {
+
+ // We ignore the contents of the log record,
+ // it's not portable. Even the exact count
+ // may change when the underlying implementation
+ // changes, we'll just make sure at the end we saw
+ // 'enough'.
+ //
+ // System.out.println("logc.get: " + count);
+ // System.out.println(showDbt(dbt));
+ //
+ count++;
+ flags = Db.DB_NEXT;
+ }
+ if (ret != Db.DB_NOTFOUND) {
+ System.err.println("*** FAIL: logc.get returned: " +
+ DbEnv.strerror(ret));
+ }
+ logc.close(0);
+
+ // There has to be at *least* four log records,
+ // since we did four separate database operations.
+ //
+ if (count < 4)
+ System.out.println("*** FAIL: not enough log records");
+
+ System.out.println("TestLogc done.");
+ }
+ catch (DbException dbe) {
+ System.err.println("*** FAIL: Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("*** FAIL: FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ public static String showDbt(Dbt dbt)
+ {
+ StringBuffer sb = new StringBuffer();
+ int size = dbt.get_size();
+ byte[] data = dbt.get_data();
+ int i;
+ for (i=0; i<size && i<10; i++) {
+ sb.append(Byte.toString(data[i]));
+ sb.append(' ');
+ }
+ if (i<size)
+ sb.append("...");
+ return "size: " + size + " data: " + sb.toString();
+ }
+}
diff --git a/storage/bdb/test/scr016/TestLogc.testout b/storage/bdb/test/scr016/TestLogc.testout
new file mode 100644
index 00000000000..afac3af7eda
--- /dev/null
+++ b/storage/bdb/test/scr016/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/storage/bdb/test/scr016/TestOpenEmpty.java b/storage/bdb/test/scr016/TestOpenEmpty.java
new file mode 100644
index 00000000000..ae92fd363d9
--- /dev/null
+++ b/storage/bdb/test/scr016/TestOpenEmpty.java
@@ -0,0 +1,189 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestOpenEmpty.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestOpenEmpty
+{
+ private static final String FileName = "access.db";
+
+ public TestOpenEmpty()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestOpenEmpty\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestOpenEmpty app = new TestOpenEmpty();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestOpenEmpty: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestOpenEmpty: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ try { (new java.io.FileOutputStream(FileName)).close(); }
+ catch (IOException ioe) { }
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestOpenEmpty");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestOpenEmpty.testerr b/storage/bdb/test/scr016/TestOpenEmpty.testerr
new file mode 100644
index 00000000000..dd3e01c7ab7
--- /dev/null
+++ b/storage/bdb/test/scr016/TestOpenEmpty.testerr
@@ -0,0 +1,2 @@
+TestOpenEmpty: access.db: unexpected file type or format
+TestOpenEmpty: com.sleepycat.db.DbException: Invalid argument: Invalid argument
diff --git a/storage/bdb/test/scr016/TestReplication.java b/storage/bdb/test/scr016/TestReplication.java
new file mode 100644
index 00000000000..87cb683d60f
--- /dev/null
+++ b/storage/bdb/test/scr016/TestReplication.java
@@ -0,0 +1,289 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestReplication.java,v 1.3 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Simple test of replication, merely to exercise the individual
+ * methods in the API. Rather than use TCP/IP, our transport
+ * mechanism is just an ArrayList of byte arrays.
+ * It's managed like a queue, and synchronization is via
+ * the ArrayList object itself and java's wait/notify.
+ * It's not terribly extensible, but it's fine for a small test.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Vector;
+
+public class TestReplication extends Thread
+ implements DbRepTransport
+{
+ // Message protocol on `queue` (a Vector used as a FIFO, guarded by
+ // synchronizing on the Vector itself): each message is an Integer tag,
+ // optionally followed by a byte[] payload.
+ // -1 = stop the client; 1 = control payload follows; 2 = empty control;
+ // 3 = rec payload follows; 4 = empty rec.
+ public static final String MASTER_ENVDIR = "./master";
+ public static final String CLIENT_ENVDIR = "./client";
+
+ private Vector queue = new Vector();
+ private DbEnv master_env;
+ private DbEnv client_env;
+
+ // Create a single directory; missing parents are not created.
+ private static void mkdir(String name)
+ throws IOException
+ {
+ (new File(name)).mkdir();
+ }
+
+
+ // The client thread runs this: it opens the client replication
+ // environment, drains messages off `queue`, feeds them to
+ // rep_process_message, and finally reads back the record the
+ // master wrote.
+ public void run()
+ {
+ try {
+ System.err.println("c10");
+ client_env = new DbEnv(0);
+ System.err.println("c11");
+ client_env.set_rep_transport(1, this);
+ System.err.println("c12");
+ client_env.open(CLIENT_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ System.err.println("c13");
+ Dbt myid = new Dbt("master01".getBytes());
+ System.err.println("c14");
+ client_env.rep_start(myid, Db.DB_REP_CLIENT);
+ System.err.println("c15");
+ DbEnv.RepProcessMessage processMsg = new DbEnv.RepProcessMessage();
+ processMsg.envid = 2;
+ System.err.println("c20");
+ boolean running = true;
+
+ Dbt control = new Dbt();
+ Dbt rec = new Dbt();
+
+ while (running) {
+ int msgtype = 0;
+
+ System.err.println("c30");
+ synchronized (queue) {
+ if (queue.size() == 0) {
+ System.err.println("c40");
+ // NOTE(review): sleepShort() sleeps while still holding
+ // the queue monitor, so send() is blocked for the whole
+ // 100ms; a wait/notify on the queue would avoid that.
+ sleepShort();
+ }
+ else {
+ msgtype = ((Integer)queue.firstElement()).intValue();
+ queue.removeElementAt(0);
+ byte[] data;
+
+ System.err.println("c50 " + msgtype);
+
+ switch (msgtype) {
+ case -1:
+ running = false;
+ break;
+ case 1:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ control.set_data(data);
+ control.set_size(data.length);
+ break;
+ case 2:
+ control.set_data(null);
+ control.set_size(0);
+ break;
+ case 3:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ rec.set_data(data);
+ rec.set_size(data.length);
+ break;
+ case 4:
+ rec.set_data(null);
+ rec.set_size(0);
+ break;
+ }
+
+ }
+ }
+ System.err.println("c60");
+ // Only once the rec half has arrived (tag 3 or 4) is the
+ // complete control/rec pair handed to the replication engine.
+ if (msgtype == 3 || msgtype == 4) {
+ System.out.println("cLIENT: Got message");
+ client_env.rep_process_message(control, rec,
+ processMsg);
+ }
+ }
+ System.err.println("c70");
+ Db db = new Db(client_env, 0);
+ db.open(null, "x.db", null, Db.DB_BTREE, 0, 0);
+ Dbt data = new Dbt();
+ System.err.println("c80");
+ db.get(null, new Dbt("Hello".getBytes()), data, 0);
+ System.err.println("c90");
+ System.out.println("Hello " + new String(data.get_data(), data.get_offset(), data.get_size()));
+ System.err.println("c95");
+ client_env.close(0);
+ }
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
+ // Implements DbTransport
+ // Replication transport callback: invoked by the master's environment;
+ // enqueues defensive copies of the control and rec buffers (plus the
+ // protocol tags above) for the client thread to consume.
+ public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
+ throws DbException
+ {
+ System.out.println("Send to " + envid);
+ if (envid == 1) {
+ System.err.println("Unexpected envid = " + envid);
+ return 0;
+ }
+
+ int nbytes = 0;
+
+ synchronized (queue) {
+ System.out.println("Sending message");
+ byte[] data = control.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(1));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(2));
+ }
+
+ data = rec.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(3));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(4));
+ }
+ System.out.println("MASTER: sent message");
+ }
+ return 0;
+ }
+
+ // Brief pause used while polling the queue; interruption is ignored.
+ public void sleepShort()
+ {
+ try {
+ sleep(100);
+ }
+ catch (InterruptedException ie)
+ {
+ }
+ }
+
+ // Post the -1 "stop" tag so the client thread's loop terminates.
+ public void send_terminator()
+ {
+ synchronized (queue) {
+ queue.addElement(new Integer(-1));
+ }
+ }
+
+ // Runs the master in the calling thread: opens the master env,
+ // writes one record, then signals the client to stop.
+ // NOTE(review): the master registers the id "client01" while the
+ // client registers "master01" — the names look swapped; confirm
+ // against rep_start() semantics.
+ public void master()
+ {
+ try {
+ master_env = new DbEnv(0);
+ master_env.set_rep_transport(2, this);
+ master_env.open(MASTER_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0644);
+ System.err.println("10");
+ Dbt myid = new Dbt("client01".getBytes());
+ master_env.rep_start(myid, Db.DB_REP_MASTER);
+ System.err.println("10");
+ Db db = new Db(master_env, 0);
+ System.err.println("20");
+ db.open(null, "x.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.err.println("30");
+ db.put(null, new Dbt("Hello".getBytes()),
+ new Dbt("world".getBytes()), 0);
+ System.err.println("40");
+ //DbEnv.RepElectResult electionResult = new DbEnv.RepElectResult();
+ //master_env.rep_elect(2, 2, 3, 4, electionResult);
+ db.close(0);
+ System.err.println("50");
+ master_env.close(0);
+ send_terminator();
+ }
+ // NOTE(review): the message says "client" but this is the master path.
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
+ // Entry point: starts a 10-second watchdog, creates both env
+ // directories, runs the client on a thread and the master inline,
+ // then joins the client before disarming the watchdog.
+ public static void main(String[] args)
+ {
+ // The test should only take a few milliseconds.
+ // give it 10 seconds before bailing out.
+ TimelimitThread t = new TimelimitThread(1000*10);
+ t.start();
+
+ try {
+ mkdir(CLIENT_ENVDIR);
+ mkdir(MASTER_ENVDIR);
+
+ TestReplication rep = new TestReplication();
+
+ // Run the client as a separate thread.
+ rep.start();
+
+ // Run the master.
+ rep.master();
+
+ // Wait for the master to finish.
+ rep.join();
+ }
+ catch (Exception e)
+ {
+ System.err.println("Exception: " + e);
+ }
+ t.finished();
+ }
+
+}
+
+// Watchdog: terminates the JVM if finished() has not been called within
+// nmillis milliseconds, polling the flag every 100ms at most.
+// NOTE(review): `finished` is read cross-thread without volatile or
+// synchronization, and System.exit(1) runs unconditionally after the
+// loop — i.e. even when finished() was called before the deadline.
+// The exit should probably be guarded by `if (!finished)`.
+class TimelimitThread extends Thread
+{
+ long nmillis;
+ boolean finished = false;
+
+ TimelimitThread(long nmillis)
+ {
+ this.nmillis = nmillis;
+ }
+
+ // Disarm the watchdog (called by the test on normal completion).
+ public void finished()
+ {
+ finished = true;
+ }
+
+ public void run()
+ {
+ long targetTime = System.currentTimeMillis() + nmillis;
+ long curTime;
+
+ while (!finished &&
+ ((curTime = System.currentTimeMillis()) < targetTime)) {
+ long diff = targetTime - curTime;
+ if (diff > 100)
+ diff = 100;
+ try {
+ sleep(diff);
+ }
+ catch (InterruptedException ie) {
+ }
+ }
+ System.err.println("");
+ System.exit(1);
+ }
+}
diff --git a/storage/bdb/test/scr016/TestRpcServer.java b/storage/bdb/test/scr016/TestRpcServer.java
new file mode 100644
index 00000000000..ef325cef075
--- /dev/null
+++ b/storage/bdb/test/scr016/TestRpcServer.java
@@ -0,0 +1,193 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestRpcServer.java,v 1.3 2002/01/11 15:54:03 bostic Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestRpcServer
+{
+ // Exercises a Db.DB_CLIENT (RPC) environment against a server on
+ // localhost; the "interactive" input is scripted via a StringReader
+ // so the test runs unattended.
+ private static final String FileName = "access.db";
+
+ public TestRpcServer()
+ {
+ }
+
+ // NOTE(review): usage() is never called anywhere in this class.
+ private static void usage()
+ {
+ System.err.println("usage: TestRpcServer\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestRpcServer app = new TestRpcServer();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestRpcServer: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestRpcServer: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ // Inserts each scripted line keyed by itself with its reversal as
+ // data, then cursors over the table printing every pair.
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // DB_CLIENT routes all operations through the RPC server below.
+ DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
+ dbenv.set_rpc_server(null, "localhost", 0, 0, 0);
+ dbenv.open(".", Db.DB_CREATE, 0644);
+ System.out.println("server connection set");
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(dbenv, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestRpcServer");
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader =
+ new StringReader("abc\nStuff\nmore Stuff\nlast line\n");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ // NOTE(review): set_size() passes the char count, not the byte
+ // count from getBytes(); the two differ for non-ASCII strings.
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestSameDbt.java b/storage/bdb/test/scr016/TestSameDbt.java
new file mode 100644
index 00000000000..9866ed49307
--- /dev/null
+++ b/storage/bdb/test/scr016/TestSameDbt.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSameDbt.java,v 1.4 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSameDbt
+{
+ // Passes the same Dbt object as both key and data to put(); the test
+ // succeeds only if the library rejects that reuse with a DbException.
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // try reusing the dbt
+ Dbt keydatadbt = new Dbt("stuff".getBytes());
+ int gotexcept = 0;
+
+ try {
+ db.put(null, keydatadbt, keydatadbt, 0);
+ }
+ catch (DbException dbe) {
+ System.out.println("got expected Db Exception: " + dbe);
+ gotexcept++;
+ }
+
+ // Exactly one exception must have been observed.
+ if (gotexcept != 1) {
+ System.err.println("Missed exception");
+ System.out.println("** FAIL **");
+ }
+ else {
+ System.out.println("Test succeeded.");
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/storage/bdb/test/scr016/TestSameDbt.testout b/storage/bdb/test/scr016/TestSameDbt.testout
new file mode 100644
index 00000000000..be4bbbe59e9
--- /dev/null
+++ b/storage/bdb/test/scr016/TestSameDbt.testout
@@ -0,0 +1,2 @@
+got expected Db Exception: com.sleepycat.db.DbException: Dbt is already in use
+Test succeeded.
diff --git a/storage/bdb/test/scr016/TestSimpleAccess.java b/storage/bdb/test/scr016/TestSimpleAccess.java
new file mode 100644
index 00000000000..ba7390cada1
--- /dev/null
+++ b/storage/bdb/test/scr016/TestSimpleAccess.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSimpleAccess.java,v 1.5 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSimpleAccess
+{
+ // Minimal smoke test: open a btree database and run the shared
+ // put/get exercise in TestUtil.populate().
+ // NOTE(review): db.close(0) is never called — cleanup relies on
+ // process exit.
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ TestUtil.populate(db);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestSimpleAccess.testout b/storage/bdb/test/scr016/TestSimpleAccess.testout
new file mode 100644
index 00000000000..dc88d4788e4
--- /dev/null
+++ b/storage/bdb/test/scr016/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/storage/bdb/test/scr016/TestStat.java b/storage/bdb/test/scr016/TestStat.java
new file mode 100644
index 00000000000..55ba9823115
--- /dev/null
+++ b/storage/bdb/test/scr016/TestStat.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestStat.java,v 1.1 2002/08/16 19:35:56 dda Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestStat
+{
+ // Opens a full transactional environment, populates one database via
+ // TestUtil, and prints a field from each of the btree, log, and
+ // replication statistics structures.
+ // NOTE(review): neither db nor dbenv is closed — cleanup relies on
+ // process exit.
+ public static void main(String[] args)
+ {
+ int envflags =
+ Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK |
+ Db.DB_INIT_LOG | Db.DB_INIT_TXN | Db.DB_CREATE;
+ try {
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open(".", envflags, 0);
+ Db db = new Db(dbenv, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+
+ TestUtil.populate(db);
+ System.out.println("BtreeStat:");
+ // stat() returns the base type; the btree database yields a
+ // DbBtreeStat, hence the downcast.
+ DbBtreeStat stat = (DbBtreeStat)db.stat(0);
+ System.out.println(" bt_magic: " + stat.bt_magic);
+
+ System.out.println("LogStat:");
+ DbLogStat logstat = dbenv.log_stat(0);
+ System.out.println(" st_magic: " + logstat.st_magic);
+ System.out.println(" st_cur_file: " + logstat.st_cur_file);
+
+ System.out.println("RepStat:");
+ DbRepStat repstat = dbenv.rep_stat(0);
+ System.out.println(" st_status: " + repstat.st_status);
+ System.out.println(" st_log_duplication: " +
+ repstat.st_log_duplicated);
+
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestStat.testout b/storage/bdb/test/scr016/TestStat.testout
new file mode 100644
index 00000000000..caf9db1fb13
--- /dev/null
+++ b/storage/bdb/test/scr016/TestStat.testout
@@ -0,0 +1,11 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+BtreeStat:
+ bt_magic: 340322
+LogStat:
+ st_magic: 264584
+ st_cur_file: 1
+RepStat:
+ st_status: 0
+ st_log_duplication: 0
+finished test
diff --git a/storage/bdb/test/scr016/TestTruncate.java b/storage/bdb/test/scr016/TestTruncate.java
new file mode 100644
index 00000000000..71377236246
--- /dev/null
+++ b/storage/bdb/test/scr016/TestTruncate.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestTruncate.java,v 1.4 2002/01/23 14:29:52 bostic Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestTruncate
+{
+ // Inserts one record, verifies good/bad-key gets, then truncates the
+ // database and checks the record is really gone.
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ System.out.println("truncating data...");
+ int nrecords = db.truncate(null, 0);
+ System.out.println("truncate returns " + nrecords);
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ // NOTE(review): the "trunctate" typo below is mirrored in
+ // TestTruncate.testout; fixing one requires fixing both.
+ System.out.println("after trunctate get: " +
+ DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ db.close(0);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/storage/bdb/test/scr016/TestTruncate.testout b/storage/bdb/test/scr016/TestTruncate.testout
new file mode 100644
index 00000000000..23f291df754
--- /dev/null
+++ b/storage/bdb/test/scr016/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after trunctate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/storage/bdb/test/scr016/TestUtil.java b/storage/bdb/test/scr016/TestUtil.java
new file mode 100644
index 00000000000..1bddfb0b014
--- /dev/null
+++ b/storage/bdb/test/scr016/TestUtil.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestUtil.java,v 1.1 2002/08/16 19:35:56 dda Exp $
+ */
+
+/*
+ * Utilities used by many tests.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestUtil
+{
+ // Shared test helper: inserts a single ("key", "data") record into db,
+ // then performs one get with the good key (prints "got data: ...") and
+ // one with a bad key (prints the expected DB_NOTFOUND error text).
+ // Output is matched verbatim by the *.testout files of callers.
+ public static void populate(Db db)
+ throws DbException
+ {
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestXAServlet.java b/storage/bdb/test/scr016/TestXAServlet.java
new file mode 100644
index 00000000000..8b9fe57e261
--- /dev/null
+++ b/storage/bdb/test/scr016/TestXAServlet.java
@@ -0,0 +1,313 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestXAServlet.java,v 1.1 2002/04/24 03:26:33 dda Exp $
+ */
+
+/*
+ * Simple test of XA, using WebLogic.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import com.sleepycat.db.xa.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Hashtable;
+import javax.servlet.*;
+import javax.servlet.http.*;
+import javax.transaction.*;
+import javax.transaction.xa.*;
+import javax.naming.Context;
+import javax.naming.InitialContext;
+import javax.naming.NamingException;
+import weblogic.transaction.TxHelper;
+import weblogic.transaction.TransactionManager;
+
+public class TestXAServlet extends HttpServlet
+{
+ public static final String ENV_HOME = "TESTXADIR";
+ public static final String DEFAULT_URL = "t3://localhost:7001";
+ public static String filesep = System.getProperty("file.separator");
+
+ private static TransactionManager tm;
+ private static DbXAResource xaresource;
+ private static boolean initialized = false;
+
+ /**
+ * Utility to remove files recursively.
+ */
+ public static void removeRecursive(File f)
+ {
+ if (f.isDirectory()) {
+ String[] sub = f.list();
+ for (int i=0; i<sub.length; i++)
+ // NOTE(review): f.getName() drops the parent path, so this
+ // only works for directories directly under the working
+ // directory (as ENV_HOME is); f.getPath() would be correct
+ // for deeper nesting.
+ removeRecursive(new File(f.getName() + filesep + sub[i]));
+ }
+ f.delete();
+ }
+
+ /**
+ * Typically done only once, unless shutdown is invoked. This
+ * sets up directories, and removes any work files from previous
+ * runs. Also establishes a transaction manager that we'll use
+ * for various transactions. Each call opens/creates a new DB
+ * environment in our work directory.
+ */
+ public static synchronized void startup()
+ {
+ if (initialized)
+ return;
+
+ try {
+ File dir = new File(ENV_HOME);
+ removeRecursive(dir);
+ dir.mkdirs();
+
+ System.out.println("Getting context");
+ InitialContext ic = getInitialContext(DEFAULT_URL);
+ System.out.println("Creating XAResource");
+ xaresource = new DbXAResource(ENV_HOME, 77, 0);
+ System.out.println("Registering with transaction manager");
+ tm = TxHelper.getTransactionManager();
+ tm.registerStaticResource("DbXA", xaresource);
+ initialized = true;
+ }
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+ // NOTE(review): this marks the servlet initialized even when the
+ // try block above failed and was caught — a failed startup will
+ // never be retried.
+ initialized = true;
+ }
+
+ /**
+ * Closes the XA resource manager.
+ */
+ public static synchronized void shutdown(PrintWriter out)
+ throws XAException
+ {
+ if (!initialized)
+ return;
+
+ out.println("Closing the resource.");
+ xaresource.close(0);
+ out.println("Shutdown complete.");
+ initialized = false;
+ }
+
+
+ /**
+ * Should be called once per chunk of major activity.
+ */
+ public void initialize()
+ {
+ startup();
+ }
+
+ private static int count = 1;
+ private static boolean debugInited = false;
+ // NOTE(review): bogusXid appears unused; getBogusXid() constructs a
+ // fresh DbXid instead.
+ private Xid bogusXid;
+
+ public static synchronized int incrCount()
+ {
+ return count++;
+ }
+
+ // First-request hook: loads the Berkeley DB native library and tells
+ // the user it is safe to attach a debugger before XA is initialized.
+ public void debugSetup(PrintWriter out)
+ throws ServletException, IOException
+ {
+ try {
+ Db.load_db();
+ }
+ catch (Exception e) {
+ out.println("got exception during load: " + e);
+ System.out.println("got exception during load: " + e);
+ }
+ out.println("The servlet has been restarted, and Berkeley DB is loaded");
+ out.println("<p>If you're debugging, you should now start the debugger and set breakpoints.");
+ }
+
+ // Runs one XA transaction: put a key/value inside a TM-managed
+ // transaction, commit or roll back per `operation`, then dump the
+ // database contents into the response.
+ public void doXATransaction(PrintWriter out, String key, String value,
+ String operation)
+ throws ServletException, IOException
+ {
+ try {
+ int counter = incrCount();
+ if (key == null || key.equals(""))
+ key = "key" + counter;
+ if (value == null || value.equals(""))
+ value = "value" + counter;
+
+ out.println("Adding (\"" + key + "\", \"" + value + "\")");
+
+ System.out.println("XA transaction begin");
+ tm.begin();
+ System.out.println("getting XA transaction");
+ DbXAResource.DbAttach attach = DbXAResource.xa_attach(null, null);
+ DbTxn txn = attach.get_txn();
+ DbEnv env = attach.get_env();
+ Db db = new Db(env, 0);
+ db.open(txn, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.out.println("DB put " + key);
+ db.put(txn,
+ new Dbt(key.getBytes()),
+ new Dbt(value.getBytes()),
+ 0);
+
+ if (operation.equals("rollback")) {
+ out.println("<p>ROLLBACK");
+ System.out.println("XA transaction rollback");
+ tm.rollback();
+ System.out.println("XA rollback returned");
+
+ // The old db is no good after the rollback
+ // since the open was part of the transaction.
+ // Get another db for the cursor dump
+ //
+ db = new Db(env, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ }
+ else {
+ out.println("<p>COMMITTED");
+ System.out.println("XA transaction commit");
+ tm.commit();
+ }
+
+ // Show the current state of the database.
+ Dbc dbc = db.cursor(null, 0);
+ Dbt gotkey = new Dbt();
+ Dbt gotdata = new Dbt();
+
+ out.println("<p>Current database values:");
+ while (dbc.get(gotkey, gotdata, Db.DB_NEXT) == 0) {
+ out.println("<br> " + getDbtString(gotkey) + " : "
+ + getDbtString(gotdata));
+ }
+ dbc.close();
+ db.close(0);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ out.println(" *** Exception received: " + dbe);
+ dbe.printStackTrace();
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ out.println(" *** Exception received: " + fnfe);
+ fnfe.printStackTrace();
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ out.println(" *** Exception received: " + e);
+ e.printStackTrace();
+ }
+ }
+
+ // A deliberately fake Xid used to exercise forget()/prepare()
+ // directly, bypassing the transaction manager.
+ private static Xid getBogusXid()
+ throws XAException
+ {
+ return new DbXid(1, "BOGUS_gtrid".getBytes(),
+ "BOGUS_bqual".getBytes());
+ }
+
+ private static String getDbtString(Dbt dbt)
+ {
+ return new String(dbt.get_data(), 0, dbt.get_size());
+ }
+
+ /**
+ * doGet is called as a result of invoking the servlet.
+ */
+ public void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException
+ {
+ try {
+ resp.setContentType("text/html");
+ PrintWriter out = resp.getWriter();
+
+ String key = req.getParameter("key");
+ String value = req.getParameter("value");
+ String operation = req.getParameter("operation");
+
+ out.println("<HTML>");
+ out.println("<HEAD>");
+ out.println("<TITLE>Berkeley DB with XA</TITLE>");
+ out.println("</HEAD><BODY>");
+ out.println("<a href=\"TestXAServlet" +
+ "\">Database put and commit</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=rollback" +
+ "\">Database put and rollback</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=close" +
+ "\">Close the XA resource manager</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=forget" +
+ "\">Forget an operation (bypasses TM)</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=prepare" +
+ "\">Prepare an operation (bypasses TM)</a><br>");
+ out.println("<br>");
+
+ if (!debugInited) {
+ // Don't initialize XA yet, give the user
+ // a chance to attach a debugger if necessary.
+ debugSetup(out);
+ debugInited = true;
+ }
+ else {
+ initialize();
+ if (operation == null)
+ operation = "commit";
+
+ if (operation.equals("close")) {
+ shutdown(out);
+ }
+ else if (operation.equals("forget")) {
+ // A bogus test, we just make sure the API is callable.
+ out.println("<p>FORGET");
+ System.out.println("XA forget bogus XID (bypass TM)");
+ xaresource.forget(getBogusXid());
+ }
+ else if (operation.equals("prepare")) {
+ // A bogus test, we just make sure the API is callable.
+ out.println("<p>PREPARE");
+ System.out.println("XA prepare bogus XID (bypass TM)");
+ xaresource.prepare(getBogusXid());
+ }
+ else {
+ // commit, rollback, prepare, forget
+ doXATransaction(out, key, value, operation);
+ }
+ }
+ out.println("</BODY></HTML>");
+
+ System.out.println("Finished.");
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+
+ }
+
+
+ /**
+ * From weblogic's sample code:
+ * samples/examples/jta/jmsjdbc/Client.java
+ */
+ private static InitialContext getInitialContext(String url)
+ throws NamingException
+ {
+ Hashtable env = new Hashtable();
+ env.put(Context.INITIAL_CONTEXT_FACTORY,
+ "weblogic.jndi.WLInitialContextFactory");
+ env.put(Context.PROVIDER_URL, url);
+ return new InitialContext(env);
+ }
+
+}
diff --git a/storage/bdb/test/scr016/chk.javatests b/storage/bdb/test/scr016/chk.javatests
new file mode 100644
index 00000000000..34d7dfe78d7
--- /dev/null
+++ b/storage/bdb/test/scr016/chk.javatests
@@ -0,0 +1,79 @@
+#!/bin/sh -
+#
+# $Id: chk.javatests,v 1.5 2002/08/16 19:35:56 dda Exp $
+#
+# Check to make sure that regression tests for Java run.
+
+TEST_JAVA_SRCDIR=../test/scr016 # must be a relative directory
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently from JDK1.1 and JDK1.2
+export CLASSPATH="./classes:../db.jar"
+export LD_LIBRARY_PATH="../.libs"
+
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+version=`sed -e 's/.* \([0-9]*\.[0-9]*\)\..*/\1/' -e q ../README `
+[ -f libdb_java-$version.la ] || make libdb_java-$version.la || {
+ echo "FAIL: unable to build libdb_java-$version.la"
+ exit 1
+}
+[ -f db.jar ] || make db.jar || {
+ echo 'FAIL: unable to build db.jar'
+ exit 1
+}
+testnames=`cd $TEST_JAVA_SRCDIR; ls *.java | sed -e 's/\.java$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_JAVA_SRCDIR/ignore > /dev/null; then
+ echo " **** java test $testname ignored"
+ continue
+ fi
+
+ echo " ==== java test $testname"
+ rm -rf TESTJAVA; mkdir -p TESTJAVA/classes
+ cd ./TESTJAVA
+ testprefix=../$TEST_JAVA_SRCDIR/$testname
+ ${JAVAC} -d ./classes $testprefix.java ../$TEST_JAVA_SRCDIR/TestUtil.java > ../$testname.compileout 2>&1 || {
+pwd
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ${JAVA} com.sleepycat.test.$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTJAVA
+exit 0
diff --git a/storage/bdb/test/scr016/ignore b/storage/bdb/test/scr016/ignore
new file mode 100644
index 00000000000..1dfaf6adea4
--- /dev/null
+++ b/storage/bdb/test/scr016/ignore
@@ -0,0 +1,22 @@
+#
+# $Id: ignore,v 1.4 2002/08/16 19:35:56 dda Exp $
+#
+# A list of tests to ignore
+
+# TestRpcServer is not debugged
+TestRpcServer
+
+# TestReplication is not debugged
+TestReplication
+
+# These are currently not working
+TestAppendRecno
+TestAssociate
+TestLogc
+TestConstruct02
+
+# TestUtil is used by the other tests, it does not stand on its own
+TestUtil
+
+# XA needs a special installation, it is not part of testall
+TestXAServlet
diff --git a/storage/bdb/test/scr016/testall b/storage/bdb/test/scr016/testall
new file mode 100644
index 00000000000..a4e1b5a8c70
--- /dev/null
+++ b/storage/bdb/test/scr016/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id: testall,v 1.4 2001/09/13 14:49:37 dda Exp $
+#
+# Run all the Java regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.java -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.java$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** java test $name ignored"
+ else
+ echo " ==== java test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/storage/bdb/test/scr016/testone b/storage/bdb/test/scr016/testone
new file mode 100644
index 00000000000..5f5d2e0017d
--- /dev/null
+++ b/storage/bdb/test/scr016/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id: testone,v 1.5 2002/08/16 19:35:56 dda Exp $
+#
+# Run just one Java regression test, the single argument
+# is the classname within this package.
+
+error()
+{
+ echo '' >&2
+ echo "Java regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# classdir is relative to TESTDIR subdirectory
+classdir=./classes
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently from JDK1.1 and JDK1.2
+export CLASSPATH="$classdir:$CLASSPATH"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ export CLASSPATH="$prefix/lib/db.jar:$CLASSPATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# class must be public
+if ! grep "public.*class.*$name" $name.java > /dev/null; then
+ error "public class $name is not declared in file $name.java"
+ exit 1
+fi
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+mkdir -p $classdir
+${JAVAC} -d $classdir ../$name.java ../TestUtil.java > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$stdinflag" = y ]
+then
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS >../$name.out 2>../$name.err
+else
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/storage/bdb/test/scr017/O.BH b/storage/bdb/test/scr017/O.BH
new file mode 100644
index 00000000000..cd499d38779
--- /dev/null
+++ b/storage/bdb/test/scr017/O.BH
@@ -0,0 +1,196 @@
+abc_10_efg
+abc_10_efg
+abc_11_efg
+abc_11_efg
+abc_12_efg
+abc_12_efg
+abc_13_efg
+abc_13_efg
+abc_14_efg
+abc_14_efg
+abc_15_efg
+abc_15_efg
+abc_16_efg
+abc_16_efg
+abc_17_efg
+abc_17_efg
+abc_18_efg
+abc_18_efg
+abc_19_efg
+abc_19_efg
+abc_1_efg
+abc_1_efg
+abc_20_efg
+abc_20_efg
+abc_21_efg
+abc_21_efg
+abc_22_efg
+abc_22_efg
+abc_23_efg
+abc_23_efg
+abc_24_efg
+abc_24_efg
+abc_25_efg
+abc_25_efg
+abc_26_efg
+abc_26_efg
+abc_27_efg
+abc_27_efg
+abc_28_efg
+abc_28_efg
+abc_29_efg
+abc_29_efg
+abc_2_efg
+abc_2_efg
+abc_30_efg
+abc_30_efg
+abc_31_efg
+abc_31_efg
+abc_32_efg
+abc_32_efg
+abc_33_efg
+abc_33_efg
+abc_34_efg
+abc_34_efg
+abc_36_efg
+abc_36_efg
+abc_37_efg
+abc_37_efg
+abc_38_efg
+abc_38_efg
+abc_39_efg
+abc_39_efg
+abc_3_efg
+abc_3_efg
+abc_40_efg
+abc_40_efg
+abc_41_efg
+abc_41_efg
+abc_42_efg
+abc_42_efg
+abc_43_efg
+abc_43_efg
+abc_44_efg
+abc_44_efg
+abc_45_efg
+abc_45_efg
+abc_46_efg
+abc_46_efg
+abc_47_efg
+abc_47_efg
+abc_48_efg
+abc_48_efg
+abc_49_efg
+abc_49_efg
+abc_4_efg
+abc_4_efg
+abc_50_efg
+abc_50_efg
+abc_51_efg
+abc_51_efg
+abc_52_efg
+abc_52_efg
+abc_53_efg
+abc_53_efg
+abc_54_efg
+abc_54_efg
+abc_55_efg
+abc_55_efg
+abc_56_efg
+abc_56_efg
+abc_57_efg
+abc_57_efg
+abc_58_efg
+abc_58_efg
+abc_59_efg
+abc_59_efg
+abc_5_efg
+abc_5_efg
+abc_60_efg
+abc_60_efg
+abc_61_efg
+abc_61_efg
+abc_62_efg
+abc_62_efg
+abc_63_efg
+abc_63_efg
+abc_64_efg
+abc_64_efg
+abc_65_efg
+abc_65_efg
+abc_66_efg
+abc_66_efg
+abc_67_efg
+abc_67_efg
+abc_68_efg
+abc_68_efg
+abc_69_efg
+abc_69_efg
+abc_6_efg
+abc_6_efg
+abc_70_efg
+abc_70_efg
+abc_71_efg
+abc_71_efg
+abc_72_efg
+abc_72_efg
+abc_73_efg
+abc_73_efg
+abc_74_efg
+abc_74_efg
+abc_75_efg
+abc_75_efg
+abc_76_efg
+abc_76_efg
+abc_77_efg
+abc_77_efg
+abc_78_efg
+abc_78_efg
+abc_79_efg
+abc_79_efg
+abc_7_efg
+abc_7_efg
+abc_80_efg
+abc_80_efg
+abc_81_efg
+abc_81_efg
+abc_82_efg
+abc_82_efg
+abc_83_efg
+abc_83_efg
+abc_84_efg
+abc_84_efg
+abc_85_efg
+abc_85_efg
+abc_86_efg
+abc_86_efg
+abc_87_efg
+abc_87_efg
+abc_88_efg
+abc_88_efg
+abc_89_efg
+abc_89_efg
+abc_8_efg
+abc_8_efg
+abc_90_efg
+abc_90_efg
+abc_91_efg
+abc_91_efg
+abc_92_efg
+abc_92_efg
+abc_93_efg
+abc_93_efg
+abc_94_efg
+abc_94_efg
+abc_95_efg
+abc_95_efg
+abc_96_efg
+abc_96_efg
+abc_97_efg
+abc_97_efg
+abc_98_efg
+abc_98_efg
+abc_99_efg
+abc_99_efg
+abc_9_efg
+abc_9_efg
diff --git a/storage/bdb/test/scr017/O.R b/storage/bdb/test/scr017/O.R
new file mode 100644
index 00000000000..d78a04727d8
--- /dev/null
+++ b/storage/bdb/test/scr017/O.R
@@ -0,0 +1,196 @@
+1
+abc_1_efg
+2
+abc_2_efg
+3
+abc_3_efg
+4
+abc_4_efg
+5
+abc_5_efg
+6
+abc_6_efg
+7
+abc_7_efg
+8
+abc_8_efg
+9
+abc_9_efg
+10
+abc_10_efg
+11
+abc_11_efg
+12
+abc_12_efg
+13
+abc_13_efg
+14
+abc_14_efg
+15
+abc_15_efg
+16
+abc_16_efg
+17
+abc_17_efg
+18
+abc_18_efg
+19
+abc_19_efg
+20
+abc_20_efg
+21
+abc_21_efg
+22
+abc_22_efg
+23
+abc_23_efg
+24
+abc_24_efg
+25
+abc_25_efg
+26
+abc_26_efg
+27
+abc_27_efg
+28
+abc_28_efg
+29
+abc_29_efg
+30
+abc_30_efg
+31
+abc_31_efg
+32
+abc_32_efg
+33
+abc_33_efg
+34
+abc_34_efg
+35
+abc_36_efg
+36
+abc_37_efg
+37
+abc_38_efg
+38
+abc_39_efg
+39
+abc_40_efg
+40
+abc_41_efg
+41
+abc_42_efg
+42
+abc_43_efg
+43
+abc_44_efg
+44
+abc_45_efg
+45
+abc_46_efg
+46
+abc_47_efg
+47
+abc_48_efg
+48
+abc_49_efg
+49
+abc_50_efg
+50
+abc_51_efg
+51
+abc_52_efg
+52
+abc_53_efg
+53
+abc_54_efg
+54
+abc_55_efg
+55
+abc_56_efg
+56
+abc_57_efg
+57
+abc_58_efg
+58
+abc_59_efg
+59
+abc_60_efg
+60
+abc_61_efg
+61
+abc_62_efg
+62
+abc_63_efg
+63
+abc_64_efg
+64
+abc_65_efg
+65
+abc_66_efg
+66
+abc_67_efg
+67
+abc_68_efg
+68
+abc_69_efg
+69
+abc_70_efg
+70
+abc_71_efg
+71
+abc_72_efg
+72
+abc_73_efg
+73
+abc_74_efg
+74
+abc_75_efg
+75
+abc_76_efg
+76
+abc_77_efg
+77
+abc_78_efg
+78
+abc_79_efg
+79
+abc_80_efg
+80
+abc_81_efg
+81
+abc_82_efg
+82
+abc_83_efg
+83
+abc_84_efg
+84
+abc_85_efg
+85
+abc_86_efg
+86
+abc_87_efg
+87
+abc_88_efg
+88
+abc_89_efg
+89
+abc_90_efg
+90
+abc_91_efg
+91
+abc_92_efg
+92
+abc_93_efg
+93
+abc_94_efg
+94
+abc_95_efg
+95
+abc_96_efg
+96
+abc_97_efg
+97
+abc_98_efg
+98
+abc_99_efg
diff --git a/storage/bdb/test/scr017/chk.db185 b/storage/bdb/test/scr017/chk.db185
new file mode 100644
index 00000000000..c2a07c51d26
--- /dev/null
+++ b/storage/bdb/test/scr017/chk.db185
@@ -0,0 +1,26 @@
+#!/bin/sh -
+#
+# $Id: chk.db185,v 1.2 2001/10/12 17:55:38 bostic Exp $
+#
+# Check to make sure we can run DB 1.85 code.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/storage/bdb/test/scr017/t.c b/storage/bdb/test/scr017/t.c
new file mode 100644
index 00000000000..f03b33880d6
--- /dev/null
+++ b/storage/bdb/test/scr017/t.c
@@ -0,0 +1,188 @@
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_185.h"
+
+void err(char *);
+int mycmp(const DBT *, const DBT *);
+void ops(DB *, int);
+
+int
+main()
+{
+ DB *dbp;
+ HASHINFO h_info;
+ BTREEINFO b_info;
+ RECNOINFO r_info;
+
+ printf("\tBtree...\n");
+ memset(&b_info, 0, sizeof(b_info));
+ b_info.flags = R_DUP;
+ b_info.cachesize = 100 * 1024;
+ b_info.psize = 512;
+ b_info.lorder = 4321;
+ b_info.compare = mycmp;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_BTREE, &b_info)) == NULL)
+ err("dbopen: btree");
+ ops(dbp, DB_BTREE);
+
+ printf("\tHash...\n");
+ memset(&h_info, 0, sizeof(h_info));
+ h_info.bsize = 512;
+ h_info.ffactor = 6;
+ h_info.nelem = 1000;
+ h_info.cachesize = 100 * 1024;
+ h_info.lorder = 1234;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_HASH, &h_info)) == NULL)
+ err("dbopen: hash");
+ ops(dbp, DB_HASH);
+
+ printf("\tRecno...\n");
+ memset(&r_info, 0, sizeof(r_info));
+ r_info.flags = R_FIXEDLEN;
+ r_info.cachesize = 100 * 1024;
+ r_info.psize = 1024;
+ r_info.reclen = 37;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_RECNO, &r_info)) == NULL)
+ err("dbopen: recno");
+ ops(dbp, DB_RECNO);
+
+ return (0);
+}
+
+int
+mycmp(a, b)
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
+void
+ops(dbp, type)
+ DB *dbp;
+ int type;
+{
+ FILE *outfp;
+ DBT key, data;
+ recno_t recno;
+ int i, ret;
+ char buf[64];
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ for (i = 1; i < 100; ++i) { /* Test DB->put. */
+ sprintf(buf, "abc_%d_efg", i);
+ if (type == DB_RECNO) {
+ recno = i;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = data.data = buf;
+ key.size = data.size = strlen(buf);
+ }
+
+ data.data = buf;
+ data.size = strlen(buf);
+ if (dbp->put(dbp, &key, &data, 0))
+ err("DB->put");
+ }
+
+ if (type == DB_RECNO) { /* Test DB->get. */
+ recno = 97;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 97);
+ if (dbp->get(dbp, &key, &data, 0) != 0)
+ err("DB->get");
+ if (memcmp(data.data, buf, strlen(buf)))
+ err("DB->get: wrong data returned");
+
+ if (type == DB_RECNO) { /* Test DB->put no-overwrite. */
+ recno = 42;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 42);
+ if (dbp->put(dbp, &key, &data, R_NOOVERWRITE) == 0)
+ err("DB->put: no-overwrite succeeded");
+
+ if (type == DB_RECNO) { /* Test DB->del. */
+ recno = 35;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ sprintf(buf, "abc_%d_efg", 35);
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ if (dbp->del(dbp, &key, 0))
+ err("DB->del");
+
+ /* Test DB->seq. */
+ if ((outfp = fopen("output", "w")) == NULL)
+ err("fopen: output");
+ while ((ret = dbp->seq(dbp, &key, &data, R_NEXT)) == 0) {
+ if (type == DB_RECNO)
+ fprintf(outfp, "%d\n", *(int *)key.data);
+ else
+ fprintf(outfp,
+ "%.*s\n", (int)key.size, (char *)key.data);
+ fprintf(outfp, "%.*s\n", (int)data.size, (char *)data.data);
+ }
+ if (ret != 1)
+ err("DB->seq");
+ fclose(outfp);
+ switch (type) {
+ case DB_BTREE:
+ ret = system("cmp output O.BH");
+ break;
+ case DB_HASH:
+ ret = system("sort output | cmp - O.BH");
+ break;
+ case DB_RECNO:
+ ret = system("cmp output O.R");
+ break;
+ }
+ if (ret != 0)
+ err("output comparison failed");
+
+ if (dbp->sync(dbp, 0)) /* Test DB->sync. */
+ err("DB->sync");
+
+ if (dbp->close(dbp)) /* Test DB->close. */
+ err("DB->close");
+}
+
+void
+err(s)
+ char *s;
+{
+ fprintf(stderr, "\t%s: %s\n", s, strerror(errno));
+ exit (1);
+}
diff --git a/storage/bdb/test/scr018/chk.comma b/storage/bdb/test/scr018/chk.comma
new file mode 100644
index 00000000000..42df48d1881
--- /dev/null
+++ b/storage/bdb/test/scr018/chk.comma
@@ -0,0 +1,30 @@
+#!/bin/sh -
+#
+# $Id: chk.comma,v 1.1 2001/11/03 18:43:49 bostic Exp $
+#
+# Look for trailing commas in declarations. Some compilers can't handle:
+# enum {
+# foo,
+# bar,
+# };
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t ../../*/*.[ch] ../../*/*.in; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/storage/bdb/test/scr018/t.c b/storage/bdb/test/scr018/t.c
new file mode 100644
index 00000000000..4056a605928
--- /dev/null
+++ b/storage/bdb/test/scr018/t.c
@@ -0,0 +1,46 @@
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <strings.h>
+
+int
+chk(f)
+ char *f;
+{
+ int ch, l, r;
+
+ if (freopen(f, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s\n", f, strerror(errno));
+ exit (1);
+ }
+ for (l = 1, r = 0; (ch = getchar()) != EOF;) {
+ if (ch != ',')
+ goto next;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '\n')
+ goto next;
+ ++l;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '}')
+ goto next;
+ r = 1;
+ printf("%s: line %d\n", f, l);
+
+next: if (ch == '\n')
+ ++l;
+ }
+ return (r);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int r;
+
+ for (r = 0; *++argv != NULL;)
+ if (chk(*argv))
+ r = 1;
+ return (r);
+}
diff --git a/storage/bdb/test/scr019/chk.include b/storage/bdb/test/scr019/chk.include
new file mode 100644
index 00000000000..444217bedb4
--- /dev/null
+++ b/storage/bdb/test/scr019/chk.include
@@ -0,0 +1,40 @@
+#!/bin/sh -
+#
+# $Id: chk.include,v 1.3 2002/03/27 04:33:09 bostic Exp $
+#
+# Check for inclusion of files already included in db_int.h.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+egrep -- '#include[ ]' $d/dbinc/db_int.in |
+sed -e '/[ ]db\.h'/d \
+ -e 's/^#include.//' \
+ -e 's/[<>"]//g' \
+ -e 's/[ ].*//' > $t1
+
+for i in `cat $t1`; do
+ (cd $d && egrep "^#include[ ].*[<\"]$i[>\"]" */*.[ch])
+done |
+sed -e '/^build/d' \
+ -e '/^db_dump185/d' \
+ -e '/^examples_c/d' \
+ -e '/^libdb_java.*errno.h/d' \
+ -e '/^libdb_java.*java_util.h/d' \
+ -e '/^test_/d' \
+ -e '/^mutex\/tm.c/d' > $t2
+
+[ -s $t2 ] && {
+ echo 'FAIL: found extraneous includes in the source'
+ cat $t2
+ exit 1
+}
+exit 0
diff --git a/storage/bdb/test/scr020/chk.inc b/storage/bdb/test/scr020/chk.inc
new file mode 100644
index 00000000000..189126b10c3
--- /dev/null
+++ b/storage/bdb/test/scr020/chk.inc
@@ -0,0 +1,43 @@
+#!/bin/sh -
+#
+# $Id: chk.inc,v 1.1 2002/02/10 17:14:33 bostic Exp $
+#
+# Check for inclusion of db_config.h after "const" or other includes.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && find . -name '*.[chys]' -o -name '*.cpp' |
+ xargs egrep -l '#include.*db_config.h') > $t1
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w 'db_config.h|const' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep const $t2 > /dev/null; then
+ echo 'FAIL: found const before include of db_config.h'
+ egrep const $t2
+ exit 1
+fi
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w '#include' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep -v db_config.h $t2 > /dev/null; then
+ echo 'FAIL: found includes before include of db_config.h'
+ egrep -v db_config.h $t2
+ exit 1
+fi
+
+exit 0
diff --git a/storage/bdb/test/scr021/chk.flags b/storage/bdb/test/scr021/chk.flags
new file mode 100644
index 00000000000..1b2bb62cca7
--- /dev/null
+++ b/storage/bdb/test/scr021/chk.flags
@@ -0,0 +1,97 @@
+#!/bin/sh -
+#
+# $Id: chk.flags,v 1.8 2002/08/14 02:19:55 bostic Exp $
+#
+# Check flag name-spaces.
+
+d=../..
+
+t1=__1
+
+# Check for DB_ENV flags.
+(grep 'F_ISSET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbenv,' $d/*/*.[chys]) |
+ sed -e '/DB_ENV_/d' -e '/F_SET([^ ]*dbenv, db_env_reset)/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_ENV_' $d/*/*.c |
+sed -e '/F_.*dbenv,/d' \
+ -e '/DB_ENV_TEST_RECOVERY(.*DB_TEST_/d' \
+ -e '/\/libdb_java\//d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DB flags.
+(grep 'F_ISSET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbp,' $d/*/*.[chys]) |
+ sed -e '/DB_AM_/d' \
+ -e '/db.c:.*F_SET.*F_ISSET(subdbp,/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_AM_' $d/*/*.c |
+sed -e '/F_.*dbp/d' \
+ -e '/"DB->open", dbp->flags, DB_AM_DUP,/d' \
+ -e '/"DB_NODUPDATA" behavior for databases with/d' \
+ -e '/If DB_AM_OPEN_CALLED is not set, then we/d' \
+ -e '/This was checked in set_flags when DB_AM_ENCRYPT/d' \
+ -e '/XA_ABORT, we can safely set DB_AM_RECOVER/d' \
+ -e '/ DB_AM_RECNUM\./d' \
+ -e '/ DB_AM_RECOVER set\./d' \
+ -e '/isdup = dbp->flags & DB_AM_DUP/d' \
+ -e '/otherwise we simply do/d' \
+ -e '/pginfo/d' \
+ -e '/setting DB_AM_RECOVER, we guarantee that we don/d' \
+ -e '/:[ {]*DB_AM_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DBC flags.
+(grep 'F_ISSET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbc,' $d/*/*.[chys]) |
+ sed -e '/DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DBC_' $d/*/*.c |
+sed -e '/F_.*dbc/d' \
+ -e '/DBC_INTERNAL/d' \
+ -e '/DBC_LOGGING/d' \
+ -e '/Do the actual get. Set DBC_TRANSIENT/d' \
+ -e '/If DBC_WRITEDUP is set, the cursor is an in/d' \
+ -e '/The DBC_TRANSIENT flag indicates that we/d' \
+ -e '/This function replaces the DBC_CONTINUE and DBC_KEYSET/d' \
+ -e '/db_cam.c:.*F_CLR(opd, DBC_ACTIVE);/d' \
+ -e '/{ DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for bad use of macros.
+egrep 'case .*F_SET\(|case .*F_CLR\(' $d/*/*.c > $t1
+egrep 'for .*F_SET\(|for .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'if .*F_SET\(|if .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'switch .*F_SET\(|switch .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'while .*F_SET\(|while .*F_CLR\(' $d/*/*.c >> $t1
+[ -s $t1 ] && {
+ echo 'if statement followed by non-test macro'
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr022/chk.rr b/storage/bdb/test/scr022/chk.rr
new file mode 100644
index 00000000000..df230315299
--- /dev/null
+++ b/storage/bdb/test/scr022/chk.rr
@@ -0,0 +1,22 @@
+#!/bin/sh -
+#
+# $Id: chk.rr,v 1.1 2002/04/19 15:13:05 bostic Exp $
+
+d=../..
+
+t1=__1
+
+# Check for DB_RUNRECOVERY being specified instead of a call to db_panic.
+egrep DB_RUNRECOVERY $d/*/*.c |
+ sed -e '/common\/db_err.c:/d' \
+ -e '/libdb_java\/java_util.c:/d' \
+ -e '/db_dispatch.c:.*if (ret == DB_RUNRECOVERY/d' \
+ -e '/txn.c:.* \* DB_RUNRECOVERY and we need to/d' \
+ -e '/__db_panic(.*, DB_RUNRECOVERY)/d' > $t1
+[ -s $t1 ] && {
+ echo "DB_RUNRECOVERY used; should be a call to db_panic."
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/sdb001.tcl b/storage/bdb/test/sdb001.tcl
new file mode 100644
index 00000000000..a03160e0ab7
--- /dev/null
+++ b/storage/bdb/test/sdb001.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb001.tcl,v 11.18 2002/06/10 15:39:36 sue Exp $
+#
+# TEST subdb001 Tests mixing db and subdb operations
+# TEST Tests mixing db and subdb operations
+# TEST Create a db, add data, try to create a subdb.
+# TEST Test naming db and subdb with a leading - for correct parsing
+# TEST Existence check -- test use of -excl with subdbs
+# TEST
+# TEST Test non-subdb and subdb operations
+# TEST Test naming (filenames begin with -)
+# TEST Test existence (cannot create subdb of same name with -excl)
+proc subdb001 { method args } {
+ source ./include.tcl
+ global errorInfo
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb001: skipping for method $method"
+ return
+ }
+ puts "Subdb001: $method ($args) subdb and non-subdb tests"
+
+ set testfile $testdir/subdb001.db
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb001 skipping for env $env"
+ return
+ }
+ # Create the database and open the dictionary
+ set subdb subdb0
+ cleanup $testdir NULL
+ puts "\tSubdb001.a: Non-subdb database and subdb operations"
+ #
+ # Create a db with no subdbs. Add some data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ puts "\tSubdb001.a.0: Create db, add data, close, try subdb"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+ while { [gets $did str] != -1 && $count < 5 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+ #
+ # Create a db with no subdbs. Add no data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ set testfile $testdir/subdb001a.db
+ puts "\tSubdb001.a.1: Create db, close, try subdb"
+ #
+ # !!!
+ # Using -truncate is illegal when opening for subdbs, but we
+ # can use it here because we are not using subdbs for this
+ # create.
+ #
+ set db [eval {berkdb_open -create -truncate -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb001: skipping remainder of test for method $method"
+ return
+ }
+
+ #
+ # Test naming, db and subdb names beginning with -.
+ #
+ puts "\tSubdb001.b: Naming"
+ set cwd [pwd]
+ cd $testdir
+ set testfile1 -subdb001.db
+ set subdb -subdb
+ puts "\tSubdb001.b.0: Create db and subdb with -name, no --"
+ set ret [catch {eval {berkdb_open -create -mode 0644} $args \
+ {$omethod $testfile1 $subdb}} db]
+ error_check_bad dbopen $ret 0
+ puts "\tSubdb001.b.1: Create db and subdb with -name, with --"
+ set db [eval {berkdb_open -create -mode 0644} $args \
+ {$omethod -- $testfile1 $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ cd $cwd
+
+ #
+ # Create 1 db with 1 subdb. Try to create another subdb of
+ # the same name. Should fail.
+ #
+ puts "\tSubdb001.c: Truncate check"
+ set testfile $testdir/subdb001c.db
+ set subdb subdb
+ set stat [catch {eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ $args {$omethod $testfile $subdb}} ret]
+ error_check_bad dbopen $stat 0
+ error_check_good trunc [is_substr $ret \
+ "illegal with multiple databases"] 1
+
+ puts "\tSubdb001.d: Existence check"
+ set testfile $testdir/subdb001d.db
+ set subdb subdb
+ set ret [catch {eval {berkdb_open -create -excl -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [catch {eval {berkdb_open_noerr -create -excl -mode 0644} \
+ $args {$omethod $testfile $subdb}} db1]
+ error_check_bad dbopen $ret 0
+ error_check_good db_close [$db close] 0
+
+ return
+}
diff --git a/storage/bdb/test/sdb002.tcl b/storage/bdb/test/sdb002.tcl
new file mode 100644
index 00000000000..4757e12afc7
--- /dev/null
+++ b/storage/bdb/test/sdb002.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb002.tcl,v 11.35 2002/08/23 18:01:53 sandstro Exp $
+#
+# TEST subdb002
+# TEST Tests basic subdb functionality
+# TEST Small keys, small data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST Then repeat using an environment.
+proc subdb002 { method {nentries 10000} args } {
+ global passwd
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb002 skipping for env $env"
+ return
+ }
+ set largs $args
+ subdb002_main $method $nentries $largs
+ append largs " -chksum "
+ subdb002_main $method $nentries $largs
+ append largs "-encryptaes $passwd "
+ subdb002_main $method $nentries $largs
+}
+
+proc subdb002_main { method nentries largs } {
+ source ./include.tcl
+ global encrypt
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+
+ puts "Subdb002: $method ($largs) basic subdb tests"
+ set testfile $testdir/subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile NULL
+
+ # Run convert_encrypt so that old_encrypt will be reset to
+ # the proper value and cleanup will work.
+ convert_encrypt $largs
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ cleanup $testdir NULL
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_env berkdb_env_noerr
+ } else {
+ set sdb002_env berkdb_env
+ }
+ set env [eval {$sdb002_env -create -cachesize {0 10000000 0} \
+ -mode 0644 -txn} -home $testdir $encargs]
+ error_check_good env_open [is_valid_env $env] TRUE
+ puts "Subdb002: $method ($largs) basic subdb tests in an environment"
+
+ # We're in an env--use default path to database rather than specifying
+ # it explicitly.
+ set testfile subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile $env
+ error_check_good env_close [$env close] 0
+}
+
+proc subdb002_body { method omethod nentries largs testfile env } {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ # Create the database and open the dictionary
+ set subdb subdb0
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_open berkdb_open_noerr
+ } else {
+ set sdb002_open berkdb_open
+ }
+
+ if { $env == "NULL" } {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {$omethod $testfile $subdb}} db]
+ } else {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {-env $env $omethod $testfile $subdb}} db]
+ }
+
+ #
+ # If -queue method, we need to make sure that trying to
+ # create a subdb fails.
+ if { [is_queue $method] == 1 } {
+ error_check_bad dbopen $ret 0
+ puts "Subdb002: skipping remainder of test for method $method"
+ return
+ }
+
+ error_check_good dbopen $ret 0
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb002_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb002.check
+ }
+ puts "\tSubdb002.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tSubdb002.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdb002.d: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.e: db_dump with subdatabase"
+ set outfile $testdir/subdb002.dump
+ set dumpargs " -f $outfile -s $subdb "
+ if { $encrypt > 0 } {
+ append dumpargs " -P $passwd "
+ }
+ if { $env != "NULL" } {
+ append dumpargs " -h $testdir "
+ }
+ append dumpargs " $testfile"
+ set stat [catch {eval {exec $util_path/db_dump} $dumpargs} ret]
+ error_check_good dbdump.subdb $stat 0
+}
+
+# Check function for Subdb002; keys and data are identical
+proc subdb002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb002_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/sdb003.tcl b/storage/bdb/test/sdb003.tcl
new file mode 100644
index 00000000000..5d1536d8c84
--- /dev/null
+++ b/storage/bdb/test/sdb003.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb003.tcl,v 11.24 2002/06/10 15:39:37 sue Exp $
+#
+# TEST subdb003
+# TEST Tests many subdbs
+# TEST Creates many subdbs and puts a small amount of
+# TEST data in each (many defaults to 2000)
+# TEST
+# TEST Use the first 10,000 entries from the dictionary as subdbnames.
+# TEST Insert each with entry as name of subdatabase and a partial list
+# TEST as key/data. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
+proc subdb003 { method {nentries 1000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb003: skipping for method $method"
+ return
+ }
+
+ puts "Subdb003: $method ($args) many subdb tests"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb003.db
+ set env NULL
+ } else {
+ set testfile subdb003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set fcount 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb003_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set ndataent 10
+ set fdid [open $dict]
+ while { [gets $fdid str] != -1 && $fcount < $nentries } {
+ set subdb $str
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $ndataent } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ incr fcount
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $ndataent} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $ndataent $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ if { [expr $fcount % 100] == 0 } {
+ puts -nonewline "$fcount "
+ flush stdout
+ }
+ }
+ close $fdid
+ puts ""
+}
+
+# Check function for Subdb003; keys and data are identical
+proc subdb003.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb003_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/sdb004.tcl b/storage/bdb/test/sdb004.tcl
new file mode 100644
index 00000000000..d3d95f1fde0
--- /dev/null
+++ b/storage/bdb/test/sdb004.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb004.tcl,v 11.22 2002/07/11 18:53:45 sandstro Exp $
+#
+# TEST subdb004
+# TEST Tests large subdb names
+# TEST subdb name = filecontents,
+# TEST key = filename, data = filecontents
+# TEST Put/get per key
+# TEST Dump file
+# TEST Dump subdbs, verify data and subdb name match
+# TEST
+# TEST Create 1 db with many large subdbs. Use the contents as subdb names.
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
+proc subdb004 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb004: skipping for method $method"
+ return
+ }
+
+ puts "Subdb004: $method ($args) \
+ filecontents=subdbname filename=key filecontents=data pairs"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb004.db
+ set env NULL
+ } else {
+ set testfile subdb004.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb004_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc subdb004.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ # Note that the subdatabase name is passed in as a char *, not
+ # in a DBT, so it may not contain nulls; use only source files.
+ set file_list [glob $src_root/*/*.c]
+ set fcount [llength $file_list]
+ if { $txnenv == 1 && $fcount > 100 } {
+ set file_list [lrange $file_list 0 99]
+ set fcount 100
+ }
+
+ set count 0
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $fcount} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ }
+ puts "\tSubdb004.a: Set/Check each subdb"
+ foreach f $file_list {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ set subdb $data
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+
+ error_check_good Subdb004:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ # puts "\tSubdb004.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ }
+
+ #
+ # Now for each file, check that the subdb name is the same
+ # as the data in that subdb and that the filename is the key.
+ #
+ puts "\tSubdb004.b: Compare subdb names with key/data"
+ set db [eval {berkdb_open -rdonly} $envargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get -first] } { [llength $d] != 0 } \
+ {set d [$c get -next] } {
+ set subdbname [lindex [lindex $d 0] 0]
+ set subdb [eval {berkdb_open} $args {$testfile $subdbname}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Output the subdb name
+ set ofid [open $t3 w]
+ fconfigure $ofid -translation binary
+ if { [string compare "\0" \
+ [string range $subdbname end end]] == 0 } {
+ set slen [expr [string length $subdbname] - 2]
+ set subdbname [string range $subdbname 1 $slen]
+ }
+ puts -nonewline $ofid $subdbname
+ close $ofid
+
+ # Output the data
+ set subc [eval {$subdb cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $subc $subdb] TRUE
+ set d [$subc get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+ set key [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+
+ set ofid [open $t1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $key $t1
+ $checkfunc $key $t3
+
+ error_check_good Subdb004:diff($t3,$t1) \
+ [filecmp $t3 $t1] 0
+ error_check_good curs_close [$subc close] 0
+ error_check_good db_close [$subdb close] 0
+ }
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_record_based $method] != 1 } {
+ fileremove $t2.tmp
+ }
+}
+
+# Check function for subdb004; key should be file name; data should be contents
+proc subdb004.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Subdb004:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc subdb004_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+ set fname $names($binfile)
+ error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+ error_check_good Subdb004:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
diff --git a/storage/bdb/test/sdb005.tcl b/storage/bdb/test/sdb005.tcl
new file mode 100644
index 00000000000..98cea5b348b
--- /dev/null
+++ b/storage/bdb/test/sdb005.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb005.tcl,v 11.18 2002/07/11 18:53:46 sandstro Exp $
+#
+# TEST subdb005
+# TEST Tests cursor operations in subdbs
+# TEST Put/get per key
+# TEST Verify cursor operations work within subdb
+# TEST Verify cursor operations do not work across subdbs
+# TEST
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc subdb005 {method {nentries 100} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb005: skipping for method $method"
+ return
+ }
+
+ puts "Subdb005: $method ( $args ) subdb cursor operations test"
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb005.db
+ set env NULL
+ } else {
+ set testfile subdb005.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ if { $nentries == 100 } {
+ set nentries 20
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+ set txn ""
+ set psize 8192
+ set duplist {-1 -1 -1 -1 -1}
+ build_all_subdb \
+ $testfile [list $method] $psize $duplist $nentries $args
+ set numdb [llength $duplist]
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb005.a: Cursor ops - first/prev and last/next"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for {set i 0} {$i < $numdb} {incr i} {
+ set db [eval {berkdb_open -unknown} $args {$testfile sub$i.db}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set db_handle($i) $db
+ # Used in 005.c test
+ lappend subdbnames sub$i.db
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set d [$dbc get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+
+ # Used in 005.b test
+ set db_key($i) [lindex [lindex $d 0] 0]
+
+ set d [$dbc get -prev]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+ set d [$dbc get -last]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+ set d [$dbc get -next]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+ error_check_good dbc_close [$dbc close] 0
+ }
+ #
+ # Get a key from each subdb and try to get this key in a
+ # different subdb. Make sure it fails
+ #
+ puts "\tSubdb005.b: Get keys in different subdb's"
+ for {set i 0} {$i < $numdb} {incr i} {
+ set n [expr $i + 1]
+ if {$n == $numdb} {
+ set n 0
+ }
+ set db $db_handle($i)
+ if { [is_record_based $method] == 1 } {
+ set d [eval {$db get -recno} $txn {$db_key($n)}]
+ error_check_good \
+ db_get [expr [llength $d] == 0] 1
+ } else {
+ set d [eval {$db get} $txn {$db_key($n)}]
+ error_check_good db_get [expr [llength $d] == 0] 1
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ #
+ # Clean up
+ #
+ for {set i 0} {$i < $numdb} {incr i} {
+ error_check_good db_close [$db_handle($i) close] 0
+ }
+
+ #
+ # Check contents of DB for subdb names only. Makes sure that
+ # every subdbname is there and that nothing else is there.
+ #
+ puts "\tSubdb005.c: Check DB is read-only"
+ error_check_bad dbopen [catch \
+ {berkdb_open_noerr -unknown $testfile} ret] 0
+
+ puts "\tSubdb005.d: Check contents of DB for subdb names only"
+ set db [eval {berkdb_open -unknown -rdonly} $envargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set subdblist [$db get -glob *]
+ foreach kd $subdblist {
+ # subname also used in subdb005.e,f below
+ set subname [lindex $kd 0]
+ set i [lsearch $subdbnames $subname]
+ error_check_good subdb_search [expr $i != -1] 1
+ set subdbnames [lreplace $subdbnames $i $i]
+ }
+ error_check_good subdb_done [llength $subdbnames] 0
+
+ error_check_good db_close [$db close] 0
+ return
+}
diff --git a/storage/bdb/test/sdb006.tcl b/storage/bdb/test/sdb006.tcl
new file mode 100644
index 00000000000..fd6066b08d6
--- /dev/null
+++ b/storage/bdb/test/sdb006.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb006.tcl,v 11.20 2002/06/20 19:01:02 sue Exp $
+#
+# TEST subdb006
+# TEST Tests intra-subdb join
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+# TEST everything else does as well. We'll create test databases called
+# TEST sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+# TEST describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+# TEST ... where N is the number of the database. Primary.db is the primary
+# TEST database, and sub0.db is the database that has no matching duplicates.
+# TEST All of these are within a single database.
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc subdb006 {method {nentries 100} args } {
+ source ./include.tcl
+ global rand_init
+
+ # NB: these flags are internal only, ok
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "\tSubdb006 skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb006.db
+ set env NULL
+ } else {
+ set testfile subdb006.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 100 } {
+ # !!!
+ # nentries must be greater than the number
+ # of do_join_subdb calls below.
+ #
+ set nentries 35
+ }
+ }
+ set testdir [get_home $env]
+ }
+ berkdb srand $rand_init
+
+ set oargs $args
+ foreach opt {" -dup" " -dupsort"} {
+ append args $opt
+
+ puts "Subdb006: $method ( $args ) Intra-subdb join"
+ set txn ""
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb006.a: Intra-subdb join"
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set psize 8192
+ set duplist {0 50 25 16 12}
+ set numdb [llength $duplist]
+ build_all_subdb $testfile [list $method] $psize \
+ $duplist $nentries $args
+
+ # Build the primary
+ puts "Subdb006: Building the primary database $method"
+ set oflags "-create -mode 0644 [conv $omethod \
+ [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags $oargs $testfile primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set key [format "%04d" $i]
+ set ret [eval {$db put} $txn {$key stub}]
+ error_check_good "primary put" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join_subdb $testfile primary.db "1 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "0 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 0 1" $str $oargs
+
+ close $did
+ }
+}
diff --git a/storage/bdb/test/sdb007.tcl b/storage/bdb/test/sdb007.tcl
new file mode 100644
index 00000000000..0f9488a92a1
--- /dev/null
+++ b/storage/bdb/test/sdb007.tcl
@@ -0,0 +1,132 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb007.tcl,v 11.20 2002/07/11 18:53:46 sandstro Exp $
+#
+# TEST subdb007
+# TEST Tests page size difference errors between subdbs.
+# TEST Test 3 different scenarios for page sizes.
+# TEST 1. Create/open with a default page size, 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 2. Create/open with specific page size, 2nd subdb create with
+# TEST different one, should error.
+# TEST 3. Create/open with specified page size, 2nd subdb create with
+# TEST same specified size, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb007 { method args } {
+ source ./include.tcl
+
+ set db2args [convert_args -btree $args]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb007: skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Subdb007: skipping for specific page sizes"
+ return
+ }
+
+ puts "Subdb007: $method ($args) subdb tests with different page sizes"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb007.db
+ set env NULL
+ } else {
+ set testfile subdb007.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ append db2args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+ set txn ""
+
+ puts "\tSubdb007.a.0: create subdb with default page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ #
+ # Figure out what the default page size is so that we can
+ # guarantee we create it with a different value.
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
+ }
+ }
+ error_check_good dbclose [$db close] 0
+
+ if { $pgsz == 512 } {
+ set pgsz2 2048
+ } else {
+ set pgsz2 512
+ }
+
+ puts "\tSubdb007.a.1: create 2nd subdb with specified page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz2 $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.b.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set newpgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set newpgsz [lindex $pair 1]
+ }
+ }
+ error_check_good pgsize $pgsz2 $newpgsz
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.b.1: create 2nd subdb with different page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.c.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.c.1: create 2nd subdb with same specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+}
diff --git a/storage/bdb/test/sdb008.tcl b/storage/bdb/test/sdb008.tcl
new file mode 100644
index 00000000000..1c46aed2087
--- /dev/null
+++ b/storage/bdb/test/sdb008.tcl
@@ -0,0 +1,121 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb008.tcl,v 11.25 2002/07/11 18:53:46 sandstro Exp $
+# TEST subdb008
+# TEST Tests lorder difference errors between subdbs.
+# TEST Test 3 different scenarios for lorder.
+# TEST 1. Create/open with specific lorder, 2nd subdb create with
+# TEST different one, should error.
+# TEST 2. Create/open with a default lorder 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 3. Create/open with specified lorder, 2nd subdb create with
+# TEST same specified lorder, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb008 { method args } {
+ source ./include.tcl
+
+ set db2args [convert_args -btree $args]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb008: skipping for method $method"
+ return
+ }
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb008.db
+ set env NULL
+ } else {
+ set testfile subdb008.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append db2args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ puts "Subdb008: $method ($args) subdb tests with different lorders"
+
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+
+ puts "\tSubdb008.b.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 4321 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ # Figure out what the default lorder is so that we can
+ # guarantee we create it with a different value later.
+ set is_swap [$db is_byteswapped]
+ if { $is_swap } {
+ set other 4321
+ } else {
+ set other 1234
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.b.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 1234 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.c.0: create subdb with opposite specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 1234 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.c.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 4321 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.d.0: create subdb with default lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.d.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-lorder $other $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.e.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.e.1: create 2nd subdb with same specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+}
diff --git a/storage/bdb/test/sdb009.tcl b/storage/bdb/test/sdb009.tcl
new file mode 100644
index 00000000000..4e4869643ef
--- /dev/null
+++ b/storage/bdb/test/sdb009.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb009.tcl,v 11.9 2002/07/11 18:53:46 sandstro Exp $
+#
+# TEST subdb009
+# TEST Test DB->rename() method for subdbs
+proc subdb009 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Subdb009: $method ($args): Test of DB->rename()"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb009: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb009.db
+ set env NULL
+ } else {
+ set testfile subdb009.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set oldsdb OLDDB
+ set newsdb NEWDB
+
+ # Make sure we're starting from a clean slate.
+ cleanup $testdir $env
+ error_check_bad "$testfile exists" [file exists $testfile] 1
+
+ puts "\tSubdb009.a: Create/rename file"
+ puts "\t\tSubdb009.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644}\
+ $omethod $args {$testfile $oldsdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # The nature of the key and data are unimportant; use numeric key
+ # so record-based methods don't need special treatment.
+ set txn ""
+ set key 1
+ set data [pad_data $method data]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key $data}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tSubdb009.a.2: rename"
+ error_check_good rename_file [eval {berkdb dbrename} $envargs \
+ {$testfile $oldsdb $newsdb}] 0
+
+ puts "\t\tSubdb009.a.3: check"
+ # Open again with create to make sure we've really completely
+ # disassociated the subdb from the old name.
+ set odb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $testfile $oldsdb]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ set odbt [$odb get $key]
+ error_check_good odb_close [$odb close] 0
+
+ set ndb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $testfile $newsdb]
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+ set ndbt [$ndb get $key]
+ error_check_good ndb_close [$ndb close] 0
+
+ # The DBT from the "old" database should be empty, not the "new" one.
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
+
+ # Now there's both an old and a new. Rename the "new" to the "old"
+ # and make sure that fails.
+ puts "\tSubdb009.b: Make sure rename fails instead of overwriting"
+ set ret [catch {eval {berkdb dbrename} $envargs $testfile \
+ $oldsdb $newsdb} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
+
+ puts "\tSubdb009 succeeded."
+}
diff --git a/storage/bdb/test/sdb010.tcl b/storage/bdb/test/sdb010.tcl
new file mode 100644
index 00000000000..51f25976c56
--- /dev/null
+++ b/storage/bdb/test/sdb010.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb010.tcl,v 11.14 2002/07/11 18:53:47 sandstro Exp $
+#
+# TEST subdb010
+# TEST Test DB->remove() method and DB->truncate() for subdbs
+proc subdb010 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Subdb010: Test of DB->remove() and DB->truncate"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb010: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb010.db
+ set tfpath $testfile
+ set env NULL
+ } else {
+ set testfile subdb010.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+ cleanup $testdir $env
+
+ set txn ""
+ set testdb DATABASE
+ set testdb2 DATABASE2
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.a: Test of DB->remove()"
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ $testfile $testdb] 0
+
+ # File should still exist.
+ error_check_good file_exists_after [file exists $tfpath] 1
+
+ # But database should not.
+ set ret [catch {eval berkdb_open $omethod $args $testfile $testdb} res]
+ error_check_bad open_failed ret 0
+ error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1
+
+ puts "\tSubdb010.b: Setup for DB->truncate()"
+ # The nature of the key and data are unimportant; use numeric key
+ # so record-based methods don't need special treatment.
+ set key1 1
+ set key2 2
+ set data1 [pad_data $method data1]
+ set data2 [pad_data $method data2]
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key1 $data1}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb2]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db2 put} $txn {$key2 $data2}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+
+ puts "\tSubdb010.c: truncate"
+ #
+ # Return value should be 1, the count of how many items were
+ # destroyed when we truncated.
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good trunc_subdb [eval {$db truncate} $txn] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.d: check"
+ set db [eval {berkdb_open} $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set kd [$dbc get -first]
+ error_check_good trunc_dbcget [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open} $args {$testfile $testdb2}]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db2 cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db2] TRUE
+ set kd [$dbc get -first]
+ error_check_bad notrunc_dbcget1 [llength $kd] 0
+ set db2kd [list [list $key2 $data2]]
+ error_check_good key2 $kd $db2kd
+ set kd [$dbc get -next]
+ error_check_good notrunc_dbget2 [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+ puts "\tSubdb010 succeeded."
+}
diff --git a/storage/bdb/test/sdb011.tcl b/storage/bdb/test/sdb011.tcl
new file mode 100644
index 00000000000..862e32f73ed
--- /dev/null
+++ b/storage/bdb/test/sdb011.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb011.tcl,v 11.9 2002/07/11 18:53:47 sandstro Exp $
+#
+# TEST subdb011
+# TEST Test deleting Subdbs with overflow pages
+# TEST Create 1 db with many large subdbs.
+# TEST Test subdatabases with overflow pages.
+proc subdb011 { method {ndups 13} {nsubdbs 10} args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb011: skipping for method $method"
+ return
+ }
+ set txnenv 0
+ set envargs ""
+ set max_files 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb011.db
+ set env NULL
+ set tfpath $testfile
+ } else {
+ set testfile subdb011.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ set max_files 50
+ if { $ndups == 13 } {
+ set ndups 7
+ }
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+
+ # Create the database and open the dictionary
+
+ cleanup $testdir $env
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ if { $max_files != 0 && [llength $file_list] > $max_files } {
+ set fend [expr $max_files - 1]
+ set file_list [lrange $file_list 0 $fend]
+ }
+ set flen [llength $file_list]
+ puts "Subdb011: $method ($args) $ndups overflow dups with \
+ $flen filename=key filecontents=data pairs"
+
+ puts "\tSubdb011.a: Create each of $nsubdbs subdbs and dups"
+ set slist {}
+ set i 0
+ set count 0
+ foreach f $file_list {
+ set i [expr $i % $nsubdbs]
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ set subdb subdb$i
+ lappend slist $subdb
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for {set dup 0} {$dup < $ndups} {incr dup} {
+ set data $dup:$filecont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good dbclose [$db close] 0
+ incr i
+ incr count
+ }
+
+ puts "\tSubdb011.b: Verify overflow pages"
+ foreach subdb $slist {
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+
+ # What everyone else calls overflow pages, hash calls "big
+ # pages", so we need to special-case hash here. (Hash
+ # overflow pages are additional pages after the first in a
+ # bucket.)
+ if { [string compare [$db get_type] hash] == 0 } {
+ error_check_bad overflow \
+ [is_substr $stat "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good dbclose [$db close] 0
+ }
+
+ puts "\tSubdb011.c: Delete subdatabases"
+ for {set i $nsubdbs} {$i > 0} {set i [expr $i - 1]} {
+ #
+ # Randomly delete a subdatabase
+ set sindex [berkdb random_int 0 [expr $i - 1]]
+ set subdb [lindex $slist $sindex]
+ #
+ # Delete the one we did from the list
+ set slist [lreplace $slist $sindex $sindex]
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ {$testfile $subdb}] 0
+ }
+}
+
diff --git a/storage/bdb/test/sdb012.tcl b/storage/bdb/test/sdb012.tcl
new file mode 100644
index 00000000000..9c05d977daf
--- /dev/null
+++ b/storage/bdb/test/sdb012.tcl
@@ -0,0 +1,428 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb012.tcl,v 1.3 2002/08/08 15:38:10 bostic Exp $
+#
+# TEST subdb012
+# TEST Test subdbs with locking and transactions
+# TEST Tests creating and removing subdbs while handles
+# TEST are open works correctly, and in the face of txns.
+#
+proc subdb012 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb012: skipping for method $method"
+ return
+ }
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb012 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set largs [split_encargs $args encargs]
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ #
+ # sdb012_body takes a txn list containing 4 elements.
+ # {txn command for first subdb
+ # txn command for second subdb
+ # txn command for first subdb removal
+ # txn command for second subdb removal}
+ #
+ # The allowed commands are 'none', 'one', 'auto', 'abort', 'commit'.
+ # 'none' is a special case meaning run without a txn. In the
+ # case where all 4 items are 'none', we run in a lock-only env.
+ # 'one' is a special case meaning we create the subdbs together
+ # in one single transaction. It is indicated as the value for t1,
+ # and the value in t2 indicates if that single txn should be
+ # aborted or committed. It is not used and has no meaning
+ # in the removal case. 'auto' means use the -auto_commit flag
+ # to the operation, and 'abort' and 'commit' do the obvious.
+ #
+ # First test locking w/o txns. If any in tlist are 'none',
+ # all must be none.
+ #
+ # Now run through the txn-based operations
+ set count 0
+ set sdb "Subdb012."
+ set teststr "abcdefghijklmnopqrstuvwxyz"
+ set testlet [split $teststr {}]
+ foreach t1 { none one abort auto commit } {
+ foreach t2 { none abort auto commit } {
+ if { $t1 == "one" } {
+ if { $t2 == "none" || $t2 == "auto"} {
+ continue
+ }
+ }
+ set tlet [lindex $testlet $count]
+ foreach r1 { none abort auto commit } {
+ foreach r2 { none abort auto commit } {
+ set tlist [list $t1 $t2 $r1 $r2]
+ sdb012_body $testdir $omethod $largs \
+ $encargs $sdb$tlet $tlist
+ }
+ }
+ incr count
+ }
+ }
+
+}
+
+proc s012 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ set encargs ""
+ set largs ""
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ set sdb "Subdb012."
+ set tlet X
+ set tlist $args
+ error_check_good tlist [llength $tlist] 4
+ sdb012_body $testdir $omethod $largs $encargs $sdb$tlet $tlist
+}
+
+#
+# This proc checks the tlist values and returns the flags
+# that should be used when opening the env. If we are running
+# with no txns, then just -lock, otherwise -txn.
+#
+proc sdb012_subsys { tlist } {
+ set t1 [lindex $tlist 0]
+ #
+ # If we have no txns, all elements of the list should be none.
+ # In that case we only run with locking turned on.
+ # Otherwise, we use the full txn subsystems.
+ #
+ set allnone {none none none none}
+ if { $allnone == $tlist } {
+ set subsys "-lock"
+ } else {
+ set subsys "-txn"
+ }
+ return $subsys
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in operations. I.e. it will begin the txns as
+# needed, or return a -auto_commit flag, etc.
+#
+proc sdb012_tflags { env tlist } {
+ set ret ""
+ set t1 ""
+ foreach t $tlist {
+ switch $t {
+ one {
+ set t1 [$env txn]
+ error_check_good txnbegin [is_valid_txn $t1 $env] TRUE
+ lappend ret "-txn $t1"
+ lappend ret "-txn $t1"
+ }
+ auto {
+ lappend ret "-auto_commit"
+ }
+ abort -
+ commit {
+ #
+ # If the previous command was a "one", skip over
+ # this commit/abort. Otherwise start a new txn
+ # for the removal case.
+ #
+ if { $t1 == "" } {
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn \
+ $env] TRUE
+ lappend ret "-txn $txn"
+ } else {
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret ""
+ }
+ default {
+ error "Txn command $t not implemented"
+ }
+ }
+ }
+ return $ret
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in the txn conclusion operations. I.e. it will
+# give a no-op if using auto_commit (i.e. no final txn op), or a single
+# abort/commit if both subdb's are in one txn.
+#
+proc sdb012_top { tflags tlist } {
+ set ret ""
+ set t1 ""
+ #
+ # We know both lists have 4 items. Iterate over them
+ # using multiple value lists so we know which txn goes
+ # with each op.
+ #
+ # The tflags list is needed to extract the txn command
+ # out for the operation. The tlist list is needed to
+ # determine what operation we are doing.
+ #
+ foreach t $tlist tf $tflags {
+ switch $t {
+ one {
+ set t1 [lindex $tf 1]
+ }
+ auto {
+ lappend ret "sdb012_nop"
+ }
+ abort -
+ commit {
+ #
+ # If the previous command was a "one" (i.e. t1
+ # is set), append a no-op placeholder and then
+ # the combined txn command.
+ #
+ if { $t1 == "" } {
+ set txn [lindex $tf 1]
+ set top "$txn $t"
+ lappend ret $top
+ } else {
+ set top "$t1 $t"
+ lappend ret "sdb012_nop"
+ lappend ret $top
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret "sdb012_nop"
+ }
+ }
+ }
+ return $ret
+}
+
+proc sdb012_nop { } {
+ return 0
+}
+
+proc sdb012_isabort { tlist item } {
+ set i [lindex $tlist $item]
+ if { $i == "one" } {
+ set i [lindex $tlist [expr $item + 1]]
+ }
+ if { $i == "abort" } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc sdb012_body { testdir omethod largs encargs msg tlist } {
+
+ puts "\t$msg: $tlist"
+ set testfile subdb012.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ set subsys [sdb012_subsys $tlist]
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $subsys $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good test_lock [$env test abort subdb_lock] 0
+
+ #
+ # Convert from our tlist txn commands into real flags we
+ # will pass to commands. Use the multiple values feature
+ # of foreach to do this efficiently.
+ #
+ set tflags [sdb012_tflags $env $tlist]
+ foreach {txn1 txn2 rem1 rem2} $tflags {break}
+ foreach {top1 top2 rop1 rop2} [sdb012_top $tflags $tlist] {break}
+
+# puts "txn1 $txn1, txn2 $txn2, rem1 $rem1, rem2 $rem2"
+# puts "top1 $top1, top2 $top2, rop1 $rop1, rop2 $rop2"
+ puts "\t$msg.0: Create sub databases in env with $subsys"
+ set s1 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn1 {$omethod $testfile $subdb1}]
+ error_check_good dbopen [is_valid_db $s1] TRUE
+
+ set ret [eval $top1]
+ error_check_good t1_end $ret 0
+
+ set s2 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn2 {$omethod $testfile $subdb2}]
+ error_check_good dbopen [is_valid_db $s2] TRUE
+
+ puts "\t$msg.1: Subdbs are open; resolve txns if necessary"
+ set ret [eval $top2]
+ error_check_good t2_end $ret 0
+
+ set t1_isabort [sdb012_isabort $tlist 0]
+ set t2_isabort [sdb012_isabort $tlist 1]
+ set r1_isabort [sdb012_isabort $tlist 2]
+ set r2_isabort [sdb012_isabort $tlist 3]
+
+# puts "t1_isabort $t1_isabort, t2_isabort $t2_isabort, r1_isabort $r1_isabort, r2_isabort $r2_isabort"
+
+ puts "\t$msg.2: Subdbs are open; verify removal failures"
+ # Verify removes of subdbs with open subdb's fail
+ #
+ # We should fail no matter what. If we aborted, then the
+ # subdb should not exist. If we didn't abort, we should fail
+ # with DB_LOCK_NOTGRANTED.
+ #
+ # XXX - Do we need -auto_commit for all these failing ones?
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ error_check_bad dbremove2_open $r 0
+ if { $t2_isabort } {
+ error_check_good dbremove2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # Verify file remove fails
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ error_check_bad dbremovef_open $r 0
+
+ #
+ # If both creating txns aborted, the file should not exist.
+ #
+ if { $t1_isabort && $t2_isabort } {
+ error_check_good dbremovef_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremovef_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ puts "\t$msg.3: Close subdb2; verify removals"
+ error_check_good close_s2 [$s2 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem2 $testfile $subdb2} result ]
+ if { $t2_isabort } {
+ error_check_bad dbrem2_ab $r 0
+ error_check_good dbrem2_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbrem2 $result 0
+ }
+ # Resolve subdb2 removal txn
+ set r [eval $rop2]
+ error_check_good rop2 $r 0
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1.2_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # There are three cases here:
+ # 1. if both t1 and t2 aborted, the file shouldn't exist
+ # 2. if only t1 aborted, the file still exists and nothing is open
+ # 3. if neither aborted a remove should fail because the first
+ # subdb is still open
+ # In case 2, don't try the remove, because it should succeed
+ # and we won't be able to test anything else.
+ if { !$t1_isabort || $t2_isabort } {
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+ }
+
+ puts "\t$msg.4: Close subdb1; verify removals"
+ error_check_good close_s1 [$s1 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem1 $testfile $subdb1} result ]
+ if { $t1_isabort } {
+ error_check_bad dbremove1_ab $r 0
+ error_check_good dbremove1_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1 $result 0
+ }
+ # Resolve subdb1 removal txn
+ set r [eval $rop1]
+ error_check_good rop1 $r 0
+
+
+ # Verify removal of subdb2. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb2 above was successful and subdb2
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb2 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ if { $r2_isabort && !$t2_isabort } {
+ error_check_good dbremove2.1_ab $result 0
+ } else {
+ error_check_bad dbremove2.1 $r 0
+ error_check_good dbremove2.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ # Verify removal of subdb1. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb1 above was successful and subdb1
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb1 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ if { $r1_isabort && !$t1_isabort } {
+ error_check_good dbremove1.1 $result 0
+ } else {
+ error_check_bad dbremove_open $r 0
+ error_check_good dbremove.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ puts "\t$msg.5: All closed; remove file"
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremove_final_ab $r 0
+ error_check_good dbremove_file_abstr [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove_final $r 0
+ }
+ error_check_good envclose [$env close] 0
+}
diff --git a/storage/bdb/test/sdbscript.tcl b/storage/bdb/test/sdbscript.tcl
new file mode 100644
index 00000000000..d1978ccb048
--- /dev/null
+++ b/storage/bdb/test/sdbscript.tcl
@@ -0,0 +1,47 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbscript.tcl,v 11.9 2002/01/11 15:53:36 bostic Exp $
+#
+# Usage: subdbscript testfile subdbnumber factor
+# testfile: name of DB itself
+# subdbnumber: n, subdb indicator, of form sub$n.db
+# factor: Delete over factor'th + n'th from my subdb.
+#
+# I.e. if factor is 10, and n is 0, remove entries, 0, 10, 20, ...
+# if factor is 10 and n is 1, remove entries 1, 11, 21, ...
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "subdbscript testfile subdbnumber factor"
+
+# Verify usage
+if { $argc != 3 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set testfile [lindex $argv 0]
+set n [ lindex $argv 1 ]
+set factor [ lindex $argv 2 ]
+
+set db [berkdb_open -unknown $testfile sub$n.db]
+error_check_good db_open [is_valid_db $db] TRUE
+
+set dbc [$db cursor]
+error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+set i 1
+for {set d [$dbc get -first]} {[llength $d] != 0} {set d [$dbc get -next]} {
+ set x [expr $i - $n]
+ if { $x >= 0 && [expr $x % $factor] == 0 } {
+ puts "Deleting $d"
+ error_check_good dbc_del [$dbc del] 0
+ }
+ incr i
+}
+error_check_good db_close [$db close] 0
+
+exit
diff --git a/storage/bdb/test/sdbtest001.tcl b/storage/bdb/test/sdbtest001.tcl
new file mode 100644
index 00000000000..b8b4508c2a4
--- /dev/null
+++ b/storage/bdb/test/sdbtest001.tcl
@@ -0,0 +1,150 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbtest001.tcl,v 11.19 2002/05/22 15:42:42 sue Exp $
+#
+# TEST sdbtest001
+# TEST Tests multiple access methods in one subdb
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Dump file, verify per subdb
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST Rotate methods and repeat [#762].
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest001 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest001: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest001.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ lappend method_list [list "-rrecno" "-rbtree" "-hash" "-recno" "-btree"]
+ lappend method_list [list "-recno" "-hash" "-btree" "-rbtree" "-rrecno"]
+ lappend method_list [list "-btree" "-recno" "-rbtree" "-rrecno" "-hash"]
+ lappend method_list [list "-hash" "-recno" "-rbtree" "-rrecno" "-btree"]
+ lappend method_list [list "-rbtree" "-hash" "-btree" "-rrecno" "-recno"]
+ lappend method_list [list "-rrecno" "-recno"]
+ lappend method_list [list "-recno" "-rrecno"]
+ lappend method_list [list "-hash" "-dhash"]
+ lappend method_list [list "-dhash" "-hash"]
+ lappend method_list [list "-rbtree" "-btree" "-dbtree" "-ddbtree"]
+ lappend method_list [list "-btree" "-rbtree" "-ddbtree" "-dbtree"]
+ lappend method_list [list "-dbtree" "-ddbtree" "-btree" "-rbtree"]
+ lappend method_list [list "-ddbtree" "-dbtree" "-rbtree" "-btree"]
+ set plist [list 512 8192 1024 4096 2048 16384]
+ set mlen [llength $method_list]
+ set plen [llength $plist]
+ while { $plen < $mlen } {
+ set plist [concat $plist $plist]
+ set plen [llength $plist]
+ }
+ set pgsz 0
+ foreach methods $method_list {
+ cleanup $testdir NULL
+ puts "\tSubdbtest001.a: create subdbs of different access methods:"
+ puts "\tSubdbtest001.a: $methods"
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set psize [lindex $plist $pgsz]
+ incr pgsz
+ set newent [expr $nentries / $nsubdbs]
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest001_recno.check
+ } else {
+ set checkfunc subdbtest001.check
+ }
+
+ puts "\tSubdbtest001.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the
+ # dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ # filehead uses 1-based line numbers
+ set beg [expr $subdb * $newent]
+ incr beg
+ set end [expr $beg + $newent - 1]
+ filehead $end $dict $t3 $beg
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest001.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest001.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ }
+}
+
+# Check function for Subdbtest001; keys and data are identical
+proc subdbtest001.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest001_recno.check { key data } {
+global dict
+global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/sdbtest002.tcl b/storage/bdb/test/sdbtest002.tcl
new file mode 100644
index 00000000000..95717413a7b
--- /dev/null
+++ b/storage/bdb/test/sdbtest002.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbtest002.tcl,v 11.26 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST sdbtest002
+# TEST Tests multiple access methods in one subdb access by multiple
+# TEST processes.
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Fork off several child procs to each delete selected
+# TEST data from their subdb and then exit
+# TEST Dump file, verify contents of each subdb is correct
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST Fork of some child procs to each manipulate one subdb and when
+# TEST they are finished, verify the contents of the databases.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest002 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest002: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest002.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ set methods \
+ [list "-rbtree" "-recno" "-btree" "-btree" "-recno" "-rbtree"]
+ cleanup $testdir NULL
+ puts "\tSubdbtest002.a: create subdbs of different access methods:"
+ puts "\t\t$methods"
+ set psize 4096
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set newent [expr $nentries / $nsubdbs]
+
+ #
+ # XXX We need dict sorted to figure out what was deleted
+ # since things are stored sorted in the btree.
+ #
+ filesort $dict $t4
+ set dictorig $dict
+ set dict $t4
+
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ set pidlist ""
+ puts "\tSubdbtest002.b: create $nsubdbs procs to delete some keys"
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ puts "$tclsh_path\
+ $test_path/sdbscript.tcl $testfile \
+ $subdb $nsubdbs >& $testdir/subdb002.log.$subdb"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ sdbscript.tcl \
+ $testdir/subdb002.log.$subdb $testfile $subdb $nsubdbs &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest002_recno.check
+ } else {
+ set checkfunc subdbtest002.check
+ }
+
+ puts "\tSubdbtest002.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ error_check_good db_open [is_valid_db $db] TRUE
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+ #
+ # This is just so that t2 is there and empty
+ # since we are only appending below.
+ #
+ exec > $t2
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t4 r]
+ for {set i 1} {[gets $oid line] >= 0} {incr i} {
+ set farr($i) $line
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ # Line numbers in farr are 1-based
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ set beg [expr $subdb * $newent]
+ set beg [expr $beg + $i]
+ puts $oid $farr($beg)
+ }
+ }
+ close $oid
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest002.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest002.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ set dict $dictorig
+ return
+}
+
+# Check function for Subdbtest002; keys and data are identical
+proc subdbtest002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest002_recno.check { key data } {
+global dict
+global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/sdbutils.tcl b/storage/bdb/test/sdbutils.tcl
new file mode 100644
index 00000000000..3221a422e18
--- /dev/null
+++ b/storage/bdb/test/sdbutils.tcl
@@ -0,0 +1,197 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbutils.tcl,v 11.14 2002/06/10 15:39:39 sue Exp $
+#
+# build_all_subdb --
+#	Populate one database file $dbname with [llength $dups]
+#	subdatabases named sub0.db, sub1.db, ..., cycling through the
+#	supplied list of access methods; element i of $dups gives the
+#	duplicate count passed to subdb_build for subdb i.
+proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} {
+	set nsubdbs [llength $dups]
+	set mlen [llength $methods]
+	# Remember the caller's args so every subdb is built from a fresh
+	# copy of them.
+	set savearg $dbargs
+	for {set i 0} {$i < $nsubdbs} { incr i } {
+		set m [lindex $methods [expr $i % $mlen]]
+		set dbargs $savearg
+		subdb_build $dbname $nentries [lindex $dups $i] \
+		    $i $m $psize sub$i.db $dbargs
+	}
+}
+
+# subdb_build --
+#	Create subdatabase $subdb inside file $name and load it with up to
+#	$nkeys dictionary words.  The meaning of $ndups:
+#	  > 0:  store $ndups duplicates per key, data being "%04d"-formatted
+#	        multiples of $dup_interval;
+#	  == 0: store each key once with literal data NODUP;
+#	  < 0:  store unique keys per subdb (skipping ahead in the dictionary
+#	        by nkeys * dup_interval); record-based methods also record the
+#	        expected data in the global kvals array for later checking.
+#	Each put is wrapped in a transaction when the -env found in $dbargs
+#	is transactional.
+proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
+	source ./include.tcl
+
+	set dbargs [convert_args $method $dbargs]
+	set omethod [convert_method $method]
+
+	# NOTE(review): unconditional "Method:" trace looks like leftover
+	# debug output - confirm it is intentional.
+	puts "Method: $method"
+
+	# Use transactions iff the supplied environment is transactional.
+	set txnenv 0
+	set eindex [lsearch -exact $dbargs "-env"]
+	if { $eindex != -1 } {
+		incr eindex
+		set env [lindex $dbargs $eindex]
+		set txnenv [is_txnenv $env]
+	}
+	# Create the database and open the dictionary
+	set oflags "-create -mode 0644 $omethod \
+	    -pagesize $psize $dbargs $name $subdb"
+	set db [eval {berkdb_open} $oflags]
+	error_check_good dbopen [is_valid_db $db] TRUE
+	set did [open $dict]
+	set count 0
+	if { $ndups >= 0 } {
+		puts "\tBuilding $method $name $subdb. \
+		    $nkeys keys with $ndups duplicates at interval of $dup_interval"
+	}
+	if { $ndups < 0 } {
+		puts "\tBuilding $method $name $subdb. \
+		    $nkeys unique keys of pagesize $psize"
+		#
+		# If ndups is < 0, we want unique keys in each subdb,
+		# so skip ahead in the dict by nkeys * iteration
+		#
+		for { set count 0 } \
+		    { $count < [expr $nkeys * $dup_interval] } {
+		    incr count} {
+			set ret [gets $did str]
+			if { $ret == -1 } {
+				break
+			}
+		}
+	}
+	# Load words until the dictionary or $nkeys is exhausted.
+	set txn ""
+	for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+	    incr count} {
+		# Duplicates first (only entered when ndups > 0).
+		for { set i 0 } { $i < $ndups } { incr i } {
+			set data [format "%04d" [expr $i * $dup_interval]]
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			set ret [eval {$db put} $txn {$str \
+			    [chop_data $method $data]}]
+			error_check_good put $ret 0
+			if { $txnenv == 1 } {
+				error_check_good txn [$t commit] 0
+			}
+		}
+
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		if { $ndups == 0 } {
+			set ret [eval {$db put} $txn {$str \
+			    [chop_data $method NODUP]}]
+			error_check_good put $ret 0
+		} elseif { $ndups < 0 } {
+			if { [is_record_based $method] == 1 } {
+				global kvals
+
+				# Record numbers continue past the skipped
+				# region: nkeys * dup_interval + count + 1.
+				set num [expr $nkeys * $dup_interval]
+				set num [expr $num + $count + 1]
+				set ret [eval {$db put} $txn {$num \
+				    [chop_data $method $str]}]
+				set kvals($num) [pad_data $method $str]
+				error_check_good put $ret 0
+			} else {
+				set ret [eval {$db put} $txn \
+				    {$str [chop_data $method $str]}]
+				error_check_good put $ret 0
+			}
+		}
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+	}
+	close $did
+	error_check_good close:$name [$db close] 0
+}
+
+# do_join_subdb --
+#	Join the listed subdatabases on $key through the primary and verify
+#	both the number of duplicates returned and that every returned key
+#	is a multiple of each subdb's index.
+proc do_join_subdb { db primary subdbs key oargs } {
+	source ./include.tcl
+
+	puts "\tJoining: $subdbs on $key"
+
+	# Open all the databases
+	set p [eval {berkdb_open -unknown} $oargs $db $primary]
+	error_check_good "primary open" [is_valid_db $p] TRUE
+
+	set dblist ""
+	set curslist ""
+
+	foreach i $subdbs {
+		set jdb [eval {berkdb_open -unknown} $oargs $db sub$i.db]
+		error_check_good "sub$i.db open" [is_valid_db $jdb] TRUE
+
+		lappend jlist [list $jdb $key]
+		lappend dblist $jdb
+
+	}
+
+	set join_res [eval {$p get_join} $jlist]
+	set ndups [llength $join_res]
+
+	# Calculate how many dups we expect.
+	# We go through the list of indices.  If we find a 0, then we
+	# expect 0 dups.  Otherwise the join yields the keys divisible by
+	# every index, i.e. by the least common multiple of the indices:
+	# when one index divides the other we keep the larger, otherwise we
+	# fold in lcm(n, last) via gcd; either way we expect 50 / lcm
+	# matches.
+	set expected 50
+	set last 1
+	foreach n $subdbs {
+		if { $n == 0 } {
+			set expected 0
+			break
+		}
+		if { $last == $n } {
+			continue
+		}
+
+		if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+			if { $n > $last } {
+				set last $n
+				set expected [expr 50 / $last]
+			}
+		} else {
+			set last [expr $n * $last / [gcd $n $last]]
+			set expected [expr 50 / $last]
+		}
+	}
+
+	error_check_good number_of_dups:$subdbs $ndups $expected
+
+	#
+	# If we get here, we have the number expected, now loop
+	# through each and see if it is what we expected.
+	#
+	for { set i 0 } { $i < $ndups } { incr i } {
+		set pair [lindex $join_res $i]
+		set k [lindex $pair 0]
+		foreach j $subdbs {
+			error_check_bad valid_dup:$j:$subdbs $j 0
+			# Strip leading zeroes so expr treats the key as
+			# decimal, not octal; an all-zero key becomes 0.
+			set kval [string trimleft $k 0]
+			if { [string length $kval] == 0 } {
+				set kval 0
+			}
+			error_check_good \
+			    valid_dup:$j:$subdbs [expr $kval % $j] 0
+		}
+	}
+
+	error_check_good close_primary [$p close] 0
+	foreach i $dblist {
+		error_check_good close_index:$i [$i close] 0
+	}
+}
+
+# n_to_subname --
+#	Map a subdb index to its file name: 0 is the special null.db,
+#	anything else is sub<n>.db.  (The trailing semicolons are harmless
+#	Tcl command separators, not part of the returned name.)
+proc n_to_subname { n } {
+	if { $n == 0 } {
+		return null.db;
+	} else {
+		return sub$n.db;
+	}
+}
diff --git a/storage/bdb/test/sec001.tcl b/storage/bdb/test/sec001.tcl
new file mode 100644
index 00000000000..eb4bcc24dd2
--- /dev/null
+++ b/storage/bdb/test/sec001.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sec001.tcl,v 11.7 2002/05/31 16:19:30 sue Exp $
+#
+# TEST sec001
+# TEST Test of security interface
+# sec001 --
+#	Exercise the encryption API: creating or opening databases, subdbs
+#	and environments with mismatched encryption settings must fail with
+#	the expected error text, while matching passwords must succeed.
+proc sec001 { } {
+	global errorInfo
+	global errorCode
+
+	source ./include.tcl
+
+	set testfile1 env1.db
+	set testfile2 $testdir/env2.db
+	set subdb1 sub1
+	set subdb2 sub2
+
+	puts "Sec001: Test of basic encryption interface."
+	env_cleanup $testdir
+
+	set passwd1 "passwd1"
+	set passwd1_bad "passwd1_bad"
+	set passwd2 "passwd2"
+	set key "key"
+	set data "data"
+
+	#
+	# This first group tests bad create scenarios and also
+	# tests attempting to use encryption after creating a
+	# non-encrypted env/db to begin with.
+	#
+	set nopass ""
+	puts "\tSec001.a.1: Create db with encryption."
+	set db [berkdb_open -create -encryptaes $passwd1 -btree $testfile2]
+	error_check_good db [is_valid_db $db] TRUE
+	error_check_good dbput [$db put $key $data] 0
+	error_check_good dbclose [$db close] 0
+
+	puts "\tSec001.a.2: Open db without encryption."
+	set stat [catch {berkdb_open_noerr $testfile2} ret]
+	error_check_good db:nocrypto $stat 1
+	error_check_good db:fail [is_substr $ret "no encryption key"] 1
+
+	set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+	puts "\tSec001.b.1: Create db without encryption or checksum."
+	set db [berkdb_open -create -btree $testfile2]
+	error_check_good db [is_valid_db $db] TRUE
+	error_check_good dbput [$db put $key $data] 0
+	error_check_good dbclose [$db close] 0
+
+	puts "\tSec001.b.2: Open db with encryption."
+	set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+	error_check_good db:nocrypto $stat 1
+	error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+	set ret [berkdb dbremove $testfile2]
+
+	puts "\tSec001.c.1: Create db with checksum."
+	set db [berkdb_open -create -chksum -btree $testfile2]
+	error_check_good db [is_valid_db $db] TRUE
+	error_check_good dbput [$db put $key $data] 0
+	error_check_good dbclose [$db close] 0
+
+	puts "\tSec001.c.2: Open db with encryption."
+	set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+	error_check_good db:nocrypto $stat 1
+	error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+	set ret [berkdb dbremove $testfile2]
+
+	puts "\tSec001.d.1: Create subdb with encryption."
+	set db [berkdb_open -create -encryptaes $passwd1 -btree \
+	    $testfile2 $subdb1]
+	error_check_good subdb [is_valid_db $db] TRUE
+	error_check_good dbput [$db put $key $data] 0
+	error_check_good dbclose [$db close] 0
+
+	puts "\tSec001.d.2: Create 2nd subdb without encryption."
+	set stat [catch {berkdb_open_noerr -create -btree \
+	    $testfile2 $subdb2} ret]
+	error_check_good subdb:nocrypto $stat 1
+	error_check_good subdb:fail [is_substr $ret "no encryption key"] 1
+
+	set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+	puts "\tSec001.e.1: Create subdb without encryption or checksum."
+	set db [berkdb_open -create -btree $testfile2 $subdb1]
+	error_check_good db [is_valid_db $db] TRUE
+	error_check_good dbput [$db put $key $data] 0
+	error_check_good dbclose [$db close] 0
+
+	puts "\tSec001.e.2: Create 2nd subdb with encryption."
+	set stat [catch {berkdb_open_noerr -create -btree -encryptaes $passwd1 \
+	    $testfile2 $subdb2} ret]
+	error_check_good subdb:nocrypto $stat 1
+	error_check_good subdb:fail [is_substr $ret "supplied encryption key"] 1
+
+	env_cleanup $testdir
+
+	puts "\tSec001.f.1: Open env with encryption, empty passwd."
+	set stat [catch {berkdb_env_noerr -create -home $testdir \
+	    -encryptaes $nopass} ret]
+	error_check_good env:nopass $stat 1
+	error_check_good env:fail [is_substr $ret "Empty password"] 1
+
+	puts "\tSec001.f.2: Create without encryption algorithm (DB_ENCRYPT_ANY)."
+	set stat [catch {berkdb_env_noerr -create -home $testdir \
+	    -encryptany $passwd1} ret]
+	error_check_good env:any $stat 1
+	error_check_good env:fail [is_substr $ret "algorithm not supplied"] 1
+
+	puts "\tSec001.f.3: Create without encryption."
+	set env [berkdb_env -create -home $testdir]
+	error_check_good env [is_valid_env $env] TRUE
+
+	puts "\tSec001.f.4: Open again with encryption."
+	set stat [catch {berkdb_env_noerr -home $testdir \
+	    -encryptaes $passwd1} ret]
+	error_check_good env:unencrypted $stat 1
+	error_check_good env:fail [is_substr $ret \
+	    "Joining non-encrypted environment"] 1
+
+	error_check_good envclose [$env close] 0
+
+	env_cleanup $testdir
+
+	#
+	# This second group tests creating and opening a secure env.
+	# We test that others can join successfully, and that others with
+	# bad/no passwords cannot.  Also test that we cannot use the
+	# db->set_encrypt method when we've already got a secure dbenv.
+	#
+	puts "\tSec001.g.1: Open with encryption."
+	set env [berkdb_env_noerr -create -home $testdir -encryptaes $passwd1]
+	error_check_good env [is_valid_env $env] TRUE
+
+	puts "\tSec001.g.2: Open again with encryption - same passwd."
+	set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+	error_check_good env [is_valid_env $env1] TRUE
+	error_check_good envclose [$env1 close] 0
+
+	puts "\tSec001.g.3: Open again with any encryption (DB_ENCRYPT_ANY)."
+	set env1 [berkdb_env -home $testdir -encryptany $passwd1]
+	error_check_good env [is_valid_env $env1] TRUE
+	error_check_good envclose [$env1 close] 0
+
+	puts "\tSec001.g.4: Open with encryption - different length passwd."
+	set stat [catch {berkdb_env_noerr -home $testdir \
+	    -encryptaes $passwd1_bad} ret]
+	error_check_good env:$passwd1_bad $stat 1
+	error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+	puts "\tSec001.g.5: Open with encryption - different passwd."
+	set stat [catch {berkdb_env_noerr -home $testdir \
+	    -encryptaes $passwd2} ret]
+	error_check_good env:$passwd2 $stat 1
+	error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+	puts "\tSec001.g.6: Open env without encryption."
+	set stat [catch {berkdb_env_noerr -home $testdir} ret]
+	error_check_good env:$passwd2 $stat 1
+	error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+	puts "\tSec001.g.7: Open database with encryption in env"
+	set stat [catch {berkdb_open_noerr -env $env -btree -create \
+	    -encryptaes $passwd2 $testfile1} ret]
+	error_check_good db:$passwd2 $stat 1
+	error_check_good env:fail [is_substr $ret "method not permitted"] 1
+
+	puts "\tSec001.g.8: Close creating env"
+	error_check_good envclose [$env close] 0
+
+	#
+	# This third group tests opening the env after the original env
+	# handle is closed.  Just to make sure we can reopen it in
+	# the right fashion even if no handles are currently open.
+	#
+	puts "\tSec001.h.1: Reopen without encryption."
+	set stat [catch {berkdb_env_noerr -home $testdir} ret]
+	error_check_good env:noencrypt $stat 1
+	error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+	puts "\tSec001.h.2: Reopen with bad passwd."
+	set stat [catch {berkdb_env_noerr -home $testdir -encryptaes \
+	    $passwd1_bad} ret]
+	error_check_good env:$passwd1_bad $stat 1
+	error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+	puts "\tSec001.h.3: Reopen with encryption."
+	set env [berkdb_env -create -home $testdir -encryptaes $passwd1]
+	error_check_good env [is_valid_env $env] TRUE
+
+	puts "\tSec001.h.4: 2nd Reopen with encryption."
+	set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+	error_check_good env [is_valid_env $env1] TRUE
+
+	error_check_good envclose [$env1 close] 0
+	error_check_good envclose [$env close] 0
+
+	puts "\tSec001 complete."
+}
diff --git a/storage/bdb/test/sec002.tcl b/storage/bdb/test/sec002.tcl
new file mode 100644
index 00000000000..d790162f1d7
--- /dev/null
+++ b/storage/bdb/test/sec002.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sec002.tcl,v 11.3 2002/04/24 19:04:59 bostic Exp $
+#
+# TEST sec002
+# TEST Test of security interface and catching errors in the
+# TEST face of attackers overwriting parts of existing files.
+# sec002 --
+#	Corrupt encrypted and checksummed database files on disk and verify
+#	that Berkeley DB detects the tampering with checksum errors, while a
+#	plain unchecksummed file accepts the damage silently.
+proc sec002 { } {
+	global errorInfo
+	global errorCode
+
+	source ./include.tcl
+
+	set testfile1 $testdir/sec002-1.db
+	set testfile2 $testdir/sec002-2.db
+	set testfile3 $testdir/sec002-3.db
+	set testfile4 $testdir/sec002-4.db
+
+	puts "Sec002: Test of basic encryption interface."
+	env_cleanup $testdir
+
+	set passwd1 "passwd1"
+	set passwd2 "passwd2"
+	set key "key"
+	set data "data"
+	set pagesize 1024
+
+	#
+	# Set up 4 databases: two encrypted, but with different passwords,
+	# one unencrypted but with checksumming turned on, and one
+	# unencrypted with no checksumming.  Place the exact same data
+	# in each one.
+	#
+	puts "\tSec002.a: Setup databases"
+	set db_cmd "-create -pagesize $pagesize -btree "
+	set db [eval {berkdb_open} -encryptaes $passwd1 $db_cmd $testfile1]
+	error_check_good db [is_valid_db $db] TRUE
+	error_check_good dbput [$db put $key $data] 0
+	error_check_good dbclose [$db close] 0
+
+	set db [eval {berkdb_open} -encryptaes $passwd2 $db_cmd $testfile2]
+	error_check_good db [is_valid_db $db] TRUE
+	error_check_good dbput [$db put $key $data] 0
+	error_check_good dbclose [$db close] 0
+
+	set db [eval {berkdb_open} -chksum $db_cmd $testfile3]
+	error_check_good db [is_valid_db $db] TRUE
+	error_check_good dbput [$db put $key $data] 0
+	error_check_good dbclose [$db close] 0
+
+	set db [eval {berkdb_open} $db_cmd $testfile4]
+	error_check_good db [is_valid_db $db] TRUE
+	error_check_good dbput [$db put $key $data] 0
+	error_check_good dbclose [$db close] 0
+
+	#
+	# First just touch some bits in the file.  We know that in btree
+	# meta pages, bytes 92-459 are unused.  Scribble on them in both
+	# an encrypted, and both unencrypted files.  We should get
+	# a checksum error for the encrypted, and checksummed files.
+	# We should get no error for the normal file.
+	#
+	set fidlist {}
+	set fid [open $testfile1 r+]
+	lappend fidlist $fid
+	set fid [open $testfile3 r+]
+	lappend fidlist $fid
+	set fid [open $testfile4 r+]
+	lappend fidlist $fid
+
+	puts "\tSec002.b: Overwrite unused space in meta-page"
+	# Flip every bit of the byte at offset 100 in each file.
+	foreach f $fidlist {
+		fconfigure $f -translation binary
+		seek $f 100 start
+		set byte [read $f 1]
+		binary scan $byte c val
+		set newval [expr ~$val]
+		set newbyte [binary format c $newval]
+		seek $f 100 start
+		puts -nonewline $f $newbyte
+		close $f
+	}
+	puts "\tSec002.c: Reopen modified databases"
+	set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile1} ret]
+	error_check_good db:$testfile1 $stat 1
+	error_check_good db:$testfile1:fail \
+	    [is_substr $ret "metadata page checksum error"] 1
+
+	set stat [catch {berkdb_open_noerr -chksum $testfile3} ret]
+	error_check_good db:$testfile3 $stat 1
+	error_check_good db:$testfile3:fail \
+	    [is_substr $ret "metadata page checksum error"] 1
+
+	# The plain file must open cleanly despite the damage.
+	set stat [catch {berkdb_open_noerr $testfile4} db]
+	error_check_good db:$testfile4 $stat 0
+	error_check_good dbclose [$db close] 0
+
+	puts "\tSec002.d: Replace root page in encrypted w/ encrypted"
+	# NOTE(review): these channels are not fconfigured to binary
+	# translation as in Sec002.b - presumably fine on POSIX; confirm
+	# on platforms with newline translation.
+	set fid1 [open $testfile1 r+]
+	set fid2 [open $testfile2 r+]
+	seek $fid1 $pagesize start
+	seek $fid2 $pagesize start
+	set root1 [read $fid1 $pagesize]
+	close $fid1
+	puts -nonewline $fid2 $root1
+	close $fid2
+
+	set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+	error_check_good db [is_valid_db $db] TRUE
+	set stat [catch {$db get $key} ret]
+	error_check_good dbget $stat 1
+	error_check_good db:$testfile2:fail \
+	    [is_substr $ret "checksum error: catastrophic recovery required"] 1
+	set stat [catch {$db close} ret]
+	error_check_good dbclose $stat 1
+	error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+	puts "\tSec002.e: Replace root page in encrypted w/ unencrypted"
+	set fid2 [open $testfile2 r+]
+	set fid4 [open $testfile4 r+]
+	seek $fid2 $pagesize start
+	seek $fid4 $pagesize start
+	set root4 [read $fid4 $pagesize]
+	close $fid4
+	puts -nonewline $fid2 $root4
+	close $fid2
+
+	set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+	error_check_good db [is_valid_db $db] TRUE
+	set stat [catch {$db get $key} ret]
+	error_check_good dbget $stat 1
+	error_check_good db:$testfile2:fail \
+	    [is_substr $ret "checksum error: catastrophic recovery required"] 1
+	set stat [catch {$db close} ret]
+	error_check_good dbclose $stat 1
+	error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+	cleanup $testdir NULL 1
+	puts "\tSec002 complete."
+}
diff --git a/storage/bdb/test/shelltest.tcl b/storage/bdb/test/shelltest.tcl
new file mode 100644
index 00000000000..6190bac1f8d
--- /dev/null
+++ b/storage/bdb/test/shelltest.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: shelltest.tcl,v 1.20 2002/04/19 15:42:20 bostic Exp $
+#
+# TEST scr###
+# TEST The scr### directories are shell scripts that test a variety of
+# TEST things, including things about the distribution itself. These
+# TEST tests won't run on most systems, so don't even try to run them.
+#
+# shelltest.tcl:
+# Code to run shell script tests, to incorporate Java, C++,
+# example compilation, etc. test scripts into the Tcl framework.
+# shelltest --
+#	Run the scr### shell-script tests.  With run_one == 0 run every
+#	entry in the global shelltest_list; otherwise run only entry
+#	run_one (1-based).  Each test is copied into a clean $testdir and
+#	executed there.  Skips everything if /bin/sh is not executable.
+proc shelltest { { run_one 0 }} {
+	source ./include.tcl
+	global shelltest_list
+
+	set SH /bin/sh
+	if { [file executable $SH] != 1 } {
+		puts "Shell tests require valid shell /bin/sh: not found."
+		puts "Skipping shell tests."
+		return 0
+	}
+
+	if { $run_one == 0 } {
+		puts "Running shell script tests..."
+
+		foreach testpair $shelltest_list {
+			set dir [lindex $testpair 0]
+			set test [lindex $testpair 1]
+
+			env_cleanup $testdir
+			shelltest_copy $test_path/$dir $testdir
+			shelltest_run $SH $dir $test $testdir
+		}
+	} else {
+		# Convert the 1-based test number to a list index.
+		set run_one [expr $run_one - 1];
+		set dir [lindex [lindex $shelltest_list $run_one] 0]
+		set test [lindex [lindex $shelltest_list $run_one] 1]
+
+		env_cleanup $testdir
+		shelltest_copy $test_path/$dir $testdir
+		shelltest_run $SH $dir $test $testdir
+	}
+}
+
+# shelltest_copy --
+#	Copy every file in fromdir into todir (the test scratch directory).
+proc shelltest_copy { fromdir todir } {
+	set globall [glob $fromdir/*]
+
+	foreach f $globall {
+		file copy $f $todir/
+	}
+}
+
+# shelltest_run --
+#	Execute one shell-script test in $testdir, streaming its output to
+#	stdout.  A nonzero exit only prints a FAIL message; the Tcl test
+#	run itself is not aborted.
+proc shelltest_run { sh srcdir test testdir } {
+	puts "Running shell script $srcdir ($test)..."
+
+	set ret [catch {exec $sh -c "cd $testdir && sh $test" >&@ stdout} res]
+
+	if { $ret != 0 } {
+		puts "FAIL: shell test $srcdir/$test exited abnormally"
+	}
+}
+
+# One wrapper per shell test so the framework can invoke each by name;
+# scrNNN runs entry NNN (1-based) of shelltest_list.
+proc scr001 {} { shelltest 1 }
+proc scr002 {} { shelltest 2 }
+proc scr003 {} { shelltest 3 }
+proc scr004 {} { shelltest 4 }
+proc scr005 {} { shelltest 5 }
+proc scr006 {} { shelltest 6 }
+proc scr007 {} { shelltest 7 }
+proc scr008 {} { shelltest 8 }
+proc scr009 {} { shelltest 9 }
+proc scr010 {} { shelltest 10 }
+proc scr011 {} { shelltest 11 }
+proc scr012 {} { shelltest 12 }
+proc scr013 {} { shelltest 13 }
+proc scr014 {} { shelltest 14 }
+proc scr015 {} { shelltest 15 }
+proc scr016 {} { shelltest 16 }
+proc scr017 {} { shelltest 17 }
+proc scr018 {} { shelltest 18 }
+proc scr019 {} { shelltest 19 }
+proc scr020 {} { shelltest 20 }
+proc scr021 {} { shelltest 21 }
+proc scr022 {} { shelltest 22 }
diff --git a/storage/bdb/test/si001.tcl b/storage/bdb/test/si001.tcl
new file mode 100644
index 00000000000..1a2247c5f8b
--- /dev/null
+++ b/storage/bdb/test/si001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si001.tcl,v 1.7 2002/04/29 17:12:02 sandstro Exp $
+#
+# TEST sindex001
+# TEST Basic secondary index put/delete test
+# sindex001 --
+#	Basic secondary-index test: build a primary with N associated
+#	secondaries, then verify the secondaries stay consistent across
+#	puts, overwrites, deletes through the primary, and deletes through
+#	a secondary.  The first element of $methods is the primary's access
+#	method; the rest (or $nsecondaries copies of it) are the
+#	secondaries'.
+proc sindex001 { methods {nentries 200} {tnum 1} args } {
+	source ./include.tcl
+	global dict nsecondaries
+
+	# Primary method/args.
+	set pmethod [lindex $methods 0]
+	set pargs [convert_args $pmethod $args]
+	set pomethod [convert_method $pmethod]
+
+	# Method/args for all the secondaries.  If only one method
+	# was specified, assume the same method and a standard N
+	# secondaries.
+	set methods [lrange $methods 1 end]
+	if { [llength $methods] == 0 } {
+		for { set i 0 } { $i < $nsecondaries } { incr i } {
+			lappend methods $pmethod
+		}
+	}
+
+	set argses [convert_argses $methods $args]
+	set omethods [convert_methods $methods]
+
+	puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+	env_cleanup $testdir
+
+	set pname "primary00$tnum.db"
+	set snamebase "secondary00$tnum"
+
+	# Open an environment
+	# XXX if one is not supplied!
+	set env [berkdb_env -create -home $testdir]
+	error_check_good env_open [is_valid_env $env] TRUE
+
+	# Open the primary.
+	set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+	error_check_good primary_open [is_valid_db $pdb] TRUE
+
+	# Open and associate the secondaries
+	set sdbs {}
+	for { set i 0 } { $i < [llength $omethods] } { incr i } {
+		set sdb [eval {berkdb_open -create -env} $env \
+		    [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+		error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+		error_check_good db_associate($i) \
+		    [$pdb associate [callback_n $i] $sdb] 0
+		lappend sdbs $sdb
+	}
+
+	puts "\tSindex00$tnum.a: Put loop"
+	set did [open $dict]
+	for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+		if { [is_record_based $pmethod] == 1 } {
+			set key [expr $n + 1]
+			set datum $str
+		} else {
+			set key $str
+			gets $did datum
+		}
+		# Remember every pair so check_secondaries can recompute
+		# what each secondary should contain.
+		set keys($n) $key
+		set data($n) [pad_data $pmethod $datum]
+
+		set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+		error_check_good put($n) $ret 0
+	}
+	close $did
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+	puts "\tSindex00$tnum.b: Put/overwrite loop"
+	for { set n 0 } { $n < $nentries } { incr n } {
+		set newd $data($n).$keys($n)
+		set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+		error_check_good put_overwrite($n) $ret 0
+		set data($n) [pad_data $pmethod $newd]
+	}
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+	# Delete the second half of the entries through the primary.
+	# We do the second half so we can just pass keys(0 ... n/2)
+	# to check_secondaries.
+	set half [expr $nentries / 2]
+	puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+	for { set n $half } { $n < $nentries } { incr n } {
+		set ret [$pdb del $keys($n)]
+		error_check_good pdel($n) $ret 0
+	}
+	check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+	# Delete half of what's left, through the first secondary.
+	set quar [expr $half / 2]
+	puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+	set sdb [lindex $sdbs 0]
+	set callback [callback_n 0]
+	for { set n $quar } { $n < $half } { incr n } {
+		# The secondary key is whatever the associate callback
+		# derives from the primary pair.
+		set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+		set ret [$sdb del $skey]
+		error_check_good sdel($n) $ret 0
+	}
+	check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+	puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+	error_check_good primary_close [$pdb close] 0
+	foreach sdb $sdbs {
+		error_check_good secondary_close [$sdb close] 0
+	}
+	error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/si002.tcl b/storage/bdb/test/si002.tcl
new file mode 100644
index 00000000000..46ba86e7560
--- /dev/null
+++ b/storage/bdb/test/si002.tcl
@@ -0,0 +1,167 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si002.tcl,v 1.6 2002/04/29 17:12:02 sandstro Exp $
+#
+# TEST sindex002
+# TEST Basic cursor-based secondary index put/delete test
+# sindex002 --
+#	Cursor-based variant of sindex001: exercise secondary-index
+#	consistency through cursor puts (-keyfirst/-keylast), cursor
+#	overwrites (-current), secondary pget-driven primary overwrites,
+#	and cursor deletes through both the primary and a secondary.
+proc sindex002 { methods {nentries 200} {tnum 2} args } {
+	source ./include.tcl
+	global dict nsecondaries
+
+	# Primary method/args.
+	set pmethod [lindex $methods 0]
+	set pargs [convert_args $pmethod $args]
+	set pomethod [convert_method $pmethod]
+
+	# Method/args for all the secondaries.  If only one method
+	# was specified, assume the same method and a standard N
+	# secondaries.
+	set methods [lrange $methods 1 end]
+	if { [llength $methods] == 0 } {
+		for { set i 0 } { $i < $nsecondaries } { incr i } {
+			lappend methods $pmethod
+		}
+	}
+
+	set argses [convert_argses $methods $args]
+	set omethods [convert_methods $methods]
+
+	puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+	env_cleanup $testdir
+
+	set pname "primary00$tnum.db"
+	set snamebase "secondary00$tnum"
+
+	# Open an environment
+	# XXX if one is not supplied!
+	set env [berkdb_env -create -home $testdir]
+	error_check_good env_open [is_valid_env $env] TRUE
+
+	# Open the primary.
+	set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+	error_check_good primary_open [is_valid_db $pdb] TRUE
+
+	# Open and associate the secondaries
+	set sdbs {}
+	for { set i 0 } { $i < [llength $omethods] } { incr i } {
+		set sdb [eval {berkdb_open -create -env} $env \
+		    [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+		error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+		error_check_good db_associate($i) \
+		    [$pdb associate [callback_n $i] $sdb] 0
+		lappend sdbs $sdb
+	}
+
+	puts "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop"
+	set did [open $dict]
+	set pdbc [$pdb cursor]
+	error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+	for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+		if { [is_record_based $pmethod] == 1 } {
+			set key [expr $n + 1]
+			set datum $str
+		} else {
+			set key $str
+			gets $did datum
+		}
+		# ns() maps a key back to its slot so overwrite loops below
+		# can update the right data() entry.
+		set ns($key) $n
+		set keys($n) $key
+		set data($n) [pad_data $pmethod $datum]
+
+		# Alternate the two cursor-put flags.
+		if { $n % 2 == 0 } {
+			set pflag " -keyfirst "
+		} else {
+			set pflag " -keylast "
+		}
+
+		set ret [eval {$pdbc put} $pflag \
+		    {$key [chop_data $pmethod $datum]}]
+		error_check_good put($n) $ret 0
+	}
+	close $did
+	error_check_good pdbc_close [$pdbc close] 0
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+	puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+	set pdbc [$pdb cursor]
+	error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+	for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+	    { set dbt [$pdbc get -next] } {
+		set key [lindex [lindex $dbt 0] 0]
+		set datum [lindex [lindex $dbt 0] 1]
+		set newd $datum.$key
+		set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+		error_check_good put_overwrite($key) $ret 0
+		set data($ns($key)) [pad_data $pmethod $newd]
+	}
+	error_check_good pdbc_close [$pdbc close] 0
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+	puts "\tSindex00$tnum.c: Secondary c_pget/primary put overwrite loop"
+	# We walk the first secondary, then put-overwrite each primary key/data
+	# pair we find.  This doubles as a DBC->c_pget test.
+	set sdb [lindex $sdbs 0]
+	set sdbc [$sdb cursor]
+	error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+	for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+	    { set dbt [$sdbc pget -next] } {
+		set pkey [lindex [lindex $dbt 0] 1]
+		set pdatum [lindex [lindex $dbt 0] 2]
+
+		# Extended entries will be showing up underneath us, in
+		# unpredictable places.  Keep track of which pkeys
+		# we've extended, and don't extend them repeatedly.
+		if { [info exists pkeys_done($pkey)] == 1 } {
+			continue
+		} else {
+			set pkeys_done($pkey) 1
+		}
+
+		set newd $pdatum.[string range $pdatum 0 2]
+		set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+		error_check_good pdb_put($pkey) $ret 0
+		set data($ns($pkey)) [pad_data $pmethod $newd]
+	}
+	error_check_good sdbc_close [$sdbc close] 0
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.c"
+
+	# Delete the second half of the entries through the primary.
+	# We do the second half so we can just pass keys(0 ... n/2)
+	# to check_secondaries.
+	set half [expr $nentries / 2]
+	puts "\tSindex00$tnum.d:\
+	    Primary cursor delete loop: deleting $half entries"
+	set pdbc [$pdb cursor]
+	error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+	set dbt [$pdbc get -first]
+	for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+		error_check_good pdbc_del [$pdbc del] 0
+		set dbt [$pdbc get -next]
+	}
+	error_check_good pdbc_close [$pdbc close] 0
+	cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+	# Delete half of what's left, through the first secondary.
+	set quar [expr $half / 2]
+	puts "\tSindex00$tnum.e:\
+	    Secondary cursor delete loop: deleting $quar entries"
+	set sdb [lindex $sdbs 0]
+	set sdbc [$sdb cursor]
+	set dbt [$sdbc get -first]
+	for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+		error_check_good sdbc_del [$sdbc del] 0
+		set dbt [$sdbc get -next]
+	}
+	error_check_good sdbc_close [$sdbc close] 0
+	cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+	foreach sdb $sdbs {
+		error_check_good secondary_close [$sdb close] 0
+	}
+	error_check_good primary_close [$pdb close] 0
+	error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/si003.tcl b/storage/bdb/test/si003.tcl
new file mode 100644
index 00000000000..1cc8c884e75
--- /dev/null
+++ b/storage/bdb/test/si003.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si003.tcl,v 1.6 2002/04/29 17:12:03 sandstro Exp $
+#
+# TEST sindex003
+# TEST sindex001 with secondaries created and closed mid-test
+# TEST Basic secondary index put/delete test with secondaries
+# TEST created mid-test.
+# Basic secondary index put/delete test in which the secondaries are
+# opened, associated, closed and reopened while the primary stays open,
+# exercising "associate -create" back-fill of pre-existing data.
+proc sindex003 { methods {nentries 200} {tnum 3} args } {
+	source ./include.tcl
+	global dict nsecondaries
+
+	# Primary method/args.
+	set pmethod [lindex $methods 0]
+	set pargs [convert_args $pmethod $args]
+	set pomethod [convert_method $pmethod]
+
+	# Method/args for all the secondaries.  If only one method
+	# was specified, assume the same method and a standard N
+	# secondaries.
+	set methods [lrange $methods 1 end]
+	if { [llength $methods] == 0 } {
+		for { set i 0 } { $i < $nsecondaries } { incr i } {
+			lappend methods $pmethod
+		}
+	}
+
+	set argses [convert_argses $methods $args]
+	set omethods [convert_methods $methods]
+
+	puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+	env_cleanup $testdir
+
+	set pname "primary00$tnum.db"
+	set snamebase "secondary00$tnum"
+
+	# Open an environment
+	# XXX if one is not supplied!
+	set env [eval {berkdb_env -create -home $testdir}]
+	error_check_good env_open [is_valid_env $env] TRUE
+
+	# Open the primary.
+	set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+	error_check_good primary_open [is_valid_db $pdb] TRUE
+
+	# Load the primary BEFORE any secondaries exist; the later
+	# "associate -create" calls must back-fill the secondaries.
+	puts -nonewline "\tSindex00$tnum.a: Put loop ... "
+	set did [open $dict]
+	for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+		if { [is_record_based $pmethod] == 1 } {
+			set key [expr $n + 1]
+			set datum $str
+		} else {
+			set key $str
+			gets $did datum
+		}
+		set keys($n) $key
+		set data($n) [pad_data $pmethod $datum]
+
+		set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+		error_check_good put($n) $ret 0
+	}
+	close $did
+
+	# Open and associate the secondaries
+	set sdbs {}
+	puts "opening secondaries."
+	for { set i 0 } { $i < [llength $omethods] } { incr i } {
+		set sdb [eval {berkdb_open -create -env} $env \
+		    [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+		error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+		error_check_good db_associate($i) \
+		    [$pdb associate -create [callback_n $i] $sdb] 0
+		lappend sdbs $sdb
+	}
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+	puts -nonewline "\tSindex00$tnum.b: Put/overwrite loop ... "
+	for { set n 0 } { $n < $nentries } { incr n } {
+		set newd $data($n).$keys($n)
+		set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+		error_check_good put_overwrite($n) $ret 0
+		set data($n) [pad_data $pmethod $newd]
+	}
+
+	# Close the secondaries again, one at a time from the end of the
+	# list, re-verifying the secondaries that remain after each close.
+	puts "closing secondaries."
+	for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+	    { set sdb [lindex $sdbs end] } {
+		error_check_good second_close($sdb) [$sdb close] 0
+		set sdbs [lrange $sdbs 0 end-1]
+		check_secondaries \
+		    $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+	}
+
+	# Delete the second half of the entries through the primary.
+	# We do the second half so we can just pass keys(0 ... n/2)
+	# to check_secondaries.
+	set half [expr $nentries / 2]
+	puts -nonewline \
+	    "\tSindex00$tnum.c: Primary delete loop: deleting $half entries ..."
+	for { set n $half } { $n < $nentries } { incr n } {
+		set ret [$pdb del $keys($n)]
+		error_check_good pdel($n) $ret 0
+	}
+
+	# Open and associate a fresh set of secondaries (distinct file
+	# names); these should be back-filled with only the remaining
+	# (first) half of the entries.
+	set sdbs {}
+	puts "\n\t\topening secondaries."
+	for { set i 0 } { $i < [llength $omethods] } { incr i } {
+		set sdb [eval {berkdb_open -create -env} $env \
+		    [lindex $omethods $i] [lindex $argses $i] \
+		    $snamebase.r2.$i.db]
+		error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+		error_check_good db_associate($i) \
+		    [$pdb associate -create [callback_n $i] $sdb] 0
+		lappend sdbs $sdb
+	}
+	check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+	# Delete half of what's left, through the first secondary.
+	set quar [expr $half / 2]
+	puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+	set sdb [lindex $sdbs 0]
+	set callback [callback_n 0]
+	for { set n $quar } { $n < $half } { incr n } {
+		set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+		set ret [$sdb del $skey]
+		error_check_good sdel($n) $ret 0
+	}
+	check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+	foreach sdb $sdbs {
+		error_check_good secondary_close [$sdb close] 0
+	}
+	error_check_good primary_close [$pdb close] 0
+	error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/si004.tcl b/storage/bdb/test/si004.tcl
new file mode 100644
index 00000000000..291100da6b3
--- /dev/null
+++ b/storage/bdb/test/si004.tcl
@@ -0,0 +1,194 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si004.tcl,v 1.6 2002/04/29 17:12:03 sandstro Exp $
+#
+# TEST sindex004
+# TEST sindex002 with secondaries created and closed mid-test
+# TEST Basic cursor-based secondary index put/delete test, with
+# TEST secondaries created mid-test.
+# Basic cursor-based secondary index put/delete test, with the
+# secondaries opened, associated, closed and reopened mid-test.
+# Fix vs. si003 copy: the check label inside the closing-secondaries
+# loop now says ".c" (it runs after section .c, not .b).
+proc sindex004 { methods {nentries 200} {tnum 4} args } {
+	source ./include.tcl
+	global dict nsecondaries
+
+	# Primary method/args.
+	set pmethod [lindex $methods 0]
+	set pargs [convert_args $pmethod $args]
+	set pomethod [convert_method $pmethod]
+
+	# Method/args for all the secondaries.  If only one method
+	# was specified, assume the same method and a standard N
+	# secondaries.
+	set methods [lrange $methods 1 end]
+	if { [llength $methods] == 0 } {
+		for { set i 0 } { $i < $nsecondaries } { incr i } {
+			lappend methods $pmethod
+		}
+	}
+
+	set argses [convert_argses $methods $args]
+	set omethods [convert_methods $methods]
+
+	puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+	env_cleanup $testdir
+
+	set pname "primary00$tnum.db"
+	set snamebase "secondary00$tnum"
+
+	# Open an environment
+	# XXX if one is not supplied!
+	set env [berkdb_env -create -home $testdir]
+	error_check_good env_open [is_valid_env $env] TRUE
+
+	# Open the primary.
+	set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+	error_check_good primary_open [is_valid_db $pdb] TRUE
+
+	# Load the primary through a cursor, alternating -keyfirst and
+	# -keylast, BEFORE any secondaries exist; the later
+	# "associate -create" calls must back-fill them.
+	puts -nonewline \
+	    "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop ... "
+	set did [open $dict]
+	set pdbc [$pdb cursor]
+	error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+	for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+		if { [is_record_based $pmethod] == 1 } {
+			set key [expr $n + 1]
+			set datum $str
+		} else {
+			set key $str
+			gets $did datum
+		}
+		set ns($key) $n
+		set keys($n) $key
+		set data($n) [pad_data $pmethod $datum]
+
+		if { $n % 2 == 0 } {
+			set pflag " -keyfirst "
+		} else {
+			set pflag " -keylast "
+		}
+
+		set ret [eval {$pdbc put} $pflag \
+		    {$key [chop_data $pmethod $datum]}]
+		error_check_good put($n) $ret 0
+	}
+	close $did
+	error_check_good pdbc_close [$pdbc close] 0
+
+	# Open and associate the secondaries
+	set sdbs {}
+	puts "\n\t\topening secondaries."
+	for { set i 0 } { $i < [llength $omethods] } { incr i } {
+		set sdb [eval {berkdb_open -create -env} $env \
+		    [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+		error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+		error_check_good db_associate($i) \
+		    [$pdb associate -create [callback_n $i] $sdb] 0
+		lappend sdbs $sdb
+	}
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+	puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+	set pdbc [$pdb cursor]
+	error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+	for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+	    { set dbt [$pdbc get -next] } {
+		set key [lindex [lindex $dbt 0] 0]
+		set datum [lindex [lindex $dbt 0] 1]
+		set newd $datum.$key
+		set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+		error_check_good put_overwrite($key) $ret 0
+		set data($ns($key)) [pad_data $pmethod $newd]
+	}
+	error_check_good pdbc_close [$pdbc close] 0
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+	puts -nonewline "\tSindex00$tnum.c:\
+	    Secondary c_pget/primary put overwrite loop ... "
+	# We walk the first secondary, then put-overwrite each primary key/data
+	# pair we find.  This doubles as a DBC->c_pget test.
+	set sdb [lindex $sdbs 0]
+	set sdbc [$sdb cursor]
+	error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+	for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+	    { set dbt [$sdbc pget -next] } {
+		set pkey [lindex [lindex $dbt 0] 1]
+		set pdatum [lindex [lindex $dbt 0] 2]
+
+		# Extended entries will be showing up underneath us, in
+		# unpredictable places.  Keep track of which pkeys
+		# we've extended, and don't extend them repeatedly.
+		if { [info exists pkeys_done($pkey)] == 1 } {
+			continue
+		} else {
+			set pkeys_done($pkey) 1
+		}
+
+		set newd $pdatum.[string range $pdatum 0 2]
+		set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+		error_check_good pdb_put($pkey) $ret 0
+		set data($ns($pkey)) [pad_data $pmethod $newd]
+	}
+	error_check_good sdbc_close [$sdbc close] 0
+
+	# Close the secondaries again, one at a time from the end of the
+	# list, re-verifying the secondaries that remain after each close.
+	puts "\n\t\tclosing secondaries."
+	for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+	    { set sdb [lindex $sdbs end] } {
+		error_check_good second_close($sdb) [$sdb close] 0
+		set sdbs [lrange $sdbs 0 end-1]
+		check_secondaries \
+		    $pdb $sdbs $nentries keys data "Sindex00$tnum.c"
+	}
+
+	# Delete the second half of the entries through the primary.
+	# We do the second half so we can just pass keys(0 ... n/2)
+	# to check_secondaries.
+	set half [expr $nentries / 2]
+	puts -nonewline "\tSindex00$tnum.d:\
+	    Primary cursor delete loop: deleting $half entries ... "
+	set pdbc [$pdb cursor]
+	error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+	set dbt [$pdbc get -first]
+	for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+		error_check_good pdbc_del [$pdbc del] 0
+		set dbt [$pdbc get -next]
+	}
+	error_check_good pdbc_close [$pdbc close] 0
+
+	# Open and associate a fresh set of secondaries (distinct file
+	# names); these should be back-filled with only the remaining
+	# (first) half of the entries.
+	set sdbs {}
+	puts "\n\t\topening secondaries."
+	for { set i 0 } { $i < [llength $omethods] } { incr i } {
+		set sdb [eval {berkdb_open -create -env} $env \
+		    [lindex $omethods $i] [lindex $argses $i] \
+		    $snamebase.r2.$i.db]
+		error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+		error_check_good db_associate($i) \
+		    [$pdb associate -create [callback_n $i] $sdb] 0
+		lappend sdbs $sdb
+	}
+	cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+	# Delete half of what's left, through the first secondary.
+	set quar [expr $half / 2]
+	puts "\tSindex00$tnum.e:\
+	    Secondary cursor delete loop: deleting $quar entries"
+	set sdb [lindex $sdbs 0]
+	set sdbc [$sdb cursor]
+	set dbt [$sdbc get -first]
+	for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+		error_check_good sdbc_del [$sdbc del] 0
+		set dbt [$sdbc get -next]
+	}
+	error_check_good sdbc_close [$sdbc close] 0
+	cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+	foreach sdb $sdbs {
+		error_check_good secondary_close [$sdb close] 0
+	}
+	error_check_good primary_close [$pdb close] 0
+	error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/si005.tcl b/storage/bdb/test/si005.tcl
new file mode 100644
index 00000000000..e5ed49175c9
--- /dev/null
+++ b/storage/bdb/test/si005.tcl
@@ -0,0 +1,179 @@
+
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si005.tcl,v 11.4 2002/04/29 17:12:03 sandstro Exp $
+#
+# Sindex005: Secondary index and join test.  Builds a simulated schema
+# in which the primary maps record IDs to "XXXXXname" strings and two
+# secondaries map ZIP->ID and name->ID, then joins the secondaries for
+# every primary item.
+proc sindex005 { methods {nitems 1000} {tnum 5} args } {
+	source ./include.tcl
+
+	# Primary method/args.
+	set pmethod [lindex $methods 0]
+	set pargs [convert_args $pmethod $args]
+	set pomethod [convert_method $pmethod]
+
+	# Sindex005 does a join within a simulated database schema
+	# in which the primary index maps a record ID to a ZIP code and
+	# name in the form "XXXXXname", and there are two secondaries:
+	# one mapping ZIP to ID, the other mapping name to ID.
+	# The primary may be of any database type; the two secondaries
+	# must be either btree or hash.
+
+	# Method/args for all the secondaries.  If only one method
+	# was specified, assume the same method for the two secondaries.
+	set methods [lrange $methods 1 end]
+	if { [llength $methods] == 0 } {
+		for { set i 0 } { $i < 2 } { incr i } {
+			lappend methods $pmethod
+		}
+	} elseif { [llength $methods] != 2 } {
+		puts "FAIL: Sindex00$tnum requires exactly two secondaries."
+		return
+	}
+
+	set argses [convert_argses $methods $args]
+	set omethods [convert_methods $methods]
+
+	puts "Sindex00$tnum ($pmethod/$methods) Secondary index join test."
+	env_cleanup $testdir
+
+	set pname "sindex00$tnum-primary.db"
+	set zipname "sindex00$tnum-zip.db"
+	set namename "sindex00$tnum-name.db"
+
+	# Open an environment
+	# XXX if one is not supplied!
+	set env [berkdb_env -create -home $testdir]
+	error_check_good env_open [is_valid_env $env] TRUE
+
+	# Open the databases.  The secondaries are opened -dup, since
+	# several primary records may share a ZIP or a name.
+	set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+	error_check_good primary_open [is_valid_db $pdb] TRUE
+
+	set zipdb [eval {berkdb_open -create -dup -env} $env \
+	    [lindex $omethods 0] [lindex $argses 0] $zipname]
+	error_check_good zip_open [is_valid_db $zipdb] TRUE
+	error_check_good zip_associate [$pdb associate s5_getzip $zipdb] 0
+
+	set namedb [eval {berkdb_open -create -dup -env} $env \
+	    [lindex $omethods 1] [lindex $argses 1] $namename]
+	error_check_good name_open [is_valid_db $namedb] TRUE
+	error_check_good name_associate [$pdb associate s5_getname $namedb] 0
+
+	puts "\tSindex00$tnum.a: Populate database with $nitems \"names\""
+	s5_populate $pdb $nitems
+	puts "\tSindex00$tnum.b: Perform a join on each \"name\" and \"ZIP\""
+	s5_jointest $pdb $zipdb $namedb
+
+	error_check_good name_close [$namedb close] 0
+	error_check_good zip_close [$zipdb close] 0
+	error_check_good primary_close [$pdb close] 0
+	error_check_good env_close [$env close] 0
+}
+
+# Walk the whole primary and perform a secondary join for every item.
+proc s5_jointest { pdb zipdb namedb } {
+	set pdbc [$pdb cursor]
+	error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+	for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+	    { set dbt [$pdbc get -next] } {
+		set item [lindex [lindex $dbt 0] 1]
+		set retlist [s5_dojoin $item $pdb $zipdb $namedb]
+	}
+	# Close the walking cursor; the original leaked it, leaving an
+	# open cursor behind when the caller later closes $pdb.
+	error_check_good pdbc_close [$pdbc close] 0
+}
+
+# Derive the name and ZIP from $item, position a cursor on each
+# secondary, join the two cursors, and verify that every record the
+# join returns equals $item and that at least one record is returned.
+proc s5_dojoin { item pdb zipdb namedb } {
+	set name [s5_getname "" $item]
+	set zip [s5_getzip "" $item]
+
+	set zipc [$zipdb cursor]
+	error_check_good zipc($item) [is_valid_cursor $zipc $zipdb] TRUE
+
+	set namec [$namedb cursor]
+	error_check_good namec($item) [is_valid_cursor $namec $namedb] TRUE
+
+	set pc [$pdb cursor]
+	error_check_good pc($item) [is_valid_cursor $pc $pdb] TRUE
+
+	# Position the two secondary cursors on the join values and
+	# sanity-check the data found there.
+	set ret [$zipc get -set $zip]
+	set zd [lindex [lindex $ret 0] 1]
+	error_check_good zipset($zip) [s5_getzip "" $zd] $zip
+
+	set ret [$namec get -set $name]
+	set nd [lindex [lindex $ret 0] 1]
+	error_check_good nameset($name) [s5_getname "" $nd] $name
+
+	set joinc [$pdb join $zipc $namec]
+
+	# Every item the join returns must match the one we started from,
+	# and the join must return at least one item.
+	set anyreturned 0
+	for { set dbt [$joinc get] } { [llength $dbt] > 0 } \
+	    { set dbt [$joinc get] } {
+		set ritem [lindex [lindex $dbt 0] 1]
+		error_check_good returned_item($item) $ritem $item
+		incr anyreturned
+	}
+	error_check_bad anyreturned($item) $anyreturned 0
+
+	error_check_good joinc_close($item) [$joinc close] 0
+	error_check_good pc_close($item) [$pc close] 0
+	error_check_good namec_close($item) [$namec close] 0
+	error_check_good zipc_close($item) [$zipc close] 0
+}
+
+# Load $nitems records into the primary, keyed 1..nitems, putting
+# "XXXXXword" for each ZIP string that s5_name2zips derives from the
+# dictionary word.
+proc s5_populate { db nitems } {
+	global dict
+
+	set did [open $dict]
+	for { set i 1 } { $i <= $nitems } { incr i } {
+		gets $did word
+		# Skip a too-short word once; complain if two short words
+		# appear in a row (s5_name2zips hashes chars 1..end).
+		if { [string length $word] < 3 } {
+			gets $did word
+			if { [string length $word] < 3 } {
+				puts "FAIL:\
+				    unexpected pair of words < 3 chars long"
+			}
+		}
+		set datalist [s5_name2zips $word]
+		foreach data $datalist {
+			error_check_good db_put($data) [$db put $i $data$word] 0
+		}
+	}
+	close $did
+}
+
+# Secondary callback: extract the 5-char ZIP prefix from "XXXXXname".
+proc s5_getzip { key data } {
+	return [string range $data 0 4]
+}
+# Secondary callback: extract the name portion (after the 5-char ZIP).
+proc s5_getname { key data } {
+	return [string range $data 5 end]
+}
+
+# The dirty secret of this test is that the ZIP code is a function of the
+# name, so we can generate a database and then verify join results easily
+# without having to consult actual data.
+#
+# Any word passed into this function will generate from 1 to 26 ZIP
+# entries, out of the set {00000, 01000 ... 99000}. The number of entries
+# is just the position in the alphabet of the word's first letter; the
+# entries are then hashed to the set {00, 01 ... 99} N different ways.
+# Deterministically derive a list of ZIP strings from $name; the count
+# of entries is the 1-based alphabet position of the first letter, and
+# each entry is a hash of the remaining letters formatted as "NN000".
+proc s5_name2zips { name } {
+	global alphabet
+
+	set count [expr [string first [string index $name 0] $alphabet] + 1]
+	error_check_bad starts_with_abc($name) $count -1
+
+	set ziplist {}
+	for { set i 0 } { $i < $count } { incr i } {
+		set hash 0
+		for { set j 1 } { $j < [string length $name] } \
+		    { incr j } {
+			set hash [s5_nhash $name $i $j $hash]
+		}
+		lappend ziplist [format %05u [expr $hash % 100]000]
+	}
+	return $ziplist
+}
+# Fold character $j of $name into running hash $b; $i varies the hash
+# per ZIP entry.  (Fixed a stray apostrophe appended to $alphabet in the
+# "string first" call; the hash is only ever used self-consistently via
+# s5_name2zips, so the change is safe.)
+proc s5_nhash { name i j b } {
+	global alphabet
+
+	set c [string first [string index $name $j] $alphabet]
+	return [expr (($b * 991) + ($i * 997) + $c) % 10000000]
+}
diff --git a/storage/bdb/test/si006.tcl b/storage/bdb/test/si006.tcl
new file mode 100644
index 00000000000..3a1dbb3c4f8
--- /dev/null
+++ b/storage/bdb/test/si006.tcl
@@ -0,0 +1,129 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si006.tcl,v 1.2 2002/05/15 17:18:03 sandstro Exp $
+#
+# TEST sindex006
+# TEST Basic secondary index put/delete test with transactions
+# Transactional variant of the basic put/delete secondary test: every
+# put and delete runs in its own transaction, committed immediately.
+proc sindex006 { methods {nentries 200} {tnum 6} args } {
+	source ./include.tcl
+	global dict nsecondaries
+
+	# Primary method/args.
+	set pmethod [lindex $methods 0]
+	set pargs [convert_args $pmethod $args]
+	set pomethod [convert_method $pmethod]
+
+	# Method/args for all the secondaries.  If only one method
+	# was specified, assume the same method and a standard N
+	# secondaries.
+	set methods [lrange $methods 1 end]
+	if { [llength $methods] == 0 } {
+		for { set i 0 } { $i < $nsecondaries } { incr i } {
+			lappend methods $pmethod
+		}
+	}
+
+	set argses [convert_argses $methods $args]
+	set omethods [convert_methods $methods]
+
+	puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+	puts "    with transactions"
+	env_cleanup $testdir
+
+	set pname "primary00$tnum.db"
+	set snamebase "secondary00$tnum"
+
+	# Open a transactional environment; databases and associations
+	# below are opened -auto_commit to match.
+	# XXX if one is not supplied!
+	set env [berkdb_env -create -home $testdir -txn]
+	error_check_good env_open [is_valid_env $env] TRUE
+
+	# Open the primary.
+	set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \
+	    $pargs $pname]
+	error_check_good primary_open [is_valid_db $pdb] TRUE
+
+	# Open and associate the secondaries
+	set sdbs {}
+	for { set i 0 } { $i < [llength $omethods] } { incr i } {
+		set sdb [eval {berkdb_open -create -auto_commit -env} $env \
+		    [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+		error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+		error_check_good db_associate($i) \
+		    [$pdb associate -auto_commit [callback_n $i] $sdb] 0
+		lappend sdbs $sdb
+	}
+
+	# Each put below runs in its own transaction.
+	puts "\tSindex00$tnum.a: Put loop"
+	set did [open $dict]
+	for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+		if { [is_record_based $pmethod] == 1 } {
+			set key [expr $n + 1]
+			set datum $str
+		} else {
+			set key $str
+			gets $did datum
+		}
+		set keys($n) $key
+		set data($n) [pad_data $pmethod $datum]
+
+		set txn [$env txn]
+		set ret [eval {$pdb put} -txn $txn \
+		    {$key [chop_data $pmethod $datum]}]
+		error_check_good put($n) $ret 0
+		error_check_good txn_commit($n) [$txn commit] 0
+	}
+	close $did
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+	puts "\tSindex00$tnum.b: Put/overwrite loop"
+	for { set n 0 } { $n < $nentries } { incr n } {
+		set newd $data($n).$keys($n)
+
+		set txn [$env txn]
+		set ret [eval {$pdb put} -txn $txn \
+		    {$keys($n) [chop_data $pmethod $newd]}]
+		error_check_good put_overwrite($n) $ret 0
+		set data($n) [pad_data $pmethod $newd]
+		error_check_good txn_commit($n) [$txn commit] 0
+	}
+	check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+	# Delete the second half of the entries through the primary.
+	# We do the second half so we can just pass keys(0 ... n/2)
+	# to check_secondaries.
+	set half [expr $nentries / 2]
+	puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+	for { set n $half } { $n < $nentries } { incr n } {
+		set txn [$env txn]
+		set ret [$pdb del -txn $txn $keys($n)]
+		error_check_good pdel($n) $ret 0
+		error_check_good txn_commit($n) [$txn commit] 0
+	}
+	check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+	# Delete half of what's left, through the first secondary.
+	set quar [expr $half / 2]
+	puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+	set sdb [lindex $sdbs 0]
+	set callback [callback_n 0]
+	for { set n $quar } { $n < $half } { incr n } {
+		set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+		set txn [$env txn]
+		set ret [$sdb del -txn $txn $skey]
+		error_check_good sdel($n) $ret 0
+		error_check_good txn_commit($n) [$txn commit] 0
+	}
+	check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+	# Deliberately close the primary BEFORE the secondaries, to
+	# exercise that shutdown ordering.
+	puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+	error_check_good primary_close [$pdb close] 0
+	foreach sdb $sdbs {
+		error_check_good secondary_close [$sdb close] 0
+	}
+	error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/sindex.tcl b/storage/bdb/test/sindex.tcl
new file mode 100644
index 00000000000..fc2a0fc2f31
--- /dev/null
+++ b/storage/bdb/test/sindex.tcl
@@ -0,0 +1,259 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sindex.tcl,v 1.8 2002/05/07 17:15:46 krinsky Exp $
+#
+# Secondary index test driver and maintenance routines.
+#
+# Breaking from the usual convention, we put the driver function
+# for the secondary index tests here, in its own file. The reason
+# for this is that it's something which compartmentalizes nicely,
+# has little in common with other driver functions, and
+# is likely to be run on its own from time to time.
+#
+# The secondary index tests themselves live in si0*.tcl.
+
+# Standard number of secondary indices to create if a single-element
+# list of methods is passed into the secondary index tests.
+global nsecondaries
+# Default secondary count; tests read this via "global nsecondaries".
+set nsecondaries 2
+
+# Run the secondary index tests.
+proc sindex { {verbose 0} args } {
+	global verbose_check_secondaries
+	# $verbose propagates to the check_secondaries helpers.
+	set verbose_check_secondaries $verbose
+
+	# Run basic tests with a single secondary index and a small number
+	# of keys, then again with a larger number of keys.  (Note that
+	# we can't go above 5000, since we use two items from our
+	# 10K-word list for each key/data pair.)
+	foreach n { 200 5000 } {
+		foreach pm { btree hash recno frecno queue queueext } {
+			foreach sm { dbtree dhash ddbtree ddhash btree hash } {
+				sindex001 [list $pm $sm $sm] $n
+				sindex002 [list $pm $sm $sm] $n
+				# Skip tests 3 & 4 for large lists;
+				# they're not that interesting.
+				if { $n < 1000 } {
+					sindex003 [list $pm $sm $sm] $n
+					sindex004 [list $pm $sm $sm] $n
+				}
+
+				sindex006 [list $pm $sm $sm] $n
+			}
+		}
+	}
+
+	# Run secondary index join test.  (There's no point in running
+	# this with both lengths, the primary is unhappy for now with fixed-
+	# length records (XXX), and we need unsorted dups in the secondaries.)
+	foreach pm { btree hash recno } {
+		foreach sm { btree hash } {
+			sindex005 [list $pm $sm $sm] 1000
+		}
+		sindex005 [list $pm btree hash] 1000
+		sindex005 [list $pm hash btree] 1000
+	}
+
+
+	# Run test with 50 secondaries.
+	foreach pm { btree hash } {
+		set methlist [list $pm]
+		for { set i 0 } { $i < 50 } { incr i } {
+			# XXX this should incorporate hash after #3726
+			if { $i % 2 == 0 } {
+				lappend methlist "dbtree"
+			} else {
+				lappend methlist "ddbtree"
+			}
+		}
+		sindex001 $methlist 500
+		sindex002 $methlist 500
+		sindex003 $methlist 500
+		sindex004 $methlist 500
+	}
+}
+
+# The callback function we use for each given secondary in most tests
+# is a simple function of its place in the list of secondaries (0-based)
+# and the access method (since recnos may need different callbacks).
+#
+# !!!
+# Note that callbacks 0-3 return unique secondary keys if the input data
+# are unique; callbacks 4 and higher may not, so don't use them with
+# the normal wordlist and secondaries that don't support dups.
+# The callbacks that incorporate a key don't work properly with recno
+# access methods, at least not in the current test framework (the
+# error_check_good lines test for e.g. 1foo, when the database has
+# e.g. 0x010x000x000x00foo).
+# Map a secondary's 0-based index to its callback proc name; anything
+# outside 0..6 falls back to the identity callback.
+proc callback_n { n } {
+	switch $n {
+	0	{ return _s_reversedata }
+	1	{ return _s_noop }
+	2	{ return _s_concatkeydata }
+	3	{ return _s_concatdatakey }
+	4	{ return _s_reverseconcat }
+	5	{ return _s_truncdata }
+	6	{ return _s_alwayscocacola }
+	default	{ return _s_noop }
+	}
+}
+
+# Secondary-key callback implementations; "a" is the primary key and
+# "b" the primary datum.
+proc _s_reversedata { a b } {
+	return [reverse $b]
+}
+proc _s_truncdata { a b } {
+	return [string range $b 1 end]
+}
+proc _s_concatkeydata { a b } {
+	return $a$b
+}
+proc _s_concatdatakey { a b } {
+	return $b$a
+}
+proc _s_reverseconcat { a b } {
+	return [reverse $a$b]
+}
+proc _s_alwayscocacola { a b } {
+	return "Coca-Cola"
+}
+proc _s_noop { a b } {
+	return $b
+}
+
+# Should the check_secondary routines print lots of output?
+# (The "sindex" driver overwrites this with its verbose argument.)
+set verbose_check_secondaries 0
+
+# Given a primary database handle, a list of secondary handles, a
+# number of entries, and arrays of keys and data, verify that all
+# databases have what they ought to.
+proc check_secondaries { pdb sdbs nentries keyarr dataarr {pref "Check"} } {
+	upvar $keyarr keys
+	upvar $dataarr data
+	global verbose_check_secondaries
+
+	# Make sure each key/data pair is in the primary.
+	if { $verbose_check_secondaries } {
+		puts "\t\t$pref.1: Each key/data pair is in the primary"
+	}
+	for { set i 0 } { $i < $nentries } { incr i } {
+		error_check_good pdb_get($i) [$pdb get $keys($i)] \
+		    [list [list $keys($i) $data($i)]]
+	}
+
+	for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+		# Make sure each key/data pair is in this secondary.
+		if { $verbose_check_secondaries } {
+			puts "\t\t$pref.2:\
+			    Each skey/key/data tuple is in secondary #$j"
+		}
+		for { set i 0 } { $i < $nentries } { incr i } {
+			set sdb [lindex $sdbs $j]
+			set skey [[callback_n $j] $keys($i) $data($i)]
+			error_check_good sdb($j)_pget($i) \
+			    [$sdb pget -get_both $skey $keys($i)] \
+			    [list [list $skey $keys($i) $data($i)]]
+		}
+
+		# Make sure this secondary contains only $nentries
+		# items.  The count walks a fresh cursor with -next
+		# until exhaustion.
+		# NOTE(review): $sdb and $i below are leftovers from the
+		# loop above ($i == $nentries here, so the check names
+		# reuse that value); if nentries is 0, $sdb is unset at
+		# this point -- confirm callers never pass 0.
+		if { $verbose_check_secondaries } {
+			puts "\t\t$pref.3: Secondary #$j has $nentries items"
+		}
+		set dbc [$sdb cursor]
+		error_check_good dbc($i) \
+		    [is_valid_cursor $dbc $sdb] TRUE
+		for { set k 0 } { [llength [$dbc get -next]] > 0 } \
+		    { incr k } { }
+		error_check_good numitems($i) $k $nentries
+		error_check_good dbc($i)_close [$dbc close] 0
+	}
+
+	# Finally, count the primary the same way.
+	if { $verbose_check_secondaries } {
+		puts "\t\t$pref.4: Primary has $nentries items"
+	}
+	set dbc [$pdb cursor]
+	error_check_good pdbc [is_valid_cursor $dbc $pdb] TRUE
+	for { set k 0 } { [llength [$dbc get -next]] > 0 } { incr k } { }
+	error_check_good numitems $k $nentries
+	error_check_good pdbc_close [$dbc close] 0
+}
+
+# Given a primary database handle and a list of secondary handles, walk
+# through the primary and make sure all the secondaries are correct,
+# then walk through the secondaries and make sure the primary is correct.
+#
+# This is slightly less rigorous than the normal check_secondaries--we
+# use it whenever we don't have up-to-date "keys" and "data" arrays.
+proc cursor_check_secondaries { pdb sdbs nentries { pref "Check" } } {
+	global verbose_check_secondaries
+
+	# Make sure each key/data pair in the primary is in each secondary.
+	set pdbc [$pdb cursor]
+	error_check_good ccs_pdbc [is_valid_cursor $pdbc $pdb] TRUE
+	# i counts primary records seen; checked against nentries after
+	# the walk completes.
+	set i 0
+	if { $verbose_check_secondaries } {
+		puts "\t\t$pref.1:\
+		    Key/data in primary => key/data in secondaries"
+	}
+
+	for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+	    { set dbt [$pdbc get -next] } {
+		incr i
+		set pkey [lindex [lindex $dbt 0] 0]
+		set pdata [lindex [lindex $dbt 0] 1]
+		# Each secondary must map this record's skey back to the
+		# same primary key/data pair.
+		for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+			set sdb [lindex $sdbs $j]
+			set sdbt [$sdb pget -get_both \
+			    [[callback_n $j] $pkey $pdata] $pkey]
+			error_check_good pkey($pkey,$j) \
+			    [lindex [lindex $sdbt 0] 1] $pkey
+			error_check_good pdata($pdata,$j) \
+			    [lindex [lindex $sdbt 0] 2] $pdata
+		}
+	}
+	error_check_good ccs_pdbc_close [$pdbc close] 0
+	error_check_good primary_has_nentries $i $nentries
+
+	# Now walk each secondary and make sure each entry maps back to
+	# a matching primary record.
+	for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+		if { $verbose_check_secondaries } {
+			puts "\t\t$pref.2:\
+			    Key/data in secondary #$j => key/data in primary"
+		}
+		set sdb [lindex $sdbs $j]
+		set sdbc [$sdb cursor]
+		error_check_good ccs_sdbc($j) [is_valid_cursor $sdbc $sdb] TRUE
+		set i 0
+		for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+		    { set dbt [$sdbc pget -next] } {
+			incr i
+			set pkey [lindex [lindex $dbt 0] 1]
+			set pdata [lindex [lindex $dbt 0] 2]
+			error_check_good pdb_get($pkey/$pdata,$j) \
+			    [$pdb get -get_both $pkey $pdata] \
+			    [list [list $pkey $pdata]]
+		}
+		error_check_good secondary($j)_has_nentries $i $nentries
+
+		# To exercise pget -last/pget -prev, we do it backwards too.
+		set i 0
+		for { set dbt [$sdbc pget -last] } { [llength $dbt] > 0 } \
+		    { set dbt [$sdbc pget -prev] } {
+			incr i
+			set pkey [lindex [lindex $dbt 0] 1]
+			set pdata [lindex [lindex $dbt 0] 2]
+			error_check_good pdb_get_bkwds($pkey/$pdata,$j) \
+			    [$pdb get -get_both $pkey $pdata] \
+			    [list [list $pkey $pdata]]
+		}
+		error_check_good secondary($j)_has_nentries_bkwds $i $nentries
+
+		error_check_good ccs_sdbc_close($j) [$sdbc close] 0
+	}
+}
+
+# The secondary index tests take a list of the access methods that
+# each array ought to use. Convert at one blow into a list of converted
+# argses and omethods for each method in the list.
+# Convert $largs once per method in $methods; returns one converted
+# args list per method, in order.
+proc convert_argses { methods largs } {
+	set converted {}
+	foreach method $methods {
+		lappend converted [convert_args $method $largs]
+	}
+	return $converted
+}
+# Map each method name in $methods through convert_method, preserving
+# order.
+proc convert_methods { methods } {
+	set converted {}
+	foreach method $methods {
+		lappend converted [convert_method $method]
+	}
+	return $converted
+}
diff --git a/storage/bdb/test/sysscript.tcl b/storage/bdb/test/sysscript.tcl
new file mode 100644
index 00000000000..810b0df6cef
--- /dev/null
+++ b/storage/bdb/test/sysscript.tcl
@@ -0,0 +1,282 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sysscript.tcl,v 11.17 2002/07/29 17:05:24 sue Exp $
+#
+# System integration test script.
+# This script runs a single process that tests the full functionality of
+# the system. The database under test contains nfiles files. Each process
+# randomly generates a key and some data. Both keys and data are bimodally
+# distributed between small keys (1-10 characters) and large keys (the avg
+# length is indicated via the command line parameter).
+# The process then decides on a replication factor between 1 and nfiles.
+# It writes the key and data to that many files and tacks on the file ids
+# of the files it writes to the data string. For example, let's say that
+# I randomly generate the key dog and data cat. Then I pick a replication
+# factor of 3. I pick 3 files from the set of n (say 1, 3, and 5). I then
+# rewrite the data as 1:3:5:cat. I begin a transaction, add the key/data
+# pair to each file and then commit. Notice that I may generate replication
+# of the form 1:3:3:cat in which case I simply add a duplicate to file 3.
+#
+# Usage: sysscript dir nfiles key_avg data_avg
+#
+# dir: DB_HOME directory
+# nfiles: number of files in the set
+# key_avg: average big key size
+# data_avg: average big data size
+
+# --- Initialization: load the harness, parse argv, open the environment. ---
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set mypid [pid]
+
+set usage "sysscript dir nfiles key_avg data_avg method"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+puts [concat "Argc: " $argc " Argv: " $argv]
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set nfiles [ lindex $argv 1 ]
+set key_avg [ lindex $argv 2 ]
+set data_avg [ lindex $argv 3 ]
+set method [ lindex $argv 4 ]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+puts "Beginning execution for $mypid"
+puts "$dir DB_HOME"
+puts "$nfiles files"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+
+flush stdout
+
+# Create local environment.  Errors from the error_check_* helpers are
+# trapped and printed rather than thrown, so a failure here ends the
+# script quietly with a diagnostic instead of a Tcl stack trace.
+set dbenv [berkdb_env -txn -home $dir]
+set err [catch {error_check_good $mypid:dbenv [is_substr $dbenv env] 1} ret]
+if {$err != 0} {
+ puts $ret
+ return
+}
+
+# Now open the files.  All nfiles databases share the environment and are
+# named test044.<i>.db; handles are kept in the db() array indexed 0..nfiles-1.
+for { set i 0 } { $i < $nfiles } { incr i } {
+ set file test044.$i.db
+ set db($i) [berkdb open -auto_commit -env $dbenv $method $file]
+ set err [catch {error_check_bad $mypid:dbopen $db($i) NULL} ret]
+ if {$err != 0} {
+  puts $ret
+  return
+ }
+ set err [catch {error_check_bad $mypid:dbopen [is_substr $db($i) \
+     error] 1} ret]
+ if {$err != 0} {
+  puts $ret
+  return
+ }
+}
+
+set record_based [is_record_based $method]
+# Main work loop: forever, generate a key/data pair, begin a transaction,
+# and either insert the pair into a random subset of files (tagging the data
+# with the file ids) or, if the key already exists, verify that every file
+# listed in the stored data actually contains the pair.  Any deadlock
+# (a "-1" return) aborts the transaction and starts over.
+while { 1 } {
+ # Decide if we're going to create a big key or a small key
+ # We give small keys a 70% chance.
+ if { [berkdb random_int 1 10] < 8 } {
+  set k [random_data 5 0 0 $record_based]
+ } else {
+  set k [random_data $key_avg 0 0 $record_based]
+ }
+ set data [chop_data $method [random_data $data_avg 0 0]]
+
+ set txn [$dbenv txn]
+ set err [catch {error_check_good $mypid:txn_begin [is_substr $txn \
+     $dbenv.txn] 1} ret]
+ if {$err != 0} {
+  puts $ret
+  return
+ }
+
+ # Open cursors
+ for { set f 0 } {$f < $nfiles} {incr f} {
+  set cursors($f) [$db($f) cursor -txn $txn]
+  set err [catch {error_check_good $mypid:cursor_open \
+      [is_substr $cursors($f) $db($f)] 1} ret]
+  if {$err != 0} {
+   puts $ret
+   return
+  }
+ }
+ set aborted 0
+
+ # Check to see if key is already in database
+ set found 0
+ for { set i 0 } { $i < $nfiles } { incr i } {
+  set r [$db($i) get -txn $txn $k]
+  # NOTE(review): the get below duplicates the one above; it looks
+  # redundant (same key, same txn) — confirm before removing.
+  set r [$db($i) get -txn $txn $k]
+  if { $r == "-1" } {
+   # "-1" presumably indicates a deadlock: close all
+   # cursors, abort, and retry — TODO confirm against the
+   # berkdb Tcl API's error conventions.
+   for {set f 0 } {$f < $nfiles} {incr f} {
+    set err [catch {error_check_good \
+        $mypid:cursor_close \
+        [$cursors($f) close] 0} ret]
+    if {$err != 0} {
+     puts $ret
+     return
+    }
+   }
+   set err [catch {error_check_good $mypid:txn_abort \
+       [$txn abort] 0} ret]
+   if {$err != 0} {
+    puts $ret
+    return
+   }
+   set aborted 1
+   set found 2
+   break
+  } elseif { $r != "Key $k not found." } {
+   set found 1
+   break
+  }
+ }
+ switch $found {
+ 2 {
+  # Transaction aborted, no need to do anything.
+ }
+ 0 {
+  # Key was not found, decide how much to replicate
+  # and then create a list of that many file IDs.
+  set repl [berkdb random_int 1 $nfiles]
+  set fset ""
+  for { set i 0 } { $i < $repl } {incr i} {
+   set f [berkdb random_int 0 [expr $nfiles - 1]]
+   lappend fset $f
+   # Prepend each chosen file id, yielding data of the
+   # form "f1:f2:...:origdata".
+   set data [chop_data $method $f:$data]
+  }
+
+  foreach i $fset {
+   set r [$db($i) put -txn $txn $k $data]
+   if {$r == "-1"} {
+    for {set f 0 } {$f < $nfiles} {incr f} {
+     set err [catch {error_check_good \
+         $mypid:cursor_close \
+         [$cursors($f) close] 0} ret]
+     if {$err != 0} {
+      puts $ret
+      return
+     }
+    }
+    set err [catch {error_check_good \
+        $mypid:txn_abort [$txn abort] 0} ret]
+    if {$err != 0} {
+     puts $ret
+     return
+    }
+    set aborted 1
+    break
+   }
+  }
+ }
+ 1 {
+  # Key was found. Make sure that all the data values
+  # look good.
+  # f is reused here as a per-file "seen" bitmap.
+  set f [zero_list $nfiles]
+  set data $r
+  # Walk the "f1:f2:...:payload" prefix, checking each listed
+  # file via its cursor.  `record` appears to be a harness
+  # wrapper around the cursor op — confirm in testutils.tcl.
+  while { [set ndx [string first : $r]] != -1 } {
+   set fnum [string range $r 0 [expr $ndx - 1]]
+   if { [lindex $f $fnum] == 0 } {
+    #set flag -set
+    set full [record $cursors($fnum) get -set $k]
+   } else {
+    #set flag -next
+    set full [record $cursors($fnum) get -next]
+   }
+   if {[llength $full] == 0} {
+    for {set f 0 } {$f < $nfiles} {incr f} {
+     set err [catch {error_check_good \
+         $mypid:cursor_close \
+         [$cursors($f) close] 0} ret]
+     if {$err != 0} {
+      puts $ret
+      return
+     }
+    }
+    set err [catch {error_check_good \
+        $mypid:txn_abort [$txn abort] 0} ret]
+    if {$err != 0} {
+     puts $ret
+     return
+    }
+    set aborted 1
+    break
+   }
+   # NOTE(review): $flag is never set — the "set flag" lines
+   # above are commented out — so evaluating the check name
+   # below would raise an undefined-variable error, which the
+   # enclosing catch then reports as a failure.
+   set err [catch {error_check_bad \
+       $mypid:curs_get($k,$data,$fnum,$flag) \
+       [string length $full] 0} ret]
+   if {$err != 0} {
+    puts $ret
+    return
+   }
+   set key [lindex [lindex $full 0] 0]
+   set rec [pad_data $method [lindex [lindex $full 0] 1]]
+   set err [catch {error_check_good \
+       $mypid:dbget_$fnum:key $key $k} ret]
+   if {$err != 0} {
+    puts $ret
+    return
+   }
+   set err [catch {error_check_good \
+       $mypid:dbget_$fnum:data($k) $rec $data} ret]
+   if {$err != 0} {
+    puts $ret
+    return
+   }
+   set f [lreplace $f $fnum $fnum 1]
+   incr ndx
+   set r [string range $r $ndx end]
+  }
+ }
+ }
+ if { $aborted == 0 } {
+  for {set f 0 } {$f < $nfiles} {incr f} {
+   set err [catch {error_check_good $mypid:cursor_close \
+       [$cursors($f) close] 0} ret]
+   if {$err != 0} {
+    puts $ret
+    return
+   }
+  }
+  set err [catch {error_check_good $mypid:commit [$txn commit] \
+      0} ret]
+  if {$err != 0} {
+   puts $ret
+   return
+  }
+ }
+}
+
+# Close files.
+# NOTE(review): the "while { 1 }" loop above has no break or exit on the
+# success path, so this cleanup appears unreachable in normal operation —
+# presumably the driving test kills the process externally; confirm.
+for { set i 0 } { $i < $nfiles} { incr i } {
+ set r [$db($i) close]
+ set err [catch {error_check_good $mypid:db_close:$i $r 0} ret]
+ if {$err != 0} {
+  puts $ret
+  return
+ }
+}
+
+# Close tm and environment
+$dbenv close
+
+puts "[timestamp] [pid] Complete"
+flush stdout
+
+# NOTE(review): $file still holds the name of the LAST database opened in
+# the open loop above, so only that one file is checked here.
+filecheck $file 0
diff --git a/storage/bdb/test/test.tcl b/storage/bdb/test/test.tcl
new file mode 100644
index 00000000000..10ee9425b7a
--- /dev/null
+++ b/storage/bdb/test/test.tcl
@@ -0,0 +1,1863 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test.tcl,v 11.225 2002/09/10 18:51:38 sue Exp $
+
+# --- Bootstrap: load the DB Tcl API, locate the utilities, and source every
+# --- numbered test script listed in testparams.tcl. ---
+source ./include.tcl
+
+# Load DB's TCL API.
+load $tcllib
+
+if { [file exists $testdir] != 1 } {
+ file mkdir $testdir
+}
+
+global __debug_print
+global __debug_on
+global __debug_test
+global util_path
+
+#
+# Test if utilities work to figure out the path. Most systems
+# use ., but QNX has a problem with execvp of shell scripts which
+# causes it to break.
+#
+set stat [catch {exec ./db_printlog -?} ret]
+if { [string first "exec format error" $ret] != -1 } {
+ set util_path ./.libs
+} else {
+ set util_path .
+}
+set __debug_print 0
+set encrypt 0
+set old_encrypt 0
+set passwd test_passwd
+
+# This is where the test numbering and parameters now live.
+source $test_path/testparams.tcl
+
+# Error stream that (should!) always go to the console, even if we're
+# redirecting to ALL.OUT.
+set consoleerr stderr
+
+# Source each per-test file named "<subsystem>NNN.tcl"; num_test() (from
+# testparams.tcl) gives the count per subsystem.
+foreach sub $subs {
+ if { [info exists num_test($sub)] != 1 } {
+  puts stderr "Subsystem $sub has no number of tests specified in\
+   testparams.tcl; skipping."
+  continue
+ }
+ set end $num_test($sub)
+ for { set i 1 } { $i <= $end } {incr i} {
+  set name [format "%s%03d.tcl" $sub $i]
+  source $test_path/$name
+ }
+}
+
+# Support libraries for the non-numbered test groups.
+source $test_path/archive.tcl
+source $test_path/byteorder.tcl
+source $test_path/dbm.tcl
+source $test_path/hsearch.tcl
+source $test_path/join.tcl
+source $test_path/logtrack.tcl
+source $test_path/ndbm.tcl
+source $test_path/parallel.tcl
+source $test_path/reputils.tcl
+source $test_path/sdbutils.tcl
+source $test_path/shelltest.tcl
+source $test_path/sindex.tcl
+source $test_path/testutils.tcl
+source $test_path/upgrade.tcl
+
+# Shared fixtures used by many tests.
+set dict $test_path/wordlist
+set alphabet "abcdefghijklmnopqrstuvwxyz"
+set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
+
+# Random number seed.
+global rand_init
+set rand_init 101301
+
+# Default record length and padding character for
+# fixed record length access method(s)
+set fixed_len 20
+set fixed_pad 0
+
+set recd_debug 0
+set log_log_record_types 0
+set ohandles {}
+
+# Normally, we're not running an all-tests-in-one-env run. This matters
+# for error stream/error prefix settings in berkdb_open.
+global is_envmethod
+set is_envmethod 0
+
+# For testing locker id wrap around.
+global lock_curid
+global lock_maxid
+set lock_curid 0
+set lock_maxid 2147483647
+global txn_curid
+global txn_maxid
+set txn_curid 2147483648
+set txn_maxid 4294967295
+
+# Set up any OS-specific values
+global tcl_platform
+set is_windows_test [is_substr $tcl_platform(os) "Win"]
+set is_hp_test [is_substr $tcl_platform(os) "HP-UX"]
+set is_qnx_test [is_substr $tcl_platform(os) "QNX"]
+
+# From here on out, test.tcl contains the procs that are used to
+# run all or part of the test suite.
+
+# run_std -- run the standard regression suite, logging to ALL.OUT.
+#  Flags (via extractflags): A = part of run_all (skip pass/fail summary),
+#  M = everything except access-method tests, m = access-method tests only,
+#  n = display the commands without running them.
+#  Each group runs in its own child tclsh so memory use stays bounded.
+proc run_std { args } {
+ global num_test
+ source ./include.tcl
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set am_only 0
+ set no_am 0
+ set std_only 1
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ A {
+ set std_only 0
+ }
+ M {
+ set no_am 1
+ puts "run_std: all but access method tests."
+ }
+ m {
+ set am_only 1
+ puts "run_std: access method tests only."
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ if { $std_only == 1 } {
+ fileremove -f ALL.OUT
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ }
+
+ # Pairs of {human-readable label, "r" subcommand}.
+ set test_list {
+ {"environment" "env"}
+ {"archive" "archive"}
+ {"locking" "lock"}
+ {"logging" "log"}
+ {"memory pool" "memp"}
+ {"mutex" "mutex"}
+ {"transaction" "txn"}
+ {"deadlock detection" "dead"}
+ {"subdatabase" "sdb"}
+ {"byte-order" "byte"}
+ {"recno backing file" "rsrc"}
+ {"DBM interface" "dbm"}
+ {"NDBM interface" "ndbm"}
+ {"Hsearch interface" "hsearch"}
+ {"secondary index" "sindex"}
+ }
+
+ if { $am_only == 0 } {
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags $cmd" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test"
+ close $o
+ }
+ }
+
+ # Run recovery tests.
+ #
+ # XXX These too are broken into separate tclsh instantiations
+ # so we don't require so much memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ #
+ # Note that we still wrap the test in an exec so that
+ # its output goes to ALL.OUT. run_recd will wrap each test
+ # so that both error streams go to stdout (which here goes
+ # to ALL.OUT); information that run_recd wishes to print
+ # to the "real" stderr, but outside the wrapping for each test,
+ # such as which tests are being skipped, it can still send to
+ # stderr.
+ puts "Running recovery tests"
+ if [catch {
+ exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags recd" \
+ 2>@ stderr >> ALL.OUT
+ } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: recd tests"
+ close $o
+ }
+
+ # Run join test
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ puts "Running join test"
+ foreach i "join1 join2 join3 join4 join5 join6" {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags $i" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $i test"
+ close $o
+ }
+ }
+ }
+
+ if { $no_am == 0 } {
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ foreach i \
+ "btree hash queue queueext recno rbtree frecno rrecno" {
+ puts "Running $i tests"
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_method -$i $j $j $display $run $o
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_method -$i $j $j $display $run"\
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL:\
+ [format "test%03d" $j] $i"
+ close $o
+ }
+ }
+ }
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ # If running in the context of the larger 'run_all' we don't
+ # check for failure here either.
+ if { $run == 0 || $std_only == 0 } {
+ return
+ }
+
+ set failed [check_failed_run ALL.OUT]
+
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
+proc check_failed_run { file {text "^FAIL"}} {
+ # Scan $file line by line; return 1 if any line matches the
+ # regexp $text (default "^FAIL"), 0 otherwise.
+ set matched 0
+ set fh [open $file r]
+ while { [gets $fh curline] >= 0 } {
+  if { [regexp $text $curline] != 0 } {
+   set matched 1
+  }
+ }
+ close $fh
+
+ return $matched
+}
+
+# r -- dispatch a single test group by name.
+#  Usage: r ?-n? <group> ?args...?   (-n = display only, don't run)
+#  The first word of $args selects the group; unknown names raise
+#  "FAIL:... unknown command".  The whole dispatch is wrapped in a catch
+#  so any FAIL inside is re-raised with just its first line.
+proc r { args } {
+ global num_test
+ source ./include.tcl
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set saveflags "--"
+ foreach f $flags {
+ switch $f {
+ n {
+ set display 1
+ set run 0
+ set saveflags "-n $saveflags"
+ }
+ }
+ }
+
+ if {[catch {
+ set sub [ lindex $args 0 ]
+ switch $sub {
+ byte {
+ if { $display } {
+ puts "run_test byteorder"
+ }
+ if { $run } {
+ check_handles
+ run_test byteorder
+ }
+ }
+ archive -
+ dbm -
+ hsearch -
+ ndbm -
+ shelltest -
+ sindex {
+ if { $display } { puts "r $sub" }
+ if { $run } {
+ check_handles
+ $sub
+ }
+ }
+ bigfile -
+ dead -
+ env -
+ lock -
+ log -
+ memp -
+ mutex -
+ rsrc -
+ sdbtest -
+ txn {
+ if { $display } { run_subsystem $sub 1 0 }
+ if { $run } {
+ run_subsystem $sub
+ }
+ }
+ join {
+ eval r $saveflags join1
+ eval r $saveflags join2
+ eval r $saveflags join3
+ eval r $saveflags join4
+ eval r $saveflags join5
+ eval r $saveflags join6
+ }
+ join1 {
+ if { $display } { puts jointest }
+ if { $run } {
+ check_handles
+ jointest
+ }
+ }
+ joinbench {
+ puts "[timestamp]"
+ eval r $saveflags join1
+ eval r $saveflags join2
+ puts "[timestamp]"
+ }
+ join2 {
+ if { $display } { puts "jointest 512" }
+ if { $run } {
+ check_handles
+ jointest 512
+ }
+ }
+ join3 {
+ if { $display } {
+ puts "jointest 8192 0 -join_item"
+ }
+ if { $run } {
+ check_handles
+ jointest 8192 0 -join_item
+ }
+ }
+ join4 {
+ if { $display } { puts "jointest 8192 2" }
+ if { $run } {
+ check_handles
+ jointest 8192 2
+ }
+ }
+ join5 {
+ if { $display } { puts "jointest 8192 3" }
+ if { $run } {
+ check_handles
+ jointest 8192 3
+ }
+ }
+ join6 {
+ if { $display } { puts "jointest 512 3" }
+ if { $run } {
+ check_handles
+ jointest 512 3
+ }
+ }
+ recd {
+ check_handles
+ run_recds $run $display [lrange $args 1 end]
+ }
+ rep {
+ # First every numbered test under run_repmethod,
+ # then the repNNN tests themselves (rep002 is
+ # skipped pending SR #6195).
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_repmethod 0 $j $j"
+ }
+ if { $run } {
+ eval run_test \
+ run_repmethod 0 $j $j
+ }
+ }
+ for { set i 1 } \
+ { $i <= $num_test(rep) } {incr i} {
+ set test [format "%s%03d" $sub $i]
+ if { $i == 2 } {
+ if { $run } {
+ puts "Skipping rep002 \
+ (waiting on SR #6195)"
+ }
+ continue
+ }
+ if { $display } {
+ puts "run_test $test"
+ }
+ if { $run } {
+ run_test $test
+ }
+ }
+ }
+ rpc {
+ if { $display } { puts "r $sub" }
+ global rpc_svc svc_list
+ set old_rpc_src $rpc_svc
+ # Run the rpc tests once per server in svc_list,
+ # skipping servers whose binary isn't present.
+ foreach rpc_svc $svc_list {
+ if { !$run || \
+ ![file exist $util_path/$rpc_svc] } {
+ continue
+ }
+ run_subsystem rpc
+ if { [catch {run_rpcmethod -txn} ret] != 0 } {
+ puts $ret
+ }
+ run_test run_rpcmethod
+ }
+ set rpc_svc $old_rpc_src
+ }
+ sec {
+ if { $display } {
+ run_subsystem $sub 1 0
+ }
+ if { $run } {
+ run_subsystem $sub 0 1
+ }
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_secmethod $j $j"
+ puts "eval run_test \
+ run_secenv $j $j"
+ }
+ if { $run } {
+ eval run_test \
+ run_secmethod $j $j
+ eval run_test \
+ run_secenv $j $j
+ }
+ }
+ }
+ sdb {
+ if { $display } {
+ puts "eval r $saveflags sdbtest"
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ puts "eval run_test \
+ subdb $j $j"
+ }
+ }
+ if { $run } {
+ eval r $saveflags sdbtest
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ eval run_test subdb $j $j
+ }
+ }
+ }
+ btree -
+ rbtree -
+ hash -
+ queue -
+ queueext -
+ recno -
+ frecno -
+ rrecno {
+ eval run_method [lindex $args 0] \
+ 1 0 $display $run [lrange $args 1 end]
+ }
+
+ default {
+ error \
+ "FAIL:[timestamp] r: $args: unknown command"
+ }
+ }
+ flush stdout
+ flush stderr
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp] r: $args: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
+# run_subsystem -- run (and/or display) all numbered tests of one subsystem.
+#  prefix:  subsystem name (e.g. "log", "txn"); must have a num_test() entry.
+#  display: if nonzero, print each test name.
+#  run:     if nonzero, execute each test (errors are swallowed by catch).
+proc run_subsystem { prefix { display 0 } { run 1} } {
+ global num_test
+ if { [info exists num_test($prefix)] != 1 } {
+ # Bug fix: this message formerly interpolated $sub, which is
+ # not defined in this proc, so the skip path itself errored.
+ puts stderr "Subsystem $prefix has no number of tests specified in\
+ testparams.tcl; skipping."
+ return
+ }
+ for { set i 1 } { $i <= $num_test($prefix) } {incr i} {
+ set name [format "%s%03d" $prefix $i]
+ if { $display } {
+ puts "eval $name"
+ }
+ if { $run } {
+ check_handles
+ catch {eval $name}
+ }
+ }
+}
+
+proc run_test { testname args } {
+ # Run $testname once per standard access method, passing any extra
+ # args through, and verify all databases left in $testdir each time.
+ source ./include.tcl
+ set all_methods "hash queue queueext recno rbtree frecno rrecno btree"
+ foreach m $all_methods {
+  check_handles
+  eval $testname -$m $args
+  verify_dir $testdir "" 1
+ }
+}
+
+# run_method -- run tests testNNN for $start <= NNN <= $stop with one
+#  access method.  stop == 0 means "through the last numbered test".
+#  display: print the command instead of (or as well as) running it.
+#  outfile: where display output goes (default stdout).
+#  Extra args are appended to each test's parameter list.
+proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
+    { outfile stdout } args } {
+ global __debug_on
+ global __debug_print
+ global num_test
+ global parms
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ if { $run == 1 } {
+ puts $outfile "run_method: $method $start $stop $args"
+ }
+
+ if {[catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "test%03d" $i]
+ # Tests without a parms() entry are disabled.
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts -nonewline $outfile "eval $name $method"
+ puts -nonewline $outfile " $parms($name) $args"
+ puts $outfile " ; verify_dir $testdir \"\" 1"
+ }
+ if { $run } {
+ check_handles $outfile
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $args
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ # verify all databases the test leaves behind
+ verify_dir $testdir "" 1
+ if { $__debug_on != 0 } {
+ debug
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_method: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
+# run_rpcmethod -- run tests against an RPC server.
+#  Starts the server (locally or via rsh), then either runs the txn tests
+#  (when "txn" appears in $method) or tests $start..$stop in a fresh
+#  client environment each.  The server process is killed on both the
+#  error and success paths.
+proc run_rpcmethod { method {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global num_test
+ global parms
+ global is_envmethod
+ global rpc_svc
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ puts "run_rpcmethod: $method $start $stop $largs"
+
+ set save_largs $largs
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRun_rpcmethod.a: starting server, pid $dpid"
+ # Give the server time to come up before cleaning/connecting.
+ tclsleep 10
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ set home [file tail $rpc_testdir]
+
+ set is_envmethod 1
+ set use_txn 0
+ if { [string first "txn" $method] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ # txn mode: $start is reused as the transaction count
+ # (defaulting to 32 when it is the usual 1).
+ if { $start == 1 } {
+ set ntxns 32
+ } else {
+ set ntxns $start
+ }
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ error_check_good envclose [$env close] 0
+ set stat [catch {eval txn003} res]
+ } else {
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
+ continue
+ }
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 1Mb. Otherwise some
+ # tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 1048576 1}}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ }
+ } res]
+ }
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ # Kill the server before re-raising the error.
+ tclkill $dpid
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_rpcmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ tclkill $dpid
+}
+
+# run_rpcnoserver -- like run_rpcmethod, but assumes an RPC server is
+#  already running (does not start or kill one).  Structure mirrors
+#  run_rpcmethod: txn tests when "txn" is in $method, otherwise tests
+#  $start..$stop each in a fresh client environment.
+proc run_rpcnoserver { method {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global num_test
+ global parms
+ global is_envmethod
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ puts "run_rpcnoserver: $method $start $stop $largs"
+
+ set save_largs $largs
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set home [file tail $rpc_testdir]
+
+ set is_envmethod 1
+ set use_txn 0
+ if { [string first "txn" $method] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ if { $start == 1 } {
+ set ntxns 32
+ } else {
+ set ntxns $start
+ }
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ error_check_good envclose [$env close] 0
+ } else {
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
+ continue
+ }
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 1Mb. Otherwise some
+ # tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 1048576 1} }]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ }
+ } res]
+ }
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_rpcnoserver: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ # NOTE(review): unlike run_rpcmethod, is_envmethod is reset
+ # only on this error path; on success it stays 1 — confirm
+ # whether this asymmetry is intentional.
+ set is_envmethod 0
+ }
+
+}
+
+#
+# Run method tests in secure mode.
+#
+proc run_secmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global passwd
+
+ append largs " -encryptaes $passwd "
+ eval run_method $method $start $stop $display $run $outfile $largs
+}
+
+#
+# Run method tests in its own, new secure environment.
+#
+proc run_secenv { method {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ puts "run_secenv: $method $start $stop $largs"
+
+ set save_largs $largs
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $testdir -encryptaes $passwd \
+ -cachesize {0 1048576 1}}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+
+ #
+ # Run each test multiple times in the secure env.
+ # Once with a secure env + clear database
+ # Once with a secure env + secure database
+ #
+ eval $name $method $parms($name) $largs
+ append largs " -encrypt "
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir -encryptaes $passwd] 0
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_secenv: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ set is_envmethod 0
+ }
+
+}
+
+#
+# Run replication method tests in master and client env.
+#
+proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
+ {do_sec 0} {do_oob 0} {largs "" } } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ global rand_init
+
+ berkdb srand $rand_init
+ set c [string index $test 0]
+ if { $c == "s" } {
+ set i [string range $test 1 end]
+ set name [format "subdb%03d" $i]
+ } else {
+ set i $test
+ set name [format "test%03d" $i]
+ }
+ puts "run_reptest: $method $name"
+
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup \
+ $envargs $largs $test $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline \
+ "Repl: $name: dropping $droppct%, $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_reptest: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+}
+
+#
+# Run replication method tests in master and client env.
+#
+proc run_repmethod { method {numcl 0} {start 1} {stop 0} {display 0}
+ {run 1} {outfile stdout} {largs ""} } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ global rand_init
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+ berkdb srand $rand_init
+
+ #
+ # We want to run replication both normally and with crypto.
+ # So run it once and then run again with crypto.
+ #
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_repmethod $method \
+ 0 $i $i 0 1 stdout $largs"
+ }
+ }
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+ # Use an array for number of clients because we really don't
+ # want to evenly-weight all numbers of clients. Favor smaller
+ # numbers but test more clients occasionally.
+ set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
+ set drop_len [expr [llength $drop_list] - 1]
+ set client_list { 1 1 2 1 1 1 2 2 3 1 }
+ set cl_len [expr [llength $client_list] - 1]
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+#
+# Run method tests, each in its own, new environment. (As opposed to
+# run_envmethod1 which runs all the tests in a single environment.)
+#
+proc run_envmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout } { largs "" } } {
+ # Run tests test<start>..test<stop> (and subdb tests up to
+ # $num_test(sdb)) for one access method, creating a brand-new
+ # transactional environment for each individual test.
+ # method: access method flag (e.g. -btree); stop == 0 means "run
+ # through the last configured test"; display prints the equivalent
+ # commands to $outfile; run actually executes them; largs is
+ # passed through to each test proc.
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ source ./include.tcl
+
+ # Subdb tests are numbered separately; cap their range at $stop
+ # when the caller gave an explicit upper bound.
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+
+ # Remember the incoming args; each iteration appends "-env ..."
+ # and must restore the original afterwards.
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_envmethod $method \
+ $i $i 0 1 stdout $largs"
+ }
+ }
+
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+ # Run both subdb and normal tests for as long as there are
+ # some of each type. Start with the subdbs:
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ check_handles
+ # Fresh env per test; closed and removed below.
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ }
+ } res]
+ # Re-raise any error, wrapping non-FAIL errors so the log
+ # always carries a FAIL marker and a timestamp.
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ # Subdb tests are done, now run through the regular tests:
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+proc subdb { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout} args} {
+ # Run subdatabase tests subdb<start>..subdb<stop> for one access
+ # method, verifying the test directory after each test.  Tests
+ # disabled in testparams.tcl are skipped.
+ # NOTE(review): unlike the run_* procs, stop == 0 is not expanded
+ # to $num_test(sdb) here, so a default call iterates zero times;
+ # callers appear to always pass explicit bounds -- confirm.
+ global num_test testdir
+ global parms
+
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts -nonewline $outfile "eval $name $method"
+ puts -nonewline $outfile " $parms($name) $args;"
+ puts $outfile "verify_dir $testdir \"\" 1"
+ }
+ if { $run } {
+ check_handles $outfile
+ eval $name $method $parms($name) $args
+ verify_dir $testdir "" 1
+ }
+ flush stdout
+ flush stderr
+ }
+}
+
+proc run_recd { method {start 1} {stop 0} {run 1} {display 0} args } {
+ # Run recovery tests recd<start>..recd<stop> for one access method.
+ # Each test executes in a child tclsh process so memory use stays
+ # bounded; an individual test failure is reported with a FAIL line
+ # but does not stop the loop.
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global parms
+ global num_test
+ global log_log_record_types
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(recd)
+ }
+ if { $run == 1 } {
+ puts "run_recd: $method $start $stop $args"
+ }
+
+ if {[catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "recd%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Recd%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts "eval $name $method $parms($name) $args"
+ }
+ if { $run } {
+ check_handles
+ puts "[timestamp]"
+ # By redirecting stdout to stdout, we make exec
+ # print output rather than simply returning it.
+ # By redirecting stderr to stdout too, we make
+ # sure everything winds up in the ALL.OUT file.
+ set ret [catch { exec $tclsh_path << \
+ "source $test_path/test.tcl; \
+ set log_log_record_types \
+ $log_log_record_types; eval $name \
+ $method $parms($name) $args" \
+ >&@ stdout
+ } res]
+
+ # Don't die if the test failed; we want
+ # to just proceed.
+ if { $ret != 0 } {
+ puts "FAIL:[timestamp] $res"
+ }
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ }
+ }
+ } res] != 0} {
+ # Driver-level error (not a child-test failure): re-raise,
+ # tagging it FAIL if the message lacks the marker.
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_recd: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
+proc run_recds { {run 1} {display 0} args } {
+ # Run the full recovery suite (run_recd) across every access
+ # method, with log record type tracking enabled so that
+ # logtrack_summary can report which record types went untested.
+ global log_log_record_types
+
+ set log_log_record_types 1
+ logtrack_init
+ foreach method \
+ "btree rbtree hash queue queueext recno frecno rrecno" {
+ check_handles
+ # Report but do not propagate per-method failures.
+ if { [catch {eval \
+ run_recd -$method 1 0 $run $display $args} ret ] != 0 } {
+ puts $ret
+ }
+ }
+ if { $run } {
+ logtrack_summary
+ }
+ set log_log_record_types 0
+}
+
+proc run_all { args } {
+ # Top-level driver: run the standard tests, then re-run the access
+ # method and subdb tests at each non-default page size (with
+ # checksums), then the per-env access method tests, and finally
+ # the replication/security (and optional RPC) groups.  All output
+ # is appended to ALL.OUT, which is scanned for FAIL lines at the
+ # end to decide overall success.
+ # Flags (via extractflags): m = access methods only; n = display
+ # commands without running them.
+ global num_test
+ source ./include.tcl
+
+ fileremove -f ALL.OUT
+
+ set exflgs [eval extractflags $args]
+ set flags [lindex $exflgs 1]
+ set display 1
+ set run 1
+ set am_only 0
+ set parallel 0
+ set nparalleltests 0
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ m {
+ set am_only 1
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ #
+ # First run standard tests. Send in a -A to let run_std know
+ # that it is part of the "run_all" run, so that it doesn't
+ # print out start/end times.
+ #
+ lappend args -A
+ eval {run_std} $args
+
+ set test_pagesizes [get_test_pagesizes]
+ set args [lindex $exflgs 0]
+ set save_args $args
+
+ # Re-run the access method and subdb tests at each non-default
+ # page size, with checksums enabled.
+ foreach pgsz $test_pagesizes {
+ set args $save_args
+ append args " -pagesize $pgsz -chksum"
+ if { $am_only == 0 } {
+ # Run recovery tests.
+ #
+ # XXX These don't actually work at multiple pagesizes;
+ # disable them for now.
+ #
+ # XXX These too are broken into separate tclsh
+ # instantiations so we don't require so much
+ # memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ #
+ # XXX See comment in run_std for why this only directs
+ # stdout and not stderr. Don't worry--the right stuff
+ # happens.
+ #puts "Running recovery tests with pagesize $pgsz"
+ #if [catch {exec $tclsh_path \
+ # << "source $test_path/test.tcl; \
+ # r $rflags recd $args" \
+ # 2>@ stderr >> ALL.OUT } res] {
+ # set o [open ALL.OUT a]
+ # puts $o "FAIL: recd test:"
+ # puts $o $res
+ # close $o
+ #}
+ }
+
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so
+ # we don't require so much memory.
+ foreach i \
+ "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests with pagesize $pgsz"
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {run_method -$i $j $j $display \
+ $run $o} $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ eval {run_method -$i $j $j \
+ $display $run stdout} $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o \
+ "FAIL: [format \
+ "test%03d" $j] $i"
+ close $o
+ }
+ }
+ }
+
+ #
+ # Run subdb tests with varying pagesizes too.
+ #
+ for { set j 1 } { $j <= $num_test(sdb) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {subdb -$i $j $j $display \
+ $run $o} $args
+ close $o
+ }
+ if { $run == 1 } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ eval {subdb -$i $j $j $display \
+ $run stdout} $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: subdb -$i $j $j"
+ close $o
+ }
+ }
+ }
+ }
+ }
+ set args $save_args
+ #
+ # Run access method tests at default page size in one env.
+ #
+ foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests in a txn env"
+ for { set j 1 } { $j <= $num_test(test) } { incr j } {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_envmethod -$i $j $j $display \
+ $run $o $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_envmethod -$i $j $j \
+ $display $run stdout $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o \
+ "FAIL: run_envmethod $i $j $j"
+ close $o
+ }
+ }
+ }
+ }
+ #
+ # Run tests using proc r. The replication tests have been
+ # moved from run_std to run_all.
+ #
+ set test_list {
+ {"replication" "rep"}
+ {"security" "sec"}
+ }
+ #
+ # If configured for RPC, then run rpc tests too.
+ #
+ if { [file exists ./berkeley_db_svc] ||
+ [file exists ./berkeley_db_cxxsvc] ||
+ [file exists ./berkeley_db_javasvc] } {
+ append test_list {{"RPC" "rpc"}}
+ }
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ r $rflags $cmd $args" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test"
+ close $o
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ if { $run == 0 } {
+ return
+ }
+
+ # Scan ALL.OUT for FAIL markers to decide overall status.
+ set failed 0
+ set o [open ALL.OUT r]
+ while { [gets $o line] >= 0 } {
+ if { [regexp {^FAIL} $line] != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
+#
+# Run method tests in one environment. (As opposed to run_envmethod
+# which runs each test in its own, new environment.)
+#
+proc run_envmethod1 { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ # Run all method tests (subdb tests first, then regular tests)
+ # sharing ONE cache-configured environment, as opposed to
+ # run_envmethod which creates a new env per test.
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ source ./include.tcl
+
+ # Subdb tests are numbered separately; cap their range at $stop
+ # when the caller gave an explicit upper bound.
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+ if { $run == 1 } {
+ puts "run_envmethod1: $method $start $stop $args"
+ }
+
+ set is_envmethod 1
+ if { $run == 1 } {
+ check_handles
+ env_cleanup $testdir
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ # Single shared env with a 10MB cache for all tests below.
+ set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
+ {-mode 0644 -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ # NOTE(review): largs is created here by append and is never
+ # seeded from $args, so extra caller args appear to be
+ # dropped in the run path -- confirm intended.
+ append largs " -env $env "
+ }
+
+ if { $display } {
+ # The envmethod1 tests can't be split up, since they share
+ # an env.
+ puts $outfile "eval run_envmethod1 $method $args"
+ }
+
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ # Re-raise any error, wrapping non-FAIL errors so the log
+ # always carries a FAIL marker and a timestamp.
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod1: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ if { $run == 1 } {
+ error_check_good envclose [$env close] 0
+ check_handles $outfile
+ }
+ set is_envmethod 0
+
+}
+
+# We want to test all of 512b, 8Kb, and 32Kb pages, but chances are one
+# of these is the default pagesize. We don't want to run all the AM tests
+# twice, so figure out what the default page size is, then return the
+# other two.
+proc get_test_pagesizes { } {
+ # Determine the default Berkeley DB page size by creating a
+ # throwaway in-memory btree and reading "Page size" out of its
+ # stat output, then return the other page sizes to test so the
+ # default is not re-run.
+ # Create an in-memory database.
+ set db [berkdb_open -create -btree]
+ error_check_good gtp_create [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
+ }
+ }
+
+ error_check_good gtp_close [$db close] 0
+
+ # A zero page size means stat output didn't contain the field.
+ error_check_bad gtp_pgsz $pgsz 0
+ switch $pgsz {
+ 512 { return {8192 32768} }
+ 8192 { return {512 32768} }
+ 32768 { return {512 8192} }
+ default { return {512 8192 32768} }
+ }
+ # Every switch arm above returns; this assertion is a safety net
+ # and should be unreachable.
+ error_check_good NOTREACHED 0 1
+}
diff --git a/storage/bdb/test/test001.tcl b/storage/bdb/test/test001.tcl
new file mode 100644
index 00000000000..f0b562bbf24
--- /dev/null
+++ b/storage/bdb/test/test001.tcl
@@ -0,0 +1,247 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test001.tcl,v 11.28 2002/08/08 15:38:11 bostic Exp $
+#
+# TEST test001
+# TEST Small keys/data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc test001 { method {nentries 10000} {start 0} {tnum "01"} {noclean 0} args } {
+ # Small keys/data: insert the first nentries dictionary words with
+ # self as key and the reversed word as data (record numbers and
+ # the padded word for record-based methods), verify each put via
+ # get and DB_GET_BOTH, then dump the file forward and in reverse
+ # after a close/reopen and compare against the expected key set.
+ # start: key offset (record-number base for test090's queue-wrap
+ # runs, dictionary offset for replication iterations); tnum: test
+ # number suffix so other tests can reuse this body; noclean: skip
+ # cleanup so data accumulates across iterations.
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ # If we are not using an external env, then test setting
+ # the database cache size and using multiple caches.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ append args " -cachesize {0 1048576 3} "
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries equal key/data pairs"
+ if { $start != 0 } {
+ # Sadly enough, we are using start in two different ways.
+ # In test090, it is used to test really big records numbers
+ # in queue. In replication, it is used to be able to run
+ # different iterations of this test using different key/data
+ # pairs. We try to hide all that magic here.
+ puts "\tStarting at $start"
+
+ # Clamp start so start+nentries does not run off the end
+ # of the dictionary (not needed for test090's records).
+ if { $tnum != 90 } {
+ set did [open $dict]
+ for { set nlines 0 } { [gets $did str] != -1 } \
+ { incr nlines} {
+ }
+ close $did
+ if { $start + $nentries > $nlines } {
+ set start [expr $nlines - $nentries]
+ }
+ }
+ }
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ if { $noclean == 0 } {
+ cleanup $testdir $env
+ }
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test001.check
+ }
+ puts "\tTest0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ if { $start != 0 && $tnum != 90 } {
+ # Skip over "start" entries
+ for { set count 0 } { $count < $start } { incr count } {
+ gets $did str
+ }
+ set count 0
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ # Record numbers wrap at 32 bits (used by the queue
+ # wrap runs); key 0 is not a valid record number,
+ # so step past it.
+ set key [expr $count + 1 + $start]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ if { $count % 50 == 0 } {
+ error_check_good txn_checkpoint($count) \
+ [$env txn_checkpoint] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for success
+ set ret [$db get -get_both $key [pad_data $method $str]]
+ error_check_good \
+ getboth $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for failure
+ set ret [$db get -get_both $key [pad_data $method BAD$str]]
+ error_check_good getbothBAD [llength $ret] 0
+
+ incr count
+ }
+ close $did
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ #
+ # dump_file should just have been "get" calls, so
+ # aborting a get should really be a no-op. Abort
+ # just for the fun of it.
+ if { $txnenv == 1 } {
+ error_check_good txn [$t abort] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ # If this is test 90, we're checking wrap and we really
+ # only added nentries number of items starting at start.
+ # However, if this isn't 90, then we started at start and
+ # added an addition nentries number of items.
+ if { $tnum == 90 } {
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j [expr $i + $start]
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ } else {
+ for { set i 1 } { $i <= $nentries + $start } {incr i} {
+ puts $oid $i
+ }
+ }
+ close $oid
+ } else {
+ set q q
+ # We assume that when this is used with start != 0, the
+ # test database accumulates data
+ filehead [expr $nentries + $start] $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test0$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test001; data must be the reverse of the key
+proc test001.check { key data } {
+ # Verify one dumped pair: the data must be the reverse of the key,
+ # matching what test001 stored for non-record-based methods.
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc test001_recno.check { key data } {
+ # Verify one dumped pair for record-based methods: the data must
+ # match what test001 recorded in kvals for this record number.
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test002.tcl b/storage/bdb/test/test002.tcl
new file mode 100644
index 00000000000..bc28994d6a7
--- /dev/null
+++ b/storage/bdb/test/test002.tcl
@@ -0,0 +1,161 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test002.tcl,v 11.19 2002/05/22 15:42:43 sue Exp $
+#
+# TEST test002
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
+
+proc test002 { method {nentries 10000} args } {
+ # Small keys/medium data: store the same fixed data string
+ # ($datastr) under nentries dictionary keys (or record numbers),
+ # verify each put with a get, then dump the file forward and in
+ # reverse after a close/reopen and compare against the expected
+ # key set.
+ global datastr
+ global pad_datastr
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test002.db
+ set env NULL
+ } else {
+ set testfile test002.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ puts "Test002: $method ($args) $nentries key <fixed data> pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ set pad_datastr [pad_data $method $datastr]
+ puts "\tTest002.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+
+ error_check_good get $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest002.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test002.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ # (incr i already advances i; the enclosing set is
+ # redundant but harmless.)
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest002.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test002.check \
+ dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest002.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test002.check \
+ dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test002; data should equal the fixed data string
+proc test002.check { key data } {
+ # Verify one dumped pair: every data item must equal the padded
+ # fixed data string set up by test002.
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
diff --git a/storage/bdb/test/test003.tcl b/storage/bdb/test/test003.tcl
new file mode 100644
index 00000000000..c7bfe6c15ad
--- /dev/null
+++ b/storage/bdb/test/test003.tcl
@@ -0,0 +1,210 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test003.tcl,v 11.25 2002/05/22 18:32:18 sue Exp $
+#
+# TEST test003
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Take the source files and dbtest executable and enter their names
+# TEST as the key with their contents as data. After all are entered,
+# TEST retrieve all; compare output to original. Close file, reopen, do
+# TEST retrieve and re-verify.
+proc test003 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if {[is_fixed_length $method] == 1} {
+ puts "Test003 skipping for method $method"
+ return
+ }
+ puts "Test003: $method ($args) filename=key filecontents=data pairs"
+
+ # Create the database and open the dictionary
+ set limit 0
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test003.db
+ set env NULL
+ } else {
+ set testfile test003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set limit 100
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test003_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc test003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ if { $limit } {
+ if { [llength $file_list] > $limit } {
+ set file_list [lrange $file_list 1 $limit]
+ }
+ }
+ set len [llength $file_list]
+ puts "\tTest003.a: put/get loop $len entries"
+ set count 0
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid [pad_data $method $data]
+ }
+ close $fid
+
+ error_check_good \
+ Test003:diff($f,$t4) [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest003.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the entries in the
+ # current directory
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $count} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest003.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_bin_file_direction "-first" "-next"
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest003.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_bin_file_direction "-last" "-prev"
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test003; key should be file name; data should be contents
+proc test003.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test003:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc test003_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+ set fname $names($binfile)
+ error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+ error_check_good Test003:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
diff --git a/storage/bdb/test/test004.tcl b/storage/bdb/test/test004.tcl
new file mode 100644
index 00000000000..7bea6f88eca
--- /dev/null
+++ b/storage/bdb/test/test004.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test004.tcl,v 11.21 2002/05/22 18:32:35 sue Exp $
+#
+# TEST test004
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database.
+# TEST Read through the database sequentially using cursors and
+# TEST delete each element.
+proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set tnum test00$reopen
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$tnum.db
+ set env NULL
+ } else {
+ set testfile $tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "$tnum:\
+ $method ($args) $nentries delete small key; medium data pairs"
+ if {$reopen == 5} {
+ puts "(with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set kvals ""
+ puts "\tTest00$reopen.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ lappend kvals $str
+ } else {
+ set key $str
+ }
+
+ set datastr [ make_data_str $str ]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tnum:put" $ret \
+ [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+ if { $build_only == 1 } {
+ return $db
+ }
+ if { $reopen == 5 } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+ puts "\tTest00$reopen.b: get/delete loop"
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+ set outf [open $t1 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+
+ set count 0
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ if { [is_record_based $method] == 1 } {
+ set datastr \
+ [make_data_str [lindex $kvals [expr $k - 1]]]
+ } else {
+ set datastr [make_data_str $k]
+ }
+ error_check_good $tnum:$k $d2 [pad_data $method $datastr]
+ puts $outf $k
+ $c del
+ if { [is_record_based $method] == 1 && \
+ $do_renumber == 1 } {
+ set kvals [lreplace $kvals 0 0]
+ }
+ incr count
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ error_check_good test00$reopen:keys_deleted $count $nentries
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test005.tcl b/storage/bdb/test/test005.tcl
new file mode 100644
index 00000000000..f3e37f2149d
--- /dev/null
+++ b/storage/bdb/test/test005.tcl
@@ -0,0 +1,19 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test005.tcl,v 11.7 2002/01/11 15:53:40 bostic Exp $
+#
+# TEST test005
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database; close
+# TEST it and reopen it. Then read through the database sequentially
+# TEST using cursors and delete each element.
+proc test005 { method {nentries 10000} args } {
+ eval {test004 $method $nentries 5 0} $args
+}
diff --git a/storage/bdb/test/test006.tcl b/storage/bdb/test/test006.tcl
new file mode 100644
index 00000000000..fbaebfe8ac8
--- /dev/null
+++ b/storage/bdb/test/test006.tcl
@@ -0,0 +1,150 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test006.tcl,v 11.19 2002/05/22 15:42:44 sue Exp $
+#
+# TEST test006
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Keyed delete and verify
+# TEST
+# TEST Keyed delete test.
+# TEST Create database.
+# TEST Go through database, deleting all entries by key.
+proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { $tnum < 10 } {
+ set tname Test00$tnum
+ set dbname test00$tnum
+ } else {
+ set tname Test0$tnum
+ set dbname test0$tnum
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$dbname.db
+ set env NULL
+ } else {
+ set testfile $dbname.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts -nonewline "$tname: $method ($args) "
+ puts -nonewline "$nentries equal small key; medium data pairs"
+ if {$reopen == 1} {
+ puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1 ]
+ } else {
+ set key $str
+ }
+
+ set datastr [make_data_str $str]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tname: put $datastr got $ret" \
+ $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ if { $reopen == 1 } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+ set count 0
+ set did [open $dict]
+ set key 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { $do_renumber == 1 } {
+ set key 1
+ } elseif { [is_record_based $method] == 1 } {
+ incr key
+ } else {
+ set key $str
+ }
+
+ set datastr [make_data_str $str]
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tname: get $datastr got $ret" \
+ $ret [list [list $key [pad_data $method $datastr]]]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del:$key $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test007.tcl b/storage/bdb/test/test007.tcl
new file mode 100644
index 00000000000..1e99d107a2d
--- /dev/null
+++ b/storage/bdb/test/test007.tcl
@@ -0,0 +1,19 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test007.tcl,v 11.8 2002/01/11 15:53:40 bostic Exp $
+#
+# TEST test007
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Keyed delete
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST database and reopen it. Then issues delete by key for each
+# TEST entry.
+proc test007 { method {nentries 10000} {tnum 7} args} {
+ eval {test006 $method $nentries 1 $tnum} $args
+}
diff --git a/storage/bdb/test/test008.tcl b/storage/bdb/test/test008.tcl
new file mode 100644
index 00000000000..0af97a40110
--- /dev/null
+++ b/storage/bdb/test/test008.tcl
@@ -0,0 +1,200 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test008.tcl,v 11.23 2002/05/22 15:42:45 sue Exp $
+#
+# TEST test008
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Loop through keys by steps (which change)
+# TEST ... delete each key at step
+# TEST ... add each key back
+# TEST ... change step
+# TEST Confirm that overflow pages are getting reused
+# TEST
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, begin
+# TEST looping through the entries; deleting some pairs and then readding them.
+proc test008 { method {reopen 8} {debug 0} args} {
+ source ./include.tcl
+
+ set tnum test00$reopen
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test00$reopen skipping for method $method"
+ return
+ }
+
+ puts -nonewline "$tnum: $method filename=key filecontents=data pairs"
+ if {$reopen == 9} {
+ puts "(with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$tnum.db
+ set env NULL
+ } else {
+ set testfile $tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+
+ set count 0
+ puts "\tTest00$reopen.a: Initial put/get loop"
+ foreach f $file_list {
+ set names($count) $f
+ set key $f
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file $db $txn $pflags $f
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ get_file $db $txn $gflags $f $t4
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good Test00$reopen:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ if {$reopen == 9} {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now we will get step through keys again (by increments) and
+ # delete all the entries, then re-insert them.
+
+ puts "\tTest00$reopen.b: Delete re-add loop"
+ foreach i "1 2 4 8 16" {
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db del} $txn {$names($ndx)}]
+ error_check_good db_del:$names($ndx) $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file $db $txn $pflags $names($ndx)
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ if {$reopen == 9} {
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now, reopen the file and make sure the key/data pairs look right.
+ puts "\tTest00$reopen.c: Dump contents forward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 test008.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest00$reopen.d: Dump contents backward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ filesort $t1 $t3
+
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ error_check_good close:$db [$db close] 0
+}
+
+proc test008.check { binfile tmpfile } {
+ global tnum
+ source ./include.tcl
+
+ error_check_good diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
diff --git a/storage/bdb/test/test009.tcl b/storage/bdb/test/test009.tcl
new file mode 100644
index 00000000000..7ef46d8c818
--- /dev/null
+++ b/storage/bdb/test/test009.tcl
@@ -0,0 +1,18 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test009.tcl,v 11.8 2002/05/22 15:42:45 sue Exp $
+#
+# TEST test009
+# TEST Small keys/large data
+# TEST Same as test008; close and reopen database
+# TEST
+# TEST Check that we reuse overflow pages. Create database with lots of
+# TEST big key/data pairs. Go through and delete and add keys back
+# TEST randomly. Then close the DB and make sure that we have everything
+# TEST we think we should.
+proc test009 { method args} {
+ eval {test008 $method 9 0} $args
+}
diff --git a/storage/bdb/test/test010.tcl b/storage/bdb/test/test010.tcl
new file mode 100644
index 00000000000..0b5f5531795
--- /dev/null
+++ b/storage/bdb/test/test010.tcl
@@ -0,0 +1,176 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test010.tcl,v 11.20 2002/06/11 14:09:56 sue Exp $
+#
+# TEST test010
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
+proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
+ source ./include.tcl
+
+ set omethod $method
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries \
+ small $ndups dup key/data pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good "Test0$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test0$tnum:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.a: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.b: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test011.tcl b/storage/bdb/test/test011.tcl
new file mode 100644
index 00000000000..63e2203efe4
--- /dev/null
+++ b/storage/bdb/test/test011.tcl
@@ -0,0 +1,470 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test011.tcl,v 11.27 2002/06/11 14:09:56 sue Exp $
+#
+# TEST test011
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+# TEST To test off-page duplicates, run with small pagesize.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST Then do some key_first/key_last add_before, add_after operations.
+# TEST This does not work for recno
+# TEST
+# TEST To test if dups work when they fall off the main page, run this with
+# TEST a very tiny page size.
+proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
+ global dlist
+ global rand_init
+ source ./include.tcl
+
+ set dlist ""
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ test011_recno $method $nentries $tnum $args
+ return
+ }
+ if {$ndups < 5} {
+ set ndups 5
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "Test0$tnum: $method $nentries small $ndups dup "
+ puts "key/data pairs, cursor ops"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create \
+ -mode 0644} [concat $args "-dup"] {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ # We will add dups with values 1, 3, ... $ndups. Then we'll add
+ # 0 and $ndups+1 using keyfirst/keylast. We'll add 2 and 4 using
+ # add before and add after.
+ puts "\tTest0$tnum.a: put and get duplicate keys."
+ set i ""
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ lappend dlist $i
+ }
+ set maxodd $i
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$str $datastr}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str ]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+
+ error_check_good Test0$tnum:put $d $str
+ set id [ id_of $datastr ]
+ error_check_good Test0$tnum:dup# $id $x
+ incr x 2
+ }
+ error_check_good Test0$tnum:numdups $x $maxodd
+ error_check_good curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: \
+ traverse entire file checking duplicates before close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.c: \
+ traverse entire file checking duplicates after close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.d: Testing key_first functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-keyfirst" 0 0
+ set dlist [linsert $dlist 0 0]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest0$tnum.e: Testing key_last functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-keylast" [expr $maxodd - 1] 0
+ lappend dlist [expr $maxodd - 1]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest0$tnum.f: Testing add_before functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-before" 2 3
+ set dlist [linsert $dlist 2 2]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest0$tnum.g: Testing add_after functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-after" 4 4
+ set dlist [linsert $dlist 4 4]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
+
+proc add_dup {db txn nentries flag dataval iter} {
+ source ./include.tcl
+
+ set dbc [eval {$db cursor} $txn]
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set datastr $dataval:$str
+ set ret [$dbc get "-set" $str]
+ error_check_bad "cget(SET)" [is_substr $ret Error] 1
+ for { set i 1 } { $i < $iter } { incr i } {
+ set ret [$dbc get "-next"]
+ error_check_bad "cget(NEXT)" [is_substr $ret Error] 1
+ }
+
+ if { [string compare $flag "-before"] == 0 ||
+ [string compare $flag "-after"] == 0 } {
+ set ret [$dbc put $flag $datastr]
+ } else {
+ set ret [$dbc put $flag $str $datastr]
+ }
+ error_check_good "$dbc put $flag" $ret 0
+ incr count
+ }
+ close $did
+ $dbc close
+}
+
+proc test011_recno { method {nentries 10000} {tnum 11} largs } {
+ global dlist
+ source ./include.tcl
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+ set renum [is_rrecno $method]
+
+ puts "Test0$tnum: \
+ $method ($largs) $nentries test cursor insert functionality"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $largs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $largs $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append largs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ if {$renum == 1} {
+ append largs " -renumber"
+ }
+ set db [eval {berkdb_open \
+ -create -mode 0644} $largs {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # The basic structure of the test is that we pick a random key
+ # in the database and then add items before, after, ?? it. The
+ # trickiness is that with RECNO, these are not duplicates, they
+ # are creating new keys. Therefore, every time we do this, the
+ # keys assigned to other values change. For this reason, we'll
+ # keep the database in tcl as a list and insert properly into
+ # it to verify that the right thing is happening. If we do not
+ # have renumber set, then the BEFORE and AFTER calls should fail.
+
+ # Seed the database with an initial record
+ gets $did str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {1 [chop_data $method $str]}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good put $ret 0
+ set count 1
+
+ set dlist "NULL $str"
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ puts "\tTest0$tnum.a: put and get entries"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Pick a random key
+ set key [berkdb random_int 1 $count]
+ set ret [$dbc get -set $key]
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:SET:key $k $key
+ error_check_good \
+ cget:SET $d [pad_data $method [lindex $dlist $key]]
+
+ # Current
+ set ret [$dbc put -current [chop_data $method $str]]
+ error_check_good cput:$key $ret 0
+ set dlist [lreplace $dlist $key $key [pad_data $method $str]]
+
+ # Before
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+
+ if { $renum == 1 } {
+ set ret [$dbc put \
+ -before [chop_data $method $str]]
+ error_check_good cput:$key:BEFORE $ret $key
+ set dlist [linsert $dlist $key $str]
+ incr count
+
+ # After
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+ set ret [$dbc put \
+ -after [chop_data $method $str]]
+ error_check_good cput:$key:AFTER $ret [expr $key + 1]
+ set dlist [linsert $dlist [expr $key + 1] $str]
+ incr count
+ }
+
+ # Now verify that the keys are in the right place
+ set i 0
+ for {set ret [$dbc get "-set" $key]} \
+ {[string length $ret] != 0 && $i < 3} \
+ {set ret [$dbc get "-next"] } {
+ set check_key [expr $key + $i]
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good cget:$key:loop $k $check_key
+
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:data $d \
+ [pad_data $method [lindex $dlist $check_key]]
+ incr i
+ }
+ }
+ close $did
+ error_check_good cclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Create check key file.
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $count} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test011_check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good Test0$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test011_check \
+ dump_file_direction "-first" "-next"
+ error_check_good Test0$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test011_check \
+ dump_file_direction "-last" "-prev"
+
+ filesort $t1 $t3 -n
+ error_check_good Test0$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+}
+
+proc test011_check { key data } {
+ global dlist
+
+ error_check_good "get key $key" $data [lindex $dlist $key]
+}
diff --git a/storage/bdb/test/test012.tcl b/storage/bdb/test/test012.tcl
new file mode 100644
index 00000000000..e7237d27267
--- /dev/null
+++ b/storage/bdb/test/test012.tcl
@@ -0,0 +1,139 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test012.tcl,v 11.20 2002/05/22 15:42:46 sue Exp $
+#
+# TEST test012
+# TEST Large keys/small data
+# TEST Same as test003 except use big keys (source files and
+# TEST executables) and small data (the file/executable names).
+# TEST
+# TEST Take the source files and dbtest executable and enter their contents
+# TEST as the key with their names as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
+proc test012 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test012 skipping for method $method"
+ return
+ }
+
+ puts "Test012: $method ($args) filename=data filecontents=key pairs"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test012.db
+ set env NULL
+ } else {
+ set testfile test012.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+
+ puts "\tTest012.a: put/get loop"
+ set count 0
+ foreach f $file_list {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file_as_key $db $txn $pflags $f
+
+ set kd [get_file_as_key $db $txn $gflags $f]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest012.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_binkey_file $db $txn $t1 test012.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the data to see if they match the .o and dbtest files
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest012.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test012.check \
+ dump_binkey_file_direction "-first" "-next"
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest012.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test012.check\
+ dump_binkey_file_direction "-last" "-prev"
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test012; key should be file name; data should be contents
+proc test012.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test012:diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
diff --git a/storage/bdb/test/test013.tcl b/storage/bdb/test/test013.tcl
new file mode 100644
index 00000000000..96d7757b0d8
--- /dev/null
+++ b/storage/bdb/test/test013.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test013.tcl,v 11.23 2002/05/22 15:42:46 sue Exp $
+#
+# TEST test013
+# TEST Partial put test
+# TEST Overwrite entire records using partial puts.
+# TEST Make sure that NOOVERWRITE flag works.
+# TEST
+# TEST 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+# TEST 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+# TEST 3. Actually overwrite each one with its datum reversed.
+# TEST
+# TEST No partial testing here.
+proc test013 { method {nentries 10000} args } {
+ global errorCode
+ global errorInfo
+ global fixed_pad
+ global fixed_len
+
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test013.db
+ set env NULL
+ } else {
+ set testfile test013.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test013_recno.check
+ append gflags " -recno"
+ global kvals
+ } else {
+ set checkfunc test013.check
+ }
+ puts "\tTest013.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will try to overwrite each datum, but set the
+ # NOOVERWRITE flag.
+ puts "\tTest013.b: overwrite values with NOOVERWRITE flag."
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $key [chop_data $method $str]}]
+ error_check_good put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Value should be unchanged.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will replace each item with its datum capitalized.
+ puts "\tTest013.c: overwrite values with capitalized datum"
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set rstr [string toupper $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $rstr]}]
+ error_check_good put $r 0
+
+ # Value should be changed.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $rstr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest013.d: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest013.e: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest013.f: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev"
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test013; keys and data are identical
+proc test013.check { key data } {
+ error_check_good \
+ "key/data mismatch for $key" $data [string toupper $key]
+}
+
+proc test013_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good \
+ "data mismatch for $key" $data [string toupper $kvals($key)]
+}
diff --git a/storage/bdb/test/test014.tcl b/storage/bdb/test/test014.tcl
new file mode 100644
index 00000000000..00d69d3352e
--- /dev/null
+++ b/storage/bdb/test/test014.tcl
@@ -0,0 +1,253 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test014.tcl,v 11.24 2002/05/22 15:42:46 sue Exp $
+#
+# TEST test014
+# TEST Exercise partial puts on short data
+# TEST Run 5 combinations of numbers of characters to replace,
+# TEST and number of times to increase the size by.
+# TEST
+# TEST Partial put test, small data, replacing with same size. The data set
+# TEST consists of the first nentries of the dictionary. We will insert them
+# TEST (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+# TEST we'll try to perform partial puts of some characters at the beginning,
+# TEST some at the end, and some at the middle.
+proc test014 { method {nentries 10000} args } {
+ set fixed 0
+ set args [convert_args $method $args]
+
+ if { [is_fixed_length $method] == 1 } {
+ set fixed 1
+ }
+
+ puts "Test014: $method ($args) $nentries equal key/data pairs, put test"
+
+ # flagp indicates whether this is a postpend or a
+ # normal partial put
+ set flagp 0
+
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 2 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ eval {test014_body $method $flagp 2 16 $nentries} $args
+ if { $fixed == 0 } {
+ eval {test014_body $method $flagp 0 1 $nentries} $args
+ eval {test014_body $method $flagp 0 4 $nentries} $args
+ eval {test014_body $method $flagp 0 128 $nentries} $args
+
+ # POST-PENDS :
+	# partial put data after the end of the existing record
+ # chars: number of empty spaces that will be padded with null
+ # increase: is the length of the str to be appended (after pad)
+ #
+ set flagp 1
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 4 1 $nentries} $args
+ eval {test014_body $method $flagp 128 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ }
+ puts "Test014 complete."
+}
+
+proc test014_body { method flagp chars increase {nentries 10000} args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 && $chars != $increase } {
+ puts "Test014: $method: skipping replace\
+ $chars chars with string $increase times larger."
+ return
+ }
+
+ if { $flagp == 1} {
+ puts "Test014: Postpending string of len $increase with \
+ gap $chars."
+ } else {
+ puts "Test014: Replace $chars chars with string \
+ $increase times larger"
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test014.db
+ set env NULL
+ } else {
+ set testfile test014.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set gflags ""
+ set pflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest014.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $flagp == 1 } {
+ # this is for postpend only
+ global dvals
+
+ # initial put
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $str}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbput $ret 0
+
+ set offset [string length $str]
+
+ # increase is the actual number of new bytes
+ # to be postpended (besides the null padding)
+ set data [repeat "P" $increase]
+
+ # chars is the amount of padding in between
+ # the old data and the new
+ set len [expr $offset + $chars + $increase]
+ set dvals($key) [binary format \
+ a[set offset]x[set chars]a[set increase] \
+ $str $data]
+ set offset [expr $offset + $chars]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put -partial [list $offset 0]} \
+ $txn {$key $data}]
+ error_check_good dbput:post $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ } else {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ partial_put $method $db $txn \
+ $gflags $key $str $chars $increase
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest014.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test014.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest014.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env \
+ $t1 test014.check dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest014.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 \
+ test014.check dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test014; keys and data are identical
+proc test014.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
diff --git a/storage/bdb/test/test015.tcl b/storage/bdb/test/test015.tcl
new file mode 100644
index 00000000000..f129605a405
--- /dev/null
+++ b/storage/bdb/test/test015.tcl
@@ -0,0 +1,276 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test015.tcl,v 11.27 2002/05/31 16:57:25 sue Exp $
+#
+# TEST test015
+# TEST Partial put test
+# TEST Partial put test where the key does not initially exist.
+proc test015 { method {nentries 7500} { start 0 } args } {
+ global fixed_len testdir
+
+ set low_range 50
+ set mid_range 100
+ set high_range 1000
+
+ if { [is_fixed_length $method] } {
+ set low_range [expr $fixed_len/2 - 2]
+ set mid_range [expr $fixed_len/2]
+ set high_range $fixed_len
+ }
+
+ set t_table {
+ { 1 { 1 1 1 } }
+ { 2 { 1 1 5 } }
+ { 3 { 1 1 $low_range } }
+ { 4 { 1 $mid_range 1 } }
+ { 5 { $mid_range $high_range 5 } }
+ { 6 { 1 $mid_range $low_range } }
+ }
+
+ puts "Test015: \
+ $method ($args) $nentries equal key/data pairs, partial put test"
+ test015_init
+ if { $start == 0 } {
+ set start { 1 2 3 4 5 6 }
+ }
+ foreach entry $t_table {
+ set this [lindex $entry 0]
+ if { [lsearch $start $this] == -1 } {
+ continue
+ }
+ puts -nonewline "$this: "
+ eval [concat test015_body $method [lindex $entry 1] \
+ $nentries $args]
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+puts "Verifying testdir $testdir"
+
+ error_check_good verify [verify_dir $testdir "\tTest015.e: "] 0
+ }
+}
+
+proc test015_init { } {
+ global rand_init
+
+ berkdb srand $rand_init
+}
+
+proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
+ global dvals
+ global fixed_len
+ global testdir
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set checkfunc test015.check
+
+ if { [is_fixed_length $method] && \
+ [string compare $omethod "-recno"] == 0} {
+ # is fixed recno method
+ set checkfunc test015.check
+ }
+
+ puts "Put $rcount strings random offsets between $off_low and $off_hi"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test015.db
+ set env NULL
+ } else {
+ set testfile test015.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries > 5000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set retdir $testdir
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ puts "\tTest015.a: put/get loop for $nentries entries"
+
+ # Here is the loop where we put and get each key/data pair
+ # Each put is a partial put of a record that does not exist.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ if { [string length $str] > $fixed_len } {
+ continue
+ }
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { 0 } {
+ set data [replicate $str $rcount]
+ set off [ berkdb random_int $off_low $off_hi ]
+ set offn [expr $off + 1]
+ if { [is_fixed_length $method] && \
+ [expr [string length $data] + $off] >= $fixed_len} {
+ set data [string range $data 0 [expr $fixed_len-$offn]]
+ }
+ set dvals($key) [partial_shift $data $off right]
+ } else {
+ set data [chop_data $method [replicate $str $rcount]]
+
+ # This is a hack. In DB we will store the records with
+ # some padding, but these will get lost if we just return
+ # them in TCL. As a result, we're going to have to hack
+ # get to check for 0 padding and return a list consisting
+ # of the number of 0's and the actual data.
+ set off [ berkdb random_int $off_low $off_hi ]
+
+ # There is no string concatenation function in Tcl
+ # (although there is one in TclX), so we have to resort
+ # to this hack. Ugh.
+ set slen [string length $data]
+ if {[is_fixed_length $method] && \
+ $slen > $fixed_len - $off} {
+ set slen [expr $fixed_len - $off] ;# bug fix: was "set $slen", which created a stray variable and never clipped slen
+ }
+ set a "a"
+ set dvals($key) [pad_data \
+ $method [eval "binary format x$off$a$slen" {$data}]]
+ }
+ if {[is_fixed_length $method] && \
+ [string length $data] > ($fixed_len - $off)} {
+ set slen [expr $fixed_len - $off]
+ set data [eval "binary format a$slen" {$data}]
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn \
+ {-partial [list $off [string length $data]] $key $data}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest015.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest015.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest015.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 \
+ $checkfunc dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ unset dvals
+}
+
+# Check function for test015; keys and data are identical
+proc test015.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ binary scan $data "c[string length $data]" a
+ binary scan $dvals($key) "c[string length $dvals($key)]" b
+ error_check_good "mismatch on padding for key $key" $a $b
+}
+
+proc test015.fixed.check { key data } {
+ global dvals
+ global fixed_len
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ if { [string length $data] > $fixed_len } {
+ error_check_bad \
+ "data length:[string length $data] \
+ for fixed:$fixed_len" 1 1
+ }
+ puts "$data : $dvals($key)"
+ error_check_good compare_data($data,$dvals($key)) \
+ $data $dvals($key)
+}
diff --git a/storage/bdb/test/test016.tcl b/storage/bdb/test/test016.tcl
new file mode 100644
index 00000000000..af289f866f4
--- /dev/null
+++ b/storage/bdb/test/test016.tcl
@@ -0,0 +1,207 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test016.tcl,v 11.23 2002/05/22 15:42:46 sue Exp $
+#
+# TEST test016
+# TEST Partial put test
+# TEST Partial put where the datum gets shorter as a result of the put.
+# TEST
+# TEST Partial put test where partial puts make the record smaller.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, go back and do partial puts,
+# TEST replacing a random-length string with the key value.
+# TEST Then verify.
+
+proc test016 { method {nentries 10000} args } {
+ global datastr
+ global dvals
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Test016: skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test016.db
+ set env NULL
+ } else {
+ set testfile test016.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test016: $method ($args) $nentries partial put shorten"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest016.a: put/get loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $datastr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Next we will do a partial put replacement, making the data
+ # shorter
+ puts "\tTest016.b: partial put loop"
+ set did [open $dict]
+ set count 0
+ set len [string length $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set repl_len [berkdb random_int [string length $key] $len]
+ set repl_off [berkdb random_int 0 [expr $len - $repl_len] ]
+ set s1 [string range $datastr 0 [ expr $repl_off - 1] ]
+ set s2 [string toupper $key]
+ set s3 [string range $datastr [expr $repl_off + $repl_len] end ]
+ set dvals($key) [pad_data $method $s1$s2$s3]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {-partial \
+ [list $repl_off $repl_len] $key [chop_data $method $s2]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ put $ret [list [list $key [pad_data $method $s1$s2$s3]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest016.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test016.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest016.d: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-first" "-next"
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest016.e: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-last" "-prev"
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test016; data should be whatever is set in dvals
+proc test016.check { key data } {
+ global datastr
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
diff --git a/storage/bdb/test/test017.tcl b/storage/bdb/test/test017.tcl
new file mode 100644
index 00000000000..1f99aa328fb
--- /dev/null
+++ b/storage/bdb/test/test017.tcl
@@ -0,0 +1,306 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test017.tcl,v 11.23 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test017
+# TEST Basic offpage duplicate test.
+# TEST
+# TEST Run duplicates with small page size so that we test off page duplicates.
+# TEST Then after we have an off-page database, test with overflow pages too.
+proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ if { [lindex $args $pgindex] > 8192 } {
+ puts "Test0$tnum: Skipping for large pagesizes"
+ return
+ }
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ set file_list [get_file_list 1]
+ if { $txnenv == 1 } {
+ set flen [llength $file_list]
+ reduce_dups flen ndups
+ set file_list [lrange $file_list 0 $flen]
+ }
+ puts "Test0$tnum: $method ($args) Off page duplicate tests with $ndups duplicates"
+
+ set ovfl ""
+ # Here is the loop where we put and get each key/data pair
+ puts -nonewline "\tTest0$tnum.a: Creating duplicates with "
+ if { $contents != 0 } {
+ puts "file contents as key/data"
+ } else {
+ puts "file name as key/data"
+ }
+ foreach f $file_list {
+ if { $contents != 0 } {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ #
+ # Prepend file name to guarantee uniqueness
+ set filecont [read $fid]
+ set str $f:$filecont
+ close $fid
+ } else {
+ set str $f
+ }
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ #
+ # Save 10% files for overflow test
+ #
+ if { $contents == 0 && [expr $count % 10] == 0 } {
+ lappend ovfl $f
+ }
+ # Now retrieve all the keys matching this key
+ set ret [$db get $str]
+ error_check_bad $f:dbget_dups [llength $ret] 0
+ error_check_good $f:dbget_dups1 [llength $ret] $ndups
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ if {[string length $d] == 0} {
+ break
+ }
+ error_check_good "Test0$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test0$tnum:$f:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ set oid [open $t2.tmp w]
+ set o1id [open $t4.tmp w]
+ foreach f $file_list {
+ for {set i 1} {$i <= $ndups} {incr i} {
+ puts $o1id $f
+ }
+ puts $oid $f
+ }
+ close $oid
+ close $o1id
+ filesort $t2.tmp $t2
+ filesort $t4.tmp $t4
+ fileremove $t2.tmp
+ fileremove $t4.tmp
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if {$contents == 0} {
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now compare the keys to see if they match the file names
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test017.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t4) [filecmp $t3 $t4] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.c: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if {$contents == 0} {
+ # Now compare the keys to see if they match the filenames
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.d: Verify off page duplicates and overflow status"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+ if { [is_btree $method] } {
+ error_check_bad stat:offpage \
+ [is_substr $stat "{{Internal pages} 0}"] 1
+ }
+ if {$contents == 0} {
+ # This check doesn't work in hash, since overflow
+ # pages count extra pages in buckets as well as true
+ # P_OVERFLOW pages.
+ if { [is_hash $method] == 0 } {
+ error_check_good overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+
+ #
+ # If doing overflow test, do that now. Else we are done.
+ # Add overflow pages by adding a large entry to a duplicate.
+ #
+ if { [llength $ovfl] == 0} {
+ error_check_good db_close [$db close] 0
+ return
+ }
+
+ puts "\tTest0$tnum.e: Add overflow duplicate entries"
+ set ovfldup [expr $ndups + 1]
+ foreach f $ovfl {
+ #
+ # This is just like put_file, but prepends the dup number
+ #
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set fdata [read $fid]
+ close $fid
+ set data $ovfldup:$fdata:$fdata:$fdata:$fdata
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$f $data}]
+ error_check_good ovfl_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest0$tnum.f: Verify overflow duplicate entries"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist $ovfldup
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ set stat [$db stat]
+ if { [is_hash [$db get_type]] } {
+ error_check_bad overflow1_hash [is_substr $stat \
+ "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad \
+ overflow1 [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Check function; verify data contains key
+proc test017.check { key data } {
+ error_check_good "data mismatch for key $key" $key [data_of $data]
+}
diff --git a/storage/bdb/test/test018.tcl b/storage/bdb/test/test018.tcl
new file mode 100644
index 00000000000..8fc8a14e95e
--- /dev/null
+++ b/storage/bdb/test/test018.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test018.tcl,v 11.6 2002/01/11 15:53:43 bostic Exp $
+#
+# TEST test018
+# TEST Offpage duplicate test
+# TEST Key_{first,last,before,after} offpage duplicates.
+# TEST Run duplicates with small page size so that we test off page
+# TEST duplicates.
+proc test018 { method {nentries 10000} args} {
+ puts "Test018: Off page duplicate tests"
+ eval {test011 $method $nentries 19 18 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test019.tcl b/storage/bdb/test/test019.tcl
new file mode 100644
index 00000000000..aa3a58a0bcd
--- /dev/null
+++ b/storage/bdb/test/test019.tcl
@@ -0,0 +1,131 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test019.tcl,v 11.21 2002/05/22 15:42:47 sue Exp $
+#
+# TEST test019
+# TEST Partial get test.
+proc test019 { method {nentries 10000} args } {
+ global fixed_len
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test019.db
+ set env NULL
+ } else {
+ set testfile test019.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test019: $method ($args) $nentries partial get test"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ berkdb srand $rand_init
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest019.a: put/get loop"
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set repl [berkdb random_int $fixed_len 100]
+ set data [chop_data $method [replicate $str $repl]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {-nooverwrite $key $data}]
+ error_check_good dbput:$key $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ dbget:$key $ret [list [list $key [pad_data $method $data]]]
+ set kvals($key) $repl
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ close $did
+
+ puts "\tTest019.b: partial get loop"
+ set did [open $dict]
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set data [pad_data $method [replicate $str $kvals($key)]]
+
+ set maxndx [expr [string length $data] - 1]
+
+ set beg [berkdb random_int 0 [expr $maxndx - 1]]
+ set len [berkdb random_int 0 [expr $maxndx * 2]]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} \
+ $txn {-partial [list $beg $len]} $gflags {$key}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # In order for tcl to handle this, we have to overwrite the
+ # last character with a NULL. That makes the length one less
+ # than we expect.
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good dbget_key $k $key
+
+ error_check_good dbget_data $d \
+ [string range $data $beg [expr $beg + $len - 1]]
+
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
diff --git a/storage/bdb/test/test020.tcl b/storage/bdb/test/test020.tcl
new file mode 100644
index 00000000000..9b6d939acad
--- /dev/null
+++ b/storage/bdb/test/test020.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test020.tcl,v 11.17 2002/05/22 15:42:47 sue Exp $
+#
+# TEST test020
+# TEST In-Memory database tests.
+proc test020 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test020 skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # Check if we are using an env.
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test020: $method ($args) $nentries equal key/data pairs"
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test020_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test020.check
+ }
+ puts "\tTest020.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest020.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test020:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test020; keys and data are identical
+proc test020.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test020_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test021.tcl b/storage/bdb/test/test021.tcl
new file mode 100644
index 00000000000..56936da389a
--- /dev/null
+++ b/storage/bdb/test/test021.tcl
@@ -0,0 +1,162 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test021.tcl,v 11.15 2002/05/22 15:42:47 sue Exp $
+#
+# TEST test021
+# TEST Btree range tests.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self, reversed as key and self as data.
+# TEST After all are entered, retrieve each using a cursor SET_RANGE, and
+# TEST getting about 20 keys sequentially after it (in some cases we'll
+# TEST run out towards the end of the file).
+proc test021 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test021.db
+ set env NULL
+ } else {
+ set testfile test021.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test021: $method ($args) $nentries equal key/data pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test021_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test021.check
+ }
+ puts "\tTest021.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key [reverse $str]
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and retrieve about 20
+ # records after it.
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest021.b: test ranges"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set did [open $dict]
+ set i 0
+ while { [gets $did str] != -1 && $i < $count } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key [reverse $str]
+ }
+
+ set r [$dbc get -set_range $key]
+ error_check_bad dbc_get:$key [string length $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+
+ for { set nrecs 0 } { $nrecs < 20 } { incr nrecs } {
+ set r [$dbc get "-next"]
+ # no error checking because we may run off the end
+ # of the database
+ if { [llength $r] == 0 } {
+ continue;
+ }
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+ }
+ incr i
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
+
+# Check function for test021; keys and data are reversed
+proc test021.check { key data } {
+ error_check_good "key/data mismatch for $key" $data [reverse $key]
+}
+
+proc test021_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test022.tcl b/storage/bdb/test/test022.tcl
new file mode 100644
index 00000000000..d25d7ecdffe
--- /dev/null
+++ b/storage/bdb/test/test022.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test022.tcl,v 11.14 2002/05/22 15:42:48 sue Exp $
+#
+# TEST test022
+# TEST Test of DB->getbyteswapped().
+proc test022 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test022 ($args) $omethod: DB->getbyteswapped()"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile1 "$testdir/test022a.db"
+ set testfile2 "$testdir/test022b.db"
+ set env NULL
+ } else {
+ set testfile1 "test022a.db"
+ set testfile2 "test022b.db"
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create two databases, one in each byte order.
+ set db1 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 1234} $testfile1]
+ error_check_good db1_open [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 4321} $testfile2]
+ error_check_good db2_open [is_valid_db $db2] TRUE
+
+ # Call DB->get_byteswapped on both of them.
+ set db1_order [$db1 is_byteswapped]
+ set db2_order [$db2 is_byteswapped]
+
+ # Make sure that both answers are either 1 or 0,
+ # and that exactly one of them is 1.
+ error_check_good is_byteswapped_sensible_1 \
+ [expr ($db1_order == 1 && $db2_order == 0) || \
+ ($db1_order == 0 && $db2_order == 1)] 1
+
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+ puts "\tTest022 complete."
+}
diff --git a/storage/bdb/test/test023.tcl b/storage/bdb/test/test023.tcl
new file mode 100644
index 00000000000..c37539a0f55
--- /dev/null
+++ b/storage/bdb/test/test023.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test023.tcl,v 11.18 2002/05/22 15:42:48 sue Exp $
+#
+# TEST test023
+# TEST Duplicate test
+# TEST Exercise deletes and cursor operations within a duplicate set.
+# TEST Add a key with duplicates (first time on-page, second time off-page)
+# TEST Number the dups.
+# TEST Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+proc test023 { method args } {
+ global alphabet
+ global dupnum
+ global dupstr
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test023: $method delete duplicates/check cursor operations"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test023: skipping for method $omethod"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test023.db
+ set env NULL
+ } else {
+ set testfile test023.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ foreach i { onpage offpage } {
+ if { $i == "onpage" } {
+ set dupstr DUP
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ puts "\tTest023.a: Insert key w/$i dups"
+ set key "duplicate_val_test"
+ for { set count 0 } { $count < 20 } { incr count } {
+ set ret \
+ [eval {$db put} $txn $pflags {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ # Now let's get all the items and make sure they look OK.
+ puts "\tTest023.b: Check initial duplicates"
+ set dupnum 0
+ dump_file $db $txn $t1 test023.check
+
+ # Delete a couple of random items (FIRST, LAST one in middle)
+ # Make sure that current returns an error and that NEXT and
+ # PREV do the right things.
+
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ puts "\tTest023.c: Delete first and try gets"
+ # This should be the first duplicate
+ error_check_good \
+ dbc_get:SET $ret [list [list duplicate_val_test 0$dupstr]]
+
+ # Now delete it.
+ set ret [$dbc del]
+ error_check_good dbc_del:FIRST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good dbc_get:CURRENT $ret [list [list [] []]]
+
+ # Now Prev should fail
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:prev0 [llength $ret] 0
+
+ # Now 10 nexts should work to get us in the middle
+ for { set j 1 } { $j <= 10 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.d: Delete middle and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:10 $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good \
+ dbc_get:deleted $ret [list [list [] []]]
+
+ # Prev and Next should work
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 11$dupstr
+
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 9$dupstr
+
+ # Now go to the last one
+ for { set j 11 } { $j <= 19 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.e: Delete last and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:LAST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good \
+ dbc_get:deleted $ret [list [list [] []]]
+
+ # Next should fail
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next19 [llength $ret] 0
+
+ # Prev should work
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 18$dupstr
+
+ # Now overwrite the current one, then count the number
+ # of data items to make sure that we have the right number.
+
+ puts "\tTest023.f: Count keys, overwrite current, count again"
+ # At this point we should have 17 keys the (initial 20 minus
+ # 3 deletes)
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor:2 [is_substr $dbc2 $db] 1
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+
+ set ret [$dbc put -current OVERWRITE]
+ error_check_good dbc_put:current $ret 0
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+ error_check_good dbc2_close [$dbc2 close] 0
+
+ # Done, delete all the keys for next iteration
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_delete $ret 0
+
+ # database should be empty
+
+ set ret [$dbc get -first]
+ error_check_good first_after_empty [llength $ret] 0
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
+
+# Check function for test023; keys and data are identical
+proc test023.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "bad key" $key duplicate_val_test
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
diff --git a/storage/bdb/test/test024.tcl b/storage/bdb/test/test024.tcl
new file mode 100644
index 00000000000..bbdc8fb2253
--- /dev/null
+++ b/storage/bdb/test/test024.tcl
@@ -0,0 +1,268 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test024.tcl,v 11.19 2002/05/22 15:42:48 sue Exp $
+#
+# TEST test024
+# TEST Record number retrieval test.
+# TEST Test the Btree and Record number get-by-number functionality.
+proc test024 { method {nentries 10000} args} {
+ source ./include.tcl
+ global rand_init
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test024: $method ($args)"
+
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test024 skipping for method HASH"
+ return
+ }
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test024.db
+ set env NULL
+ } else {
+ set testfile test024.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest024.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ puts "\tTest024.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $k]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest024.c: dump file"
+
+ # Put sorted keys in file
+ set oid [open $t1 w]
+ foreach k $sorted_keys {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ # Instead of using dump_file; get all the keys by keynum
+ set oid [open $t2 w]
+ if { [string compare $omethod "-btree"] == 0 } {
+ set do_renumber 1
+ }
+
+ set gflags " -recno"
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ close $oid
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ error_check_good Test024.c:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest024.d: close, open, and dump file"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.d:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest024.e: close, open, and dump file in reverse direction"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ # Put sorted keys in file
+ set rsorted ""
+ foreach k $sorted_keys {
+ set rsorted [linsert $rsorted 0 $k]
+ }
+ set oid [open $t1 w]
+ foreach k $rsorted {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k $count } { $k > 0 } { incr k -1 } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.e:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now try deleting elements and making sure they work
+ puts "\tTest024.f: delete test"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ while { $count > 0 } {
+ set kndx [berkdb random_int 1 $count]
+ set kval [lindex $keys [expr $kndx - 1]]
+ set recno [expr [lsearch $sorted_keys $kval] + 1]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_record_based $method] == 1 } {
+ set ret [eval {$db del} $txn {$recno}]
+ } else {
+ set ret [eval {$db del} $txn {$kval}]
+ }
+ error_check_good delete $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Remove the key from the key list
+ set ndx [expr $kndx - 1]
+ set keys [lreplace $keys $ndx $ndx]
+
+ if { $do_renumber == 1 } {
+ set r [expr $recno - 1]
+ set sorted_keys [lreplace $sorted_keys $r $r]
+ }
+
+ # Check that the keys after it have been renumbered
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $do_renumber == 1 && $recno != $count } {
+ set r [expr $recno - 1]
+ set ret [eval {$db get} $txn $gflags {$recno}]
+ error_check_good get_after_del \
+ [lindex [lindex $ret 0] 1] [lindex $sorted_keys $r]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Decrement count
+ incr count -1
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test025.tcl b/storage/bdb/test/test025.tcl
new file mode 100644
index 00000000000..180a1aa2939
--- /dev/null
+++ b/storage/bdb/test/test025.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test025.tcl,v 11.19 2002/05/24 15:24:54 sue Exp $
+#
+# TEST test025
+# TEST DB_APPEND flag test.
+proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test0$tnum: $method ($args)"
+
+ if { [string compare $omethod "-btree"] == 0 } {
+ puts "Test0$tnum skipping for method BTREE"
+ return
+ }
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test0$tnum skipping for method HASH"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ puts "\tTest0$tnum.a: put/get loop"
+ set gflags " -recno"
+ set pflags " -append"
+ set txn ""
+ set checkfunc test025_check
+
+ # Here is the loop where we put and get each key/data pair
+ set count $start
+ set nentries [expr $start + $nentries]
+ if { $count != 0 } {
+ gets $did str
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$k [chop_data $method $str]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {[chop_data $method $str]}]
+ error_check_good db_put $ret $k
+
+ set ret [eval {$db get} $txn $gflags {$k}]
+ error_check_good \
+ get $ret [list [list $k [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # The recno key will be count + 1, so when we hit
+ # UINT32_MAX - 1, reset to 0.
+ if { $count == [expr 0xfffffffe] } {
+ set count 0
+ } else {
+ incr count
+ }
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction -first -next
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction -last -prev
+}
+
+proc test025_check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good " key/data mismatch for |$key|" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test026.tcl b/storage/bdb/test/test026.tcl
new file mode 100644
index 00000000000..ce65e925d35
--- /dev/null
+++ b/storage/bdb/test/test026.tcl
@@ -0,0 +1,155 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test026.tcl,v 11.20 2002/06/11 14:09:56 sue Exp $
+#
+# TEST test026
+# TEST Small keys/medium data w/duplicates
+# TEST Put/get per key.
+# TEST Loop through keys -- delete each key
+# TEST ... test that cursors delete duplicates correctly
+# TEST
+# TEST Keyed delete test through cursor. If ndups is small; this will
+# TEST test on-page dups; if it's large, it will test off-page dups.
+proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the defaults down a bit.
+ # If we are wanting a lot of dups, set that
+ # down a bit or repl testing takes very long.
+ #
+ if { $nentries == 2000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ puts "Test0$tnum: $method ($args) $nentries keys\
+ with $ndups dups; cursor delete test"
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ puts "\tTest0$tnum.a: Put loop"
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod -dup $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < [expr $nentries * $ndups] } {
+ set datastr [ make_data_str $str ]
+ for { set j 1 } { $j <= $ndups} {incr j} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $j$datastr]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Now we will sequentially traverse the database getting each
+ # item and deleting it.
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest0$tnum.b: Get/delete loop"
+ set i 1
+ for { set ret [$dbc get -first] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ if { $i == 1 } {
+ set curkey $key
+ }
+ error_check_good seq_get:key $key $curkey
+ error_check_good \
+ seq_get:data $data [pad_data $method $i[make_data_str $key]]
+
+ if { $i == $ndups } {
+ set i 1
+ } else {
+ incr i
+ }
+
+ # Now delete the key
+ set ret [$dbc del]
+ error_check_good db_del:$key $ret 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: Verify empty file"
+ # Double check that file is now empty
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set ret [$dbc get -first]
+ error_check_good get_on_empty [string length $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test027.tcl b/storage/bdb/test/test027.tcl
new file mode 100644
index 00000000000..a0f6dfa4dcb
--- /dev/null
+++ b/storage/bdb/test/test027.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test027.tcl,v 11.7 2002/01/11 15:53:45 bostic Exp $
+#
+# TEST test027
+# TEST Off-page duplicate test
+# TEST Test026 with parameters to force off-page duplicates.
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST database and reopen it. Then issues delete by key for each
+# TEST entry.
+proc test027 { method {nentries 100} args} {
+ eval {test026 $method $nentries 100 27} $args
+}
diff --git a/storage/bdb/test/test028.tcl b/storage/bdb/test/test028.tcl
new file mode 100644
index 00000000000..a546744fdac
--- /dev/null
+++ b/storage/bdb/test/test028.tcl
@@ -0,0 +1,222 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test028.tcl,v 11.20 2002/07/01 15:03:45 krinsky Exp $
+#
+# TEST test028
+# TEST Cursor delete test
+# TEST Test put operations after deleting through a cursor.
+proc test028 { method args } {
+ global dupnum
+ global dupstr
+ global alphabet
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test028: $method put after cursor delete test"
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test028 skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ set key 10
+ } else {
+ append args " -dup"
+ set key "put_after_cursor_del"
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test028.db
+ set env NULL
+ } else {
+ set testfile test028.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set ndups 20
+ set txn ""
+ set pflags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ foreach i { offpage onpage } {
+ foreach b { bigitem smallitem } {
+ if { $i == "onpage" } {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr DUP
+ }
+ } else {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ }
+
+ if { $b == "bigitem" } {
+ set dupstr [repeat $dupstr 10]
+ }
+ puts "\tTest028: $i/$b"
+
+ puts "\tTest028.a: Insert key with single data item"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+ # Now let's get the item and make sure its OK.
+ puts "\tTest028.b: Check initial entry"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get \
+ $ret [list [list $key [pad_data $method $dupstr]]]
+
+ # Now try a put with NOOVERWRITE SET (should be error)
+ puts "\tTest028.c: No_overwrite test"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete the item with a cursor
+ puts "\tTest028.d: Delete test"
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ puts "\tTest028.e: Reput the item"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+ puts "\tTest028.f: Retrieve the item"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get $ret \
+ [list [list $key [pad_data $method $dupstr]]]
+
+ # Delete the key to set up for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+
+ # Now repeat the above set of tests with
+ # duplicates (if not RECNO).
+ if { [is_record_based $method] == 1 } {
+ continue;
+ }
+
+ puts "\tTest028.g: Insert key with duplicates"
+ for { set count 0 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $count$dupstr]}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.h: Check dups"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Try no_overwrite
+ puts "\tTest028.i: No_overwrite test"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key $dupstr}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete all the elements with a cursor
+ puts "\tTest028.j: Cursor Deletes"
+ set count 0
+ for { set ret [$dbc get -set $key] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good db_seq(key) $k $key
+ error_check_good db_seq(data) $d $count$dupstr
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+ incr count
+ if { $count == [expr $ndups - 1] } {
+ puts "\tTest028.k:\
+ Duplicate No_Overwrite test"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key $dupstr}]
+ error_check_good db_put [is_substr \
+ $ret "DB_KEYEXIST"] 1
+ }
+ }
+
+ # Make sure all the items are gone
+ puts "\tTest028.l: Get after delete"
+ set ret [$dbc get -set $key]
+ error_check_good get_after_del [string length $ret] 0
+
+ puts "\tTest028.m: Reput the item"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key 0$dupstr}]
+ error_check_good db_put $ret 0
+ for { set count 1 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} $txn \
+ {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.n: Retrieve the item"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Clean out in prep for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+ }
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
+
+# Check function for test028; keys and data are identical
+proc test028.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "Bad key" $key put_after_cursor_del
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
diff --git a/storage/bdb/test/test029.tcl b/storage/bdb/test/test029.tcl
new file mode 100644
index 00000000000..8e4b8aa6e41
--- /dev/null
+++ b/storage/bdb/test/test029.tcl
@@ -0,0 +1,245 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test029.tcl,v 11.20 2002/06/29 13:44:44 bostic Exp $
+#
+# TEST test029
+# TEST Test the Btree and Record number renumbering.
+proc test029 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test029: $method ($args)"
+
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test029 skipping for method HASH"
+ return
+ }
+ if { [is_record_based $method] == 1 && $do_renumber != 1 } {
+ puts "Test029 skipping for method RECNO (w/out renumbering)"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test029.db
+ set env NULL
+ } else {
+ set testfile test029.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ # Do not set nentries down to 100 until we
+ # fix SR #5958.
+ set nentries 1000
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest029.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+
+ # Save the first and last keys
+ set last_key [lindex $sorted_keys end]
+ set last_keynum [llength $sorted_keys]
+
+ set first_key [lindex $sorted_keys 0]
+ set first_keynum 1
+
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest029.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good dbput $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good dbget [lindex [lindex $ret 0] 1] $k
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now delete the first key in the database
+ puts "\tTest029.c: delete and verify renumber"
+
+ # Delete the first key in the file
+ if { [is_record_based $method] == 1 } {
+ set key $first_keynum
+ } else {
+ set key $first_key
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now we are ready to retrieve records based on
+ # record number
+ if { [string compare $omethod "-btree"] == 0 } {
+ append gflags " -recno"
+ }
+
+ # First try to get the old last key (shouldn't exist)
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_after_del $ret [list]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now try to get what we think should be the last key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_last_after_del [lindex [lindex $ret 0] 1] $last_key
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Create a cursor; we need it for the next test and we
+ # need it for recno here.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # OK, now re-put the first key and make sure that we
+ # renumber the last key appropriately.
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $first_key]}]
+ error_check_good db_put $ret 0
+ } else {
+ # Recno
+ set ret [$dbc get -first]
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ getn_last_after_put [lindex [lindex $ret 0] 1] $last_key
+
+ # Now delete the first key in the database using a cursor
+ puts "\tTest029.d: delete with cursor and verify renumber"
+
+ set ret [$dbc get -first]
+ error_check_good dbc_first $ret [list [list $key $first_key]]
+
+ # Now delete at the cursor
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ # Now check the record numbers of the last keys again.
+ # First try to get the old last key (shouldn't exist)
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_last_after_cursor_del:$ret $ret [list]
+
+ # Now try to get what we think should be the last key
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_after_cursor_del [lindex [lindex $ret 0] 1] $last_key
+
+ # Re-put the first key and make sure that we renumber the last
+ # key appropriately.
+ puts "\tTest029.e: put with cursor and verify renumber"
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [eval {$dbc put} \
+ $pflags {-current $first_key}]
+ error_check_good dbc_put:DB_CURRENT $ret 0
+ } else {
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ get_after_cursor_reput [lindex [lindex $ret 0] 1] $last_key
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test030.tcl b/storage/bdb/test/test030.tcl
new file mode 100644
index 00000000000..d91359f07a0
--- /dev/null
+++ b/storage/bdb/test/test030.tcl
@@ -0,0 +1,231 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test030.tcl,v 11.18 2002/05/22 15:42:50 sue Exp $
+#
+# TEST test030
+# TEST Test DB_NEXT_DUP Functionality.
+proc test030 { method {nentries 10000} args } {
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 ||
+ [is_rbtree $method] == 1 } {
+ puts "Test030 skipping for method $method"
+ return
+ }
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test030.db
+ set cntfile $testdir/cntfile.db
+ set env NULL
+ } else {
+ set testfile test030.db
+ set cntfile cntfile.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create \
+ -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Use a second DB to keep track of how many duplicates
+ # we enter per key
+
+ set cntdb [eval {berkdb_open -create \
+ -mode 0644} $args {-btree $cntfile}]
+ error_check_good dbopen:cntfile [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ # We will add between 1 and 10 dups with values 1 ... dups
+ # We'll verify each addition.
+
+ set did [open $dict]
+ puts "\tTest030.a: put and get duplicate keys."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ndup [berkdb random_int 1 10]
+
+ for { set i 1 } { $i <= $ndup } { incr i 1 } {
+ set ctxn ""
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn \
+ [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb put} \
+ $ctxn $pflags {$str [chop_data $method $ndup]}]
+ error_check_good put_cnt $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ incr x
+
+ if { [llength $ret] == 0 } {
+ break
+ }
+
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good Test030:put $d $str
+
+ set id [ id_of $datastr ]
+ error_check_good Test030:dup# $id $x
+ }
+ error_check_good Test030:numdups $x $ndup
+ incr count
+ }
+ close $did
+
+ # Verify on sequential pass of entire file
+ puts "\tTest030.b: sequential check"
+
+ # We can't just set lastkey to a null string, since that might
+ # be a key now!
+ set lastkey "THIS STRING WILL NEVER BE A KEY"
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+
+ # Outer loop should always get a new key
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_bad outer_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good outer_get_loop:data $d $k
+ error_check_good outer_get_loop:id $id 1
+
+ set lastkey $k
+ # Figure out how may dups we should have
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb get} $ctxn $pflags {$k}]
+ set ndup [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
+
+ set howmany 1
+ for { set ret [$dbc get -nextdup] } \
+ { [llength $ret] != 0 } \
+ { set ret [$dbc get -nextdup] } {
+ incr howmany
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good inner_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $howmany
+
+ }
+ error_check_good ndups_found $howmany $ndup
+ }
+
+ # Verify on key lookup
+ puts "\tTest030.c: keyed check"
+ set cnt_dbc [$cntdb cursor]
+ for {set ret [$cnt_dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$cnt_dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+
+ set howmany [lindex [lindex $ret 0] 1]
+ error_check_bad cnt_seq:data [string length $howmany] 0
+
+ set i 0
+ for {set ret [$dbc get -set $k]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ incr i
+
+ set k [lindex [lindex $ret 0] 0]
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $i
+ }
+ error_check_good keyed_count $i $howmany
+
+ }
+ error_check_good cnt_curs_close [$cnt_dbc close] 0
+ error_check_good db_curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good cnt_file_close [$cntdb close] 0
+ error_check_good db_file_close [$db close] 0
+}
diff --git a/storage/bdb/test/test031.tcl b/storage/bdb/test/test031.tcl
new file mode 100644
index 00000000000..0006deb2d99
--- /dev/null
+++ b/storage/bdb/test/test031.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test031.tcl,v 11.24 2002/06/26 06:22:44 krinsky Exp $
+#
+# TEST test031
+# TEST Duplicate sorting functionality
+# TEST Make sure DB_NODUPDATA works.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and "ndups" duplicates
+# TEST For the data field, prepend random five-char strings (see test032)
+# TEST that we force the duplicate sorting code to do something.
+# TEST Along the way, test that we cannot insert duplicate duplicates
+# TEST using DB_NODUPDATA.
+# TEST
+# TEST By setting ndups large, we can make this an off-page test
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
+proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: \
+ $method ($args) $nentries small $ndups sorted dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod -dup -dupsort $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644} $args {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop, check nodupdata"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref [randstring]
+ set dups $dups$pref
+ set datastr $pref:$str
+ if { $i == 2 } {
+ set nodupstr $datastr
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Test DB_NODUPDATA using the DB handle
+ set ret [eval {$db put -nodupdata} \
+ $txn $pflags {$str [chop_data $method $nodupstr]}]
+ error_check_good db_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ # Test DB_NODUPDATA using cursor handle
+ set ret [$dbc get -set $str]
+ error_check_bad dbc_get [llength $ret] 0
+ set datastr [lindex [lindex $ret 0] 1]
+ error_check_bad dbc_data [string length $datastr] 0
+ set ret [eval {$dbc put -nodupdata} \
+ {$str [chop_data $method $datastr]}]
+ error_check_good dbc_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare \
+ $lastdup [pad_data $method $datastr]] > 0} {
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open(2) [is_valid_cursor $dbc $db] TRUE
+
+ set lastkey "THIS WILL NEVER BE A KEY VALUE"
+ # no need to delete $lastkey
+ set firsttimethru 1
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ if { [string compare $k $lastkey] != 0 } {
+ # Remove last key from the checkdb
+ if { $firsttimethru != 1 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+ set firsttimethru 0
+ set lastdup ""
+ set lastkey $k
+ set dups [lindex [lindex [eval {$check_db get} \
+ $txn {$k}] 0] 1]
+ error_check_good check_db:get:$k \
+ [string length $dups] [expr $ndups * 4]
+ }
+
+ if { [string compare $lastdup $d] > 0 } {
+ error_check_good dup_check:$k:$d 0 1
+ }
+ set lastdup $d
+
+ set pref [string range $d 0 3]
+ set ndx [string first $pref $dups]
+ error_check_good valid_duplicate [expr $ndx >= 0] 1
+ set a [string range $dups 0 [expr $ndx - 1]]
+ set b [string range $dups [expr $ndx + 4] end]
+ set dups $a$b
+ }
+ # Remove last key from the checkdb
+ if { [string length $lastkey] != 0 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+
+ # Make sure there is nothing left in check_db
+
+ set check_c [eval {$check_db cursor} $txn]
+ set ret [$check_c get -first]
+ error_check_good check_c:get:$ret [llength $ret] 0
+ error_check_good check_c:close [$check_c close] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test032.tcl b/storage/bdb/test/test032.tcl
new file mode 100644
index 00000000000..2076b744851
--- /dev/null
+++ b/storage/bdb/test/test032.tcl
@@ -0,0 +1,231 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test032.tcl,v 11.23 2002/06/11 14:09:57 sue Exp $
+#
+# TEST test032
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH functionality by retrieving each dup in the file
+# TEST explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+# TEST the unique key prefix (cursor only). Finally test the failure case.
+proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
+ global alphabet rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum:\
+ $method ($args) $nentries small sorted $ndups dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644} $args {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref [randstring]
+ set dups $dups$pref
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates (no cursor)"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good check_c_open(2) \
+ [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set pref [string range $d $ndx [expr $ndx + 3]]
+ set data $pref:$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good \
+ get_both_data:$k $ret [list [list $k $data]]
+ }
+ }
+
+ $db sync
+
+ # Now repeat the above test using cursor ops
+ puts "\tTest0$tnum.c: Checking file for correct duplicates (cursor)"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set pref [string range $d $ndx [expr $ndx + 3]]
+ set data $pref:$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good \
+ curs_get_both_data:$k $ret [list [list $k $data]]
+
+ set ret [eval {$dbc get} {-get_both_range $k $pref}]
+ error_check_good \
+ curs_get_both_range:$k $ret [list [list $k $data]]
+ }
+ }
+
+ # Now check the error case
+ puts "\tTest0$tnum.d: Check error case (no cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ # Now check the error case
+ puts "\tTest0$tnum.e: Check error case (cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test033.tcl b/storage/bdb/test/test033.tcl
new file mode 100644
index 00000000000..a7796ce99d6
--- /dev/null
+++ b/storage/bdb/test/test033.tcl
@@ -0,0 +1,176 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test033.tcl,v 11.24 2002/08/08 15:38:11 bostic Exp $
+#
+# TEST test033
+# TEST DB_GET_BOTH without comparison function
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and data; add duplicate records for each. After all are
+# TEST entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+# TEST DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+# TEST nonexistent keys.
+# TEST
+# TEST XXX
+# TEST This does not work for rbtree.
+proc test033 { method {nentries 10000} {ndups 5} {tnum 33} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test0$tnum: $method ($args) $nentries small $ndups dup key/data pairs"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ # Duplicate data entries are not allowed in record based methods.
+ if { [is_record_based $method] == 1 } {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args {$testfile}]
+ } else {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ }
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Allocate a cursor for DB_GET_BOTH_RANGE.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest0$tnum.a: Put/get loop."
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ } else {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good db_put $ret 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key and dup
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
+ }
+ incr count
+ }
+
+ close $did
+
+ puts "\tTest0$tnum.b: Verifying DB_GET_BOTH after creation."
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Now retrieve all the keys matching this key
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
+ }
+ incr count
+ }
+ close $did
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# No testing of dups is done on record-based methods.
+proc test033_recno.check {db dbc method str txn key} {
+ set ret [eval {$db get} $txn {-recno $key}]
+ error_check_good "db_get:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ set ret [$dbc get -get_both $key [pad_data $method $str]]
+ error_check_good "db_get_both:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+}
+
+# Testing of non-record-based methods includes duplicates
+# and get_both_range.
+proc test033_check {db dbc method str txn ndups} {
+ for {set i 1} {$i <= $ndups } { incr i } {
+ set datastr $i:$str
+
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good "db_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good "dbc_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good "dbc_get_both_range:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+ }
+
+ # Now retrieve non-existent dup (i is ndups + 1)
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good db_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good dbc_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good dbc_get_both_range [llength $ret] 0
+}
diff --git a/storage/bdb/test/test034.tcl b/storage/bdb/test/test034.tcl
new file mode 100644
index 00000000000..647ad940815
--- /dev/null
+++ b/storage/bdb/test/test034.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test034.tcl,v 11.8 2002/01/11 15:53:46 bostic Exp $
+#
+# TEST test034
+# TEST test032 with off-page duplicates
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates.
+proc test034 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test032 $method $nentries 20 34 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test032 $method [expr $nentries / 10] 100 34 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test035.tcl b/storage/bdb/test/test035.tcl
new file mode 100644
index 00000000000..06796b1e9aa
--- /dev/null
+++ b/storage/bdb/test/test035.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test035.tcl,v 11.8 2002/07/22 17:00:39 sue Exp $
+#
+# TEST test035
+# TEST Test033 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test035 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test033 $method $nentries 20 35 -pagesize 512} $args
+ # Test with multiple pages of off-page duplicates
+ eval {test033 $method [expr $nentries / 10] 100 35 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test036.tcl b/storage/bdb/test/test036.tcl
new file mode 100644
index 00000000000..4e54f363ff8
--- /dev/null
+++ b/storage/bdb/test/test036.tcl
@@ -0,0 +1,173 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test036.tcl,v 11.18 2002/05/22 15:42:51 sue Exp $
+#
+# TEST test036
+# TEST Test KEYFIRST and KEYLAST when the key doesn't exist
+# TEST Put nentries key/data pairs (from the dictionary) using a cursor
+# TEST and KEYFIRST and KEYLAST (this tests the case where we use cursor
+# TEST put for non-existent keys).
+proc test036 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ puts "Test036 skipping for method recno"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test036.db
+ set env NULL
+ } else {
+ set testfile test036.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test036: $method ($args) $nentries equal key/data pairs"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test036_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test036.check
+ }
+ puts "\tTest036.a: put/get loop KEYFIRST"
+ # Here is the loop where we put and get each key/data pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval {$dbc put} $pflags {-keyfirst $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+	puts "\tTest036.b: put/get loop KEYLAST"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval {$dbc put} $txn $pflags {-keylast $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest036.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+}
+
+# Check function for test036; keys and data are identical
+proc test036.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test036_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test037.tcl b/storage/bdb/test/test037.tcl
new file mode 100644
index 00000000000..0b2e2989949
--- /dev/null
+++ b/storage/bdb/test/test037.tcl
@@ -0,0 +1,196 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test037.tcl,v 11.18 2002/03/15 16:30:54 sue Exp $
+#
+# TEST test037
+# TEST Test DB_RMW
+proc test037 { method {nentries 100} args } {
+ global encrypt
+
+ source ./include.tcl
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test037 skipping for env $env"
+ return
+ }
+
+ puts "Test037: RMW $method"
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ # Create the database
+ env_cleanup $testdir
+ set testfile test037.db
+
+ set local_env \
+ [eval {berkdb_env -create -mode 0644 -txn} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $local_env] TRUE
+
+ set db [eval {berkdb_open \
+ -env $local_env -create -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest037.a: Creating database"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ incr count
+ }
+ close $did
+ error_check_good dbclose [$db close] 0
+	error_check_good envclose [$local_env close] 0
+
+ puts "\tTest037.b: Setting up environments"
+
+ # Open local environment
+ set env_cmd [concat berkdb_env -create -txn $encargs -home $testdir]
+ set local_env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $local_env] TRUE
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good txn_open [is_valid_txn $local_txn $local_env] TRUE
+
+ # Open remote environment
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good \
+ remote:txn_open [is_valid_txn $remote_txn $remote_env] TRUE
+
+ # Now try put test without RMW. Gets on one site should not
+ # lock out gets on another.
+
+ # Open databases and dictionary
+ puts "\tTest037.c: Opening databases"
+ set did [open $dict]
+ set rkey 0
+
+ set db [berkdb_open -auto_commit -env $local_env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set rdb [send_cmd $f1 \
+ "berkdb_open -auto_commit -env $remote_env -mode 0644 $testfile"]
+ error_check_good remote:dbopen [is_valid_db $rdb] TRUE
+
+ puts "\tTest037.d: Testing without RMW"
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn} $gflags {$key}]
+ error_check_good local_get [lindex [lindex $rec 0] 1] \
+ [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good no_rmw_get:remote_time [expr $remote_time <= 1] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ puts "\tTest037.e: Testing with RMW"
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good \
+ txn_open [is_valid_txn $local_txn $local_env] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good remote:txn_open \
+ [is_valid_txn $remote_txn $remote_env] TRUE
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn -rmw} $gflags {$key}]
+ error_check_good \
+ local_get [lindex [lindex $rec 0] 1] [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good rmw_get:remote_time [expr $remote_time > 4] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ # Close everything up: remote first
+ set r [send_cmd $f1 "$rdb close"]
+ error_check_good remote_db_close $r 0
+
+ set r [send_cmd $f1 "$remote_env close"]
+
+ # Close locally
+ error_check_good db_close [$db close] 0
+ $local_env close
+ close $did
+ close $f1
+}
diff --git a/storage/bdb/test/test038.tcl b/storage/bdb/test/test038.tcl
new file mode 100644
index 00000000000..3babde8fe0b
--- /dev/null
+++ b/storage/bdb/test/test038.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test038.tcl,v 11.23 2002/06/11 14:09:57 sue Exp $
+#
+# TEST test038
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
+proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: \
+ $method ($args) $nentries small sorted dup key/data pairs"
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644 -hash} $args {$checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good sorted_dups($lastdup,$datastr)\
+ 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now check the duplicates, then delete then recheck
+ puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc get -get_both_range $k $pref]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc del]
+ error_check_good del $ret 0
+
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+
+ # We should either not find anything (if deleting the
+ # largest duplicate in the set) or a duplicate that
+ # sorts larger than the one we deleted.
+ set ret [$dbc get -get_both_range $k $pref]
+ if { [llength $ret] != 0 } {
+				set datastr [lindex [lindex $ret 0] 1]
+ if {[string compare \
+ $pref [lindex [lindex $ret 0] 1]] >= 0} {
+ error_check_good \
+ error_case_range:sorted_dups($pref,$datastr) 0 1
+ }
+ }
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret \
+ [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test039.tcl b/storage/bdb/test/test039.tcl
new file mode 100644
index 00000000000..2bbc83ebe05
--- /dev/null
+++ b/storage/bdb/test/test039.tcl
@@ -0,0 +1,211 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test039.tcl,v 11.20 2002/06/11 14:09:57 sue Exp $
+#
+# TEST test039
+# TEST DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+# TEST function.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
+proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method $nentries \
+ small $ndups unsorted dup key/data pairs"
+
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval \
+ {berkdb_open -create -mode 0644 -hash} $args {$checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ set xx [expr $x * 3]
+ set check_data \
+ [string range $dups $xx [expr $xx + 1]]:$k
+ error_check_good retrieve $datastr $check_data
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now check the duplicates, then delete then recheck
+ puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc del]
+ error_check_good del $ret 0
+
+ set ret [$dbc get -get_both $k $data]
+ error_check_good get_both:$k [llength $ret] 0
+
+ set ret [$dbc get -get_both_range $k $data]
+ error_check_good get_both_range:$k [llength $ret] 0
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test040.tcl b/storage/bdb/test/test040.tcl
new file mode 100644
index 00000000000..1856f78fc2e
--- /dev/null
+++ b/storage/bdb/test/test040.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test040.tcl,v 11.6 2002/01/11 15:53:47 bostic Exp $
+#
+# TEST test040
+# TEST Test038 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test040 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test038 $method $nentries 20 40 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test038 $method [expr $nentries / 10] 100 40 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test041.tcl b/storage/bdb/test/test041.tcl
new file mode 100644
index 00000000000..fdcbdbef3d7
--- /dev/null
+++ b/storage/bdb/test/test041.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test041.tcl,v 11.6 2002/01/11 15:53:47 bostic Exp $
+#
+# TEST test041
+# TEST Test039 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test041 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test039 $method $nentries 20 41 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test039 $method [expr $nentries / 10] 100 41 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test042.tcl b/storage/bdb/test/test042.tcl
new file mode 100644
index 00000000000..9f444b8349c
--- /dev/null
+++ b/storage/bdb/test/test042.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test042.tcl,v 11.37 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST test042
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Multiprocess DB test; verify that locking is working for the
+# TEST concurrent access method product.
+# TEST
+# TEST Use the first "nentries" words from the dictionary. Insert each with
+# TEST self as key and a fixed, medium length data string. Then fire off
+# TEST multiple processes that bang on the database. Each one should try to
+# TEST read and write random keys. When they rewrite, they'll append their
+# TEST pid to the data string (sometimes doing a rewrite sometimes doing a
+# TEST partial put). Some will use cursors to traverse through a few keys
+# TEST before finding one to write.
+
+proc test042 { method {nentries 1000} args } {
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test042 skipping for env $env"
+ return
+ }
+
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test042 skipping for security"
+ return
+ }
+ test042_body $method $nentries 0 $args
+ test042_body $method $nentries 1 $args
+}
+
+proc test042_body { method nentries alldb args } {
+ source ./include.tcl
+
+ if { $alldb } {
+ set eflag "-cdb -cdb_alldb"
+ } else {
+ set eflag "-cdb"
+ }
+ puts "Test042: CDB Test ($eflag) $method $nentries"
+
+ # Set initial parameters
+ set do_exit 0
+ set iter 10000
+ set procs 5
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -dir { incr i; set testdir [lindex $args $i] }
+ -iter { incr i; set iter [lindex $args $i] }
+ -procs { incr i; set procs [lindex $args $i] }
+ -exit { set do_exit 1 }
+ default { append oargs " " [lindex $args $i] }
+ }
+ }
+
+ # Create the database and open the dictionary
+ set testfile test042.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Env is created, now set up database
+ test042_dbinit $env $nentries $method $oargs $testfile 0
+ if { $alldb } {
+ for { set i 1 } {$i < $procs} {incr i} {
+ test042_dbinit $env $nentries $method $oargs \
+ $testfile $i
+ }
+ }
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Now spawn off processes
+ berkdb debug_check
+ puts "\tTest042.b: forking off $procs children"
+ set pidlist {}
+
+ for { set i 0 } {$i < $procs} {incr i} {
+ if { $alldb } {
+ set tf $testfile$i
+ } else {
+ set tf ${testfile}0
+ }
+ puts "exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log \
+ $method $testdir $tf $nentries $iter $i $procs &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log $method \
+ $testdir $tf $nentries $iter $i $procs &]
+ lappend pidlist $p
+ }
+ puts "Test042: $procs independent processes now running"
+ watch_procs $pidlist
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/test042.*.log]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Test is done, blow away lock and mpool region
+ reset_env $env
+}
+
+# If we are renumbering, then each time we delete an item, the number of
+# items in the file is temporarily decreased, so the highest record numbers
+# do not exist. To make sure this doesn't happen, we never generate the
+# highest few record numbers as keys.
+#
+# For record-based methods, record numbers begin at 1, while for other keys,
+# we begin at 0 to index into an array.
+proc rand_key { method nkeys renum procs} {
+ if { $renum == 1 } {
+ return [berkdb random_int 1 [expr $nkeys - $procs]]
+ } elseif { [is_record_based $method] == 1 } {
+ return [berkdb random_int 1 $nkeys]
+ } else {
+ return [berkdb random_int 0 [expr $nkeys - 1]]
+ }
+}
+
+proc test042_dbinit { env nentries method oargs tf ext } {
+ global datastr
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$tf$ext}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest042.a: put loop $tf$ext"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+}
diff --git a/storage/bdb/test/test043.tcl b/storage/bdb/test/test043.tcl
new file mode 100644
index 00000000000..eea7ec86d54
--- /dev/null
+++ b/storage/bdb/test/test043.tcl
@@ -0,0 +1,192 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test043.tcl,v 11.17 2002/05/22 15:42:52 sue Exp $
+#
+# TEST test043
+# TEST Recno renumbering and implicit creation test
+# TEST Test the Record number implicit creation and renumbering options.
+proc test043 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test043: $method ($args)"
+
+ if { [is_record_based $method] != 1 } {
+ puts "Test043 skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test043.db
+ set env NULL
+ } else {
+ set testfile test043.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create the database
+ set db [eval {berkdb_open -create -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags " -recno"
+ set txn ""
+
+ # First test implicit creation and retrieval
+ set count 1
+ set interval 5
+ if { $nentries < $interval } {
+ set nentries [expr $interval + 1]
+ }
+ puts "\tTest043.a: insert keys at $interval record intervals"
+ while { $count <= $nentries } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$count [chop_data $method $count]}]
+ error_check_good "$db put $count" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ set last $count
+ incr count $interval
+ }
+
+ puts "\tTest043.b: get keys using DB_FIRST/DB_NEXT"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good "$db cursor" [is_valid_cursor $dbc $db] TRUE
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [pad_data $method [lindex [lindex $rec 0] 1]]
+ error_check_good "$dbc get key==data" [pad_data $method $k] $d
+ error_check_good "$dbc get sequential" $k $check
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ incr check $interval
+ }
+
+ # Now make sure that we get DB_KEYEMPTY for non-existent keys
+ puts "\tTest043.c: Retrieve non-existent keys"
+ global errorInfo
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+
+ set ret [eval {$db get} $txn $gflags {[expr $k + 1]}]
+ error_check_good "$db \
+ get [expr $k + 1]" $ret [list]
+
+ incr check $interval
+ # Make sure we don't do a retrieve past the end of file
+ if { $check >= $last } {
+ break
+ }
+ }
+
+ # Now try deleting and make sure the right thing happens.
+ puts "\tTest043.d: Delete tests"
+ set rec [$dbc get -first]
+ error_check_bad "$dbc get -first" [llength $rec] 0
+ error_check_good "$dbc get -first key" [lindex [lindex $rec 0] 0] 1
+ error_check_good "$dbc get -first data" \
+ [lindex [lindex $rec 0] 1] [pad_data $method 1]
+
+ # Delete the first item
+ error_check_good "$dbc del" [$dbc del] 0
+
+ # Retrieving 1 should always fail
+ set ret [eval {$db get} $txn $gflags {1}]
+ error_check_good "$db get 1" $ret [list]
+
+ # Now, retrieving other keys should work; keys will vary depending
+ # upon renumbering.
+ if { $do_renumber == 1 } {
+ set count [expr 0 + $interval]
+ set max [expr $nentries - 1]
+ } else {
+ set count [expr 1 + $interval]
+ set max $nentries
+ }
+
+ while { $count <= $max } {
+ set rec [eval {$db get} $txn $gflags {$count}]
+ if { $do_renumber == 1 } {
+ set data [expr $count + 1]
+ } else {
+ set data $count
+ }
+ error_check_good "$db get $count" \
+ [pad_data $method $data] [lindex [lindex $rec 0] 1]
+ incr count $interval
+ }
+ set max [expr $count - $interval]
+
+ puts "\tTest043.e: Verify LAST/PREV functionality"
+ set count $max
+ for { set rec [$dbc get -last] } { [llength $rec] != 0 } {
+ set rec [$dbc get -prev] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [lindex [lindex $rec 0] 1]
+ if { $do_renumber == 1 } {
+ set data [expr $k + 1]
+ } else {
+ set data $k
+ }
+ error_check_good \
+ "$dbc get key==data" [pad_data $method $data] $d
+ error_check_good "$dbc get sequential" $k $count
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ set count [expr $count - $interval]
+ if { $count < 1 } {
+ break
+ }
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test044.tcl b/storage/bdb/test/test044.tcl
new file mode 100644
index 00000000000..67cf3ea24b8
--- /dev/null
+++ b/storage/bdb/test/test044.tcl
@@ -0,0 +1,250 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test044.tcl,v 11.32 2002/07/16 20:53:04 bostic Exp $
+#
+# TEST test044
+# TEST Small system integration tests
+# TEST Test proper functioning of the checkpoint daemon,
+# TEST recovery, transactions, etc.
+# TEST
+# TEST System integration DB test: verify that locking, recovery, checkpoint,
+# TEST and all the other utilities basically work.
+# TEST
+# TEST The test consists of $nprocs processes operating on $nfiles files. A
+# TEST transaction consists of adding the same key/data pair to some random
+# TEST number of these files. We generate a bimodal distribution in key size
+# TEST with 70% of the keys being small (1-10 characters) and the remaining
+# TEST 30% of the keys being large (uniform distribution about mean $key_avg).
+# TEST If we generate a key, we first check to make sure that the key is not
+# TEST already in the dataset. If it is, we do a lookup.
+#
+# XXX
+# This test uses grow-only files currently!
+proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
+ source ./include.tcl
+ global encrypt
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test044 skipping for env $env"
+ return
+ }
+ if { $encrypt != 0 } {
+ puts "Test044 skipping for security"
+ return
+ }
+
+ puts "Test044: system integration test db $method $nprocs processes \
+ on $nfiles files"
+
+ # Parse options
+ set otherargs ""
+ set key_avg 10
+ set data_avg 20
+ set do_exit 0
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -key_avg { incr i; set key_avg [lindex $args $i] }
+ -data_avg { incr i; set data_avg [lindex $args $i] }
+ -testdir { incr i; set testdir [lindex $args $i] }
+ -x.* { set do_exit 1 }
+ default {
+ lappend otherargs [lindex $args $i]
+ }
+ }
+ }
+
+ if { $cont == 0 } {
+ # Create the database and open the dictionary
+ env_cleanup $testdir
+
+ # Create an environment
+ puts "\tTest044.a: creating environment and $nfiles files"
+ set dbenv [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Create a bunch of files
+ set m $method
+
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ if { $method == "all" } {
+ switch [berkdb random_int 1 2] {
+ 1 { set m -btree }
+ 2 { set m -hash }
+ }
+ } else {
+ set m $omethod
+ }
+
+ set db [eval {berkdb_open -env $dbenv -create \
+ -mode 0644 $m} $otherargs {test044.$i.db}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ }
+ }
+
+ # Close the environment
+ $dbenv close
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Database is created, now fork off the kids.
+ puts "\tTest044.b: forking off $nprocs processes and utilities"
+ set cycle 1
+ set ncycles 3
+ while { $cycle <= $ncycles } {
+ set dbenv [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Fire off deadlock detector and checkpointer
+ puts "Beginning cycle $cycle"
+ set ddpid [exec $util_path/db_deadlock -h $testdir -t 5 &]
+ set cppid [exec $util_path/db_checkpoint -h $testdir -p 2 &]
+ puts "Deadlock detector: $ddpid Checkpoint daemon $cppid"
+
+ set pidlist {}
+ for { set i 0 } {$i < $nprocs} {incr i} {
+ set p [exec $tclsh_path \
+ $test_path/sysscript.tcl $testdir \
+ $nfiles $key_avg $data_avg $omethod \
+ >& $testdir/test044.$i.log &]
+ lappend pidlist $p
+ }
+ set sleep [berkdb random_int 300 600]
+ puts \
+"[timestamp] $nprocs processes running $pidlist for $sleep seconds"
+ tclsleep $sleep
+
+ # Now simulate a crash
+ puts "[timestamp] Crashing"
+
+ #
+ # The environment must remain open until this point to get
+ # proper sharing (using the paging file) on Win/9X. [#2342]
+ #
+ error_check_good env_close [$dbenv close] 0
+
+ tclkill $ddpid
+ tclkill $cppid
+
+ foreach p $pidlist {
+ tclkill $p
+ }
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/test044.*.log]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Now run recovery
+ test044_verify $testdir $nfiles
+ incr cycle
+ }
+}
+
+proc test044_usage { } {
+ puts -nonewline "test044 method nentries [-d directory] [-i iterations]"
+ puts " [-p procs] -x"
+}
+
+proc test044_verify { dir nfiles } {
+ source ./include.tcl
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save1
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save1
+# }
+# }
+
+ # Run recovery and then read through all the database files to make
+ # sure that they all look good.
+
+ puts "\tTest044.verify: Running recovery and verifying file contents"
+ set stat [catch {exec $util_path/db_recover -h $dir} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save2
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save2
+# }
+# }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ set db($f) [berkdb_open $dir/test044.$f.db]
+ error_check_good $f:dbopen [is_valid_db $db($f)] TRUE
+
+ set cursors($f) [$db($f) cursor]
+ error_check_bad $f:cursor_open $cursors($f) NULL
+ error_check_good \
+ $f:cursor_open [is_substr $cursors($f) $db($f)] 1
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ for {set d [$cursors($f) get -first] } \
+ { [string length $d] != 0 } \
+ { set d [$cursors($f) get -next] } {
+
+ set k [lindex [lindex $d 0] 0]
+ set d [lindex [lindex $d 0] 1]
+
+ set flist [zero_list $nfiles]
+ set r $d
+ while { [set ndx [string first : $r]] != -1 } {
+ set fnum [string range $r 0 [expr $ndx - 1]]
+ if { [lindex $flist $fnum] == 0 } {
+ set fl "-set"
+ } else {
+ set fl "-next"
+ }
+
+ if { $fl != "-set" || $fnum != $f } {
+ if { [string compare $fl "-set"] == 0} {
+ set full [$cursors($fnum) \
+ get -set $k]
+ } else {
+ set full [$cursors($fnum) \
+ get -next]
+ }
+ set key [lindex [lindex $full 0] 0]
+ set rec [lindex [lindex $full 0] 1]
+ error_check_good \
+ $f:dbget_$fnum:key $key $k
+ error_check_good \
+ $f:dbget_$fnum:data $rec $d
+ }
+
+ set flist [lreplace $flist $fnum $fnum 1]
+ incr ndx
+ set r [string range $r $ndx end]
+ }
+ }
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ error_check_good $cursors($f) [$cursors($f) close] 0
+ error_check_good db_close:$f [$db($f) close] 0
+ }
+}
diff --git a/storage/bdb/test/test045.tcl b/storage/bdb/test/test045.tcl
new file mode 100644
index 00000000000..3825135facd
--- /dev/null
+++ b/storage/bdb/test/test045.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test045.tcl,v 11.24 2002/02/07 17:50:10 sue Exp $
+#
+# TEST test045
+# TEST Small random tester
+# TEST Runs a number of random add/delete/retrieve operations.
+# TEST Tests both successful conditions and error conditions.
+# TEST
+# TEST Run the random db tester on the specified access method.
+#
+# Options are:
+# -adds <maximum number of keys before you disable adds>
+# -cursors <number of cursors>
+# -dataavg <average data size>
+# -delete <minimum number of keys before you disable deletes>
+# -dups <allow duplicates in file>
+# -errpct <Induce errors errpct of the time>
+# -init <initial number of entries in database>
+# -keyavg <average key size>
+proc test045 { method {nops 10000} args } {
+ source ./include.tcl
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test045 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test045 skipping for security"
+ return
+ }
+ set omethod [convert_method $method]
+
+ puts "Test045: Random tester on $method for $nops operations"
+
+ # Set initial parameters
+ set adds [expr $nops * 10]
+ set cursors 5
+ set dataavg 40
+ set delete $nops
+ set dups 0
+ set errpct 0
+ set init 0
+ if { [is_record_based $method] == 1 } {
+ set keyavg 10
+ } else {
+ set keyavg 25
+ }
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -adds { incr i; set adds [lindex $args $i] }
+ -cursors { incr i; set cursors [lindex $args $i] }
+ -dataavg { incr i; set dataavg [lindex $args $i] }
+ -delete { incr i; set delete [lindex $args $i] }
+ -dups { incr i; set dups [lindex $args $i] }
+ -errpct { incr i; set errpct [lindex $args $i] }
+ -init { incr i; set init [lindex $args $i] }
+ -keyavg { incr i; set keyavg [lindex $args $i] }
+ -extent { incr i;
+ lappend oargs "-extent" "100" }
+ default { lappend oargs [lindex $args $i] }
+ }
+ }
+
+ # Create the database and and initialize it.
+ set root $testdir/test045
+ set f $root.db
+ env_cleanup $testdir
+
+ # Run the script with 3 times the number of initial elements to
+ # set it up.
+ set db [eval {berkdb_open \
+ -create -mode 0644 $omethod} $oargs {$f}]
+ error_check_good dbopen:$f [is_valid_db $db] TRUE
+
+ set r [$db close]
+ error_check_good dbclose:$f $r 0
+
+ # We redirect standard out, but leave standard error here so we
+ # can see errors.
+
+ puts "\tTest045.a: Initializing database"
+ if { $init != 0 } {
+ set n [expr 3 * $init]
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $method $f $n \
+ 1 $init $n $keyavg $dataavg $dups 0 -1 \
+ > $testdir/test045.init
+ }
+ # Check for test failure
+ set e [findfail $testdir/test045.init]
+ error_check_good "FAIL: error message(s) in init file" $e 0
+
+ puts "\tTest045.b: Now firing off berkdb rand dbscript, running: "
+ # Now the database is initialized, run a test
+ puts "$tclsh_path\
+ $test_path/dbscript.tcl $method $f $nops $cursors $delete $adds \
+ $keyavg $dataavg $dups $errpct > $testdir/test045.log"
+
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $method $f \
+ $nops $cursors $delete $adds $keyavg \
+ $dataavg $dups $errpct \
+ > $testdir/test045.log
+
+ # Check for test failure
+ set e [findfail $testdir/test045.log]
+ error_check_good "FAIL: error message(s) in log file" $e 0
+
+}
diff --git a/storage/bdb/test/test046.tcl b/storage/bdb/test/test046.tcl
new file mode 100644
index 00000000000..4136f30aaa7
--- /dev/null
+++ b/storage/bdb/test/test046.tcl
@@ -0,0 +1,813 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test046.tcl,v 11.33 2002/05/24 15:24:55 sue Exp $
+#
+# TEST test046
+# TEST Overwrite test of small/big key/data with cursor checks.
+proc test046 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tTest046: Overwrite test with cursor and small/big key/data."
+ puts "\tTest046:\t$method $args"
+
+ if { [is_rrecno $method] == 1} {
+ puts "\tTest046: skipping for method $method."
+ return
+ }
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ if { [is_record_based $method] == 1} {
+ set key ""
+ }
+
+ puts "\tTest046: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test046.db
+ set env NULL
+ } else {
+ set testfile test046.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile.a]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_record_based $method] == 1} {
+ set ret [eval {$db put} $txn {$i $data$i}]
+ } elseif { $i < 10 } {
+ set ret [eval {$db put} $txn [set key]00$i \
+ [set data]00$i]
+ } elseif { $i < 100 } {
+ set ret [eval {$db put} $txn [set key]0$i \
+ [set data]0$i]
+ } else {
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ }
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # get db order of keys
+ for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ puts "\tTest046.a: Deletes by key."
+ puts "\t\tTest046.a.1: Get data with SET, then delete before cursor."
+ # get key in middle of page, call this the nth set curr to it
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr $ret
+
+ # delete before cursor(n-1), make sure it is gone
+ set i [expr $i - 1]
+ error_check_good db_del [eval {$db del} $txn {$key_set($i)}] 0
+
+ # use set_range to get first key starting at n-1, should
+ # give us nth--but only works for btree
+ if { [is_btree $method] == 1 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([incr i])]
+ incr i -1
+ }
+ error_check_bad dbc_get:set(R)(post-delete) [llength $ret] 0
+ error_check_good dbc_get(match):set $ret $curr
+
+ puts "\t\tTest046.a.2: Delete cursor item by key."
+ # nth key, which cursor should be on now
+ set i [incr i]
+ set ret [eval {$db del} $txn {$key_set($i)}]
+ error_check_good db_del $ret 0
+
+ # this should return n+1 key/data, curr has nth key/data
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i+1])]
+ }
+ error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+ error_check_bad dbc_get(no-match):set_range $ret $curr
+
+ puts "\t\tTest046.a.3: Delete item after cursor."
+ # we'll delete n+2, since we have deleted n-1 and n
+ # i still equal to nth, cursor on n+1
+ set i [incr i]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr [$dbc get -next]
+ error_check_bad dbc_get:next [llength $curr] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $curr] 0
+ # delete *after* cursor pos.
+ error_check_good db:del [eval {$db del} $txn {$key_set([incr i])}] 0
+
+ # make sure item is gone, try to get it
+ if { [string compare $omethod "-btree"] == 0} {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i +1])]
+ }
+ error_check_bad dbc_get:set(_range) [llength $ret] 0
+ error_check_bad dbc_get:set(_range) $ret $curr
+ error_check_good dbc_get:set [lindex [lindex $ret 0] 0] \
+ $key_set([expr $i+1])
+
+ puts "\tTest046.b: Deletes by cursor."
+ puts "\t\tTest046.b.1: Delete, do DB_NEXT."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ set i [expr $i+2]
+ # i = n+4
+ error_check_good dbc_get:next(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.2: Delete, do DB_PREV."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [expr $i-3]
+ # i = n+1 (deleted all in between)
+ error_check_good dbc_get:prev(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.3: Delete, do DB_CURRENT."
+ error_check_good dbc:del [$dbc del] 0
+ # we just deleted, so current item should be KEYEMPTY, throws err
+ set ret [$dbc get -current]
+ error_check_good dbc_get:curr:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:get:current [catch {$dbc get -current} ret] 1
+ #error_check_good dbc_get:curr:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+
+ puts "\tTest046.c: Inserts (before/after), by key then cursor."
+ puts "\t\tTest046.c.1: Insert by key before the cursor."
+ # i is at curs pos, i=n+1, we want to go BEFORE
+ set i [incr i -1]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ error_check_good db_put:before $ret 0
+
+ puts "\t\tTest046.c.2: Insert by key after the cursor."
+ set i [incr i +2]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ error_check_good db_put:after $ret 0
+
+ puts "\t\tTest046.c.3: Insert by curs with deleted curs (should fail)."
+ # cursor is on n+1, we'll change i to match
+ set i [incr i -1]
+
+ error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db:close [$db close] 0
+ if { [is_record_based $method] == 1} {
+ puts "\t\tSkipping the rest of test for method $method."
+ puts "\tTest046 ($method) complete."
+ return
+ } else {
+ # Reopen without printing __db_errs.
+ set db [eval {berkdb_open_noerr} $oflags $testfile.a]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ # should fail with EINVAL (deleted cursor)
+ set errorCode NONE
+ error_check_good catch:put:before 1 \
+ [catch {$dbc put -before $data_set($i)} ret]
+ error_check_good dbc_put:deleted:before \
+ [is_substr $errorCode "EINVAL"] 1
+
+ # should fail with EINVAL
+ set errorCode NONE
+ error_check_good catch:put:after 1 \
+ [catch {$dbc put -after $data_set($i)} ret]
+ error_check_good dbc_put:deleted:after \
+ [is_substr $errorCode "EINVAL"] 1
+
+ puts "\t\tTest046.c.4:\
+ Insert by cursor before/after existent cursor."
+ # can't use before after w/o dup except renumber in recno
+ # first, restore an item so they don't fail
+ #set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ #error_check_good db_put $ret 0
+
+ #set ret [$dbc get -set $key_set($i)]
+ #error_check_bad dbc_get:set [llength $ret] 0
+ #set i [incr i -2]
+ # i = n - 1
+ #set ret [$dbc get -prev]
+ #set ret [$dbc put -before $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:before $ret 0
+ # cursor pos is adjusted to match prev, recently inserted
+ #incr i
+ # i = n
+ #set ret [$dbc put -after $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:after $ret 0
+ }
+
+ # For the next part of the test, we need a db with no dups to test
+ # overwrites
+ puts "\tTest046.d.0: Cleanup, close db, open new db with no dups."
+ error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db:close [$db close] 0
+
+ set db [eval {berkdb_open} $oflags $testfile.d]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.d.0: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set nkeys 20
+
+ # Prepare cursor on item
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+
+ # Prepare unique big/small values for an initial
+ # and an overwrite set of key/data
+ foreach ptype {init over} {
+ foreach size {big small} {
+ if { [string compare $size big] == 0 } {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 250]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 250]
+ } else {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 10]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 10]
+ }
+ }
+ }
+
+ set i 0
+ # Do all overwrites for key and cursor
+ foreach type {key_over curs_over} {
+ # Overwrite (i=initial) four different kinds of pairs
+ incr i
+ puts "\tTest046.d: Overwrites $type."
+ foreach i_pair {\
+ {small small} {big small} {small big} {big big} } {
+ # Overwrite (w=write) with four different kinds of data
+ foreach w_pair {\
+ {small small} {big small} {small big} {big big} } {
+
+ # we can only overwrite if key size matches
+ if { [string compare [lindex \
+ $i_pair 0] [lindex $w_pair 0]] != 0} {
+ continue
+ }
+
+ # first write the initial key/data
+ set ret [$dbc put -keyfirst \
+ key_init[lindex $i_pair 0] \
+ data_init[lindex $i_pair 1]]
+ error_check_good \
+ dbc_put:curr:init:$i_pair $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] \
+ data_init[lindex $i_pair 1]
+
+ # Now, try to overwrite: dups not supported in
+ # this db
+ if { [string compare $type key_over] == 0 } {
+ puts "\t\tTest046.d.$i: Key\
+ Overwrite:($i_pair) by ($w_pair)."
+ set ret [eval {$db put} $txn \
+ $"key_init[lindex $i_pair 0]" \
+ $"data_over[lindex $w_pair 1]"]
+ error_check_good \
+ dbput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+ set ret [eval {$db get} $txn \
+ $"key_init[lindex $i_pair 0]"]
+ error_check_bad \
+ db:get:check [llength $ret] 0
+ error_check_good db:get:compare_data \
+ [lindex [lindex $ret 0] 1] \
+ $"data_over[lindex $w_pair 1]"
+ } else {
+ # This is a cursor overwrite
+ puts \
+ "\t\tTest046.d.$i:Curs Overwrite:($i_pair) by ($w_pair)."
+ set ret [$dbc put -current \
+ $"data_over[lindex $w_pair 1]"]
+ error_check_good \
+ dbcput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+ set ret [$dbc get -current]
+ error_check_bad \
+ dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] \
+ $"data_over[lindex $w_pair 1]"
+ }
+ } ;# foreach write pair
+ } ;# foreach initial pair
+ } ;# foreach type big/small
+
+ puts "\tTest046.d.3: Cleanup for next part of test."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_rbtree $method] == 1} {
+ puts "\tSkipping the rest of Test046 for method $method."
+ puts "\tTest046 complete."
+ return
+ }
+
+ puts "\tTest046.e.1: Open db with sorted dups."
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+ set ndups 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.e.2:\
+ Put $nkeys small key/data pairs and $ndups sorted dups."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn [set key]0$i [set data]0$i]
+ } else {
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ }
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # get db order of keys
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put 20 sorted duplicates on key in middle of page
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+
+ set keym $key_set($i)
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn {$keym DUPLICATE_0$i}]
+ } else {
+ set ret [eval {$db put} $txn {$keym DUPLICATE_$i}]
+ }
+ error_check_good db_put:DUP($i) $ret 0
+ }
+
+ puts "\tTest046.e.3: Check duplicate duplicates"
+ set ret [eval {$db put} $txn {$keym DUPLICATE_00}]
+ error_check_good dbput:dupdup [is_substr $ret "DB_KEYEXIST"] 1
+
+ # get dup ordering
+ for {set i 0; set ret [$dbc get -set $keym]} { [llength $ret] != 0} {\
+ set ret [$dbc get -nextdup] } {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put cursor on item in middle of dups
+ set i [expr $ndups/2]
+ set ret [$dbc get -get_both $keym $dup_set($i)]
+ error_check_bad dbc_get:get_both [llength $ret] 0
+
+ puts "\tTest046.f: Deletes by cursor."
+ puts "\t\tTest046.f.1: Delete by cursor, do a DB_NEXT, check cursor."
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:nextdup [lindex [lindex $ret 0] 1] $dup_set([incr i])
+
+ puts "\t\tTest046.f.2: Delete by cursor, do DB_PREV, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [incr i -2]
+ error_check_good dbc_get:prev [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.f.3: Delete by cursor, do DB_CURRENT, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:dbc_get:curr [catch {$dbc get -current} ret] 1
+ #error_check_good \
+ # dbc_get:current:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # restore deleted keys
+ error_check_good db_put:1 [eval {$db put} $txn {$keym $dup_set($i)}] 0
+ error_check_good db_put:2 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ error_check_good db_put:3 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # tested above
+
+ # Reopen database without __db_err, reset cursor
+ error_check_good dbclose [$db close] 0
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret2 [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret2] 0
+ # match
+ error_check_good dbc_get:current/set(match) $ret $ret2
+ # right one?
+ error_check_good \
+ dbc_get:curr/set(matchdup) [lindex [lindex $ret 0] 1] $dup_set(0)
+
+ # cursor is on first dup
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # now on second dup
+ error_check_good dbc_get:next [lindex [lindex $ret 0] 1] $dup_set(1)
+ # check cursor
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbcget:curr(compare) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\tTest046.g: Inserts."
+ puts "\t\tTest046.g.1: Insert by key before cursor."
+ set i 0
+
+ # use "spam" to prevent a duplicate duplicate.
+ set ret [eval {$db put} $txn {$keym $dup_set($i)spam}]
+ error_check_good db_put:before $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:current(post-put) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.2: Insert by key after cursor."
+ set i [expr $i + 2]
+ # use "eggs" to prevent a duplicate duplicate
+ set ret [eval {$db put} $txn {$keym $dup_set($i)eggs}]
+ error_check_good db_put:after $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr(post-put,after) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.3: Insert by curs before/after curs (should fail)."
+ # should return EINVAL (dupsort specified)
+ error_check_good dbc_put:before:catch \
+ [catch {$dbc put -before $dup_set([expr $i -1])} ret] 1
+ error_check_good \
+ dbc_put:before:deleted [is_substr $errorCode "EINVAL"] 1
+ error_check_good dbc_put:after:catch \
+ [catch {$dbc put -after $dup_set([expr $i +2])} ret] 1
+ error_check_good \
+ dbc_put:after:deleted [is_substr $errorCode "EINVAL"] 1
+
+ puts "\tTest046.h: Cursor overwrites."
+ puts "\t\tTest046.h.1: Test that dupsort disallows current overwrite."
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ catch:dbc_put:curr [catch {$dbc put -current DATA_OVERWRITE} ret] 1
+ error_check_good dbc_put:curr:dupsort [is_substr $errorCode EINVAL] 1
+
+ puts "\t\tTest046.h.2: New db (no dupsort)."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} \
+ $oflags -dup $testfile.h]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ for {set i 0} {$i < $nkeys} {incr i} {
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn {key0$i datum0$i}]
+ error_check_good db_put $ret 0
+ } else {
+ set ret [eval {$db put} $txn {key$i datum$i}]
+ error_check_good db_put $ret 0
+ }
+ if { $i == 0 } {
+ for {set j 0} {$j < $ndups} {incr j} {
+ if { $i < 10 } {
+ set keyput key0$i
+ } else {
+ set keyput key$i
+ }
+ if { $j < 10 } {
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum0$j}]
+ } else {
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum$j}]
+ }
+ error_check_good dbput:dup $ret 0
+ }
+ }
+ }
+
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ for {set i 0; set ret [$dbc get -set key00]} {\
+ [llength $ret] != 0} {set ret [$dbc get -nextdup]} {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+ set i 0
+ set keym key0$i
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ dbc_get:set(match) [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ set ret [$dbc get -nextdup]
+ error_check_bad dbc_get:nextdup [llength $ret] 0
+ error_check_good dbc_get:nextdup(match) \
+ [lindex [lindex $ret 0] 1] $dup_set([expr $i + 1])
+
+ puts "\t\tTest046.h.3: Insert by cursor before cursor (DB_BEFORE)."
+ set ret [$dbc put -before BEFOREPUT]
+ error_check_good dbc_put:before $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr:match [lindex [lindex $ret 0] 1] BEFOREPUT
+ # make sure that this is actually a dup w/ dup before
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ error_check_good dbc_get:prev:match \
+ [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -prev]
+ # should not be a dup
+ error_check_bad dbc_get:prev(no_dup) \
+ [lindex [lindex $ret 0] 0] $keym
+
+ puts "\t\tTest046.h.4: Insert by cursor after cursor (DB_AFTER)."
+ set ret [$dbc get -set $keym]
+
+ # delete next 3 when fix
+ #puts "[$dbc get -current]\
+ # [$dbc get -next] [$dbc get -next] [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set $keym]
+
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret [$dbc put -after AFTERPUT]
+ error_check_good dbc_put:after $ret 0
+ #puts [$dbc get -current]
+
+ # delete next 3 when fix
+ #set ret [$dbc get -set $keym]
+ #puts "[$dbc get -current] next: [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set AFTERPUT]
+ #set ret [$dbc get -set $keym]
+ #set ret [$dbc get -next]
+ #puts $ret
+
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:match [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -prev]
+ # now should be on first item (non-dup) of keym
+ error_check_bad dbc_get:prev1 [llength $ret] 0
+ error_check_good \
+ dbc_get:match [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:match2 [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # this is the dup we added previously
+ error_check_good \
+ dbc_get:match3 [lindex [lindex $ret 0] 1] BEFOREPUT
+
+ # now get rid of the dups we added
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev2 [llength $ret] 0
+ error_check_good dbc_del2 [$dbc del] 0
+ # put cursor on first dup item for the rest of test
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good \
+ dbc_get:first:check [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.h.5: Overwrite small by small."
+ set ret [$dbc put -current DATA_OVERWRITE]
+ error_check_good dbc_put:current:overwrite $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/small) \
+ [lindex [lindex $ret 0] 1] DATA_OVERWRITE
+
+ puts "\t\tTest046.h.6: Overwrite small with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite:big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE] 1
+
+ puts "\t\tTest046.h.7: Overwrite big with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE2[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite(2):big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE2] 1
+
+ puts "\t\tTest046.h.8: Overwrite big with small."
+ set ret [$dbc put -current DATA_OVERWRITE2]
+ error_check_good dbc_put:current:overwrite:small $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/small) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_OVERWRITE2] 1
+
+ puts "\tTest046.i: Cleaning up from test."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest046 complete."
+}
diff --git a/storage/bdb/test/test047.tcl b/storage/bdb/test/test047.tcl
new file mode 100644
index 00000000000..61c1d0864c5
--- /dev/null
+++ b/storage/bdb/test/test047.tcl
@@ -0,0 +1,258 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test047.tcl,v 11.19 2002/08/05 19:23:51 sandstro Exp $
+#
+# TEST test047
+# TEST DBcursor->c_get get test with SET_RANGE option.
+proc test047 { method args } {
+	# Exercise DBC->c_get with the -set_range flag in three settings:
+	# plain key/data pairs, on-page duplicate sets, and a large
+	# (likely off-page) duplicate set -- each time deleting the found
+	# item and checking that a repeated -set_range no longer returns it.
+	# NOTE(review): depends on the suite's harness helpers
+	# (error_check_good/bad, cleanup, is_txnenv, ...) sourced elsewhere.
+	source ./include.tcl
+
+	set tstn 047
+	set args [convert_args $method $args]
+
+	# SET_RANGE is a btree-only access-method feature here.
+	if { [is_btree $method] != 1 } {
+		puts "Test$tstn skipping for method $method"
+		return
+	}
+
+	set method "-btree"
+
+	puts "\tTest$tstn: Test of SET_RANGE interface to DB->c_get ($method)."
+
+	set key "key"
+	set data "data"
+	set txn ""
+	set flags ""
+
+	puts "\tTest$tstn.a: Create $method database."
+	set eindex [lsearch -exact $args "-env"]
+	set txnenv 0
+	#
+	# If we are using an env, then testfile should just be the db name.
+	# Otherwise it is the test directory and the name.
+	if { $eindex == -1 } {
+		set testfile $testdir/test0$tstn.db
+		set testfile1 $testdir/test0$tstn.a.db
+		set testfile2 $testdir/test0$tstn.b.db
+		set env NULL
+	} else {
+		set testfile test0$tstn.db
+		set testfile1 test0$tstn.a.db
+		set testfile2 test0$tstn.b.db
+		incr eindex
+		set env [lindex $args $eindex]
+		set txnenv [is_txnenv $env]
+		if { $txnenv == 1 } {
+			append args " -auto_commit "
+		}
+		set testdir [get_home $env]
+	}
+	set t1 $testdir/t1
+	cleanup $testdir $env
+
+	# -dup: all three databases in this test allow duplicates.
+	set oflags "-create -mode 0644 -dup $args $method"
+	set db [eval {berkdb_open} $oflags $testfile]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	set nkeys 20
+	# Fill page w/ small key/data pairs
+	#
+	puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+	for { set i 0 } { $i < $nkeys } { incr i } {
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		set ret [eval {$db put} $txn {$key$i $data$i}]
+		error_check_good dbput $ret 0
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+	}
+
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	# open curs to db
+	set dbc [eval {$db cursor} $txn]
+	error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+	puts "\tTest$tstn.c: Get data with SET_RANGE, then delete by cursor."
+	set i 0
+	set ret [$dbc get -set_range $key$i]
+	error_check_bad dbc_get:set_range [llength $ret] 0
+	set curr $ret
+
+	# delete by cursor, make sure it is gone
+	error_check_good dbc_del [$dbc del] 0
+
+	# -set_range must now land on the next key, not the deleted one.
+	set ret [$dbc get -set_range $key$i]
+	error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+	error_check_bad dbc_get(no-match):set_range $ret $curr
+
+	puts "\tTest$tstn.d: \
+	    Use another cursor to fix item on page, delete by db."
+	set dbcurs2 [eval {$db cursor} $txn]
+	error_check_good db:cursor2 [is_valid_cursor $dbcurs2 $db] TRUE
+
+	# Pin the item with a second cursor while deleting via DB->del.
+	set ret [$dbcurs2 get -set [lindex [lindex $ret 0] 0]]
+	error_check_bad dbc_get(2):set [llength $ret] 0
+	set curr $ret
+	error_check_good db:del [eval {$db del} $txn \
+	    {[lindex [lindex $ret 0] 0]}] 0
+
+	# make sure item is gone
+	set ret [$dbcurs2 get -set_range [lindex [lindex $curr 0] 0]]
+	error_check_bad dbc2_get:set_range [llength $ret] 0
+	error_check_bad dbc2_get:set_range $ret $curr
+
+	puts "\tTest$tstn.e: Close for second part of test, close db/cursors."
+	error_check_good dbc:close [$dbc close] 0
+	error_check_good dbc2:close [$dbcurs2 close] 0
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	error_check_good dbclose [$db close] 0
+
+	# open db
+	set db [eval {berkdb_open} $oflags $testfile1]
+	error_check_good dbopen2 [is_valid_db $db] TRUE
+
+	set nkeys 10
+	puts "\tTest$tstn.f: Fill page with $nkeys pairs, one set of dups."
+	for {set i 0} { $i < $nkeys } {incr i} {
+		# a pair
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		set ret [eval {$db put} $txn {$key$i $data$i}]
+		error_check_good dbput($i) $ret 0
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+	}
+
+	set j 0
+	for {set i 0} { $i < $nkeys } {incr i} {
+		# a dup set for same 1 key
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		set ret [eval {$db put} $txn {$key$i DUP_$data$i}]
+		error_check_good dbput($i):dup $ret 0
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+	}
+
+	puts "\tTest$tstn.g: \
+	    Get dups key w/ SET_RANGE, pin onpage with another cursor."
+	set i 0
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	set dbc [eval {$db cursor} $txn]
+	error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+	set ret [$dbc get -set_range $key$i]
+	error_check_bad dbc_get:set_range [llength $ret] 0
+
+	set dbc2 [eval {$db cursor} $txn]
+	error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
+	set ret2 [$dbc2 get -set_range $key$i]
+	error_check_bad dbc2_get:set_range [llength $ret] 0
+
+	# Both cursors landed via -set_range; they must agree.
+	error_check_good dbc_compare $ret $ret2
+	puts "\tTest$tstn.h: \
+	    Delete duplicates' key, use SET_RANGE to get next dup."
+	set ret [$dbc2 del]
+	error_check_good dbc2_del $ret 0
+	set ret [$dbc get -set_range $key$i]
+	error_check_bad dbc_get:set_range [llength $ret] 0
+	error_check_bad dbc_get:set_range $ret $ret2
+
+	error_check_good dbc_close [$dbc close] 0
+	error_check_good dbc2_close [$dbc2 close] 0
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	error_check_good db_close [$db close] 0
+
+	# Third database: one key with $ndups duplicates -- presumably
+	# large enough to push the dup set off-page; TODO confirm vs pagesize.
+	set db [eval {berkdb_open} $oflags $testfile2]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	set nkeys 10
+	set ndups 1000
+
+	puts "\tTest$tstn.i: Fill page with $nkeys pairs and $ndups dups."
+	for {set i 0} { $i < $nkeys } { incr i} {
+		# a pair
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		set ret [eval {$db put} $txn {$key$i $data$i}]
+		error_check_good dbput $ret 0
+
+		# dups for single pair
+		if { $i == 0} {
+			for {set j 0} { $j < $ndups } { incr j } {
+				set ret [eval {$db put} $txn \
+				    {$key$i DUP_$data$i:$j}]
+				error_check_good dbput:dup $ret 0
+			}
+		}
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+	}
+	set i 0
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	set dbc [eval {$db cursor} $txn]
+	error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+	set dbc2 [eval {$db cursor} $txn]
+	error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
+	puts "\tTest$tstn.j: \
+	    Get key of first dup with SET_RANGE, fix with 2 curs."
+	set ret [$dbc get -set_range $key$i]
+	error_check_bad dbc_get:set_range [llength $ret] 0
+
+	set ret2 [$dbc2 get -set_range $key$i]
+	error_check_bad dbc2_get:set_range [llength $ret] 0
+	set curr $ret2
+
+	error_check_good dbc_compare $ret $ret2
+
+	puts "\tTest$tstn.k: Delete item by cursor, use SET_RANGE to verify."
+	set ret [$dbc2 del]
+	error_check_good dbc2_del $ret 0
+	set ret [$dbc get -set_range $key$i]
+	error_check_bad dbc_get:set_range [llength $ret] 0
+	error_check_bad dbc_get:set_range $ret $curr
+
+	puts "\tTest$tstn.l: Cleanup."
+	error_check_good dbc_close [$dbc close] 0
+	error_check_good dbc2_close [$dbc2 close] 0
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	error_check_good db_close [$db close] 0
+
+	puts "\tTest$tstn complete."
+}
diff --git a/storage/bdb/test/test048.tcl b/storage/bdb/test/test048.tcl
new file mode 100644
index 00000000000..2131f6f553c
--- /dev/null
+++ b/storage/bdb/test/test048.tcl
@@ -0,0 +1,170 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test048.tcl,v 11.18 2002/07/29 20:27:49 sandstro Exp $
+#
+# TEST test048
+# TEST Cursor stability across Btree splits.
+proc test048 { method args } {
+	# Open one cursor per initial key, force a btree split by inserting
+	# many filler keys, and verify every cursor still returns its
+	# original pair; then delete the fillers to force a reverse split
+	# and verify the cursors again.
+	global errorCode
+	source ./include.tcl
+
+	set tstn 048
+	set args [convert_args $method $args]
+
+	if { [is_btree $method] != 1 } {
+		puts "Test$tstn skipping for method $method."
+		return
+	}
+	# With very large pages the filler keys may never split the tree,
+	# so the split assertion below would be meaningless.
+	set pgindex [lsearch -exact $args "-pagesize"]
+	if { $pgindex != -1 } {
+		incr pgindex
+		if { [lindex $args $pgindex] > 8192 } {
+			puts "Test048: Skipping for large pagesizes"
+			return
+		}
+	}
+
+	set method "-btree"
+
+	puts "\tTest$tstn: Test of cursor stability across btree splits."
+
+	set key "key"
+	set data "data"
+	set txn ""
+	set flags ""
+
+	puts "\tTest$tstn.a: Create $method database."
+	set txnenv 0
+	set eindex [lsearch -exact $args "-env"]
+	#
+	# If we are using an env, then testfile should just be the db name.
+	# Otherwise it is the test directory and the name.
+	if { $eindex == -1 } {
+		set testfile $testdir/test0$tstn.db
+		set env NULL
+	} else {
+		set testfile test0$tstn.db
+		incr eindex
+		set env [lindex $args $eindex]
+		set txnenv [is_txnenv $env]
+		if { $txnenv == 1 } {
+			append args " -auto_commit "
+		}
+		set testdir [get_home $env]
+	}
+	set t1 $testdir/t1
+	cleanup $testdir $env
+
+	set oflags "-create -mode 0644 $args $method"
+	set db [eval {berkdb_open} $oflags $testfile]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	set nkeys 5
+	# Fill page w/ small key/data pairs, keep at leaf
+	#
+	puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+	for { set i 0 } { $i < $nkeys } { incr i } {
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		set ret [eval {$db put} $txn {key000$i $data$i}]
+		error_check_good dbput $ret 0
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+	}
+
+	# get db ordering, set cursors
+	puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	for {set i 0; set ret [$db get key000$i]} {\
+	    $i < $nkeys && [llength $ret] != 0} {\
+	    incr i; set ret [$db get key000$i]} {
+		set key_set($i) [lindex [lindex $ret 0] 0]
+		set data_set($i) [lindex [lindex $ret 0] 1]
+		set dbc [eval {$db cursor} $txn]
+		set dbc_set($i) $dbc
+		error_check_good db_cursor:$i \
+		    [is_valid_cursor $dbc_set($i) $db] TRUE
+		set ret [$dbc_set($i) get -set $key_set($i)]
+		error_check_bad dbc_set($i)_get:set [llength $ret] 0
+	}
+
+	# if mkeys is above 1000, need to adjust below for lexical order
+	set mkeys 1000
+	puts "\tTest$tstn.d: Add $mkeys pairs to force split."
+	# Key padding (key0NN / key00N / key000N) keeps lexical order
+	# matching numeric order up to $mkeys.
+	for {set i $nkeys} { $i < $mkeys } { incr i } {
+		if { $i >= 100 } {
+			set ret [eval {$db put} $txn {key0$i $data$i}]
+		} elseif { $i >= 10 } {
+			set ret [eval {$db put} $txn {key00$i $data$i}]
+		} else {
+			set ret [eval {$db put} $txn {key000$i $data$i}]
+		}
+		error_check_good dbput:more $ret 0
+	}
+
+	puts "\tTest$tstn.e: Make sure split happened."
+	# XXX We cannot call stat with active txns or we deadlock.
+	if { $txnenv != 1 } {
+		error_check_bad stat:check-split [is_substr [$db stat] \
+		    "{{Internal pages} 0}"] 1
+	}
+
+	puts "\tTest$tstn.f: Check to see that cursors maintained reference."
+	for {set i 0} { $i < $nkeys } {incr i} {
+		set ret [$dbc_set($i) get -current]
+		error_check_bad dbc$i:get:current [llength $ret] 0
+		set ret2 [$dbc_set($i) get -set $key_set($i)]
+		error_check_bad dbc$i:get:set [llength $ret2] 0
+		error_check_good dbc$i:get(match) $ret $ret2
+	}
+
+	puts "\tTest$tstn.g: Delete added keys to force reverse split."
+	for {set i $nkeys} { $i < $mkeys } { incr i } {
+		if { $i >= 100 } {
+			error_check_good db_del:$i \
+			    [eval {$db del} $txn {key0$i}] 0
+		} elseif { $i >= 10 } {
+			error_check_good db_del:$i \
+			    [eval {$db del} $txn {key00$i}] 0
+		} else {
+			error_check_good db_del:$i \
+			    [eval {$db del} $txn {key000$i}] 0
+		}
+	}
+
+	puts "\tTest$tstn.h: Verify cursor reference."
+	for {set i 0} { $i < $nkeys } {incr i} {
+		set ret [$dbc_set($i) get -current]
+		error_check_bad dbc$i:get:current [llength $ret] 0
+		set ret2 [$dbc_set($i) get -set $key_set($i)]
+		error_check_bad dbc$i:get:set [llength $ret2] 0
+		error_check_good dbc$i:get(match) $ret $ret2
+	}
+
+	puts "\tTest$tstn.i: Cleanup."
+	# close cursors
+	for {set i 0} { $i < $nkeys } {incr i} {
+		error_check_good dbc_close:$i [$dbc_set($i) close] 0
+	}
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	# Cursors are closed and the txn committed, so stat is safe here.
+	puts "\tTest$tstn.j: Verify reverse split."
+	error_check_good stat:check-reverse_split [is_substr [$db stat] \
+	    "{{Internal pages} 0}"] 1
+
+	error_check_good dbclose [$db close] 0
+
+	puts "\tTest$tstn complete."
+}
diff --git a/storage/bdb/test/test049.tcl b/storage/bdb/test/test049.tcl
new file mode 100644
index 00000000000..3040727c469
--- /dev/null
+++ b/storage/bdb/test/test049.tcl
@@ -0,0 +1,184 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test049.tcl,v 11.21 2002/05/22 15:42:53 sue Exp $
+#
+# TEST test049
+# TEST Cursor operations on uninitialized cursors.
+proc test049 { method args } {
+	# Drive every dbc->get/put/del variant against a cursor that has
+	# never been positioned.  Positional gets/puts must fail with
+	# EINVAL; -keyfirst/-keylast and -before/-after succeed or are
+	# skipped depending on the access method.
+	global errorInfo
+	global errorCode
+	source ./include.tcl
+
+	set tstn 049
+	set renum [is_rrecno $method]
+
+	set args [convert_args $method $args]
+	set omethod [convert_method $method]
+
+	puts "\tTest$tstn: Test of cursor routines with uninitialized cursors."
+
+	set key "key"
+	set data "data"
+	set txn ""
+	set flags ""
+	set rflags ""
+
+	# Record-based methods use bare record numbers as keys.
+	if { [is_record_based $method] == 1 } {
+		set key ""
+	}
+
+	puts "\tTest$tstn.a: Create $method database."
+	set txnenv 0
+	set eindex [lsearch -exact $args "-env"]
+	#
+	# If we are using an env, then testfile should just be the db name.
+	# Otherwise it is the test directory and the name.
+	if { $eindex == -1 } {
+		set testfile $testdir/test0$tstn.db
+		set env NULL
+	} else {
+		set testfile test0$tstn.db
+		incr eindex
+		set env [lindex $args $eindex]
+		set txnenv [is_txnenv $env]
+		if { $txnenv == 1 } {
+			append args " -auto_commit "
+		}
+		set testdir [get_home $env]
+	}
+	set t1 $testdir/t1
+	cleanup $testdir $env
+
+	set oflags "-create -mode 0644 $rflags $omethod $args"
+	if { [is_record_based $method] == 0 && [is_rbtree $method] != 1 } {
+		append oflags " -dup"
+	}
+	# _noerr: expected errors are checked via $errorCode, not stderr.
+	set db [eval {berkdb_open_noerr} $oflags $testfile]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	set nkeys 10
+	puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+	for { set i 1 } { $i <= $nkeys } { incr i } {
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		set ret [eval {$db put} $txn {$key$i $data$i}]
+		error_check_good dbput:$i $ret 0
+		if { $i == 1 } {
+			for {set j 0} { $j < [expr $nkeys / 2]} {incr j} {
+				set ret [eval {$db put} $txn \
+				    {$key$i DUPLICATE$j}]
+				error_check_good dbput:dup:$j $ret 0
+			}
+		}
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+	}
+
+	# DBC GET
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	set dbc_u [eval {$db cursor} $txn]
+	error_check_good db:cursor [is_valid_cursor $dbc_u $db] TRUE
+
+	puts "\tTest$tstn.c: Test dbc->get interfaces..."
+	set i 0
+	foreach flag { current first last next prev nextdup} {
+		puts "\t\t...dbc->get($flag)"
+		catch {$dbc_u get -$flag} ret
+		error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+	}
+
+	foreach flag { set set_range get_both} {
+		puts "\t\t...dbc->get($flag)"
+		if { [string compare $flag get_both] == 0} {
+			catch {$dbc_u get -$flag $key$i data0} ret
+		} else {
+			catch {$dbc_u get -$flag $key$i} ret
+		}
+		error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+	}
+
+	puts "\t\t...dbc->get(current, partial)"
+	catch {$dbc_u get -current -partial {0 0}} ret
+	error_check_good dbc:get:partial [is_substr $errorCode EINVAL] 1
+
+	puts "\t\t...dbc->get(current, rmw)"
+	catch {$dbc_u get -rmw -current } ret
+	error_check_good dbc_get:rmw [is_substr $errorCode EINVAL] 1
+
+	puts "\tTest$tstn.d: Test dbc->put interface..."
+	# partial...depends on another
+	foreach flag { after before current keyfirst keylast } {
+		puts "\t\t...dbc->put($flag)"
+		if { [string match key* $flag] == 1 } {
+			if { [is_record_based $method] == 1 } {
+				# keyfirst/keylast not allowed in recno
+				puts "\t\t...Skipping dbc->put($flag) for $method."
+				continue
+			} else {
+				# keyfirst/last should succeed
+				puts "\t\t...dbc->put($flag)...should succeed for $method"
+				error_check_good dbcput:$flag \
+				    [$dbc_u put -$flag $key$i data0] 0
+
+				# now uninitialize cursor
+				error_check_good dbc_close [$dbc_u close] 0
+				set dbc_u [eval {$db cursor} $txn]
+				error_check_good \
+				    db_cursor [is_substr $dbc_u $db] 1
+			}
+		} elseif { [string compare $flag before ] == 0 ||
+		    [string compare $flag after ] == 0 } {
+			if { [is_record_based $method] == 0 &&
+			    [is_rbtree $method] == 0} {
+				set ret [$dbc_u put -$flag data0]
+				error_check_good "$dbc_u:put:-$flag" $ret 0
+			} elseif { $renum == 1 } {
+				# Renumbering recno will return a record number
+				set currecno \
+				    [lindex [lindex [$dbc_u get -current] 0] 0]
+				set ret [$dbc_u put -$flag data0]
+				if { [string compare $flag after] == 0 } {
+					error_check_good "$dbc_u put $flag" \
+					    $ret [expr $currecno + 1]
+				} else {
+					error_check_good "$dbc_u put $flag" \
+					    $ret $currecno
+				}
+			} else {
+				puts "\t\tSkipping $flag for $method"
+			}
+		} else {
+			set ret [$dbc_u put -$flag data0]
+			error_check_good "$dbc_u:put:-$flag" $ret 0
+		}
+	}
+	# and partial
+	puts "\t\t...dbc->put(partial)"
+	catch {$dbc_u put -partial {0 0} $key$i $data$i} ret
+	error_check_good dbc_put:partial [is_substr $errorCode EINVAL] 1
+
+	# XXX dbc->dup, db->join (dbc->get join_item)
+	# dbc del
+	puts "\tTest$tstn.e: Test dbc->del interface."
+	catch {$dbc_u del} ret
+	error_check_good dbc_del [is_substr $errorCode EINVAL] 1
+
+	error_check_good dbc_close [$dbc_u close] 0
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	error_check_good db_close [$db close] 0
+
+	puts "\tTest$tstn complete."
+}
diff --git a/storage/bdb/test/test050.tcl b/storage/bdb/test/test050.tcl
new file mode 100644
index 00000000000..dfaeddd035c
--- /dev/null
+++ b/storage/bdb/test/test050.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test050.tcl,v 11.21 2002/05/24 14:15:13 bostic Exp $
+#
+# TEST test050
+# TEST Overwrite test of small/big key/data with cursor checks for Recno.
+proc test050 { method args } {
+	# Renumbering-recno-only test: cursor puts with -before/-after/
+	# -current (including on uninitialized and deleted cursors),
+	# verification that records are renumbered, then small/big
+	# overwrite combinations both by key and by cursor.
+	global alphabet
+	global errorInfo
+	global errorCode
+	source ./include.tcl
+
+	set tstn 050
+
+	set args [convert_args $method $args]
+	set omethod [convert_method $method]
+
+	if { [is_rrecno $method] != 1 } {
+		puts "Test$tstn skipping for method $method."
+		return
+	}
+
+	puts "\tTest$tstn:\
+	    Overwrite test with cursor and small/big key/data ($method)."
+
+	set data "data"
+	set txn ""
+	set flags ""
+
+	puts "\tTest$tstn: Create $method database."
+	set txnenv 0
+	set eindex [lsearch -exact $args "-env"]
+	#
+	# If we are using an env, then testfile should just be the db name.
+	# Otherwise it is the test directory and the name.
+	if { $eindex == -1 } {
+		set testfile $testdir/test0$tstn.db
+		set env NULL
+	} else {
+		set testfile test0$tstn.db
+		incr eindex
+		set env [lindex $args $eindex]
+		set txnenv [is_txnenv $env]
+		if { $txnenv == 1 } {
+			append args " -auto_commit "
+		}
+		set testdir [get_home $env]
+	}
+	set t1 $testdir/t1
+	cleanup $testdir $env
+
+	set oflags "-create -mode 0644 $args $omethod"
+	# _noerr: expected errors are checked via $errorCode, not stderr.
+	set db [eval {berkdb_open_noerr} $oflags $testfile]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	# keep nkeys even
+	set nkeys 20
+
+	# Fill page w/ small key/data pairs
+	#
+	puts "\tTest$tstn: Fill page with $nkeys small key/data pairs."
+	for { set i 1 } { $i <= $nkeys } { incr i } {
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		set ret [eval {$db put} $txn {$i [chop_data $method $data$i]}]
+		error_check_good dbput $ret 0
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+	}
+
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	# open curs to db
+	set dbc [eval {$db cursor} $txn]
+	error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+	# get db order of keys
+	for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+	    set ret [$dbc get -next]} {
+		set key_set($i) [lindex [lindex $ret 0] 0]
+		set data_set($i) [lindex [lindex $ret 0] 1]
+		incr i
+	}
+
+	# verify ordering: should be unnecessary, but hey, why take chances?
+	# key_set is zero indexed but keys start at 1
+	for {set i 0} { $i < $nkeys } {incr i} {
+		error_check_good \
+		    verify_order:$i $key_set($i) [pad_data $method [expr $i+1]]
+	}
+
+	puts "\tTest$tstn.a: Inserts before/after by cursor."
+	puts "\t\tTest$tstn.a.1:\
+	    Insert with uninitialized cursor (should fail)."
+	# Close and reopen the cursor so it has no position.
+	error_check_good dbc_close [$dbc close] 0
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	set dbc [eval {$db cursor} $txn]
+	error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+	catch {$dbc put -before DATA1} ret
+	error_check_good dbc_put:before:uninit [is_substr $errorCode EINVAL] 1
+
+	catch {$dbc put -after DATA2} ret
+	error_check_good dbc_put:after:uninit [is_substr $errorCode EINVAL] 1
+
+	puts "\t\tTest$tstn.a.2: Insert with deleted cursor (should succeed)."
+	set ret [$dbc get -first]
+	error_check_bad dbc_get:first [llength $ret] 0
+	error_check_good dbc_del [$dbc del] 0
+	set ret [$dbc put -current DATAOVER1]
+	error_check_good dbc_put:current:deleted $ret 0
+
+	puts "\t\tTest$tstn.a.3: Insert by cursor before cursor (DB_BEFORE)."
+	set currecno [lindex [lindex [$dbc get -current] 0] 0]
+	set ret [$dbc put -before DATAPUTBEFORE]
+	# Renumbering recno: put -before returns the new record number,
+	# which takes over the current record's number.
+	error_check_good dbc_put:before $ret $currecno
+	set old1 [$dbc get -next]
+	error_check_bad dbc_get:next [llength $old1] 0
+	error_check_good \
+	    dbc_get:next(compare) [lindex [lindex $old1 0] 1] DATAOVER1
+
+	puts "\t\tTest$tstn.a.4: Insert by cursor after cursor (DB_AFTER)."
+	set ret [$dbc get -first]
+	error_check_bad dbc_get:first [llength $ret] 0
+	error_check_good dbc_get:first [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+	set currecno [lindex [lindex [$dbc get -current] 0] 0]
+	set ret [$dbc put -after DATAPUTAFTER]
+	error_check_good dbc_put:after $ret [expr $currecno + 1]
+	set ret [$dbc get -prev]
+	error_check_bad dbc_get:prev [llength $ret] 0
+	error_check_good \
+	    dbc_get:prev [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+
+	puts "\t\tTest$tstn.a.5: Verify that all keys have been renumbered."
+	# should be $nkeys + 2 keys, starting at 1
+	for {set i 1; set ret [$dbc get -first]} { \
+	    $i <= $nkeys && [llength $ret] != 0 } {\
+	    incr i; set ret [$dbc get -next]} {
+		error_check_good check_renumber $i [lindex [lindex $ret 0] 0]
+	}
+
+	# tested above
+
+	puts "\tTest$tstn.b: Overwrite tests (cursor and key)."
+	# For the next part of the test, we need a db with no dups to test
+	# overwrites
+	#
+	# we should have ($nkeys + 2) keys, ordered:
+	# DATAPUTBEFORE, DATAPUTAFTER, DATAOVER1, data1, ..., data$nkeys
+	#
+	# Prepare cursor on item
+	#
+	set ret [$dbc get -first]
+	error_check_bad dbc_get:first [llength $ret] 0
+
+	# Prepare unique big/small values for an initial
+	# and an overwrite set of data
+	# NOTE(review): "repeat alphabet 250" passes the literal string
+	# "alphabet", not $alphabet -- presumably intentional filler; confirm.
+	set databig DATA_BIG_[repeat alphabet 250]
+	set datasmall DATA_SMALL
+
+	# Now, we want to overwrite data:
+	# by key and by cursor
+	# 1. small by small
+	# 2. small by big
+	# 3. big by small
+	# 4. big by big
+	#
+	set i 0
+	# Do all overwrites for key and cursor
+	foreach type { by_key by_cursor } {
+		incr i
+		puts "\tTest$tstn.b.$i: Overwrites $type."
+		foreach pair { {small small} \
+		    {small big} {big small} {big big} } {
+			# put in initial type
+			set data $data[lindex $pair 0]
+			set ret [$dbc put -current $data]
+			error_check_good dbc_put:curr:init:($pair) $ret 0
+
+			# Now, try to overwrite: dups not supported in this db
+			if { [string compare $type by_key] == 0 } {
+				puts "\t\tTest$tstn.b.$i:\
+				    Overwrite:($pair):$type"
+				set ret [eval {$db put} $txn \
+				    1 {OVER$pair$data[lindex $pair 1]}]
+				error_check_good dbput:over:($pair) $ret 0
+			} else {
+				# This is a cursor overwrite
+				puts "\t\tTest$tstn.b.$i:\
+				    Overwrite:($pair) by cursor."
+				set ret [$dbc put \
+				    -current OVER$pair$data[lindex $pair 1]]
+				error_check_good dbcput:over:($pair) $ret 0
+			}
+		} ;# foreach pair
+	} ;# foreach type key/cursor
+
+	puts "\tTest$tstn.c: Cleanup and close cursor."
+	error_check_good dbc_close [$dbc close] 0
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	error_check_good db_close [$db close] 0
+
+}
diff --git a/storage/bdb/test/test051.tcl b/storage/bdb/test/test051.tcl
new file mode 100644
index 00000000000..830b7630788
--- /dev/null
+++ b/storage/bdb/test/test051.tcl
@@ -0,0 +1,219 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test051.tcl,v 11.21 2002/05/24 13:43:24 sue Exp $
+#
+# TEST test051
+# TEST Fixed-length record Recno test.
+# TEST 0. Test various flags (legal and illegal) to open
+# TEST 1. Test partial puts where dlen != size (should fail)
+# TEST 2. Partial puts for existent record -- replaces at beg, mid, and
+# TEST end of record, as well as full replace
+proc test051 { method { args "" } } {
+	global fixed_len
+	global errorInfo
+	global errorCode
+	source ./include.tcl
+
+	set args [convert_args $method $args]
+	set omethod [convert_method $method]
+
+	puts "Test051: Test of the fixed length records."
+	if { [is_fixed_length $method] != 1 } {
+		puts "Test051: skipping for method $method"
+		return
+	}
+
+	# Create the database and open the dictionary
+	set txnenv 0
+	set eindex [lsearch -exact $args "-env"]
+	#
+	# If we are using an env, then testfile should just be the db name.
+	# Otherwise it is the test directory and the name.
+	if { $eindex == -1 } {
+		set testfile $testdir/test051.db
+		set testfile1 $testdir/test051a.db
+		set env NULL
+	} else {
+		set testfile test051.db
+		set testfile1 test051a.db
+		incr eindex
+		set env [lindex $args $eindex]
+		set txnenv [is_txnenv $env]
+		if { $txnenv == 1 } {
+			append args " -auto_commit "
+		}
+		set testdir [get_home $env]
+	}
+	cleanup $testdir $env
+	set oflags "-create -mode 0644 $args"
+
+	# Test various flags (legal and illegal) to open
+	puts "\tTest051.a: Test correct flag behavior on open."
+	set errorCode NONE
+	foreach f { "-dup" "-dup -dupsort" "-recnum" } {
+		puts "\t\tTest051.a: Test flag $f"
+		set stat [catch {eval {berkdb_open_noerr} $oflags $f $omethod \
+		    $testfile} ret]
+		error_check_good dbopen:flagtest:catch $stat 1
+		error_check_good \
+		    dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+		set errorCode NONE
+	}
+	set f "-renumber"
+	puts "\t\tTest051.a: Test $f"
+	if { [is_frecno $method] == 1 } {
+		set db [eval {berkdb_open} $oflags $f $omethod $testfile]
+		error_check_good dbopen:flagtest:$f [is_valid_db $db] TRUE
+		$db close
+	} else {
+		error_check_good \
+		    dbopen:flagtest:catch [catch {eval {berkdb_open_noerr}\
+		    $oflags $f $omethod $testfile} ret] 1
+		error_check_good \
+		    dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+	}
+
+	# Test partial puts where dlen != size (should fail)
+	# it is an error to specify a partial put w/ different
+	# dlen and size in fixed length recno/queue
+	set key 1
+	set data ""
+	set txn ""
+	set test_char "a"
+
+	set db [eval {berkdb_open_noerr} $oflags $omethod $testfile1]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	puts "\tTest051.b: Partial puts with dlen != size."
+	foreach dlen { 1 16 20 32 } {
+		foreach doff { 0 10 20 32 } {
+			# dlen < size: the data is one byte longer than dlen
+			puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \
+			    size: [expr $dlen+1]"
+			set data [repeat $test_char [expr $dlen + 1]]
+			error_check_good catch:put 1 [catch {eval {$db put -partial \
+			    [list $doff $dlen]} $txn {$key $data}} ret]
+			#
+			# We don't get back the server error string just
+			# the result.
+			#
+			if { $eindex == -1 } {
+				error_check_good "dbput:partial: dlen < size" \
+				    [is_substr $errorInfo "Length improper"] 1
+			} else {
+				error_check_good "dbput:partial: dlen < size" \
+				    [is_substr $errorCode "EINVAL"] 1
+			}
+
+			# dlen > size: the data is one byte shorter than dlen
+			puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \
+			    size: [expr $dlen-1]"
+			set data [repeat $test_char [expr $dlen - 1]]
+			error_check_good catch:put 1 [catch {eval {$db put -partial \
+			    [list $doff $dlen]} $txn {$key $data}} ret]
+			if { $eindex == -1 } {
+				error_check_good "dbput:partial: dlen > size" \
+				    [is_substr $errorInfo "Length improper"] 1
+			} else {
+				error_check_good "dbput:partial: dlen > size" \
+				    [is_substr $errorCode "EINVAL"] 1
+			}
+		}
+	}
+
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	$db close
+
+	# Partial puts for existent record -- replaces at beg, mid, and
+	# end of record, as well as full replace
+	puts "\tTest051.f: Partial puts within existent record."
+	set db [eval {berkdb_open} $oflags $omethod $testfile]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	puts "\t\tTest051.f: First try a put and then a full replace."
+	set data [repeat "a" $fixed_len]
+
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	set ret [eval {$db put} $txn {1 $data}]
+	error_check_good dbput $ret 0
+	set ret [eval {$db get} $txn {-recno 1}]
+	error_check_good dbget $data [lindex [lindex $ret 0] 1]
+
+	set data [repeat "b" $fixed_len]
+	set ret [eval {$db put -partial [list 0 $fixed_len]} $txn {1 $data}]
+	error_check_good dbput $ret 0
+	set ret [eval {$db get} $txn {-recno 1}]
+	error_check_good dbget $data [lindex [lindex $ret 0] 1]
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+
+	set data "InitialData"
+	set pdata "PUT"
+	set dlen [string length $pdata]
+	set ilen [string length $data]
+	set mid [expr $ilen/2]
+
+	# put initial data
+	set key 0
+
+	set offlist [list 0 $mid [expr $ilen -1] [expr $fixed_len - $dlen]]
+	puts "\t\tTest051.g: Now replace at different offsets ($offlist)."
+	foreach doff $offlist {
+		incr key
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		set ret [eval {$db put} $txn {$key $data}]
+		error_check_good dbput:init $ret 0
+
+		puts "\t\t Test051.g: Replace at offset $doff."
+		set ret [eval {$db put -partial [list $doff $dlen]} $txn \
+		    {$key $pdata}]
+		error_check_good dbput:partial $ret 0
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+
+		if { $doff == 0} {
+			set beg ""
+			set end [string range $data $dlen $ilen]
+		} else {
+			set beg [string range $data 0 [expr $doff - 1]]
+			set end [string range $data [expr $doff + $dlen] $ilen]
+		}
+		if { $doff > $ilen } {
+			# have to put padding between record and inserted
+			# string
+			set newdata [format %s%s $beg $end]
+			set diff [expr $doff - $ilen]
+			set nlen [string length $newdata]
+			set newdata [binary \
+			    format a[set nlen]x[set diff]a$dlen $newdata $pdata]
+		} else {
+			set newdata [make_fixed_length \
+			    frecno [format %s%s%s $beg $pdata $end]]
+		}
+		set ret [$db get -recno $key]
+		error_check_good compare($newdata,$ret) \
+		    [binary_compare [lindex [lindex $ret 0] 1] $newdata] 0
+	}
+
+	$db close
+}
diff --git a/storage/bdb/test/test052.tcl b/storage/bdb/test/test052.tcl
new file mode 100644
index 00000000000..1f386449630
--- /dev/null
+++ b/storage/bdb/test/test052.tcl
@@ -0,0 +1,276 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test052.tcl,v 11.16 2002/07/08 20:48:58 sandstro Exp $
+#
+# TEST test052
+# TEST Renumbering record Recno test.
+proc test052 { method args } {
+	global alphabet
+	global errorInfo
+	global errorCode
+	source ./include.tcl
+
+	set args [convert_args $method $args]
+	set omethod [convert_method $method]
+
+	puts "Test052: Test of renumbering recno."
+	if { [is_rrecno $method] != 1} {
+		puts "Test052: skipping for method $method."
+		return
+	}
+
+	set data "data"
+	set txn ""
+	set flags ""
+
+	puts "\tTest052: Create $method database."
+	set txnenv 0
+	set eindex [lsearch -exact $args "-env"]
+	#
+	# If we are using an env, then testfile should just be the db name.
+	# Otherwise it is the test directory and the name.
+	if { $eindex == -1 } {
+		set testfile $testdir/test052.db
+		set env NULL
+	} else {
+		set testfile test052.db
+		incr eindex
+		set env [lindex $args $eindex]
+		set txnenv [is_txnenv $env]
+		if { $txnenv == 1 } {
+			append args " -auto_commit "
+		}
+		set testdir [get_home $env]
+	}
+	set t1 $testdir/t1
+	cleanup $testdir $env
+
+	set oflags "-create -mode 0644 $args $omethod"
+	set db [eval {berkdb_open} $oflags $testfile]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	# Keep nkeys even: the delete tests below pivot on record nkeys/2.
+	set nkeys 20
+
+	# Fill page w/ small key/data pairs
+	puts "\tTest052: Fill page with $nkeys small key/data pairs."
+	for { set i 1 } { $i <= $nkeys } { incr i } {
+		if { $txnenv == 1 } {
+			set t [$env txn]
+			error_check_good txn [is_valid_txn $t $env] TRUE
+			set txn "-txn $t"
+		}
+		set ret [eval {$db put} $txn {$i $data$i}]
+		error_check_good dbput $ret 0
+		if { $txnenv == 1 } {
+			error_check_good txn [$t commit] 0
+		}
+	}
+
+	# Open a cursor on the db (inside the transaction when transactional).
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	set dbc [eval {$db cursor} $txn]
+	error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+	# Record keys and data in cursor order into keys() and darray().
+	for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+	    set ret [$dbc get -next]} {
+		set keys($i) [lindex [lindex $ret 0] 0]
+		set darray($i) [lindex [lindex $ret 0] 1]
+		incr i
+	}
+
+	puts "\tTest052: Deletes by key."
+	puts "\t Test052.a: Get data with SET, then delete before cursor."
+	# get key in middle of page, call this the nth set curr to it
+	set i [expr $nkeys/2]
+	set k $keys($i)
+	set ret [$dbc get -set $k]
+	error_check_bad dbc_get:set [llength $ret] 0
+	error_check_good dbc_get:set [lindex [lindex $ret 0] 1] $darray($i)
+
+	# delete by key before current
+	set i [incr i -1]
+	error_check_good db_del:before [eval {$db del} $txn {$keys($i)}] 0
+	# With renumber, the cursor's data is unchanged but its key drops by one.
+	set i [incr i +1]
+	error_check_good dbc:data \
+	    [lindex [lindex [$dbc get -current] 0] 1] $darray($i)
+	error_check_good dbc:keys \
+	    [lindex [lindex [$dbc get -current] 0] 0] $keys([expr $nkeys/2 - 1])
+
+	puts "\t Test052.b: Delete cursor item by key."
+	set i [expr $nkeys/2 ]
+
+	set ret [$dbc get -set $keys($i)]
+	error_check_bad dbc:get [llength $ret] 0
+	error_check_good dbc:get:curs [lindex [lindex $ret 0] 1] \
+	    $darray([expr $i + 1])
+	error_check_good db_del:curr [eval {$db del} $txn {$keys($i)}] 0
+	set ret [$dbc get -current]
+
+	# After a delete, cursor should return DB_NOTFOUND.
+	error_check_good dbc:get:key [llength [lindex [lindex $ret 0] 0]] 0
+	error_check_good dbc:get:data [llength [lindex [lindex $ret 0] 1]] 0
+
+	# And the item after the cursor should now be
+	# key: $nkeys/2, data: $nkeys/2 + 2
+	set ret [$dbc get -next]
+	error_check_bad dbc:getnext [llength $ret] 0
+	error_check_good dbc:getnext:data \
+	    [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+	error_check_good dbc:getnext:keys \
+	    [lindex [lindex $ret 0] 0] $keys($i)
+
+	puts "\t Test052.c: Delete item after cursor."
+	# should be { keys($nkeys/2), darray($nkeys/2 + 2) }
+	set i [expr $nkeys/2]
+	# deleting data for key after current (key $nkeys/2 + 1)
+	error_check_good db_del [eval {$db del} $txn {$keys([expr $i + 1])}] 0
+
+	# Current item must be untouched by a delete after it.
+	set ret [$dbc get -current]
+	error_check_bad dbc:get:current [llength $ret] 0
+	error_check_good dbc:get:keys [lindex [lindex $ret 0] 0] \
+	    $keys($i)
+	error_check_good dbc:get:data [lindex [lindex $ret 0] 1] \
+	    $darray([expr $i + 2])
+
+	puts "\tTest052: Deletes by cursor."
+	puts "\t Test052.d: Delete, do DB_NEXT."
+	set i 1
+	set ret [$dbc get -first]
+	error_check_bad dbc_get:first [llength $ret] 0
+	error_check_good dbc_get:first [lindex [lindex $ret 0] 1] $darray($i)
+	error_check_good dbc_del [$dbc del] 0
+	set ret [$dbc get -current]
+	error_check_bad dbc_get:current [llength $ret] 0
+	error_check_good dbc:getcurrent:key \
+	    [llength [lindex [lindex $ret 0] 0]] 0
+	error_check_good dbc:getcurrent:data \
+	    [llength [lindex [lindex $ret 0] 1]] 0
+
+	set ret [$dbc get -next]
+	error_check_bad dbc_get:next [llength $ret] 0
+	error_check_good dbc:get:curs \
+	    [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+	error_check_good dbc:get:keys \
+	    [lindex [lindex $ret 0] 0] $keys($i)
+
+	# Move one more forward, so we're not on the first item.
+	error_check_bad dbc:getnext [llength [$dbc get -next]] 0
+
+	puts "\t Test052.e: Delete, do DB_PREV."
+	error_check_good dbc:del [$dbc del] 0
+	set ret [$dbc get -current]
+	error_check_bad dbc:get:curr [llength $ret] 0
+	error_check_good dbc:getcurrent:key \
+	    [llength [lindex [lindex $ret 0] 0]] 0
+	error_check_good dbc:getcurrent:data \
+	    [llength [lindex [lindex $ret 0] 1]] 0
+
+	# next should now reference the record that was previously after
+	# old current
+	set ret [$dbc get -next]
+	error_check_bad get:next [llength $ret] 0
+	error_check_good dbc:get:next:data \
+	    [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+	error_check_good dbc:get:next:keys \
+	    [lindex [lindex $ret 0] 0] $keys([expr $i + 1])
+
+	set ret [$dbc get -prev]
+	error_check_bad dbc:get:curr [llength $ret] 0
+	error_check_good dbc:get:curr:compare \
+	    [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+	error_check_good dbc:get:curr:keys \
+	    [lindex [lindex $ret 0] 0] $keys($i)
+
+	# The rest of the test was written with the old rrecno semantics,
+	# which required a separate c_del(CURRENT) test; to leave
+	# the database in the expected state, we now delete the first item.
+	set ret [$dbc get -first]
+	error_check_bad getfirst [llength $ret] 0
+	error_check_good delfirst [$dbc del] 0
+
+	puts "\tTest052: Inserts."
+	puts "\t Test052.g: Insert before (DB_BEFORE)."
+	set i 1
+	set ret [$dbc get -first]
+	error_check_bad dbc:get:first [llength $ret] 0
+	error_check_good dbc_get:first \
+	    [lindex [lindex $ret 0] 0] $keys($i)
+	error_check_good dbc_get:first:data \
+	    [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+
+	set ret [$dbc put -before $darray($i)]
+	# put -before returns the new record number, which should be $keys($i)
+	error_check_good dbc_put:before $ret $keys($i)
+	# cursor should adjust to point to new item
+	set ret [$dbc get -current]
+	error_check_bad dbc_get:curr [llength $ret] 0
+	error_check_good dbc_put:before:keys \
+	    [lindex [lindex $ret 0] 0] $keys($i)
+	error_check_good dbc_put:before:data \
+	    [lindex [lindex $ret 0] 1] $darray($i)
+
+	set ret [$dbc get -next]
+	error_check_bad dbc_get:next [llength $ret] 0
+	error_check_good dbc_get:next:compare \
+	    $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 3])]]
+	set ret [$dbc get -prev]
+	error_check_bad dbc_get:prev [llength $ret] 0
+
+	puts "\t Test052.h: Insert by cursor after (DB_AFTER)."
+	set i [incr i]
+	set ret [$dbc put -after $darray($i)]
+	# put -after returns the new record number, which should be $keys($i)
+	error_check_good dbcput:after $ret $keys($i)
+	# cursor should reference new item
+	set ret [$dbc get -current]
+	error_check_good dbc:get:current:keys \
+	    [lindex [lindex $ret 0] 0] $keys($i)
+	error_check_good dbc:get:current:data \
+	    [lindex [lindex $ret 0] 1] $darray($i)
+
+	# Records after the cursor should be renumbered up by one.
+	set ret [$dbc get -next]
+	error_check_bad dbc:get:next [llength $ret] 0
+	error_check_good dbc:get:next:compare \
+	    $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 2])]]
+
+	puts "\t Test052.i: Insert (overwrite) current item (DB_CURRENT)."
+	set i 1
+	set ret [$dbc get -first]
+	error_check_bad dbc_get:first [llength $ret] 0
+	# choose a datum that is not currently in db
+	set ret [$dbc put -current $darray([expr $i + 2])]
+	error_check_good dbc_put:curr $ret 0
+	# curs should be on new item
+	set ret [$dbc get -current]
+	error_check_bad dbc_get:current [llength $ret] 0
+	error_check_good dbc_get:curr:keys \
+	    [lindex [lindex $ret 0] 0] $keys($i)
+	error_check_good dbc_get:curr:data \
+	    [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+
+	set ret [$dbc get -next]
+	error_check_bad dbc_get:next [llength $ret] 0
+	set i [incr i]
+	error_check_good dbc_get:next \
+	    $ret [list [list $keys($i) $darray($i)]]
+
+	error_check_good dbc_close [$dbc close] 0
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	error_check_good db_close [$db close] 0
+
+	puts "\tTest052 complete."
+}
diff --git a/storage/bdb/test/test053.tcl b/storage/bdb/test/test053.tcl
new file mode 100644
index 00000000000..3e217a2b55f
--- /dev/null
+++ b/storage/bdb/test/test053.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test053.tcl,v 11.18 2002/05/24 15:24:55 sue Exp $
+#
+# TEST test053
+# TEST Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+# TEST methods.
+proc test053 { method args } {
+	global alphabet
+	global errorCode
+	source ./include.tcl
+
+	set args [convert_args $method $args]
+	set omethod [convert_method $method]
+
+	puts "\tTest053: Test of cursor stability across btree splits."
+	if { [is_btree $method] != 1 && [is_rbtree $method] != 1 } {
+		puts "Test053: skipping for method $method."
+		return
+	}
+
+	set pgindex [lsearch -exact $args "-pagesize"]
+	if { $pgindex != -1 } {
+		puts "Test053: skipping for specific pagesizes"
+		return
+	}
+
+	set txn ""
+	set flags ""
+
+	puts "\tTest053.a: Create $omethod $args database."
+	set txnenv 0
+	set eindex [lsearch -exact $args "-env"]
+	#
+	# If we are using an env, then testfile should just be the db name.
+	# Otherwise it is the test directory and the name.
+	if { $eindex == -1 } {
+		set testfile $testdir/test053.db
+		set env NULL
+	} else {
+		set testfile test053.db
+		incr eindex
+		set env [lindex $args $eindex]
+		set txnenv [is_txnenv $env]
+		if { $txnenv == 1 } {
+			append args " -auto_commit "
+		}
+		set testdir [get_home $env]
+	}
+	set t1 $testdir/t1
+	cleanup $testdir $env
+
+	set oflags \
+	    "-create -revsplitoff -pagesize 1024 $args $omethod"
+	set db [eval {berkdb_open} $oflags $testfile]
+	error_check_good dbopen [is_valid_db $db] TRUE
+
+	set nkeys 8
+	set npages 15
+
+	# We want to create a db with npages leaf pages, and have each page
+	# be near full with keys that we can predict. We set pagesize above
+	# to 1024 bytes, it should breakdown as follows (per page):
+	#
+	# ~20 bytes overhead
+	# key: ~4 bytes overhead, XXX0N where X is a letter, N is 0-9
+	# data: ~4 bytes overhead, + 100 bytes
+	#
+	# then, with 8 keys/page we should be just under 1024 bytes
+	puts "\tTest053.b: Create $npages pages with $nkeys pairs on each."
+	set keystring [string range $alphabet 0 [expr $npages -1]]
+	set data [repeat DATA 22]
+	for { set i 0 } { $i < $npages } {incr i } {
+		set key ""
+		set keyroot \
+		    [repeat [string toupper [string range $keystring $i $i]] 3]
+		set key_set($i) $keyroot
+		for {set j 0} { $j < $nkeys} {incr j} {
+			if { $j < 10 } {
+				set key [set keyroot]0$j
+			} else {
+				set key $keyroot$j
+			}
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			set ret [eval {$db put} $txn {$key $data}]
+			error_check_good dbput $ret 0
+			if { $txnenv == 1 } {
+				error_check_good txn [$t commit] 0
+			}
+		}
+	}
+
+	puts "\tTest053.c: Check page count."
+	error_check_good page_count:check \
+	    [is_substr [$db stat] "{Leaf pages} $npages"] 1
+
+	puts "\tTest053.d: Delete all but one key per page."
+	for {set i 0} { $i < $npages } {incr i } {
+		for {set j 1} { $j < $nkeys } {incr j } {
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			set ret [eval {$db del} $txn {$key_set($i)0$j}]
+			error_check_good dbdel $ret 0
+			if { $txnenv == 1 } {
+				error_check_good txn [$t commit] 0
+			}
+		}
+	}
+	puts "\tTest053.e: Check to make sure all pages are still there."
+	error_check_good page_count:check \
+	    [is_substr [$db stat] "{Leaf pages} $npages"] 1
+
+	if { $txnenv == 1 } {
+		set t [$env txn]
+		error_check_good txn [is_valid_txn $t $env] TRUE
+		set txn "-txn $t"
+	}
+	set dbc [eval {$db cursor} $txn]
+	error_check_good db:cursor [is_valid_cursor $dbc $db] TRUE
+
+	# Walk the cursor through the tree forward, then backward,
+	# then delete one key; repeat until the tree is empty.
+	for {set i 0} { $i < $npages} {incr i} {
+		puts -nonewline \
+		    "\tTest053.f.$i: Walk curs through tree: forward..."
+		for { set j $i; set curr [$dbc get -first]} { $j < $npages} { \
+		    incr j; set curr [$dbc get -next]} {
+			error_check_bad dbc:get:next [llength $curr] 0
+			error_check_good dbc:get:keys \
+			    [lindex [lindex $curr 0] 0] $key_set($j)00
+		}
+		puts -nonewline "backward..."
+		for { set j [expr $npages - 1]; set curr [$dbc get -last]} { \
+		    $j >= $i } { \
+		    set j [incr j -1]; set curr [$dbc get -prev]} {
+			error_check_bad dbc:get:prev [llength $curr] 0
+			error_check_good dbc:get:keys \
+			    [lindex [lindex $curr 0] 0] $key_set($j)00
+		}
+		puts "complete."
+
+		if { [is_rbtree $method] == 1} {
+			puts "\t\tTest053.f.$i:\
+			    Walk through tree with record numbers."
+			for {set j 1} {$j <= [expr $npages - $i]} {incr j} {
+				set curr [eval {$db get} $txn {-recno $j}]
+				error_check_bad \
+				    db_get:recno:$j [llength $curr] 0
+				error_check_good db_get:recno:keys:$j \
+				    [lindex [lindex $curr 0] 0] \
+				    $key_set([expr $j + $i - 1])00
+			}
+		}
+		puts "\tTest053.g.$i:\
+		    Delete single key ([expr $npages - $i] keys left)."
+		set ret [eval {$db del} $txn {$key_set($i)00}]
+		error_check_good dbdel $ret 0
+		error_check_good del:check \
+		    [llength [eval {$db get} $txn {$key_set($i)00}]] 0
+	}
+
+	# Tree is now empty: the first cursor get must return DB_NOTFOUND.
+	set ret [$dbc get -first]
+	error_check_good dbc:get:verify [llength $ret] 0
+
+	# Loop until the single key is restored on each page.
+	for {set i 0} { $i < $npages} {incr i} {
+		puts "\tTest053.i.$i:\
+		    Restore single key ([expr $i + 1] keys in tree)."
+		set ret [eval {$db put} $txn {$key_set($i)00 $data}]
+		error_check_good dbput $ret 0
+
+		puts -nonewline \
+		    "\tTest053.j: Walk cursor through tree: forward..."
+		for { set j 0; set curr [$dbc get -first]} { $j <= $i} {\
+		    incr j; set curr [$dbc get -next]} {
+			error_check_bad dbc:get:next [llength $curr] 0
+			error_check_good dbc:get:keys \
+			    [lindex [lindex $curr 0] 0] $key_set($j)00
+		}
+		error_check_good dbc:get:next [llength $curr] 0
+
+		puts -nonewline "backward..."
+		for { set j $i; set curr [$dbc get -last]} { \
+		    $j >= 0 } { \
+		    set j [incr j -1]; set curr [$dbc get -prev]} {
+			error_check_bad dbc:get:prev [llength $curr] 0
+			error_check_good dbc:get:keys \
+			    [lindex [lindex $curr 0] 0] $key_set($j)00
+		}
+		puts "complete."
+		error_check_good dbc:get:prev [llength $curr] 0
+
+		if { [is_rbtree $method] == 1} {
+			puts "\t\tTest053.k.$i:\
+			    Walk through tree with record numbers."
+			for {set j 1} {$j <= [expr $i + 1]} {incr j} {
+				set curr [eval {$db get} $txn {-recno $j}]
+				error_check_bad \
+				    db_get:recno:$j [llength $curr] 0
+				error_check_good db_get:recno:keys:$j \
+				    [lindex [lindex $curr 0] 0] \
+				    $key_set([expr $j - 1])00
+			}
+		}
+	}
+
+	error_check_good dbc_close [$dbc close] 0
+	if { $txnenv == 1 } {
+		error_check_good txn [$t commit] 0
+	}
+	error_check_good db_close [$db close] 0
+
+	puts "Test053 complete."
+}
diff --git a/storage/bdb/test/test054.tcl b/storage/bdb/test/test054.tcl
new file mode 100644
index 00000000000..f53f5a658bf
--- /dev/null
+++ b/storage/bdb/test/test054.tcl
@@ -0,0 +1,461 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test054.tcl,v 11.23 2002/06/17 18:41:29 sue Exp $
+#
+# TEST test054
+# TEST Cursor maintenance during key/data deletion.
+# TEST
+# TEST This test checks for cursor maintenance in the presence of deletes.
+# TEST There are N different scenarios to tests:
+# TEST 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+# TEST 2. No duplicates. Cursor is positioned right before key K, Delete K,
+# TEST do a next on the cursor.
+# TEST 3. No duplicates. Cursor is positioned on key K, do a regular delete
+# TEST of K, do a current get on K.
+# TEST 4. Repeat 3 but do a next instead of current.
+# TEST 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+# TEST does a delete. Then we do a non-cursor get.
+# TEST 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST do a delete of the entire Key. Test cursor current.
+# TEST 7. Continue last test and try cursor next.
+# TEST 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST Cursor B is in the same duplicate set and deletes a different item.
+# TEST Verify that the cursor is in the right place.
+# TEST 9. Cursors A and B are in the place in the same duplicate set. A
+# TEST deletes its item. Do current on B.
+# TEST 10. Continue 8 and do a next on B.
+proc test054 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644"
+ puts "Test054 ($method $args):\
+ interspersed cursor and normal operations"
+ if { [is_record_based $method] == 1 } {
+ puts "Test054 skipping for method $method"
+ return
+ }
+
+ # Find the environment in the argument list, we'll need it
+ # later.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ # Create the database and open the dictionary
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-nodup.db
+ set env NULL
+ } else {
+ set testfile test054-nodup.db
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest054.a: No Duplicate Tests"
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [$curs get -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Test case #1.
+ puts "\tTest054.a1: Delete w/cursor, regular get"
+
+ # Now set the cursor on the middle on.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good curs_del $r 0
+
+ # Now do the get
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [llength $r] 0
+
+ # Free up the cursor.
+ error_check_good cursor_close [eval {$curs close}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Test case #2.
+ puts "\tTest054.a2: Cursor before K, delete K, cursor next"
+
+ # Replace key 2
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Open and position cursor on first item.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [eval {$curs get} -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -nextdup] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ set r [eval {$curs get} -set {$key_set(1)} ]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now delete (next item) $key_set(2)
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # Test case #3.
+ puts "\tTest054.a3: Cursor on K, delete K, cursor current"
+
+ # delete item 3
+ error_check_good \
+ db_del:$key_set(3) [eval {$db del} $txn {$key_set(3)}] 0
+ # NEEDS TO COME BACK IN, BUG CHECK
+ set ret [$curs get -current]
+ error_check_good current_after_del $ret [list [list [] []]]
+ error_check_good cursor_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest054.a4: Cursor on K, delete K, cursor next"
+
+ # Restore keys 2 and 3
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+ set r [eval {$db put} $txn {$key_set(3) datum$key_set(3)}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Create the new cursor and put it on 1
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Delete 2
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now get ready for duplicate tests
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test054: skipping remainder of test for method $method."
+ return
+ }
+
+ puts "\tTest054.b: Duplicate Tests"
+ append args " -dup"
+
+ # Open a new database for the dup tests so -truncate is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-dup.db
+ set env NULL
+ } else {
+ set testfile test054-dup.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ set i 1
+ for {set d [$curs get -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -nextdup] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # Test case #5.
+ puts "\tTest054.b1: Delete dup w/cursor on first item. Get on key."
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [eval {$curs get} -set {$key_set(2)}]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good curs_del $r 0
+
+ # Now do the get
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [lindex [lindex $r 0] 1] dup_1
+
+ # Test case #6.
+ puts "\tTest054.b2: Now get the next duplicate from the cursor."
+
+ # Now do next on cursor
+ set r [$curs get -nextdup]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ # Test case #3.
+ puts "\tTest054.b3: Two cursors in set; each delete different items"
+
+ # Open a new cursor.
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs_open [is_valid_cursor $curs2 $db] TRUE
+
+ # Set on last of duplicate set.
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_5
+
+ # Delete the item at cursor 1 (dup_1)
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify curs1 and curs2
+ # current should fail
+ set ret [$curs get -current]
+ error_check_good \
+ curs1_get_after_del $ret [list [list [] []]]
+
+ set r [$curs2 get -current]
+ error_check_bad curs2_get [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good curs_get:DB_CURRENT:data $d dup_5
+
+ # Now delete the item at cursor 2 (dup_5)
+ error_check_good curs2_del [$curs2 del] 0
+
+ # Verify curs1 and curs2
+ set ret [$curs get -current]
+ error_check_good curs1_get:del2 $ret [list [list [] []]]
+
+ set ret [$curs2 get -current]
+ error_check_good curs2_get:del2 $ret [list [list [] []]]
+
+ # Now verify that next and prev work.
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_4
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ puts "\tTest054.b4: Two cursors same item, one delete, one get"
+
+ # Move curs2 onto dup_2
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_3
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_2
+
+ # delete on curs 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ set ret [$curs get -current]
+ error_check_good \
+ curs1_get:deleted $ret [list [list [] []]]
+ set ret [$curs2 get -current]
+ error_check_good \
+ curs2_get:deleted $ret [list [list [] []]]
+
+ puts "\tTest054.b5: Now do a next on both cursors"
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ set r [$curs2 get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ error_check_good curs2_close [$curs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test055.tcl b/storage/bdb/test/test055.tcl
new file mode 100644
index 00000000000..25134dca4be
--- /dev/null
+++ b/storage/bdb/test/test055.tcl
@@ -0,0 +1,141 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test055.tcl,v 11.16 2002/05/22 15:42:55 sue Exp $
+#
+# TEST test055
+# TEST Basic cursor operations.
+# TEST This test checks basic cursor operations.
+# TEST There are N different scenarios to tests:
+# TEST 1. (no dups) Set cursor, retrieve current.
+# TEST 2. (no dups) Set cursor, retrieve next.
+# TEST 3. (no dups) Set cursor, retrieve prev.
+proc test055 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test055: $method interspersed cursor and normal operations"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test055.db
+ set env NULL
+ } else {
+ set testfile test055.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest055.a: No duplicates"
+ set db [eval {berkdb_open -create -mode 0644 $omethod } \
+ $args {$testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {\
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Test case #1.
+ puts "\tTest055.a1: Set cursor, retrieve current"
+
+ # Now set the cursor on the middle on.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve current
+ set r [$curs get -current]
+ error_check_bad cursor_get:DB_CURRENT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_CURRENT:data $d [pad_data $method datum$key_set(2)]
+
+ # Test case #2.
+ puts "\tTest055.a2: Set cursor, retrieve previous"
+ set r [$curs get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(1)
+ error_check_good \
+ curs_get:DB_PREV:data $d [pad_data $method datum$key_set(1)]
+
+ # Test case #3.
+ puts "\tTest055.a2: Set cursor, retrieve next"
+
+ # Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve next
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good \
+ curs_get:DB_NEXT:data $d [pad_data $method datum$key_set(3)]
+
+ # Close cursor and database.
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test056.tcl b/storage/bdb/test/test056.tcl
new file mode 100644
index 00000000000..ef310332ed1
--- /dev/null
+++ b/storage/bdb/test/test056.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test056.tcl,v 11.18 2002/05/22 15:42:55 sue Exp $
+#
+# TEST test056
+# TEST Cursor maintenance during deletes.
+# TEST Check if deleting a key when a cursor is on a duplicate of that
+# TEST key works.
+proc test056 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "Test056: skipping for method $method"
+ return
+ }
+ puts "Test056: $method delete of key in presence of cursor"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test056.db
+ set env NULL
+ } else {
+ set testfile test056.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ puts "\tTest056.a: Key delete with cursor on duplicate."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # Now put the cursor on a duplicate of key 2
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do two nexts
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ # Now do the delete
+ set r [eval {$db del} $txn $flags {$key_set(2)}]
+ error_check_good delete $r 0
+
+ # Now check the get current on the cursor.
+ set ret [$curs get -current]
+ error_check_good curs_after_del $ret [list [list [] []]]
+
+ # Now check that the rest of the database looks intact. There
+ # should be only two keys, 1 and 3.
+
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ set r [$curs get -next]
+ error_check_good cursor_get:DB_NEXT [llength $r] 0
+
+ puts "\tTest056.b:\
+ Cursor delete of first item, followed by cursor FIRST"
+ # Set to beginning
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ # Now do delete
+ error_check_good curs_del [$curs del] 0
+
+ # Now do DB_FIRST
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(3)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(3)
+
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test057.tcl b/storage/bdb/test/test057.tcl
new file mode 100644
index 00000000000..04fb09ef260
--- /dev/null
+++ b/storage/bdb/test/test057.tcl
@@ -0,0 +1,248 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test057.tcl,v 11.22 2002/05/22 15:42:56 sue Exp $
+#
+# TEST test057
+# TEST Cursor maintenance during key deletes.
+# TEST Check if we handle the case where we delete a key with the cursor on
+# TEST it and then add the same key. The cursor should not get the new item
+# TEST returned, but the item shouldn't disappear.
+# TEST Run test tests, one where the overwriting put is done with a put and
+# TEST one where it's done with a cursor put.
+proc test057 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test057: skipping for method $method"
+ return
+ }
+ puts "Test057: $method delete and replace in presence of cursor."
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test057.db
+ set env NULL
+ } else {
+ set testfile test057.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good dbopen:dup [is_valid_db $db] TRUE
+
+ puts "\tTest057.a: Set cursor, delete cursor, put with key."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # Now put the cursor on key 1
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good delete $r 0
+
+ # Now check the get current on the cursor.
+ error_check_good curs_get:del [$curs get -current] [list [list [] []]]
+
+ # Now do a put on the key
+ set r [eval {$db put} $txn $flags {$key_set(1) new_datum$key_set(1)}]
+ error_check_good put $r 0
+
+ # Do a get
+ set r [eval {$db get} $txn {$key_set(1)}]
+ error_check_good get [lindex [lindex $r 0] 1] new_datum$key_set(1)
+
+ # Recheck cursor
+ error_check_good curs_get:deleted [$curs get -current] [list [list [] []]]
+
+ # Move cursor and see if we get the key.
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d new_datum$key_set(1)
+
+ puts "\tTest057.b: Set two cursor on a key, delete one, overwrite other"
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs2_open [is_valid_cursor $curs2 $db] TRUE
+
+ # Set both cursors on the 4rd key
+ set r [$curs get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(3)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(3)
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs_get:deleted [$curs2 get -current] \
+ [list [list [] []]]
+
+ # Now do a replace through cursor 2
+ set pflags "-current"
+ if {[is_hash $method] == 1} {
+ error_check_good curs1_get_after_del [is_substr \
+ [$curs2 put $pflags new_datum$key_set(3)] "DB_NOTFOUND"] 1
+
+ # Gets fail
+ error_check_good curs1_get:deleted \
+ [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs2_get:deleted \
+ [$curs get -current] \
+ [list [list [] []]]
+ } else {
+ # btree only, recno is skipped this test
+ set ret [$curs2 put $pflags new_datum$key_set(3)]
+ error_check_good curs_replace $ret 0
+ }
+
+ # Gets fail
+ #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1
+ #error_check_good curs1_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+ #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1
+ #error_check_good curs2_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+
+ puts "\tTest057.c:\
+ Set two cursors on a dup, delete one, overwrite other"
+
+ # Set both cursors on the 2nd duplicate of key 2
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs2 get -set $key_set(2)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(2)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs2 get -next]
+ error_check_bad cursor2_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs2_get:DB_NEXT:data $d dup_1
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs_get:deleted [$curs2 get -current] \
+ [list [list [] []]]
+
+ # Now do a replace through cursor 2 -- this will work on btree but
+ # not on hash
+ if {[is_hash $method] == 1} {
+ error_check_good hash_replace \
+ [is_substr [$curs2 put -current new_dup_1] "DB_NOTFOUND"] 1
+ } else {
+ error_check_good curs_replace [$curs2 put -current new_dup_1] 0
+ }
+
+ # Both gets should fail
+ #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1
+ #error_check_good curs1_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+ #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1
+ #error_check_good curs2_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+
+ error_check_good curs2_close [$curs2 close] 0
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test058.tcl b/storage/bdb/test/test058.tcl
new file mode 100644
index 00000000000..daf164fd6e2
--- /dev/null
+++ b/storage/bdb/test/test058.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test058.tcl,v 11.20 2002/02/22 15:26:27 sandstro Exp $
+#
+# TEST test058
+# TEST Verify that deleting and reading duplicates results in correct ordering.
+proc test058 { method args } {
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test058 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test058: skipping for method $method"
+ return
+ }
+ puts "Test058: $method delete dups after inserting after duped key."
+
+ # environment
+ env_cleanup $testdir
+ set eflags "-create -txn $encargs -home $testdir"
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+
+ # db open
+ set flags "-auto_commit -create -mode 0644 -dup -env $env $args"
+ set db [eval {berkdb_open} $flags $omethod "test058.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set tn ""
+ set tid ""
+ set tn [$env txn]
+ set tflags "-txn $tn"
+
+ puts "\tTest058.a: Adding 10 duplicates"
+ # Add a bunch of dups
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret \
+ [eval {$db put} $tflags {doghouse $i"DUPLICATE_DATA_VALUE"}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest058.b: Adding key after duplicates"
+ # Now add one more key/data AFTER the dup set.
+ set ret [eval {$db put} $tflags {zebrahouse NOT_A_DUP}]
+ error_check_good db_put $ret 0
+
+ error_check_good txn_commit [$tn commit] 0
+
+ set tn [$env txn]
+ error_check_good txnbegin [is_substr $tn $env] 1
+ set tflags "-txn $tn"
+
+ # Now delete everything
+ puts "\tTest058.c: Deleting duplicated key"
+ set ret [eval {$db del} $tflags {doghouse}]
+ error_check_good del $ret 0
+
+ # Now reput everything
+ set pad \
+ abcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuv
+
+ puts "\tTest058.d: Reputting duplicates with big data vals"
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret [eval {$db put} \
+ $tflags {doghouse $i"DUPLICATE_DATA_VALUE"$pad}]
+ error_check_good db_put $ret 0
+ }
+ error_check_good txn_commit [$tn commit] 0
+
+ # Check duplicates for order
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest058.e: Verifying that duplicates are in order."
+ set i 0
+ for { set ret [$dbc get -set doghouse] } \
+ {$i < 10 && [llength $ret] != 0} \
+ { set ret [$dbc get -nextdup] } {
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good \
+ duplicate_value $data $i"DUPLICATE_DATA_VALUE"$pad
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ reset_env $env
+}
diff --git a/storage/bdb/test/test059.tcl b/storage/bdb/test/test059.tcl
new file mode 100644
index 00000000000..596ea7a3c94
--- /dev/null
+++ b/storage/bdb/test/test059.tcl
@@ -0,0 +1,150 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test059.tcl,v 11.18 2002/06/11 15:10:16 sue Exp $
+#
+# TEST test059
+# TEST Cursor ops work with a partial length of 0.
+# TEST Make sure that we handle retrieves of zero-length data items correctly.
+# TEST The following ops, should allow a partial data retrieve of 0-length.
+# TEST db_get
+# TEST db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+proc test059 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test059: $method 0-length partial data retrieval"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test059.db
+ set env NULL
+ } else {
+ set testfile test059.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest059.a: Populate a database"
+ set oflags "-create -mode 0644 $omethod $args $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_create [is_substr $db db] 1
+
+ # Put ten keys in the database
+ for { set key 1 } { $key <= 10 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $pflags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good db_curs [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ puts "\tTest059.a: db get with 0 partial length retrieve"
+
+ # Now set the cursor on the middle one.
+ set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}]
+ error_check_bad db_get_0 [llength $ret] 0
+
+ puts "\tTest059.a: db cget FIRST with 0 partial length retrieve"
+ set ret [$curs get -first -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_first $key $key_set(1)
+ error_check_good db_cget_first [string length $data] 0
+
+ puts "\tTest059.b: db cget NEXT with 0 partial length retrieve"
+ set ret [$curs get -next -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_next $key $key_set(2)
+ error_check_good db_cget_next [string length $data] 0
+
+ puts "\tTest059.c: db cget LAST with 0 partial length retrieve"
+ set ret [$curs get -last -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_last $key $key_set(10)
+ error_check_good db_cget_last [string length $data] 0
+
+ puts "\tTest059.d: db cget PREV with 0 partial length retrieve"
+ set ret [$curs get -prev -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_prev $key $key_set(9)
+ error_check_good db_cget_prev [string length $data] 0
+
+ puts "\tTest059.e: db cget CURRENT with 0 partial length retrieve"
+ set ret [$curs get -current -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_current $key $key_set(9)
+ error_check_good db_cget_current [string length $data] 0
+
+ puts "\tTest059.f: db cget SET with 0 partial length retrieve"
+ set ret [$curs get -set -partial {0 0} $key_set(7)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(7)
+ error_check_good db_cget_set [string length $data] 0
+
+ if {[is_btree $method] == 1} {
+ puts "\tTest059.g:\
+ db cget SET_RANGE with 0 partial length retrieve"
+ set ret [$curs get -set_range -partial {0 0} $key_set(5)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(5)
+ error_check_good db_cget_set [string length $data] 0
+ }
+
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test060.tcl b/storage/bdb/test/test060.tcl
new file mode 100644
index 00000000000..4a18c97f42f
--- /dev/null
+++ b/storage/bdb/test/test060.tcl
@@ -0,0 +1,60 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test060.tcl,v 11.10 2002/05/22 15:42:56 sue Exp $
+#
+# TEST test060
+# TEST Test of the DB_EXCL flag to DB->open().
+# TEST 1) Attempt to open and create a nonexistent database; verify success.
+# TEST 2) Attempt to reopen it; verify failure.
+proc test060 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test060: $method ($args) Test of the DB_EXCL flag to DB->open"
+
+ # Set the database location and make sure the db doesn't exist yet
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test060.db
+ set env NULL
+ } else {
+ set testfile test060.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create the database and check success
+ puts "\tTest060.a: open and close non-existent file with DB_EXCL"
+ set db [eval {berkdb_open \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen:excl [is_valid_db $db] TRUE
+
+ # Close it and check success
+ error_check_good db_close [$db close] 0
+
+ # Try to open it again, and make sure the open fails
+ puts "\tTest060.b: open it again with DB_EXCL and make sure it fails"
+ set errorCode NONE
+ error_check_good open:excl:catch [catch { \
+ set db [eval {berkdb_open_noerr \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ } ret ] 1
+
+ error_check_good dbopen:excl [is_substr $errorCode EEXIST] 1
+}
diff --git a/storage/bdb/test/test061.tcl b/storage/bdb/test/test061.tcl
new file mode 100644
index 00000000000..65544e88deb
--- /dev/null
+++ b/storage/bdb/test/test061.tcl
@@ -0,0 +1,226 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test061.tcl,v 11.18 2002/02/22 15:26:27 sandstro Exp $
+#
+# TEST test061
+# TEST Test of txn abort and commit for in-memory databases.
+# TEST a) Put + abort: verify absence of data
+# TEST b) Put + commit: verify presence of data
+# TEST c) Overwrite + abort: verify that data is unchanged
+# TEST d) Overwrite + commit: verify that data has changed
+# TEST e) Delete + abort: verify that data is still present
+# TEST f) Delete + commit: verify that data has been deleted
+proc test061 { method args } {
+ global alphabet
+ global encrypt
+ global errorCode
+ global passwd
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test061 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1} {
+ puts "Test061 skipping for method $method"
+ return
+ }
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ puts "Test061: Transaction abort and commit test for in-memory data."
+ puts "Test061: $method $args"
+
+ set key "key"
+ set data "data"
+ set otherdata "otherdata"
+ set txn ""
+ set flags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1} {
+ set key 1
+ set gflags " -recno"
+ }
+
+ puts "\tTest061: Create environment and $method database."
+ env_cleanup $testdir
+
+ # create environment
+ set eflags "-create -txn $encargs -home $testdir"
+ set dbenv [eval {berkdb_env} $eflags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # db open -- no file specified, in-memory database
+ set flags "-auto_commit -create $args $omethod"
+ set db [eval {berkdb_open -env} $dbenv $flags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here we go with the six test cases. Since we need to verify
+ # a different thing each time, and since we can't just reuse
+ # the same data if we're to test overwrite, we just
+ # plow through rather than writing some impenetrable loop code;
+ # each of the cases is only a few lines long, anyway.
+
+ puts "\tTest061.a: put/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for *non*-existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ puts "\tTest061.b: put/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check again for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.c: overwrite/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check that data is unchanged ($data not $otherdata)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.d: overwrite/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check that data has changed ($otherdata not $data)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.e: delete/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # delete
+ set ret [eval {$db del} -txn $txn {$key}]
+ error_check_good db_put $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.f: delete/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db del} -txn $txn {$key}]
+ error_check_good db_put $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check for continued nonexistence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ # We're done; clean up.
+ error_check_good db_close [eval {$db close}] 0
+ error_check_good env_close [eval {$dbenv close}] 0
+
+ # Now run db_recover and ensure that it runs cleanly.
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set utilflag "-P $passwd"
+ }
+ puts "\tTest061.g: Running db_recover -h"
+ set ret [catch {eval {exec} $util_path/db_recover -h $testdir \
+ $utilflag} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover outputted $res"
+ }
+ error_check_good db_recover $ret 0
+
+ puts "\tTest061.h: Running db_recover -c -h"
+ set ret [catch {eval {exec} $util_path/db_recover -c -h $testdir \
+ $utilflag} res]
+ error_check_good db_recover-c $ret 0
+}
diff --git a/storage/bdb/test/test062.tcl b/storage/bdb/test/test062.tcl
new file mode 100644
index 00000000000..5cacd98a2c0
--- /dev/null
+++ b/storage/bdb/test/test062.tcl
@@ -0,0 +1,153 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test062.tcl,v 11.20 2002/06/11 14:09:57 sue Exp $
+#
+# TEST test062
+# TEST Test of partial puts (using DB_CURRENT) onto duplicate pages.
+# TEST Insert the first 200 words into the dictionary 200 times each with
+# TEST self as key and <random letter>:self as data. Use partial puts to
+# TEST append self again to data; verify correctness.
+proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 200 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum:\
+ $method ($args) $nentries Partial puts and $ndups duplicates."
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest0$tnum.a: Put loop (initialize database)"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set datastr $pref:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ set keys($count) $str
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest0$tnum.b: Partial puts."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+
+ # Do a partial write to extend each datum in
+ # the regular db by the corresponding dictionary word.
+ # We have to go through each key's dup set using -set
+ # because cursors are not stable in the hash AM and we
+ # want to make sure we hit all the keys.
+ for { set i 0 } { $i < $count } { incr i } {
+ set key $keys($i)
+ for {set ret [$dbc get -set $key]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set orig_d [lindex [lindex $ret 0] 1]
+ set d [string range $orig_d 2 end]
+ set doff [expr [string length $d] + 2]
+ set dlen 0
+ error_check_good data_and_key_sanity $d $k
+
+ set ret [$dbc get -current]
+ error_check_good before_sanity \
+ [lindex [lindex $ret 0] 0] \
+ [string range [lindex [lindex $ret 0] 1] 2 end]
+
+ error_check_good partial_put [eval {$dbc put -current \
+ -partial [list $doff $dlen] $d}] 0
+
+ set ret [$dbc get -current]
+ error_check_good partial_put_correct \
+ [lindex [lindex $ret 0] 1] $orig_d$d
+ }
+ }
+
+ puts "\tTest0$tnum.c: Double-checking get loop."
+ # Double-check that each datum in the regular db has
+ # been appropriately modified.
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good modification_correct \
+ [string range $d 2 end] [repeat $k 2]
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test063.tcl b/storage/bdb/test/test063.tcl
new file mode 100644
index 00000000000..2e8726c8f96
--- /dev/null
+++ b/storage/bdb/test/test063.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test063.tcl,v 11.17 2002/05/24 15:24:55 sue Exp $
+#
+# TEST test063
+# TEST Test of the DB_RDONLY flag to DB->open
+# TEST Attempt to both DB->put and DBC->c_put into a database
+# TEST that has been opened DB_RDONLY, and check for failure.
+proc test063 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 63
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set key "key"
+ set data "data"
+ set key2 "another_key"
+ set data2 "more_data"
+
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set key "1"
+ set key2 "2"
+ append gflags " -recno"
+ }
+
+ puts "Test0$tnum: $method ($args) DB_RDONLY test."
+
+ # Create a test database.
+ puts "\tTest0$tnum.a: Creating test database."
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ # Put and get an item so it's nonempty.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
+ error_check_good initial_put $ret 0
+
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good initial_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { $eindex == -1 } {
+ # Confirm that database is writable. If we are
+ # using an env (that may be remote on a server)
+ # we cannot do this check.
+ error_check_good writable [file writable $testfile] 1
+ }
+
+ puts "\tTest0$tnum.b: Re-opening DB_RDONLY and attempting to put."
+
+ # Now open it read-only and make sure we can get but not put.
+ set db [eval {berkdb_open_noerr -rdonly} $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ set ret [catch {eval {$db put} $txn \
+ {$key2 [chop_data $method $data]}} res]
+ error_check_good put_failed $ret 1
+ error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set errorCode "NONE"
+
+ puts "\tTest0$tnum.c: Attempting cursor put."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good cursor_set [$dbc get -first] $dbt
+ set ret [catch {eval {$dbc put} -current $data} res]
+ error_check_good c_put_failed $ret 1
+ error_check_good dbc_put_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $gflags {$key2}]
+ error_check_good db_get_key2 $dbt ""
+
+ puts "\tTest0$tnum.d: Attempting ordinary delete."
+
+ set errorCode "NONE"
+ set ret [catch {eval {$db del} $txn {$key}} 1]
+ error_check_good del_failed $ret 1
+ error_check_good db_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get_key $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest0$tnum.e: Attempting cursor delete."
+ # Just set the cursor to the beginning; we don't care what's there...
+ # yet.
+ set dbt2 [$dbc get -first]
+ error_check_good db_get_first_key $dbt2 $dbt
+ set errorCode "NONE"
+ set ret [catch {$dbc del} res]
+ error_check_good c_del_failed $ret 1
+ error_check_good dbc_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt2 [$dbc get -current]
+ error_check_good db_get_key $dbt2 $dbt
+
+ puts "\tTest0$tnum.f: Close, reopen db; verify unchanged."
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $omethod $args $testfile]
+ error_check_good db_reopen [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good first_there [$dbc get -first] \
+ [list [list $key [pad_data $method $data]]]
+ error_check_good nomore_there [$dbc get -next] ""
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test064.tcl b/storage/bdb/test/test064.tcl
new file mode 100644
index 00000000000..c306b0d9d46
--- /dev/null
+++ b/storage/bdb/test/test064.tcl
@@ -0,0 +1,69 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test064.tcl,v 11.13 2002/05/22 15:42:57 sue Exp $
+#
+# TEST test064
+# TEST Test of DB->get_type
+# TEST Create a database of type specified by method.
+# TEST Make sure DB->get_type returns the right thing with both a normal
+# TEST and DB_UNKNOWN open.
+proc test064 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 64
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) DB->get_type test."
+
+ # Create a test database.
+ puts "\tTest0$tnum.a: Creating test database of type $method."
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.b: get_type after method specifier."
+
+ set db [eval {berkdb_open} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: get_type after DB_UNKNOWN."
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test065.tcl b/storage/bdb/test/test065.tcl
new file mode 100644
index 00000000000..ea29b4d2db7
--- /dev/null
+++ b/storage/bdb/test/test065.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test065.tcl,v 11.16 2002/08/22 18:18:50 sandstro Exp $
+#
+# TEST test065
+# TEST Test of DB->stat(DB_FASTSTAT)
+proc test065 { method args } {
+ source ./include.tcl
+ global errorCode
+ global alphabet
+
+ set nentries 10000
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 65
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) DB->stat(DB_FAST_STAT) test."
+
+ puts "\tTest0$tnum.a: Create database and check it while empty."
+
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set ret [catch {eval $db stat -faststat} res]
+
+ error_check_good db_close [$db close] 0
+
+ if { ([is_record_based $method] && ![is_queue $method]) \
+ || [is_rbtree $method] } {
+ error_check_good recordcount_ok [is_substr $res \
+ "{{Number of keys} 0}"] 1
+ } else {
+ puts "\tTest0$tnum: Test complete for method $method."
+ return
+ }
+
+ # If we've got this far, we're on an access method for
+ # which record counts makes sense. Thus, we no longer
+ # catch EINVALs, and no longer care about __db_errs.
+ set db [eval {berkdb_open -create -mode 0644} $omethod $args $testfile]
+
+ puts "\tTest0$tnum.b: put $nentries keys."
+
+ if { [is_record_based $method] } {
+ set gflags " -recno "
+ set keypfx ""
+ } else {
+ set gflags ""
+ set keypfx "key"
+ }
+
+ set txn ""
+ set data [pad_data $method $alphabet]
+
+ for { set ndx 1 } { $ndx <= $nentries } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ error_check_good recordcount_after_puts \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+
+ puts "\tTest0$tnum.c: delete 90% of keys."
+ set end [expr {$nentries / 10 * 9}]
+ for { set ndx 1 } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_rrecno $method] == 1 } {
+ # if we're renumbering, when we hit key 5001 we'll
+ # have deleted 5000 and we'll croak! So delete key
+ # 1, repeatedly.
+ set ret [eval {$db del} $txn {[concat $keypfx 1]}]
+ } else {
+ set ret [eval {$db del} $txn {$keypfx$ndx}]
+ }
+ error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rrecno $method] == 1 || [is_rbtree $method] == 1 } {
+ # We allow renumbering--thus the stat should return 10%
+ # of nentries.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10}]}"] 1
+ } else {
+ # No renumbering--no change in RECORDCOUNT!
+ error_check_good recordcount_after_dels \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+ }
+
+ puts "\tTest0$tnum.d: put new keys at the beginning."
+ set end [expr {$nentries / 10 * 8}]
+ for { set ndx 1 } { $ndx <= $end } {incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put_beginning $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rrecno $method] == 1 } {
+ # With renumbering we're back up to 80% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 8}]}"] 1
+ } elseif { [is_rbtree $method] == 1 } {
+ # Total records in a btree is now 90% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 9}]}"] 1
+ } else {
+ # No renumbering--still no change in RECORDCOUNT.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} $nentries}"] 1
+ }
+
+ puts "\tTest0$tnum.e: put new keys at the end."
+ set start [expr {1 + $nentries / 10 * 9}]
+ set end [expr {($nentries / 10 * 9) + ($nentries / 10 * 8)}]
+ for { set ndx $start } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put_end $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rbtree $method] != 1 } {
+ # If this is a recno database, the record count should be up
+ # to (1.7 x nentries), the largest number we've seen, with
+ # or without renumbering.
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start - 1 + $nentries / 10 * 8}]}"] 1
+ } else {
+ # In an rbtree, 1000 of those keys were overwrites, so there
+ # are (.7 x nentries) new keys and (.9 x nentries) old keys
+ # for a total of (1.6 x nentries).
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start -1 + $nentries / 10 * 7}]}"] 1
+ }
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test066.tcl b/storage/bdb/test/test066.tcl
new file mode 100644
index 00000000000..13d0894dcae
--- /dev/null
+++ b/storage/bdb/test/test066.tcl
@@ -0,0 +1,99 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test066.tcl,v 11.12 2002/05/24 15:24:56 sue Exp $
+#
+# TEST test066
+# TEST Test of cursor overwrites of DB_CURRENT w/ duplicates.
+# TEST
+# TEST Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+# TEST database with duplicates.
+proc test066 { method args } {
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set tnum 66
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Test0$tnum: Skipping for method $method."
+ return
+ }
+
+ puts "Test0$tnum: Test of cursor put to DB_CURRENT with duplicates."
+
+ source ./include.tcl
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test066.db
+ set env NULL
+ } else {
+ set testfile test066.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set txn ""
+ set key "test"
+ set data "olddata"
+
+ set db [eval {berkdb_open -create -mode 0644 -dup} $omethod $args \
+ $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -first]
+ error_check_good db_get $ret [list [list $key [pad_data $method $data]]]
+
+ set newdata "newdata"
+ set ret [$dbc put -current [chop_data $method $newdata]]
+ error_check_good dbc_put $ret 0
+
+ # There should be only one (key,data) pair in the database, and this
+ # is it.
+ set ret [$dbc get -first]
+ error_check_good db_get_first $ret \
+ [list [list $key [pad_data $method $newdata]]]
+
+ # and this one should come up empty.
+ set ret [$dbc get -next]
+ error_check_good db_get_next $ret ""
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum: Test completed successfully."
+}
diff --git a/storage/bdb/test/test067.tcl b/storage/bdb/test/test067.tcl
new file mode 100644
index 00000000000..5f5a88c4be1
--- /dev/null
+++ b/storage/bdb/test/test067.tcl
@@ -0,0 +1,155 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test067.tcl,v 11.19 2002/06/11 15:19:16 sue Exp $
+#
+# TEST test067
+# TEST Test of DB_CURRENT partial puts onto almost empty duplicate
+# TEST pages, with and without DB_DUP_SORT.
+# TEST
+# TEST Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+# TEST This test was written to address the following issue, #2 in the
+# TEST list of issues relating to bug #0820:
+# TEST
+# TEST 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+# TEST In Btree, the DB_CURRENT overwrite of off-page duplicate records
+# TEST first deletes the record and then puts the new one -- this could
+# TEST be a problem if the removal of the record causes a reverse split.
+# TEST Suggested solution is to acquire a cursor to lock down the current
+# TEST record, put a new record after that record, and then delete using
+# TEST the held cursor.
+# TEST
+# TEST It also tests the following, #5 in the same list of issues:
+# TEST 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+# TEST set, duplicate comparison routine specified.
+# TEST The partial change does not change how data items sort, but the
+# TEST record to be put isn't built yet, and that record supplied is the
+# TEST one that's checked for ordering compatibility.
+proc test067 { method {ndups 1000} {tnum 67} args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+ set txn ""
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndups == 1000 } {
+ set ndups 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test0$tnum:\
+ $method ($args) Partial puts on near-empty duplicate pages."
+
+ foreach dupopt { "-dup" "-dup -dupsort" } {
+ #
+ # Testdir might get reset from the env's home dir back
+ # to the default if this calls something that sources
+ # include.tcl, since testdir is a global. Set it correctly
+ # here each time through the loop.
+ #
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.a ($dupopt): Put $ndups duplicates."
+
+ set key "key_test$tnum"
+
+ for { set ndx 0 } { $ndx < $ndups } { incr ndx } {
+ set data $alphabet$ndx
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # No need for pad_data since we're skipping recno.
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($key,$data) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Sync so we can inspect database if the next section bombs.
+ error_check_good db_sync [$db sync] 0
+ puts "\tTest0$tnum.b ($dupopt):\
+ Deleting dups (last first), overwriting each."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ while { $count < $ndups - 1 } {
+ # set cursor to last item in db
+ set ret [$dbc get -last]
+ error_check_good \
+ verify_key [lindex [lindex $ret 0] 0] $key
+
+ # for error reporting
+ set currdatum [lindex [lindex $ret 0] 1]
+
+ # partial-overwrite it
+ # (overwrite offsets 1-4 with "bcde"--which they
+ # already are)
+
+ # Even though we expect success, we catch this
+ # since it might return EINVAL, and we want that
+ # to FAIL.
+ set errorCode NONE
+ set ret [catch {eval $dbc put -current \
+ {-partial [list 1 4]} "bcde"} \
+ res]
+ error_check_good \
+ partial_put_valid($currdatum) $errorCode NONE
+ error_check_good partial_put($currdatum) $res 0
+
+ # delete it
+ error_check_good dbc_del [$dbc del] 0
+
+ #puts $currdatum
+
+ incr count
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/test068.tcl b/storage/bdb/test/test068.tcl
new file mode 100644
index 00000000000..31f4272ba55
--- /dev/null
+++ b/storage/bdb/test/test068.tcl
@@ -0,0 +1,226 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test068.tcl,v 11.17 2002/06/11 15:34:47 sue Exp $
+#
+# TEST test068
+# TEST Test of DB_BEFORE and DB_AFTER with partial puts.
+# TEST Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+# TEST check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+proc test068 { method args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+
+ set tnum 68
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set nkeys 1000
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set nkeys 100
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test0$tnum:\
+ $method ($args) Test of DB_BEFORE/DB_AFTER and partial puts."
+ if { [is_record_based $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+
+ # Create a list of $nkeys words to insert into db.
+ puts "\tTest0$tnum.a: Initialize word list."
+ set txn ""
+ set wordlist {}
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nkeys } {
+ lappend wordlist $str
+ incr count
+ }
+ close $did
+
+ # Sanity check: did we get $nkeys words?
+ error_check_good enough_keys [llength $wordlist] $nkeys
+
+ # rbtree can't handle dups, so just test the non-dup case
+ # if it's the current method.
+ if { [is_rbtree $method] == 1 } {
+ set dupoptlist { "" }
+ } else {
+ set dupoptlist { "" "-dup" "-dup -dupsort" }
+ }
+
+ foreach dupopt $dupoptlist {
+ #
+ # Testdir might be reset in the loop by some proc sourcing
+ # include.tcl. Reset it to the env's home here, before
+ # cleanup.
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open_noerr -create -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.b ($dupopt): DB initialization: put loop."
+ foreach word $wordlist {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$word $word}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest0$tnum.c ($dupopt): get loop."
+ foreach word $wordlist {
+ # Make sure that the Nth word has been correctly
+ # inserted, and also that the Nth word is the
+ # Nth one we pull out of the database using a cursor.
+
+ set dbt [$db get $word]
+ error_check_good get_key [list [list $word $word]] $dbt
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest0$tnum.d ($dupopt): DBC->put w/ DB_AFTER."
+
+ # Set cursor to the first key; make sure it succeeds.
+ # With an unsorted wordlist, we can't be sure that the
+ # first item returned will equal the first item in the
+ # wordlist, so we just make sure it got something back.
+ set dbt [eval {$dbc get -first}]
+ error_check_good \
+ dbc_get_first [llength $dbt] 1
+
+ # If -dup is not set, or if -dupsort is set too, we
+ # need to verify that DB_BEFORE and DB_AFTER fail
+ # and then move on to the next $dupopt.
+ if { $dupopt != "-dup" } {
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -after \
+ {-partial [list 6 0]} "after"} res]
+ error_check_good dbc_put_after_fail $ret 1
+ error_check_good dbc_put_after_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest0$tnum ($dupopt): DB_AFTER returns EINVAL."
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -before \
+ {-partial [list 6 0]} "before"} res]
+ error_check_good dbc_put_before_fail $ret 1
+ error_check_good dbc_put_before_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest0$tnum ($dupopt): DB_BEFORE returns EINVAL."
+ puts "\tTest0$tnum ($dupopt): Correct error returns,\
+ skipping further test."
+ # continue with broad foreach
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ continue
+ }
+
+ puts "\tTest0$tnum.e ($dupopt): DBC->put(DB_AFTER) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put after it
+ set ret [$dbc put -after -partial {4 0} after]
+ error_check_good dbc_put_after $ret 0
+ }
+
+ puts "\tTest0$tnum.f ($dupopt): DBC->put(DB_BEFORE) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put before it
+ set ret [$dbc put -before -partial {6 0} before]
+ error_check_good dbc_put_before $ret 0
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ eval $db sync
+ puts "\tTest0$tnum.g ($dupopt): Verify correctness."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # loop through the whole db beginning to end,
+ # make sure we have, in order, {$word "\0\0\0\0\0\0before"},
+ # {$word $word}, {$word "\0\0\0\0after"} for each word.
+ set count 0
+ while { $count < $nkeys } {
+ # Get the first item of each set of three.
+ # We don't know what the word is, but set $word to
+ # the key and check that the data is
+ # "\0\0\0\0\0\0before".
+ set dbt [$dbc get -next]
+ set word [lindex [lindex $dbt 0] 0]
+
+ error_check_good dbc_get_one $dbt \
+ [list [list $word "\0\0\0\0\0\0before"]]
+
+ set dbt [$dbc get -next]
+ error_check_good \
+ dbc_get_two $dbt [list [list $word $word]]
+
+ set dbt [$dbc get -next]
+ error_check_good dbc_get_three $dbt \
+ [list [list $word "\0\0\0\0after"]]
+
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/test069.tcl b/storage/bdb/test/test069.tcl
new file mode 100644
index 00000000000..d986c861358
--- /dev/null
+++ b/storage/bdb/test/test069.tcl
@@ -0,0 +1,14 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test069.tcl,v 11.7 2002/01/11 15:53:52 bostic Exp $
+#
+# TEST test069
+# TEST Test of DB_CURRENT partial puts without duplicates-- test067 w/
+# TEST small ndups to ensure that partial puts to DB_CURRENT work
+# TEST correctly in the absence of duplicate pages.
+proc test069 { method {ndups 50} {tnum 69} args } {
+ eval test067 $method $ndups $tnum $args
+}
diff --git a/storage/bdb/test/test070.tcl b/storage/bdb/test/test070.tcl
new file mode 100644
index 00000000000..986fd079589
--- /dev/null
+++ b/storage/bdb/test/test070.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test070.tcl,v 11.27 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST test070
+# TEST Test of DB_CONSUME (Four consumers, 1000 items.)
+# TEST
+# TEST Fork off six processes, four consumers and two producers.
+# TEST The producers will each put 500 records into a queue;
+# TEST the consumers will each get 250.
+# TEST Then, verify that no record was lost or retrieved twice.
+proc test070 { method {nconsumers 4} {nproducers 2} \
+ {nitems 1000} {mode CONSUME } {start 0} {txn -txn} {tnum 70} args } {
+ source ./include.tcl
+ global alphabet
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test0$tnum skipping for env $env"
+ return
+ }
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test0$tnum skipping for security"
+ return
+ }
+
+ puts "Test0$tnum: $method ($args) Test of DB_$mode flag to DB->get."
+ puts "\tUsing $txn environment."
+
+ error_check_good enough_consumers [expr $nconsumers > 0] 1
+ error_check_good enough_producers [expr $nproducers > 0] 1
+
+ if { [is_queue $method] != 1 } {
+ puts "\tSkipping Test0$tnum for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+ set testfile test0$tnum.db
+
+ # Create environment
+ set dbenv [eval {berkdb_env -create $txn -home } $testdir]
+ error_check_good dbenv_create [is_valid_env $dbenv] TRUE
+
+ # Create database
+ set db [eval {berkdb_open -create -mode 0644 -queue}\
+ -env $dbenv $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $start != 0 } {
+ error_check_good set_seed [$db put $start "consumer data"] 0
+ puts "\tStarting at $start."
+ } else {
+ incr start
+ }
+
+ set pidlist {}
+
+ # Divvy up the total number of records amongst the consumers and
+ # producers.
+ error_check_good cons_div_evenly [expr $nitems % $nconsumers] 0
+ error_check_good prod_div_evenly [expr $nitems % $nproducers] 0
+ set nperconsumer [expr $nitems / $nconsumers]
+ set nperproducer [expr $nitems / $nproducers]
+
+ set consumerlog $testdir/CONSUMERLOG.
+
+ # Fork consumer processes (we want them to be hungry)
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set output $consumerlog$ndx
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.consumer$ndx \
+ $testdir $testfile $mode $nperconsumer $output $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+ for { set ndx 0 } { $ndx < $nproducers } { incr ndx } {
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.producer$ndx \
+ $testdir $testfile PRODUCE $nperproducer "" $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+
+ # Wait for all children.
+ watch_procs $pidlist 10
+
+ # Verify: slurp all record numbers into list, sort, and make
+ # sure each appears exactly once.
+ puts "\tTest0$tnum: Verifying results."
+ set reclist {}
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set input $consumerlog$ndx
+ set iid [open $input r]
+ while { [gets $iid str] != -1 } {
+ # Convert high ints to negative ints, to
+ # simulate Tcl's behavior on a 32-bit machine
+ # even if we're on a 64-bit one.
+ if { $str > 0x7fffffff } {
+ set str [expr $str - 1 - 0xffffffff]
+ }
+ lappend reclist $str
+ }
+ close $iid
+ }
+ set sortreclist [lsort -integer $reclist]
+
+ set nitems [expr $start + $nitems]
+ for { set ndx $start } { $ndx < $nitems } { incr ndx } {
+ # Convert high ints to negative ints, to simulate
+ # 32-bit behavior on 64-bit platforms.
+ if { $ndx > 0x7fffffff } {
+ set cmp [expr $ndx - 1 - 0xffffffff]
+ } else {
+ set cmp [expr $ndx + 0]
+ }
+ # Skip 0 if we are wrapping around
+ if { $cmp == 0 } {
+ incr ndx
+ incr nitems
+ incr cmp
+ }
+ # Be sure to convert ndx to a number before comparing.
+ error_check_good pop_num [lindex $sortreclist 0] $cmp
+ set sortreclist [lreplace $sortreclist 0 0]
+ }
+ error_check_good list_ends_empty $sortreclist {}
+ error_check_good db_close [$db close] 0
+ error_check_good dbenv_close [$dbenv close] 0
+
+ puts "\tTest0$tnum completed successfully."
+}
diff --git a/storage/bdb/test/test071.tcl b/storage/bdb/test/test071.tcl
new file mode 100644
index 00000000000..3f2604022f1
--- /dev/null
+++ b/storage/bdb/test/test071.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test071.tcl,v 11.9 2002/01/11 15:53:53 bostic Exp $
+#
+# TEST test071
+# TEST Test of DB_CONSUME (One consumer, 10000 items.)
+# TEST This is DB Test 70, with one consumer, one producer, and 10000 items.
+proc test071 { method {nconsumers 1} {nproducers 1}\
+ {nitems 10000} {mode CONSUME} {start 0 } {txn -txn} {tnum 71} args } {
+
+ eval test070 $method \
+ $nconsumers $nproducers $nitems $mode $start $txn $tnum $args
+}
diff --git a/storage/bdb/test/test072.tcl b/storage/bdb/test/test072.tcl
new file mode 100644
index 00000000000..3c08f93975d
--- /dev/null
+++ b/storage/bdb/test/test072.tcl
@@ -0,0 +1,252 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test072.tcl,v 11.27 2002/07/01 15:40:48 krinsky Exp $
+#
+# TEST test072
+# TEST Test of cursor stability when duplicates are moved off-page.
+proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "a key"
+ set key "the key"
+ set postkey "z key"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set postdatum "0987654321"
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "\n Test of cursor stability when\
+ duplicates are moved off-page."
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test0$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ append args " -pagesize $pagesize "
+ set txn ""
+
+ set dlist [list "-dup" "-dup -dupsort"]
+ set testid 0
+ foreach dupopt $dlist {
+ incr testid
+ set duptestfile $testfile$testid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $dupopt {$duptestfile}]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ puts \
+"\tTest0$tnum.a: ($dupopt) Set up surrounding keys and cursors."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$prekey $predatum}]
+ error_check_good pre_put $ret 0
+ set ret [eval {$db put} $txn {$postkey $postdatum}]
+ error_check_good post_put $ret 0
+
+ set precursor [eval {$db cursor} $txn]
+ error_check_good precursor [is_valid_cursor $precursor \
+ $db] TRUE
+ set postcursor [eval {$db cursor} $txn]
+ error_check_good postcursor [is_valid_cursor $postcursor \
+ $db] TRUE
+ error_check_good preset [$precursor get -set $prekey] \
+ [list [list $prekey $predatum]]
+ error_check_good postset [$postcursor get -set $postkey] \
+ [list [list $postkey $postdatum]]
+
+ puts "\tTest0$tnum.b: Put/create cursor/verify all cursor loop."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Uncomment these lines to see intermediate steps.
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ puts "\tTest0$tnum.c: Reverse Put/create cursor/verify all cursor loop."
+ set end [expr $ndups * 2 - 1]
+ for { set i $end } { $i >= $ndups } { set i [expr $i - 1] } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Uncomment these lines to see intermediate steps.
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ error_check_bad dbc($i)_stomped [info exists dbc($i)] 1
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j $i } { $j < $end } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ # Close cursors.
+ puts "\tTest0$tnum.d: Closing cursors."
+ for { set i 0 } { $i <= $end } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ unset dbc
+ error_check_good precursor_close [$precursor close] 0
+ error_check_good postcursor_close [$postcursor close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/test073.tcl b/storage/bdb/test/test073.tcl
new file mode 100644
index 00000000000..02a0f3b0d19
--- /dev/null
+++ b/storage/bdb/test/test073.tcl
@@ -0,0 +1,290 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test073.tcl,v 11.23 2002/05/22 15:42:59 sue Exp $
+#
+# TEST test073
+# TEST Test of cursor stability on duplicate pages.
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST e. Ditto, but after each one in sequence from last to first.
+# TEST (In step e, the puts occur relative to the new datum.)
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
+proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set key "the key"
+ set txn ""
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "cursor stability on duplicate pages."
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test073: skipping for specific pagesizes"
+ return
+ }
+
+ append args " -pagesize $pagesize -dup"
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys 0
+
+ puts "\tTest0$tnum.a.1: Initializing put loop; $ndups dups, short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set is_long($i) 0
+ incr keys
+ }
+
+ puts "\tTest0$tnum.a.2: Initializing cursor get loop; $keys dups."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 0 } { $i < $keys } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+ }
+
+ puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ error_check_good "cursor close" [$curs close] 0
+
+ set is_long($i) 1
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest0$tnum.g: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+}
+
+# !!!: This procedure is also used by test087.
+proc makedatum_t73 { num is_long } {
+ global alphabet
+ if { $is_long == 1 } {
+ set a $alphabet$alphabet$alphabet
+ } else {
+ set a abcdefghijklm
+ }
+
+ # format won't do leading zeros, alas.
+ if { $num / 1000 > 0 } {
+ set i $num
+ } elseif { $num / 100 > 0 } {
+ set i 0$num
+ } elseif { $num / 10 > 0 } {
+ set i 00$num
+ } else {
+ set i 000$num
+ }
+
+ return $i$a
+}
+
+# !!!: This procedure is also used by test087.
+proc verify_t73 { is_long_array curs_array numkeys key } {
+ upvar $is_long_array is_long
+ upvar $curs_array dbc
+ upvar db db
+
+ #useful for debugging, perhaps.
+ eval $db sync
+
+ for { set j 0 } { $j < $numkeys } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ error_check_good\
+ "cursor $j key correctness (with $numkeys total items)"\
+ $k $key
+ error_check_good\
+ "cursor $j data correctness (with $numkeys total items)"\
+ $d [makedatum_t73 $j $is_long($j)]
+ }
+}
diff --git a/storage/bdb/test/test074.tcl b/storage/bdb/test/test074.tcl
new file mode 100644
index 00000000000..7f620db2d97
--- /dev/null
+++ b/storage/bdb/test/test074.tcl
@@ -0,0 +1,271 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test074.tcl,v 11.17 2002/05/24 15:24:56 sue Exp $
+#
+# TEST test074
+# TEST Test of DB_NEXT_NODUP.
+proc test074 { method {dir -nextnodup} {nitems 100} {tnum 74} args } {
+ source ./include.tcl
+ global alphabet
+ global rand_init
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ berkdb srand $rand_init
+
+ # Data prefix--big enough that we get a mix of on-page, off-page,
+ # and multi-off-page dups with the default nitems
+ if { [is_fixed_length $method] == 1 } {
+ set globaldata "somedata"
+ } else {
+ set globaldata [repeat $alphabet 4]
+ }
+
+ puts "Test0$tnum $omethod ($args): Test of $dir"
+
+ # First, test non-dup (and not-very-interesting) case with
+ # all db types.
+
+ puts "\tTest0$tnum.a: No duplicates."
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-nodup.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-nodup.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} $omethod\
+ $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
+
+ # Insert nitems items.
+ puts "\t\tTest0$tnum.a.1: Put loop."
+ for {set i 1} {$i <= $nitems} {incr i} {
+ #
+ # If record based, set key to $i * 2 to leave
+ # holes/unused entries for further testing.
+ #
+ if {[is_record_based $method] == 1} {
+ set key [expr $i * 2]
+ } else {
+ set key "key$i"
+ }
+ set data "$globaldata$i"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\t\tTest0$tnum.a.2: Get($dir)"
+
+ # foundarray($i) is set when key number i is found in the database
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Initialize foundarray($i) to zero for all $i
+ for {set i 1} {$i < $nitems} {incr i} {
+ set foundarray($i) 0
+ }
+
+ # Walk database using $dir and record each key gotten.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ if {[is_record_based $method] == 1} {
+ set num [expr $key / 2]
+ set desired_key $key
+ error_check_good $method:num $key [expr $num * 2]
+ } else {
+ set num [string range $key 3 end]
+ set desired_key key$num
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ [pad_data $method $globaldata$num]]]
+
+ set foundarray($num) 1
+ }
+
+ puts "\t\tTest0$tnum.a.3: Final key."
+ error_check_good last_db_get [$dbc get $dir] [list]
+
+ puts "\t\tTest0$tnum.a.4: Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close(nodup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # If we are a method that doesn't allow dups, verify that
+ # we get an empty list if we try to use DB_NEXT_DUP
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ puts "\t\tTest0$tnum.a.5: Check DB_NEXT_DUP for $method."
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set dbt [$dbc get $dir]
+ error_check_good $method:nextdup [$dbc get -nextdup] [list]
+ error_check_good dbc_close(nextdup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good db_close(nodup) [$db close] 0
+
+ # Quit here if we're a method that won't allow dups.
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: Skipping remainder for method $method."
+ return
+ }
+
+ foreach opt { "-dup" "-dupsort" } {
+
+ #
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum$opt.db
+ } else {
+ set testfile test0$tnum$opt.db
+ }
+
+ if { [string compare $opt "-dupsort"] == 0 } {
+ set opt "-dup -dupsort"
+ }
+
+ puts "\tTest0$tnum.b: Duplicates ($opt)."
+
+ puts "\t\tTest0$tnum.b.1 ($opt): Put loop."
+ set db [eval {berkdb_open -create -mode 0644}\
+ $opt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Insert nitems different keys such that key i has i dups.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set key key$i
+
+ for {set j 1} {$j <= $i} {incr j} {
+ if { $j < 10 } {
+ set data "${globaldata}00$j"
+ } elseif { $j < 100 } {
+ set data "${globaldata}0$j"
+ } else {
+ set data "$globaldata$j"
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($i,$j) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ # Initialize foundarray($i) to 0 for all i.
+ unset foundarray
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set foundarray($i) 0
+ }
+
+ # Get loop--after each get, move forward a random increment
+ # within the duplicate set.
+ puts "\t\tTest0$tnum.b.2 ($opt): Get loop."
+ set one "001"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc($opt) [is_valid_cursor $dbc $db] TRUE
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ set num [string range $key 3 end]
+
+ set desired_key key$num
+ if { [string compare $dir "-prevnodup"] == 0 } {
+ if { $num < 10 } {
+ set one "00$num"
+ } elseif { $num < 100 } {
+ set one "0$num"
+ } else {
+ set one $num
+ }
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ "$globaldata$one"]]
+
+ set foundarray($num) 1
+
+ # Go forward by some number w/i dup set.
+ set inc [berkdb random_int 0 [expr $num - 1]]
+ for { set j 0 } { $j < $inc } { incr j } {
+ eval {$dbc get -nextdup}
+ }
+ }
+
+ puts "\t\tTest0$tnum.b.3 ($opt): Final key."
+ error_check_good last_db_get($opt) [$dbc get $dir] [list]
+
+ # Verify
+ puts "\t\tTest0$tnum.b.4 ($opt): Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/test075.tcl b/storage/bdb/test/test075.tcl
new file mode 100644
index 00000000000..540d8f0ed73
--- /dev/null
+++ b/storage/bdb/test/test075.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test075.tcl,v 11.21 2002/08/08 15:38:11 bostic Exp $
+#
+# TEST test075
+# TEST Test of DB->rename().
+# TEST (formerly test of DB_TRUNCATE cached page invalidation [#1487])
+proc test075 { method { tnum 75 } args } {
+ global encrypt
+ global errorCode
+ global errorInfo
+
+ source ./include.tcl
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum: $method ($args): Test of DB->rename()"
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ # If we are using an env, then skip this test.
+ # It needs its own.
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Skipping test075 for env $env"
+ return
+ }
+ if { $encrypt != 0 } {
+ puts "Skipping test075 for security"
+ return
+ }
+
+ # Define absolute pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+ set reldir $testdir
+
+ # Set up absolute and relative pathnames for test
+ set paths [list $fulldir $reldir]
+ foreach path $paths {
+ puts "\tTest0$tnum: starting test of $path path"
+ set oldfile $path/test0$tnum-old.db
+ set newfile $path/test0$tnum.db
+ set env NULL
+ set envargs ""
+
+ # Loop through test using the following rename options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. rename with auto-commit
+ # 4. rename in committed transaction
+ # 5. rename in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
+
+ puts "\tTest0$tnum.a: Create/rename file with $op"
+
+ # Make sure we're starting with a clean slate.
+
+ if { $op == "noenv" } {
+ cleanup $path $env
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+ }
+ }
+
+ if { $op == "env" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ if { $op == "auto" || $op == "commit" || $op == "abort" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path -txn]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ puts "\t\tTest0$tnum.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $envargs $args $oldfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+ # The nature of the key and data are unimportant;
+ # use numeric key to record-based methods don't need
+ # special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tTest0$tnum.a.2: rename"
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+ # Regular renames use berkdb dbrename but transaction
+ # protected renames must use $env dbrename.
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good rename_file [eval {berkdb dbrename} \
+ $envargs $oldfile $newfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good rename_file [eval {$env dbrename} \
+ -auto_commit $oldfile $newfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$env txn]
+ error_check_good rename_file [eval {$env dbrename} \
+ -txn $txn $oldfile $newfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 0
+ }
+
+ puts "\t\tTest0$tnum.a.3: check"
+ # Open again with create to make sure we're not caching or
+ # anything silly. In the normal case (no env), we already
+ # know the file doesn't exist.
+ set odb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $oldfile]
+ set ndb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $newfile]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+
+ # The DBT from the "old" database should be empty,
+ # not the "new" one, except in the case of an abort.
+ set odbt [$odb get $key]
+ if { $op == "abort" } {
+ error_check_good odbt_has_data [llength $odbt] 1
+ } else {
+ set ndbt [$ndb get $key]
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex \
+ [lindex $ndbt 0] 1] $data
+ }
+ error_check_good odb_close [$odb close] 0
+ error_check_good ndb_close [$ndb close] 0
+
+ # Now there's both an old and a new. Rename the
+ # "new" to the "old" and make sure that fails.
+ #
+ # XXX Ideally we'd do this test even when there's
+ # an external environment, but that env has
+ # errpfx/errfile set now. :-(
+ puts "\tTest0$tnum.b: Make sure rename fails\
+ instead of overwriting"
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ set env [berkdb_env_noerr -home $path]
+ error_check_good env_open2 \
+ [is_valid_env $env] TRUE
+ set ret [catch {eval {berkdb dbrename} \
+ -env $env $newfile $oldfile} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret \
+ [is_substr $errorCode EEXIST] 1
+ }
+
+ # Verify and then start over from a clean slate.
+ verify_dir $path "\tTest0$tnum.c: "
+ cleanup $path $env
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ }
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+
+ set oldfile test0$tnum-old.db
+ set newfile test0$tnum.db
+ }
+ }
+ }
+}
diff --git a/storage/bdb/test/test076.tcl b/storage/bdb/test/test076.tcl
new file mode 100644
index 00000000000..9f7b1ed2972
--- /dev/null
+++ b/storage/bdb/test/test076.tcl
@@ -0,0 +1,80 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test076.tcl,v 1.18 2002/07/08 20:16:31 sue Exp $
+#
+# TEST test076
+# TEST Test creation of many small databases in a single environment. [#1528].
+proc test076 { method { ndbs 1000 } { tnum 76 } args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ set key ""
+ } else {
+ set key "key"
+ }
+ set data "datamoredatamoredata"
+
+ # Create an env if we weren't passed one.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set deleteenv 1
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $encargs]
+ error_check_good env [is_valid_env $env] TRUE
+ set args "$args -env $env"
+ } else {
+ set deleteenv 0
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndbs == 1000 } {
+ set ndbs 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts -nonewline "Test0$tnum $method ($args): "
+ puts -nonewline "Create $ndbs"
+ puts " small databases in one env."
+
+ cleanup $testdir $env
+ set txn ""
+
+ for { set i 1 } { $i <= $ndbs } { incr i } {
+ set testfile test0$tnum.$i.db
+
+ set db [eval {berkdb_open -create -mode 0644}\
+ $args $omethod $testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i \
+ [chop_data $method $data$i]}]
+ error_check_good db_put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close($i) [$db close] 0
+ }
+
+ if { $deleteenv == 1 } {
+ error_check_good env_close [$env close] 0
+ }
+
+ puts "\tTest0$tnum passed."
+}
diff --git a/storage/bdb/test/test077.tcl b/storage/bdb/test/test077.tcl
new file mode 100644
index 00000000000..99cf432af20
--- /dev/null
+++ b/storage/bdb/test/test077.tcl
@@ -0,0 +1,93 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test077.tcl,v 1.10 2002/05/24 15:24:57 sue Exp $
+#
+# TEST test077
+# TEST Test of DB_GET_RECNO [#1206].
+proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum: Test of DB_GET_RECNO."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest0$tnum: Skipping for method $method."
+ return
+ }
+
+ set data $alphabet
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.a: Populating database."
+ set txn ""
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ set key [format %5d $i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good db_put($key) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest0$tnum.b: Verifying record numbers."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc_open [is_valid_cursor $dbc $db] TRUE
+
+ set i 1
+ for { set dbt [$dbc get -first] } \
+ { [string length $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set recno [$dbc get -get_recno]
+ set keynum [expr [lindex [lindex $dbt 0] 0]]
+
+ # Verify that i, the number that is the key, and recno
+ # are all equal.
+ error_check_good key($i) $keynum $i
+ error_check_good recno($i) $recno $i
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test078.tcl b/storage/bdb/test/test078.tcl
new file mode 100644
index 00000000000..45a1d46466e
--- /dev/null
+++ b/storage/bdb/test/test078.tcl
@@ -0,0 +1,130 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test078.tcl,v 1.18 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test078
+# TEST Test of DBC->c_count(). [#303]
+proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
+ source ./include.tcl
+ global alphabet rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: Test of key counts."
+
+ berkdb srand $rand_init
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-a.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-a.db
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest0$tnum.a: No duplicates, trivial answer."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test078: skipping for specific pagesizes"
+ return
+ }
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $alphabet$i]}]
+ error_check_good put.a($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good count.a [$db count $i] 1
+ }
+ error_check_good db_close.a [$db close] 0
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts \
+ "\tTest0$tnum.b: Duplicates not supported in $method, skipping."
+ return
+ }
+
+ foreach tuple {{b sorted "-dup -dupsort"} {c unsorted "-dup"}} {
+ set letter [lindex $tuple 0]
+ set dupopt [lindex $tuple 2]
+
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest0$tnum.$letter: Duplicates ([lindex $tuple 1])."
+
+ puts "\t\tTest0$tnum.$letter.1: Populating database."
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $dupopt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ for { set j 0 } { $j < $i } { incr j } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $j$alphabet]}]
+ error_check_good put.$letter,$i $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ puts -nonewline "\t\tTest0$tnum.$letter.2: "
+ puts "Verifying dup counts on first dup."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ error_check_good count.$letter,$i \
+ [$db count $i] $i
+ }
+
+ puts -nonewline "\t\tTest0$tnum.$letter.3: "
+ puts "Verifying dup counts on random dup."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ set key [berkdb random_int 1 $nkeys]
+ error_check_good count.$letter,$i \
+ [$db count $i] $i
+ }
+ error_check_good db_close.$letter [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/test079.tcl b/storage/bdb/test/test079.tcl
new file mode 100644
index 00000000000..70fd4e05090
--- /dev/null
+++ b/storage/bdb/test/test079.tcl
@@ -0,0 +1,20 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test079.tcl,v 11.8 2002/01/11 15:53:54 bostic Exp $
+#
+# TEST test079
+# TEST Test of deletes in large trees. (test006 w/ sm. pagesize).
+# TEST
+# TEST Check that delete operations work in large btrees. 10000 entries
+# TEST and a pagesize of 512 push this out to a four-level btree, with a
+# TEST small fraction of the entries going on overflow pages.
+proc test079 { method {nentries 10000} {pagesize 512} {tnum 79} args} {
+ if { [ is_queueext $method ] == 1 } {
+ set method "queue";
+ lappend args "-extent" "20"
+ }
+ eval {test006 $method $nentries 1 $tnum -pagesize $pagesize} $args
+}
diff --git a/storage/bdb/test/test080.tcl b/storage/bdb/test/test080.tcl
new file mode 100644
index 00000000000..9f649496f68
--- /dev/null
+++ b/storage/bdb/test/test080.tcl
@@ -0,0 +1,126 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test080.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test080
+# TEST Test of DB->remove()
+proc test080 { method {tnum 80} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: Test of DB->remove()"
+
+ # Determine full path
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+
+ # Test both relative and absolute path
+ set paths [list $fulldir $testdir]
+
+ # If we are using an env, then skip this test.
+ # It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ if { $encargs != ""} {
+ puts "Skipping test080 for security"
+ return
+ }
+ if { $eindex != -1 } {
+ incr eindex
+ set e [lindex $args $eindex]
+ puts "Skipping test080 for env $e"
+ return
+ }
+
+ foreach path $paths {
+
+ set dbfile test0$tnum.db
+ set testfile $path/$dbfile
+
+ # Loop through test using the following remove options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. rename with auto-commit
+ # 4. rename in committed transaction
+ # 5. rename in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
+
+ # Make sure we're starting with a clean slate.
+ env_cleanup $testdir
+ if { $op == "noenv" } {
+ set dbfile $testfile
+ set e NULL
+ set envargs ""
+ } else {
+ if { $op == "env" } {
+ set largs ""
+ } else {
+ set largs " -txn"
+ }
+ set e [eval {berkdb_env -create -home $path} $largs]
+ set envargs "-env $e"
+ error_check_good env_open [is_valid_env $e] TRUE
+ }
+
+ puts "\tTest0$tnum: dbremove with $op in $path"
+ puts "\tTest0$tnum.a.1: Create file"
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $envargs $args {$dbfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # The nature of the key and data are unimportant;
+ # use numeric key to record-based methods don't need
+ # special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good file_exists_before \
+ [file exists $testfile] 1
+
+ # Use berkdb dbremove for non-transactional tests
+ # and $env dbremove for transactional tests
+ puts "\tTest0$tnum.a.2: Remove file"
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good remove_$op \
+ [eval {berkdb dbremove} $envargs $dbfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good remove_$op \
+ [eval {$e dbremove} -auto_commit $dbfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$e txn]
+ error_check_good remove_$op \
+ [eval {$e dbremove} -txn $txn $dbfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ puts "\tTest0$tnum.a.3: Check that file is gone"
+ # File should now be gone, except in the case of an abort.
+ if { $op != "abort" } {
+ error_check_good exists_after \
+ [file exists $testfile] 0
+ } else {
+ error_check_good exists_after \
+ [file exists $testfile] 1
+ }
+
+ if { $e != "NULL" } {
+ error_check_good env_close [$e close] 0
+ }
+
+ set dbfile test0$tnum-old.db
+ set testfile $path/$dbfile
+ }
+ }
+}
diff --git a/storage/bdb/test/test081.tcl b/storage/bdb/test/test081.tcl
new file mode 100644
index 00000000000..37c2b44ac33
--- /dev/null
+++ b/storage/bdb/test/test081.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test081.tcl,v 11.6 2002/01/11 15:53:55 bostic Exp $
+#
+# TEST test081
+# TEST Test off-page duplicates and overflow pages together with
+# TEST very large keys (key/data as file contents).
+proc test081 { method {ndups 13} {tnum 81} args} {
+ source ./include.tcl
+
+ eval {test017 $method 1 $ndups $tnum} $args
+}
diff --git a/storage/bdb/test/test082.tcl b/storage/bdb/test/test082.tcl
new file mode 100644
index 00000000000..e8c1fa45a92
--- /dev/null
+++ b/storage/bdb/test/test082.tcl
@@ -0,0 +1,14 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test082.tcl,v 11.5 2002/01/11 15:53:55 bostic Exp $
+#
+# TEST test082
+# TEST Test of DB_PREV_NODUP (uses test074).
+proc test082 { method {dir -prevnodup} {nitems 100} {tnum 82} args} {
+ source ./include.tcl
+
+ eval {test074 $method $dir $nitems $tnum} $args
+}
diff --git a/storage/bdb/test/test083.tcl b/storage/bdb/test/test083.tcl
new file mode 100644
index 00000000000..e4168ee1c43
--- /dev/null
+++ b/storage/bdb/test/test083.tcl
@@ -0,0 +1,162 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test083.tcl,v 11.13 2002/06/24 14:06:38 sue Exp $
+#
+# TEST test083
+# TEST Test of DB->key_range.
+proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
+ source ./include.tcl
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test083 $method ($args): Test of DB->key_range"
+ if { [is_btree $method] != 1 } {
+ puts "\tTest083: Skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test083: skipping for specific pagesizes"
+ return
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test083.db
+ set env NULL
+ } else {
+ set testfile test083.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ # We assume that numbers will be at most six digits wide
+ error_check_bad maxitems_range [expr $maxitems > 999999] 1
+
+ # We want to test key_range on a variety of sizes of btree.
+ # Start at ten keys and work up to $maxitems keys, at each step
+ # multiplying the number of keys by $step.
+ for { set nitems 10 } { $nitems <= $maxitems }\
+ { set nitems [expr $nitems * $step] } {
+
+ puts "\tTest083.a: Opening new database"
+ if { $env != "NULL"} {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} \
+ -pagesize $pgsz $omethod $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ t83_build $db $nitems $env $txnenv
+ t83_test $db $nitems $env $txnenv
+
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc t83_build { db nitems env txnenv } {
+ source ./include.tcl
+
+ puts "\tTest083.b: Populating database with $nitems keys"
+
+ set keylist {}
+ puts "\t\tTest083.b.1: Generating key list"
+ for { set i 0 } { $i < $nitems } { incr i } {
+ lappend keylist $i
+ }
+
+ # With randomly ordered insertions, the range of errors we
+ # get from key_range can be unpredictably high [#2134]. For now,
+ # just skip the randomization step.
+ #puts "\t\tTest083.b.2: Randomizing key list"
+ #set keylist [randomize_list $keylist]
+ #puts "\t\tTest083.b.3: Populating database with randomized keys"
+
+ puts "\t\tTest083.b.2: Populating database"
+ set data [repeat . 50]
+ set txn ""
+ foreach keynum $keylist {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key[format %6d $keynum] $data}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+}
+
+proc t83_test { db nitems env txnenv } {
+ # Look at the first key, then at keys about 1/4, 1/2, 3/4, and
+ # all the way through the database. Make sure the key_ranges
+ # aren't off by more than 10%.
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ } else {
+ set txn ""
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest083.c: Verifying ranges..."
+
+ for { set i 0 } { $i < $nitems } \
+ { incr i [expr $nitems / [berkdb random_int 3 16]] } {
+ puts "\t\t...key $i"
+ error_check_bad key0 [llength [set dbt [$dbc get -first]]] 0
+
+ for { set j 0 } { $j < $i } { incr j } {
+ error_check_bad key$j \
+ [llength [set dbt [$dbc get -next]]] 0
+ }
+
+ set ranges [$db keyrange [lindex [lindex $dbt 0] 0]]
+
+ #puts $ranges
+ error_check_good howmanyranges [llength $ranges] 3
+
+ set lessthan [lindex $ranges 0]
+ set morethan [lindex $ranges 2]
+
+ set rangesum [expr $lessthan + [lindex $ranges 1] + $morethan]
+
+ roughly_equal $rangesum 1 0.05
+
+ # Wild guess.
+ if { $nitems < 500 } {
+ set tol 0.3
+ } elseif { $nitems > 500 } {
+ set tol 0.15
+ }
+
+ roughly_equal $lessthan [expr $i * 1.0 / $nitems] $tol
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+}
+
+proc roughly_equal { a b tol } {
+ error_check_good "$a =~ $b" [expr $a - $b < $tol] 1
+}
diff --git a/storage/bdb/test/test084.tcl b/storage/bdb/test/test084.tcl
new file mode 100644
index 00000000000..89bc13978b0
--- /dev/null
+++ b/storage/bdb/test/test084.tcl
@@ -0,0 +1,53 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test084.tcl,v 11.11 2002/07/13 18:09:14 margo Exp $
+#
+# TEST test084
+# TEST Basic sanity test (test001) with large (64K) pages.
+proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
+ source ./include.tcl
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-empty.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-empty.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test084: skipping for specific pagesizes"
+ return
+ }
+
+ cleanup $testdir $env
+
+ set args "-pagesize $pagesize $args"
+
+ eval {test001 $method $nentries 0 $tnum 0} $args
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ # For good measure, create a second database that's empty
+ # with the large page size. (There was a verifier bug that
+ # choked on empty 64K pages. [#2408])
+ set db [eval {berkdb_open -create -mode 0644} $args $omethod $testfile]
+ error_check_good empty_db [is_valid_db $db] TRUE
+ error_check_good empty_db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test085.tcl b/storage/bdb/test/test085.tcl
new file mode 100644
index 00000000000..b0412d6fe68
--- /dev/null
+++ b/storage/bdb/test/test085.tcl
@@ -0,0 +1,332 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test085.tcl,v 1.13 2002/08/08 17:23:46 sandstro Exp $
+#
+# TEST test085
+# TEST Test of cursor behavior when a cursor is pointing to a deleted
+# TEST btree key which then has duplicates added. [#2473]
+proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test085: skipping for specific pagesizes"
+ return
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "AA"
+ set key "BBB"
+ set postkey "CCCC"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set datum $alphabet
+ set postdatum "0987654321"
+ set txn ""
+
+ append args " -pagesize $pagesize -dup"
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+
+ # Skip for all non-btrees. (Rbtrees don't count as btrees, for
+ # now, since they don't support dups.)
+ if { [is_btree $method] != 1 } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "Duplicates w/ deleted item cursor."
+ }
+
+ # Repeat the test with both on-page and off-page numbers of dups.
+ foreach ndups "$onp $offp" {
+ # Put operations we want to test on a cursor set to the
+ # deleted item, the key to use with them, and what should
+ # come before and after them given a placement of
+ # the deleted item at the beginning or end of the dupset.
+ set final [expr $ndups - 1]
+ set putops {
+ {{-before} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-before} "" {[test085_ddatum $final]} $postdatum end}
+ {{-current} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-current} "" {[test085_ddatum $final]} $postdatum end}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} beginning}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} end}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum beginning}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum end}
+ {{-after} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-after} "" {[test085_ddatum $final]} $postdatum end}
+ }
+
+ # Get operations we want to test on a cursor set to the
+ # deleted item, any args to get, and the expected key/data pair.
+ set getops {
+ {{-current} "" "" "" beginning}
+ {{-current} "" "" "" end}
+ {{-next} "" $key {[test085_ddatum 0]} beginning}
+ {{-next} "" $postkey $postdatum end}
+ {{-prev} "" $prekey $predatum beginning}
+ {{-prev} "" $key {[test085_ddatum $final]} end}
+ {{-first} "" $prekey $predatum beginning}
+ {{-first} "" $prekey $predatum end}
+ {{-last} "" $postkey $postdatum beginning}
+ {{-last} "" $postkey $postdatum end}
+ {{-nextdup} "" $key {[test085_ddatum 0]} beginning}
+ {{-nextdup} "" EMPTYLIST "" end}
+ {{-nextnodup} "" $postkey $postdatum beginning}
+ {{-nextnodup} "" $postkey $postdatum end}
+ {{-prevnodup} "" $prekey $predatum beginning}
+ {{-prevnodup} "" $prekey $predatum end}
+ }
+
+ set txn ""
+ foreach pair $getops {
+ set op [lindex $pair 0]
+ puts "\tTest0$tnum: Get ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $omethod $encargs $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
+ } else {
+ set c [eval {$db cursor} $txn]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ set gargs [lindex $pair 1]
+ set ekey ""
+ set edata ""
+ eval set ekey [lindex $pair 2]
+ eval set edata [lindex $pair 3]
+
+ set dbt [eval $dbc get $op $gargs]
+ if { [string compare $ekey EMPTYLIST] == 0 } {
+ error_check_good dbt($op,$ndups) \
+ [llength $dbt] 0
+ } else {
+ error_check_good dbt($op,$ndups) $dbt \
+ [list [list $ekey $edata]]
+ }
+ error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t"
+
+ # Remove testfile so we can do without truncate flag.
+ # This is okay because we've already done verify and
+ # dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+
+ }
+
+ foreach pair $putops {
+ # Open and set up database.
+ set op [lindex $pair 0]
+ puts "\tTest0$tnum: Put ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $omethod $args $encargs $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
+
+ # Put duplicates.
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
+ } else {
+ set c [eval {$db cursor} $txn]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ # Set up cursors for stability test.
+ set pre_dbc [eval {$db cursor} $txn]
+ error_check_good pre_set [$pre_dbc get -set $prekey] \
+ [list [list $prekey $predatum]]
+ set post_dbc [eval {$db cursor} $txn]
+ error_check_good post_set [$post_dbc get -set $postkey]\
+ [list [list $postkey $postdatum]]
+ set first_dbc [eval {$db cursor} $txn]
+ error_check_good first_set \
+ [$first_dbc get -get_both $key [test085_ddatum 0]] \
+ [list [list $key [test085_ddatum 0]]]
+ set last_dbc [eval {$db cursor} $txn]
+ error_check_good last_set \
+ [$last_dbc get -get_both $key [test085_ddatum \
+ [expr $ndups - 1]]] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+ set k [lindex $pair 1]
+ set d_before ""
+ set d_after ""
+ eval set d_before [lindex $pair 2]
+ eval set d_after [lindex $pair 3]
+ set newdatum "NewDatum"
+ error_check_good dbc_put($op,$ndups) \
+ [eval $dbc put $op $k $newdatum] 0
+ error_check_good dbc_prev($op,$ndups) \
+ [lindex [lindex [$dbc get -prev] 0] 1] \
+ $d_before
+ error_check_good dbc_current($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $newdatum
+
+ error_check_good dbc_next($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $d_after
+
+ # Verify stability of pre- and post- cursors.
+ error_check_good pre_stable [$pre_dbc get -current] \
+ [list [list $prekey $predatum]]
+ error_check_good post_stable [$post_dbc get -current] \
+ [list [list $postkey $postdatum]]
+ error_check_good first_stable \
+ [$first_dbc get -current] \
+ [list [list $key [test085_ddatum 0]]]
+ error_check_good last_stable \
+ [$last_dbc get -current] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+ foreach c "$pre_dbc $post_dbc $first_dbc $last_dbc" {
+ error_check_good ${c}_close [$c close] 0
+ }
+
+ error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t"
+
+ # Remove testfile so we can do without truncate flag.
+ # This is okay because we've already done verify and
+ # dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+ }
+ }
+}
+
+# Set up the test database; put $prekey, $key, and $postkey with their
+# respective data, and then delete $key with a new cursor. Return that
+# cursor, still pointing to the deleted item.
+proc test085_setup { db txn } {
+ upvar key key
+ upvar prekey prekey
+ upvar postkey postkey
+ upvar predatum predatum
+ upvar postdatum postdatum
+
+ # no one else should ever see this one!
+ set datum "bbbbbbbb"
+
+ error_check_good pre_put [eval {$db put} $txn {$prekey $predatum}] 0
+ error_check_good main_put [eval {$db put} $txn {$key $datum}] 0
+ error_check_good post_put [eval {$db put} $txn {$postkey $postdatum}] 0
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good dbc_getset [$dbc get -get_both $key $datum] \
+ [list [list $key $datum]]
+
+ error_check_good dbc_del [$dbc del] 0
+
+ return $dbc
+}
+
+proc test085_ddatum { a } {
+ global alphabet
+ return $a$alphabet
+}
diff --git a/storage/bdb/test/test086.tcl b/storage/bdb/test/test086.tcl
new file mode 100644
index 00000000000..e15aa1d8bb9
--- /dev/null
+++ b/storage/bdb/test/test086.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test086.tcl,v 11.9 2002/08/06 17:58:00 sandstro Exp $
+#
+# TEST test086
+# TEST Test of cursor stability across btree splits/rsplits with
+# TEST subtransaction aborts (a variant of test048). [#2373]
+proc test086 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set tstn 086
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across aborted\
+ btree splits."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then this test won't work.
+ if { $eindex == -1 } {
+ # But we will be using our own env...
+ set testfile test0$tstn.db
+ } else {
+ puts "\tTest$tstn: Environment provided; skipping test."
+ return
+ }
+ set t1 $testdir/t1
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good berkdb_env [is_valid_env $env] TRUE
+
+ puts "\tTest$tstn.a: Create $method database."
+ set oflags "-auto_commit -create -env $env -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ small key/data pairs, keep at leaf
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ set ret [$db put -txn $txn key000$i $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good commit [$txn commit] 0
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for {set i 0; set ret [$db get -txn $txn key000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get -txn $txn key000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [$db cursor -txn $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # Create child txn.
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn [is_valid_txn $txn $env] TRUE
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 1000
+ puts "\tTest$tstn.d: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $ctxn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $ctxn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $ctxn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ puts "\tTest$tstn.f: Check and see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ # Put (and this time keep) the keys that caused the split.
+ # We'll delete them to test reverse splits.
+ puts "\tTest$tstn.g: Put back added keys."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $txn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $txn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $txn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.h: Delete added keys to force reverse split."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn [is_valid_txn $txn $env] TRUE
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ error_check_good db_del:$i [$db del -txn $ctxn key0$i] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key00$i] 0
+ } else {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key000$i] 0
+ }
+ }
+
+ puts "\tTest$tstn.i: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ puts "\tTest$tstn.j: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.j: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+
+ error_check_good commit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/storage/bdb/test/test087.tcl b/storage/bdb/test/test087.tcl
new file mode 100644
index 00000000000..089664a0002
--- /dev/null
+++ b/storage/bdb/test/test087.tcl
@@ -0,0 +1,290 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test087.tcl,v 11.14 2002/07/08 20:16:31 sue Exp $
+#
+# TEST test087
+# TEST Test of cursor stability when converting to and modifying
+# TEST off-page duplicate pages with subtransaction aborts. [#2373]
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each. Do each put twice,
+# TEST first aborting, then committing, so we're sure to abort the move
+# TEST to off-page dups at some point.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST e. Ditto, but after each one in sequence from last to first.
+# TEST occur relative to the new datum)
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
+proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
+ source ./include.tcl
+ global alphabet
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum $omethod ($args): "
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then return
+ if { $eindex != -1 } {
+ puts "Environment specified; skipping."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test087: skipping for specific pagesizes"
+ return
+ }
+ env_cleanup $testdir
+ set testfile test0$tnum.db
+ set key "the key"
+ append args " -pagesize $pagesize -dup"
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "Cursor stability on dup. pages w/ aborts."
+ }
+
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -auto_commit \
+ -create -env $env -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys $ndups
+
+ puts "\tTest0$tnum.a: put/abort/put/commit loop;\
+ $ndups dups, short data."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(abort,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/abort ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+
+ verify_t73 is_long dbc [expr $i - 1] $key
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(commit,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/commit ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_commit($i) [$ctxn commit] 0
+
+ set is_long($i) 0
+
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ verify_t73 is_long dbc $i $key
+ }
+
+ puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ # We can't do a verification while a child txn is active,
+ # or we'll run into trouble when DEBUG_ROP is enabled.
+ # If this test has trouble, though, uncommenting this
+ # might be illuminating--it makes things a bit more rigorous
+ # and works fine when DEBUG_ROP is not enabled.
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ # verify_t73 is_long dbc $keys $key
+ # verify_t73 is_long dbc $keys $key
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ set is_long($i) 1
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set is_long($i) 0
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Now delete the first item, abort the deletion, and make sure
+ # we're still sane.
+ puts "\tTest0$tnum.g: Cursor delete first item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for the last item.
+ puts "\tTest0$tnum.h: Cursor delete last item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 [expr $keys - 1] 0]
+ error_check_good "c_get(DB_GET_BOTH, [expr $keys - 1])"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for all the items.
+ puts "\tTest0$tnum.i: Cursor delete all items, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ for { set i 1 } { $i < $keys } { incr i } {
+ error_check_good "c_get(DB_NEXT, $i)"\
+ [$curs get -next] [list [list $key [makedatum_t73 $i 0]]]
+ error_check_good "c_del($i)" [$curs del] 0
+ }
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest0$tnum.j: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ error_check_good "db close" [$db close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good "env close" [$env close] 0
+}
diff --git a/storage/bdb/test/test088.tcl b/storage/bdb/test/test088.tcl
new file mode 100644
index 00000000000..7065b4cd642
--- /dev/null
+++ b/storage/bdb/test/test088.tcl
@@ -0,0 +1,172 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test088.tcl,v 11.12 2002/08/05 19:23:51 sandstro Exp $
+#
+# TEST test088
+# TEST Test of cursor stability across btree splits with very
+# TEST deep trees (a variant of test048). [#2514]
+proc test088 { method args } {
+ global errorCode alphabet
+ source ./include.tcl
+
+ set tstn 088
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test088: skipping for specific pagesizes"
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across btree splits."
+
+ set key "key$alphabet$alphabet$alphabet"
+ set data "data$alphabet$alphabet$alphabet"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tstn.db
+ set env NULL
+ } else {
+ set testfile test$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set ps 512
+ set txn ""
+ set oflags "-create -pagesize $ps -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ key/data pairs.
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 30000
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set mkeys 300
+ }
+ for {set i 0; set ret [$db get ${key}00000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get ${key}00000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [eval {$db cursor} $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ puts "\tTest$tstn.d: Add $mkeys pairs to force splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ set ret [eval {$db put} $txn {${key}0$i $data$i}]
+ } elseif { $i >= 1000 } {
+ set ret [eval {$db put} $txn {${key}00$i $data$i}]
+ } elseif { $i >= 100 } {
+ set ret [eval {$db put} $txn {${key}000$i $data$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db put} $txn {${key}0000$i $data$i}]
+ } else {
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Make sure splits happened."
+ # XXX cannot execute stat in presence of txns and cursors.
+ if { $txnenv == 0 } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+ }
+
+ puts "\tTest$tstn.f: Check to see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.g: Delete added keys to force reverse splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ set ret [eval {$db del} $txn {${key}0$i}]
+ } elseif { $i >= 1000 } {
+ set ret [eval {$db del} $txn {${key}00$i}]
+ } elseif { $i >= 100 } {
+ set ret [eval {$db del} $txn {${key}000$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db del} $txn {${key}0000$i}]
+ } else {
+ set ret [eval {$db del} $txn {${key}00000$i}]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.h: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.i: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/storage/bdb/test/test089.tcl b/storage/bdb/test/test089.tcl
new file mode 100644
index 00000000000..d378152f203
--- /dev/null
+++ b/storage/bdb/test/test089.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test089.tcl,v 11.2 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test089
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Enhanced CDB testing to test off-page dups, cursor dups and
+# TEST cursor operations like c_del then c_get.
+proc test089 { method {nentries 1000} args } {
+ global datastr
+ global encrypt
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test089 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set args [convert_args $method $args]
+ set oargs [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test089: ($oargs) $method CDB Test cursor/dup operations"
+
+ # Process arguments
+ # Create the database and open the dictionary
+ set testfile test089.db
+ set testfile1 test089a.db
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set db1 [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest089.a: put loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ set ret [eval {$db1 put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db1 $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+ error_check_good close:$db1 [$db1 close] 0
+
+ # Database is created, now set up environment
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [eval {berkdb envremove} $encargs -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval {berkdb_env_noerr -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ # This tests the failure found in #1923
+ puts "\tTest089.b: test delete then get"
+
+ set db1 [eval {berkdb_open_noerr -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+
+ puts "\tTest089.c: CDB cursor dups"
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+ set stat [catch {$dbc dup} ret]
+ error_check_bad wr_cdup_stat $stat 0
+ error_check_good wr_cdup [is_substr $ret \
+ "Cannot duplicate writeable cursor"] 1
+
+ set dbc_ro [$db1 cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc_ro $db1] TRUE
+ set dup_dbc [$dbc_ro dup]
+ error_check_good rd_cdup [is_valid_cursor $dup_dbc $db1] TRUE
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc_close [$dbc_ro close] 0
+ error_check_good dbc_close [$dup_dbc close] 0
+ error_check_good db_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ if { [is_btree $method] != 1 } {
+ puts "Skipping rest of test089 for $method method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Skipping rest of test089 for specific pagesizes"
+ return
+ }
+ append oargs " -dup "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+ append oargs " -dupsort "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+}
+
+proc test089_dup { testdir encargs oargs method nentries } {
+
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set nkeys 5
+ set data "data"
+ set key "test089_key"
+ set testfile test089.db
+ puts "\tTest089.d: CDB ($oargs) off-page dups"
+ set oflags "-env $env -create -mode 0644 $oargs $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest089.e: Fill page with $nkeys keys, with $nentries dups"
+ for { set k 0 } { $k < $nkeys } { incr k } {
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set ret [$db put $key $i$data$k]
+ error_check_good dbput $ret 0
+ }
+ }
+
+ # Verify we have off-page duplicates
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat "{{Internal pages} 0}"] 1
+
+ set dbc [$db cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest089.f: test delete then get of off-page dups"
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/test090.tcl b/storage/bdb/test/test090.tcl
new file mode 100644
index 00000000000..da90688ffc5
--- /dev/null
+++ b/storage/bdb/test/test090.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test090.tcl,v 11.10 2002/08/15 20:55:21 sandstro Exp $
+#
+# TEST test090
+# TEST Test for functionality near the end of the queue using test001.
+proc test090 { method {nentries 10000} {txn -txn} {tnum "90"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test001 $method $nentries 4294967000 $tnum 0} $args
+}
diff --git a/storage/bdb/test/test091.tcl b/storage/bdb/test/test091.tcl
new file mode 100644
index 00000000000..cfd2a60ebb5
--- /dev/null
+++ b/storage/bdb/test/test091.tcl
@@ -0,0 +1,20 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test091.tcl,v 11.7 2002/01/11 15:53:56 bostic Exp $
+#
+# TEST test091
+# TEST Test of DB_CONSUME_WAIT.
+proc test091 { method {nconsumers 4} \
+ {nproducers 2} {nitems 1000} {start 0 } {tnum "91"} args} {
+ if { [is_queue $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -txn $tnum } $args
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -cdb $tnum } $args
+}
diff --git a/storage/bdb/test/test092.tcl b/storage/bdb/test/test092.tcl
new file mode 100644
index 00000000000..29c1c55a9a9
--- /dev/null
+++ b/storage/bdb/test/test092.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test092.tcl,v 11.13 2002/02/22 15:26:28 sandstro Exp $
+#
+# TEST test092
+# TEST Test of DB_DIRTY_READ [#3395]
+# TEST
+# TEST We set up a database with nentries in it. We then open the
+# TEST database read-only twice. One with dirty read and one without.
+# TEST We open the database for writing and update some entries in it.
+# TEST Then read those new entries via db->get (clean and dirty), and
+# TEST via cursors (clean and dirty).
+proc test092 { method {nentries 1000} args } {
+ source ./include.tcl
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test092 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test092: Dirty Read Test $method $nentries"
+
+ # Create the database and open the dictionary
+ set testfile test092.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set lmax [expr $nentries * 2]
+ set lomax [expr $nentries * 2]
+ set env [eval {berkdb_env -create -txn} $encargs -home $testdir \
+ -lock_max_locks $lmax -lock_max_objects $lomax]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put each key/data pair.
+ # Key is entry, data is entry also.
+ puts "\tTest092.a: put loop"
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} {$key [chop_data $method $str]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+
+ puts "\tTest092.b: Opening all the handles"
+ #
+ # Open all of our handles.
+ # We need:
+ # 1. Our main txn (t).
+ # 2. A txn that can read dirty data (tdr).
+ # 3. A db handle for writing via txn (dbtxn).
+ # 4. A db handle for clean data (dbcl).
+ # 5. A db handle for dirty data (dbdr).
+ # 6. A cursor handle for dirty txn data (clean db handle using
+ # the dirty txn handle on the cursor call) (dbccl1).
+ # 7. A cursor handle for dirty data (dirty on get call) (dbcdr0).
+ # 8. A cursor handle for dirty data (dirty on cursor call) (dbcdr1).
+ set t [$env txn]
+ error_check_good txnbegin [is_valid_txn $t $env] TRUE
+
+ set tdr [$env txn -dirty]
+ error_check_good txnbegin:dr [is_valid_txn $tdr $env] TRUE
+ set dbtxn [eval {berkdb_open -auto_commit -env $env -dirty \
+ -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbtxn [is_valid_db $dbtxn] TRUE
+
+ set dbcl [eval {berkdb_open -auto_commit -env $env \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbcl [is_valid_db $dbcl] TRUE
+
+ set dbdr [eval {berkdb_open -auto_commit -env $env -dirty \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbdr [is_valid_db $dbdr] TRUE
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ #
+ # Now that we have all of our handles, change all the data in there
+ # to be the key and data the same, but data is capitalized.
+ puts "\tTest092.c: put/get data within a txn"
+ set gflags ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092dr_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test092dr.check
+ }
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ustr [string toupper $str]
+ set clret [list [list $key [pad_data $method $str]]]
+ set drret [list [list $key [pad_data $method $ustr]]]
+ #
+ # Put the data in the txn.
+ #
+ set ret [eval {$dbtxn put} -txn $t \
+ {$key [chop_data $method $ustr]}]
+ error_check_good put:$dbtxn $ret 0
+
+ #
+ # Now get the data using the different db handles and
+ # make sure it is dirty or clean data.
+ #
+ # Using the dirty txn should show us dirty data
+ set ret [eval {$dbcl get -txn $tdr} $gflags {$key}]
+ error_check_good dbdr2:get $ret $drret
+
+ set ret [eval {$dbdr get -dirty} $gflags {$key}]
+ error_check_good dbdr1:get $ret $drret
+
+ set ret [eval {$dbdr get -txn $tdr} $gflags {$key}]
+ error_check_good dbdr2:get $ret $drret
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest092.d: Check dirty data using dirty txn and clean db/cursor"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.e: Check dirty data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.f: Check dirty data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ #
+ # We must close these before aborting the real txn
+ # because they all hold read locks on the pages.
+ #
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+
+ #
+ # Now abort the modifying transaction and rerun the data checks.
+ #
+ puts "\tTest092.g: Aborting the write-txn"
+ error_check_good txnabort [$t abort] 0
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092cl_recno.check
+ } else {
+ set checkfunc test092cl.check
+ }
+ puts "\tTest092.h: Check clean data using -dirty cget flag"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.i: Check clean data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.j: Check clean data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ # Clean up our handles
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good tdrcommit [$tdr commit] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+ error_check_good dbclose [$dbcl close] 0
+ error_check_good dbclose [$dbdr close] 0
+ error_check_good dbclose [$dbtxn close] 0
+ error_check_good envclose [$env close] 0
+}
+
+# Check functions for test092; keys and data are identical
+# Clean checks mean keys and data are identical.
+# Dirty checks mean data are uppercase versions of keys.
+proc test092cl.check { key data } {
+ error_check_good "key/data mismatch" $key $data
+}
+
+proc test092cl_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
+
+proc test092dr.check { key data } {
+ error_check_good "key/data mismatch" $key [string tolower $data]
+}
+
+proc test092dr_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data \
+ [string toupper $kvals($key)]
+}
+
diff --git a/storage/bdb/test/test093.tcl b/storage/bdb/test/test093.tcl
new file mode 100644
index 00000000000..e3f8f0103c6
--- /dev/null
+++ b/storage/bdb/test/test093.tcl
@@ -0,0 +1,393 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test093.tcl,v 11.20 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test093
+# TEST Test using set_bt_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc test093 { method {nentries 10000} {tnum "93"} args} {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ cleanup $testdir $env
+ }
+ puts "Test0$tnum: $method ($args) $nentries using btcompare"
+
+
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp1 test093_sort1
+ test093_runbig $omethod $dbargs $nentries $tnum \
+ test093_cmp1 test093_sort1
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp2 test093_sort2
+ #
+ # Don't bother running the second, really slow, comparison
+ # function on test093_runbig (file contents).
+
+ # Clean up so verification doesn't fail. (There's currently
+ # no way to specify a comparison function to berkdb dbverify.)
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+}
+
+proc test093_run { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_check
+ puts "\tTest0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set str [reverse $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t2
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.c: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.d: check file order"
+
+ $sortfunc
+
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+ for {set i 0} {$i < $nentries} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+proc test093_runbig { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_checkbig
+ puts "\tTest0$tnum.e:\
+ big key put/get loop key=filecontents data=filename"
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set key [read $fid]
+ close $fid
+
+ set key $f$key
+
+ set fcopy [open $t5 w]
+ fconfigure $fcopy -translation binary
+ puts -nonewline $fcopy $key
+ close $fcopy
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$key \
+ [chop_data $method $f]}]
+ error_check_good put_file $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ puts -nonewline $fid $key
+ }
+ close $fid
+ error_check_good \
+ Test093:diff($t5,$t4) [filecmp $t5 $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.f: big dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.g: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.h: check file order"
+
+ $sortfunc
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+
+ set end [llength $btvals]
+ for {set i 0} {$i < $end} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+# Simple bt comparison.
+proc test093_cmp1 { a b } {
+ return [string compare $b $a]
+}
+
+# Simple bt sorting.
+proc test093_sort1 {} {
+ global btvals
+ #
+ # This one is easy, just sort in reverse.
+ #
+ set btvals [lsort -decreasing $btvals]
+}
+
+proc test093_cmp2 { a b } {
+ set arev [reverse $a]
+ set brev [reverse $b]
+ return [string compare $arev $brev]
+}
+
+proc test093_sort2 {} {
+ global btvals
+
+ # We have to reverse them, then sorts them.
+ # Then reverse them back to real words.
+ set rbtvals {}
+ foreach i $btvals {
+ lappend rbtvals [reverse $i]
+ }
+ set rbtvals [lsort -increasing $rbtvals]
+ set newbtvals {}
+ foreach i $rbtvals {
+ lappend newbtvals [reverse $i]
+ }
+ set btvals $newbtvals
+}
+
+# Check function for test093; keys and data are identical
+proc test093_check { key data } {
+ global btvalsck
+
+ error_check_good "key/data mismatch" $data [reverse $key]
+ lappend btvalsck $key
+}
+
+# Check function for test093 big keys;
+proc test093_checkbig { key data } {
+ source ./include.tcl
+ global btvalsck
+
+ set fid [open $data r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+ error_check_good "key/data mismatch" $key $data$cont
+ lappend btvalsck $key
+}
+
diff --git a/storage/bdb/test/test094.tcl b/storage/bdb/test/test094.tcl
new file mode 100644
index 00000000000..781052913f4
--- /dev/null
+++ b/storage/bdb/test/test094.tcl
@@ -0,0 +1,251 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test094.tcl,v 11.16 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test094
+# TEST Test using set_dup_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc test094 { method {nentries 10000} {ndups 10} {tnum "94"} args} {
+ source ./include.tcl
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 && [is_hash $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ # Create the database and open the dictionary
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-a.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-a.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries \
+ with $ndups dups using dupcompare"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open_noerr -dupcompare test094_cmp \
+ -dup -dupsort -create -mode 0644} $omethod $dbargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set t1 $testdir/t1
+ set pflags ""
+ set gflags ""
+ set txn ""
+ puts "\tTest0$tnum.a: $nentries put/get duplicates loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set dlist {}
+ for {set i 0} {$i < $ndups} {incr i} {
+ set dlist [linsert $dlist 0 $i]
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Set up second testfile so truncate flag is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ #
+ # Test dupcompare with data items big enough to force offpage dups.
+ #
+ puts "\tTest0$tnum.c: big key put/get dup loop key=filename data=filecontents"
+ set db [eval {berkdb_open -dupcompare test094_cmp -dup -dupsort \
+ -create -mode 0644} $omethod $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+ if { [llength $file_list] > $nentries } {
+ set file_list [lrange $file_list 1 $nentries]
+ }
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+
+ set key $f
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$cont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+
+ puts "\tTest0$tnum.d: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_file_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ set testdir [get_home $env]
+ }
+ error_check_good db_close [$db close] 0
+
+ # Clean up the test directory, since there's currently
+ # no way to specify a dup_compare function to berkdb dbverify
+ # and without one it will fail.
+ cleanup $testdir $env
+}
+
+# Simple dup comparison.
+proc test094_cmp { a b } {
+ return [string compare $b $a]
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc test094_dup_big { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+ # Some tests add an extra dup (like overflow entries)
+ # Check id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
diff --git a/storage/bdb/test/test095.tcl b/storage/bdb/test/test095.tcl
new file mode 100644
index 00000000000..5543f346b7e
--- /dev/null
+++ b/storage/bdb/test/test095.tcl
@@ -0,0 +1,296 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test095.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test095
+# TEST Bulk get test. [#2934]
+proc test095 { method {nsets 1000} {noverflows 25} {tnum 95} args } {
+ source ./include.tcl
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/test0$tnum
+ set env NULL
+ # If we've our own env, no reason to swap--this isn't
+ # an mpool test.
+ set carg { -cachesize {0 25000000 0} }
+ } else {
+ set basename test0$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ puts "Skipping for environment with txns"
+ return
+ }
+ set testdir [get_home $env]
+ set carg {}
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) Bulk get test"
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ # We run the meat of the test twice: once with unsorted dups,
+ # once with sorted dups.
+ for { set dflag "-dup"; set sort "unsorted"; set diter 0 } \
+ { $diter < 2 } \
+ { set dflag "-dup -dupsort"; set sort "sorted"; incr diter } {
+ set testfile $basename-$sort.db
+ set did [open $dict]
+
+ # Open and populate the database with $nsets sets of dups.
+ # Each set contains as many dups as its number
+ puts "\tTest0$tnum.a:\
+ Creating database with $nsets sets of $sort dups."
+ set dargs "$dflag $carg $args"
+ set db [eval {berkdb_open -create} $omethod $dargs $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t95_populate $db $did $nsets 0
+
+ # Run basic get tests.
+ t95_gettest $db $tnum b [expr 8192] 1
+ t95_gettest $db $tnum c [expr 10 * 8192] 0
+
+ # Run cursor get tests.
+ t95_cgettest $db $tnum d [expr 100] 1
+ t95_cgettest $db $tnum e [expr 10 * 8192] 0
+
+ # Run invalid flag combination tests
+ # Sync and reopen test file so errors won't be sent to stderr
+ error_check_good db_sync [$db sync] 0
+ set noerrdb [eval berkdb_open_noerr $dargs $testfile]
+ t95_flagtest $noerrdb $tnum f [expr 8192]
+ t95_cflagtest $noerrdb $tnum g [expr 100]
+ error_check_good noerrdb_close [$noerrdb close] 0
+
+ # Set up for overflow tests
+ set max [expr 4000 * $noverflows]
+ puts "\tTest0$tnum.h: Growing\
+ database with $noverflows overflow sets (max item size $max)"
+ t95_populate $db $did $noverflows 4000
+
+ # Run overflow get tests.
+ t95_gettest $db $tnum i [expr 10 * 8192] 1
+ t95_gettest $db $tnum j [expr $max * 2] 1
+ t95_gettest $db $tnum k [expr $max * $noverflows * 2] 0
+
+ # Run overflow cursor get tests.
+ t95_cgettest $db $tnum l [expr 10 * 8192] 1
+ t95_cgettest $db $tnum m [expr $max * 2] 0
+
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+}
+
+proc t95_gettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 0
+}
+proc t95_cgettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 1
+}
+proc t95_flagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 0
+}
+proc t95_cflagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 1
+}
+
+# Basic get test
+proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi"
+ } else {
+ set action "dbc get -multi -set/-next"
+ }
+ puts "\tTest0$tnum.$letter: $action with bufsize $bufsize"
+
+ set allpassed TRUE
+ set saved_err ""
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ # Traverse DB with cursor; do get/c_get(DB_MULTIPLE) on each item.
+ set dbc [$db cursor]
+ error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -nextnodup] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+
+ if { $usecursor == 0 } {
+ set ret [catch {eval $db get -multi $bufsize $key} res]
+ } else {
+ set res {}
+ for { set ret [catch {eval $getcurs get -multi $bufsize\
+ -set $key} tres] } \
+ { $ret == 0 && [llength $tres] != 0 } \
+ { set ret [catch {eval $getcurs get -multi $bufsize\
+ -nextdup} tres]} {
+ eval lappend res $tres
+ }
+ }
+
+ # If we expect a failure, be more tolerant if the above fails;
+ # just make sure it's an ENOMEM, mark it, and move along.
+ if { $expectfail != 0 && $ret != 0 } {
+ error_check_good multi_failure_errcode \
+ [is_substr $errorCode ENOMEM] 1
+ set allpassed FALSE
+ continue
+ }
+ error_check_good get_multi($key) $ret 0
+ t95_verify $res FALSE
+ }
+
+ set ret [catch {eval $db get -multi $bufsize} res]
+
+ if { $expectfail == 1 } {
+ error_check_good allpassed $allpassed FALSE
+ puts "\t\tTest0$tnum.$letter:\
+ returned at least one ENOMEM (as expected)"
+ } else {
+ error_check_good allpassed $allpassed TRUE
+ puts "\t\tTest0$tnum.$letter: succeeded (as expected)"
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+}
+
+# Test of invalid flag combinations for -multi
+proc t95_flagtest_body { db tnum letter bufsize usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi "
+ } else {
+ set action "dbc get -multi "
+ }
+ puts "\tTest0$tnum.$letter: $action with invalid flag combinations"
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ if { $usecursor == 0 } {
+ # Disallowed flags for basic -multi get
+ set badflags [list consume consume_wait {rmw some_key}]
+
+ foreach flag $badflags {
+ catch {eval $db get -multi $bufsize -$flag} ret
+ error_check_good \
+ db:get:multi:$flag [is_substr $errorCode EINVAL] 1
+ }
+ } else {
+ # Disallowed flags for cursor -multi get
+ set cbadflags [list last get_recno join_item \
+ {multi_key 1000} prev prevnodup]
+
+ set dbc [$db cursor]
+ $dbc get -first
+ foreach flag $cbadflags {
+ catch {eval $dbc get -multi $bufsize -$flag} ret
+ error_check_good dbc:get:multi:$flag \
+ [is_substr $errorCode EINVAL] 1
+ }
+ error_check_good dbc_close [$dbc close] 0
+ }
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+ puts "\t\tTest0$tnum.$letter completed"
+}
+
+# Verify that a passed-in list of key/data pairs all match the predicted
+# structure (e.g. {{thing1 thing1.0}}, {{key2 key2.0} {key2 key2.1}}).
+proc t95_verify { res multiple_keys } {
+ global alphabet
+
+ set i 0
+
+ set orig_key [lindex [lindex $res 0] 0]
+ set nkeys [string trim $orig_key $alphabet']
+ set base_key [string trim $orig_key 0123456789]
+ set datum_count 0
+
+ while { 1 } {
+ set key [lindex [lindex $res $i] 0]
+ set datum [lindex [lindex $res $i] 1]
+
+ if { $datum_count >= $nkeys } {
+ if { [llength $key] != 0 } {
+ # If there are keys beyond $nkeys, we'd
+ # better have multiple_keys set.
+ error_check_bad "keys beyond number $i allowed"\
+ $multiple_keys FALSE
+
+ # If multiple_keys is set, accept the new key.
+ set orig_key $key
+ set nkeys [eval string trim \
+ $orig_key {$alphabet'}]
+ set base_key [eval string trim \
+ $orig_key 0123456789]
+ set datum_count 0
+ } else {
+ # datum_count has hit nkeys. We're done.
+ return
+ }
+ }
+
+ error_check_good returned_key($i) $key $orig_key
+ error_check_good returned_datum($i) \
+ $datum $base_key.[format %4u $datum_count]
+ incr datum_count
+ incr i
+ }
+}
+
+# Add nsets dup sets, each consisting of {word$ndups word$n} pairs,
+# with "word" having (i * pad_bytes) bytes extra padding.
+proc t95_populate { db did nsets pad_bytes } {
+ set txn ""
+ for { set i 1 } { $i <= $nsets } { incr i } {
+ # basekey is a padded dictionary word
+ gets $did basekey
+
+ append basekey [repeat "a" [expr $pad_bytes * $i]]
+
+ # key is basekey with the number of dups stuck on.
+ set key $basekey$i
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set data $basekey.[format %4u $j]
+ error_check_good db_put($key,$data) \
+ [eval {$db put} $txn {$key $data}] 0
+ }
+ }
+
+ # This will make debugging easier, and since the database is
+ # read-only from here out, it's cheap.
+ error_check_good db_sync [$db sync] 0
+}
diff --git a/storage/bdb/test/test096.tcl b/storage/bdb/test/test096.tcl
new file mode 100644
index 00000000000..042df19eac7
--- /dev/null
+++ b/storage/bdb/test/test096.tcl
@@ -0,0 +1,202 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test096.tcl,v 11.19 2002/08/19 20:09:29 margo Exp $
+#
+# TEST test096
+# TEST Db->truncate test.
+proc test096 { method {pagesize 512} {nentries 50} {ndups 4} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test096: $method db truncate method test"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test096 skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test096: Skipping for specific pagesizes"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ set testfile test096.db
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 0 } {
+ puts "Environment w/o txns specified; skipping."
+ return
+ }
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ set testdir [get_home $env]
+ set closeenv 0
+ } else {
+ env_cleanup $testdir
+
+ #
+ # We need an env for exclusive-use testing.
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+ set closeenv 1
+ }
+
+ set t1 $testdir/t1
+
+ puts "\tTest096.a: Create $nentries entries"
+ set db [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set datastr [reverse $str]
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good $key:dbget [llength $ret] 1
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest096.b: Truncate database"
+ error_check_good dbclose [$db close] 0
+ set dbtr [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -auto_commit]
+ error_check_good dbtrunc $ret $nentries
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbverify [verify_dir $testdir "\tTest096.c: "] 0
+
+ #
+ # Remove database, and create a new one with dups.
+ #
+ puts "\tTest096.d: Create $nentries entries with $ndups duplicates"
+ set ret [berkdb dbremove -env $env -auto_commit $testfile]
+ set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \
+ -create -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_bad $key:dbget_dups [llength $ret] 0
+ error_check_good $key:dbget_dups1 [llength $ret] $ndups
+
+ incr count
+ }
+ close $did
+ set dlist ""
+ for { set i 1 } {$i <= $ndups} {incr i} {
+ lappend dlist $i
+ }
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ dup_check $db $txn $t1 $dlist
+ error_check_good txn [$t commit] 0
+ puts "\tTest096.e: Verify off page duplicates status"
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat \
+ "{{Duplicate pages} 0}"] 1
+
+ set recs [expr $ndups * $count]
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.f: Truncate database in a txn then abort"
+ set txn [$env txn]
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txnabort [$txn abort] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -auto_commit -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] $recs
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.g: Truncate database in a txn then commit"
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txncommit [$txn commit] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [berkdb_open -auto_commit -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+
+ set testdir [get_home $env]
+ error_check_good dbverify [verify_dir $testdir "\tTest096.h: "] 0
+
+ if { $closeenv == 1 } {
+ error_check_good envclose [$env close] 0
+ }
+}
diff --git a/storage/bdb/test/test097.tcl b/storage/bdb/test/test097.tcl
new file mode 100644
index 00000000000..6e43b820b2f
--- /dev/null
+++ b/storage/bdb/test/test097.tcl
@@ -0,0 +1,188 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test097.tcl,v 11.8 2002/09/04 18:47:42 sue Exp $
+#
+# TEST test097
+# TEST Open up a large set of database files simultaneously.
+# TEST Adjust for local file descriptor resource limits.
+# TEST Then use the first 1000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original.
+
+proc test097 { method {ndbs 500} {nentries 400} args } {
+ global pad_datastr
+ source ./include.tcl
+
+ set largs [convert_args $method $args]
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ # Open an environment, with a 1MB cache.
+ set eindex [lsearch -exact $largs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $largs $eindex]
+ puts "Test097: $method: skipping for env $env"
+ return
+ }
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create \
+ -cachesize { 0 1048576 1 } -txn} -home $testdir $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Create the database and open the dictionary
+ set testfile test097.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ #
+ # When running with HAVE_MUTEX_SYSTEM_RESOURCES,
+ # we can run out of mutex lock slots due to the nature of this test.
+ # So, for this test, increase the number of pages per extent
+ # to consume fewer resources.
+ #
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ set eindex [lsearch -exact $largs "-extent"]
+ error_check_bad extent $eindex -1
+ incr eindex
+ set extval [lindex $largs $eindex]
+ set extval [expr $extval * 4]
+ set largs [lreplace $largs $eindex $eindex $extval]
+ }
+ puts -nonewline "Test097: $method ($largs) "
+ puts "$nentries entries in at most $ndbs simultaneous databases"
+
+ puts "\tTest097.a: Simultaneous open"
+ set numdb [test097_open tdb $ndbs $method $env $testfile $largs]
+ if { $numdb == 0 } {
+ puts "\tTest097: Insufficient resources available -- skipping."
+ error_check_good envclose [$env close] 0
+ return
+ }
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ puts "\tTest097.b: put/get on $numdb databases"
+ set datastr "abcdefghij"
+ set pad_datastr [pad_data $method $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ for { set i 1 } { $i <= $numdb } { incr i } {
+ set ret [eval {$tdb($i) put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ set ret [eval {$tdb($i) get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $datastr]]]
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest097.c: dump and check files"
+ for { set j 1 } { $j <= $numdb } { incr j } {
+ dump_file $tdb($j) $txn $t1 test097.check
+ error_check_good db_close [$tdb($j) close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test097:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+# Check function for test097; data should be fixed are identical
+proc test097.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
+
+proc test097_open { tdb ndbs method env testfile largs } {
+ global errorCode
+ upvar $tdb db
+
+ set j 0
+ set numdb $ndbs
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ }
+ set omethod [convert_method $method]
+ for { set i 1 } {$i <= $numdb } { incr i } {
+ set stat [catch {eval {berkdb_open -env $env \
+ -pagesize 512 -create -mode 0644} \
+ $largs {$omethod $testfile.$i}} db($i)]
+ #
+ # Check if we've reached our limit
+ #
+ if { $stat == 1 } {
+ set min 20
+ set em [is_substr $errorCode EMFILE]
+ set en [is_substr $errorCode ENFILE]
+ error_check_good open_ret [expr $em || $en] 1
+ puts \
+ "\tTest097.a.1 Encountered resource limits opening $i files, adjusting"
+ if { [is_queueext $method] } {
+ set end [expr $j / 4]
+ set min 10
+ } else {
+ set end [expr $j - 10]
+ }
+ #
+ # If we cannot open even $min files, then this test is
+ # not very useful. Close up shop and go back.
+ #
+ if { $end < $min } {
+ test097_close db 1 $j
+ return 0
+ }
+ test097_close db [expr $end + 1] $j
+ return $end
+ } else {
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set j $i
+ }
+ }
+ return $j
+}
+
+proc test097_close { tdb start end } {
+ upvar $tdb db
+
+ for { set i $start } { $i <= $end } { incr i } {
+ error_check_good db($i)close [$db($i) close] 0
+ }
+}
diff --git a/storage/bdb/test/test098.tcl b/storage/bdb/test/test098.tcl
new file mode 100644
index 00000000000..320e0258a84
--- /dev/null
+++ b/storage/bdb/test/test098.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test098.tcl,v 1.5 2002/07/11 20:38:36 sandstro Exp $
+#
+# TEST test098
+# TEST Test of DB_GET_RECNO and secondary indices. Open a primary and
+# TEST a secondary, and do a normal cursor get followed by a get_recno.
+# TEST (This is a smoke test for "Bug #1" in [#5811].)
+
+proc test098 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test098: $omethod ($args): DB_GET_RECNO and secondary indices."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest098: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ set txn ""
+ set auto ""
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set base $testdir/test098
+ set env NULL
+ } else {
+ set base test098
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test098: Skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ set auto " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest098.a: Set up databases."
+
+ set adb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-primary.db]
+ error_check_good adb_create [is_valid_db $adb] TRUE
+
+ set bdb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-secondary.db]
+ error_check_good bdb_create [is_valid_db $bdb] TRUE
+
+ set ret [eval $adb associate $auto [callback_n 0] $bdb]
+ error_check_good associate $ret 0
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$adb put} $txn aaa data1]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set bc [$bdb cursor]
+ error_check_good cursor [is_valid_cursor $bc $bdb] TRUE
+
+ puts "\tTest098.b: c_get(DB_FIRST) on the secondary."
+ error_check_good get_first [$bc get -first] \
+ [list [list [[callback_n 0] aaa data1] data1]]
+
+ puts "\tTest098.c: c_get(DB_GET_RECNO) on the secondary."
+ error_check_good get_recno [$bc get -get_recno] 1
+
+ error_check_good c_close [$bc close] 0
+
+ error_check_good bdb_close [$bdb close] 0
+ error_check_good adb_close [$adb close] 0
+}
diff --git a/storage/bdb/test/test099.tcl b/storage/bdb/test/test099.tcl
new file mode 100644
index 00000000000..db177ce5fff
--- /dev/null
+++ b/storage/bdb/test/test099.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test099.tcl,v 1.2 2002/08/08 15:38:13 bostic Exp $
+#
+# TEST test099
+# TEST
+# TEST Test of DB->get and DBC->c_get with set_recno and get_recno.
+# TEST
+# TEST Populate a small btree -recnum database.
+# TEST After all are entered, retrieve each using -recno with DB->get.
+# TEST Open a cursor and do the same for DBC->c_get with set_recno.
+# TEST Verify that set_recno sets the record number position properly.
+# TEST Verify that get_recno returns the correct record numbers.
+proc test099 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test099: Test of set_recno and get_recno in DBC->c_get."
+ if { [is_rbtree $method] != 1 } {
+ puts "Test099: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test099.db
+ set env NULL
+ } else {
+ set testfile test099.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ # Create the database and open the dictionary
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 1
+
+ append gflags " -recno"
+
+ puts "\tTest099.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+# global kvals
+# set key [expr $count]
+# set kvals($key) [pad_data $method $str]
+ set key $str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ puts "\tTest099.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test099.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest099.c: Test set_recno then get_recno"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set did [open $t1]
+ set recno 1
+
+ # Create key(recno) array to use for later comparison
+ while { [gets $did str] != -1 } {
+ set kvals($recno) $str
+ incr recno
+ }
+
+ set recno 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get_first [llength $ret] 0
+
+ # First walk forward through the database ....
+ while { $recno < $count } {
+ # Test set_recno: verify it sets the record number properly.
+ set current [$dbc get -current]
+ set r [$dbc get -set_recno $recno]
+ error_check_good set_recno $current $r
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set ret [$dbc get -next]
+ incr recno
+ }
+
+ # ... and then backward.
+ set recno [expr $count - 1]
+ while { $recno > 0 } {
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set r [$dbc get -set_recno $recno]
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set recno [expr $recno - 1]
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
+
+# Check function for dumped file; data should be fixed are identical
+proc test099.check { key data } {
+ error_check_good "data mismatch for key $key" $key $data
+}
diff --git a/storage/bdb/test/test100.tcl b/storage/bdb/test/test100.tcl
new file mode 100644
index 00000000000..f80b2e526dd
--- /dev/null
+++ b/storage/bdb/test/test100.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test100.tcl,v 11.1 2002/08/15 20:55:20 sandstro Exp $
+#
+# TEST test100
+# TEST Test for functionality near the end of the queue
+# TEST using test025 (DB_APPEND).
+proc test100 { method {nentries 10000} {txn -txn} {tnum "100"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test025 $method $nentries 4294967000 $tnum} $args
+}
diff --git a/storage/bdb/test/test101.tcl b/storage/bdb/test/test101.tcl
new file mode 100644
index 00000000000..7e5c8fc30fc
--- /dev/null
+++ b/storage/bdb/test/test101.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test101.tcl,v 11.1 2002/08/15 20:55:20 sandstro Exp $
+#
+# TEST test101
+# TEST Test for functionality near the end of the queue
+# TEST using test070 (DB_CONSUME).
+proc test101 { method {nentries 10000} {txn -txn} {tnum "101"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method 4 2 1000 WAIT 4294967000 $txn $tnum} $args
+}
diff --git a/storage/bdb/test/testparams.tcl b/storage/bdb/test/testparams.tcl
new file mode 100644
index 00000000000..6628db532d7
--- /dev/null
+++ b/storage/bdb/test/testparams.tcl
@@ -0,0 +1,194 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: testparams.tcl,v 11.117 2002/09/05 02:30:00 margo Exp $
+
+set subs {bigfile dead env lock log memp mutex recd rep rpc rsrc \
+ sdb sdbtest sec si test txn}
+
+set num_test(bigfile) 2
+set num_test(dead) 7
+set num_test(env) 11
+set num_test(lock) 5
+set num_test(log) 5
+set num_test(memp) 3
+set num_test(mutex) 3
+set num_test(recd) 20
+set num_test(rep) 5
+set num_test(rpc) 5
+set num_test(rsrc) 4
+set num_test(sdb) 12
+set num_test(sdbtest) 2
+set num_test(sec) 2
+set num_test(si) 6
+set num_test(test) 101
+set num_test(txn) 9
+
+set parms(recd001) 0
+set parms(recd002) 0
+set parms(recd003) 0
+set parms(recd004) 0
+set parms(recd005) ""
+set parms(recd006) 0
+set parms(recd007) ""
+set parms(recd008) {4 4}
+set parms(recd009) 0
+set parms(recd010) 0
+set parms(recd011) {200 15 1}
+set parms(recd012) {0 49 25 100 5}
+set parms(recd013) 100
+set parms(recd014) ""
+set parms(recd015) ""
+set parms(recd016) ""
+set parms(recd017) 0
+set parms(recd018) 10
+set parms(recd019) 50
+set parms(recd020) ""
+set parms(subdb001) ""
+set parms(subdb002) 10000
+set parms(subdb003) 1000
+set parms(subdb004) ""
+set parms(subdb005) 100
+set parms(subdb006) 100
+set parms(subdb007) ""
+set parms(subdb008) ""
+set parms(subdb009) ""
+set parms(subdb010) ""
+set parms(subdb011) {13 10}
+set parms(subdb012) ""
+set parms(test001) {10000 0 "01" 0}
+set parms(test002) 10000
+set parms(test003) ""
+set parms(test004) {10000 4 0}
+set parms(test005) 10000
+set parms(test006) {10000 0 6}
+set parms(test007) {10000 7}
+set parms(test008) {8 0}
+set parms(test009) ""
+set parms(test010) {10000 5 10}
+set parms(test011) {10000 5 11}
+set parms(test012) ""
+set parms(test013) 10000
+set parms(test014) 10000
+set parms(test015) {7500 0}
+set parms(test016) 10000
+set parms(test017) {0 19 17}
+set parms(test018) 10000
+set parms(test019) 10000
+set parms(test020) 10000
+set parms(test021) 10000
+set parms(test022) ""
+set parms(test023) ""
+set parms(test024) 10000
+set parms(test025) {10000 0 25}
+set parms(test026) {2000 5 26}
+set parms(test027) {100}
+set parms(test028) ""
+set parms(test029) 10000
+set parms(test030) 10000
+set parms(test031) {10000 5 31}
+set parms(test032) {10000 5 32}
+set parms(test033) {10000 5 33}
+set parms(test034) 10000
+set parms(test035) 10000
+set parms(test036) 10000
+set parms(test037) 100
+set parms(test038) {10000 5 38}
+set parms(test039) {10000 5 39}
+set parms(test040) 10000
+set parms(test041) 10000
+set parms(test042) 1000
+set parms(test043) 10000
+set parms(test044) {5 10 0}
+set parms(test045) 1000
+set parms(test046) ""
+set parms(test047) ""
+set parms(test048) ""
+set parms(test049) ""
+set parms(test050) ""
+set parms(test051) ""
+set parms(test052) ""
+set parms(test053) ""
+set parms(test054) ""
+set parms(test055) ""
+set parms(test056) ""
+set parms(test057) ""
+set parms(test058) ""
+set parms(test059) ""
+set parms(test060) ""
+set parms(test061) ""
+set parms(test062) {200 200 62}
+set parms(test063) ""
+set parms(test064) ""
+set parms(test065) ""
+set parms(test066) ""
+set parms(test067) {1000 67}
+set parms(test068) ""
+set parms(test069) {50 69}
+set parms(test070) {4 2 1000 CONSUME 0 -txn 70}
+set parms(test071) {1 1 10000 CONSUME 0 -txn 71}
+set parms(test072) {512 20 72}
+set parms(test073) {512 50 73}
+set parms(test074) {-nextnodup 100 74}
+set parms(test075) {75}
+set parms(test076) {1000 76}
+set parms(test077) {1000 512 77}
+set parms(test078) {100 512 78}
+set parms(test079) {10000 512 79}
+set parms(test080) {80}
+set parms(test081) {13 81}
+set parms(test082) {-prevnodup 100 82}
+set parms(test083) {512 5000 2}
+set parms(test084) {10000 84 65536}
+set parms(test085) {512 3 10 85}
+set parms(test086) ""
+set parms(test087) {512 50 87}
+set parms(test088) ""
+set parms(test089) 1000
+set parms(test090) {10000 -txn 90}
+set parms(test091) {4 2 1000 0 91}
+set parms(test092) {1000}
+set parms(test093) {10000 93}
+set parms(test094) {10000 10 94}
+set parms(test095) {1000 25 95}
+set parms(test096) {512 1000 19}
+set parms(test097) {500 400}
+set parms(test098) ""
+set parms(test099) 10000
+set parms(test100) {10000 -txn 100}
+set parms(test101) {10000 -txn 101}
+
+# RPC server executables. Each of these is tested (if it exists)
+# when running the RPC tests.
+set svc_list { berkeley_db_svc berkeley_db_cxxsvc \
+ berkeley_db_javasvc }
+set rpc_svc berkeley_db_svc
+
+# Shell script tests. Each list entry is a {directory filename} pair,
+# invoked with "/bin/sh filename".
+set shelltest_list {
+ { scr001 chk.code }
+ { scr002 chk.def }
+ { scr003 chk.define }
+ { scr004 chk.javafiles }
+ { scr005 chk.nl }
+ { scr006 chk.offt }
+ { scr007 chk.proto }
+ { scr008 chk.pubdef }
+ { scr009 chk.srcfiles }
+ { scr010 chk.str }
+ { scr011 chk.tags }
+ { scr012 chk.vx_code }
+ { scr013 chk.stats }
+ { scr014 chk.err }
+ { scr015 chk.cxxtests }
+ { scr016 chk.javatests }
+ { scr017 chk.db185 }
+ { scr018 chk.comma }
+ { scr019 chk.include }
+ { scr020 chk.inc }
+ { scr021 chk.flags }
+ { scr022 chk.rr }
+}
diff --git a/storage/bdb/test/testutils.tcl b/storage/bdb/test/testutils.tcl
new file mode 100644
index 00000000000..d1f89dd1e15
--- /dev/null
+++ b/storage/bdb/test/testutils.tcl
@@ -0,0 +1,3209 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: testutils.tcl,v 11.165 2002/09/05 17:54:04 sandstro Exp $
+#
+# Test system utilities
+#
+# Timestamp -- print time along with elapsed time since last invocation
+# of timestamp.
+proc timestamp {{opt ""}} {
+ global __timestamp_start
+
+ set now [clock seconds]
+
+ # -c accurate to the click, instead of the second.
+ # -r seconds since the Epoch
+ # -t current time in the format expected by db_recover -t.
+ # -w wallclock time
+ # else wallclock plus elapsed time.
+ if {[string compare $opt "-r"] == 0} {
+ return $now
+ } elseif {[string compare $opt "-t"] == 0} {
+ return [clock format $now -format "%y%m%d%H%M.%S"]
+ } elseif {[string compare $opt "-w"] == 0} {
+ return [clock format $now -format "%c"]
+ } else {
+ if {[string compare $opt "-c"] == 0} {
+ set printclicks 1
+ } else {
+ set printclicks 0
+ }
+
+ if {[catch {set start $__timestamp_start}] != 0} {
+ set __timestamp_start $now
+ }
+ set start $__timestamp_start
+
+ set elapsed [expr $now - $start]
+ set the_time [clock format $now -format ""]
+ set __timestamp_start $now
+
+ if { $printclicks == 1 } {
+ set pc_print [format ".%08u" [__fix_num [clock clicks]]]
+ } else {
+ set pc_print ""
+ }
+
+ format "%02d:%02d:%02d$pc_print (%02d:%02d:%02d)" \
+ [__fix_num [clock format $now -format "%H"]] \
+ [__fix_num [clock format $now -format "%M"]] \
+ [__fix_num [clock format $now -format "%S"]] \
+ [expr $elapsed / 3600] \
+ [expr ($elapsed % 3600) / 60] \
+ [expr ($elapsed % 3600) % 60]
+ }
+}
+
+proc __fix_num { num } {
+ set num [string trimleft $num "0"]
+ if {[string length $num] == 0} {
+ set num "0"
+ }
+ return $num
+}
+
+# Add a {key,data} pair to the specified database where
+# key=filename and data=file contents.
+proc put_file { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+
+ set ret [eval {$db put} $txn $flags {$file $data}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=filename and data=file contents and then write the
+# data to the specified file.
+proc get_file { db txn flags file outfile } {
+ source ./include.tcl
+
+ set fid [open $outfile w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $txn $flags {$file}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+}
+
+# Add a {key,data} pair to the specified database where
+# key=file contents and data=file name.
+proc put_file_as_key { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+ # Use not the file contents, but the file name concatenated
+ # before the file contents, as a key, to ensure uniqueness.
+ set data $file$filecont
+
+ set ret [eval {$db put} $txn $flags {$data $file}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=file contents and data=file name
+proc get_file_as_key { db txn flags file} {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+ set data $file$filecont
+
+ return [eval {$db get} $txn $flags {$data}]
+}
+
+# open file and call dump_file to dumpkeys to tempfile
+proc open_and_dump_file {
+ dbname env outfile checkfunc dump_func beg cont } {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ }
+ set db [eval {berkdb open} $envarg -rdonly -unknown $encarg $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# open file and call dump_file to dumpkeys to tempfile
+proc open_and_dump_subfile {
+ dbname env outfile checkfunc dump_func beg cont subdb} {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ }
+ set db [eval {berkdb open -rdonly -unknown} \
+ $envarg $encarg {$dbname $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Sequentially read a file and call checkfunc on each key/data pair.
+# Dump the keys out to the file specified by outfile.
+proc dump_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+proc dump_file_direction { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ dump_file_walk $c $outfile $checkfunc $start $continue
+ error_check_good curs_close [$c close] 0
+}
+
+proc dump_file_walk { c outfile checkfunc start continue {flag ""} } {
+ set outf [open $outfile w]
+ for {set d [eval {$c get} $flag $start] } \
+ { [llength $d] != 0 } \
+ {set d [eval {$c get} $flag $continue] } {
+ set kd [lindex $d 0]
+ set k [lindex $kd 0]
+ set d2 [lindex $kd 1]
+ $checkfunc $k $d2
+ puts $outf $k
+ # XXX: Geoff Mainland
+ # puts $outf "$k $d2"
+ }
+ close $outf
+}
+
+proc dump_binkey_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_binkey_file_direction $db $txn $outfile $checkfunc \
+ "-first" "-next"
+}
+proc dump_bin_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_bin_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+# Note: the following procedure assumes that the binary-file-as-keys were
+# inserted into the database by put_file_as_key, and consist of the file
+# name followed by the file contents as key, to ensure uniqueness.
+proc dump_binkey_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ set inf $d1
+ for {set d [$c get $begin] } { [llength $d] != 0 } \
+ {set d [$c get $cont] } {
+ set kd [lindex $d 0]
+ set keyfile [lindex $kd 0]
+ set data [lindex $kd 1]
+
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+
+ # Chop off the first few bytes--that's the file name,
+ # added for uniqueness in put_file_as_key, which we don't
+ # want in the regenerated file.
+ set namelen [string length $data]
+ set keyfile [string range $keyfile $namelen end]
+ puts -nonewline $ofid $keyfile
+ close $ofid
+
+ $checkfunc $data $d1
+ puts $outf $data
+ flush $outf
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove $d1
+}
+
+proc dump_bin_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+
+ for {set d [$c get $begin] } \
+ { [llength $d] != 0 } {set d [$c get $cont] } {
+ set k [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $k $d1
+ puts $outf $k
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove -f $d1
+}
+
+proc make_data_str { key } {
+ set datastr ""
+ for {set i 0} {$i < 10} {incr i} {
+ append datastr $key
+ }
+ return $datastr
+}
+
+proc error_check_bad { func result bad {txn 0}} {
+ if { [binary_compare $result $bad] == 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp] $func returned error value $bad"
+ }
+}
+
+proc error_check_good { func result desired {txn 0} } {
+ if { [binary_compare $desired $result] != 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp]\
+ $func: expected $desired, got $result"
+ }
+}
+
+# Locks have the prefix of their manager.
+proc is_substr { str sub } {
+ if { [string first $sub $str] == -1 } {
+ return 0
+ } else {
+ return 1
+ }
+}
+
+proc release_list { l } {
+
+ # Now release all the locks
+ foreach el $l {
+ catch { $el put } ret
+ error_check_good lock_put $ret 0
+ }
+}
+
+proc debug { {stop 0} } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+
+ set __debug_on 1
+ set __debug_print 1
+ set __debug_test $stop
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_check { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+ # Some tests add an extra dup (like overflow entries)
+ # Check id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_file_check { db txn tmpfile dlist } {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ if { [string compare $key $lastkey] != 0 } {
+ #
+ # If we changed files read in new contents.
+ #
+ set fid [open $key r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+ }
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $filecont
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+# Parse duplicate data entries of the form N:data. Data_of returns
+# the data part; id_of returns the numerical part
+proc data_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+ return [ string range $str [expr $ndx + 1] end]
+}
+
+proc id_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+
+ return [ string range $str 0 [expr $ndx - 1]]
+}
+
+proc nop { {args} } {
+ return
+}
+
+# Partial put test procedure.
+# Munges a data val through three different partial puts. Stores
+# the final munged string in the dvals array so that you can check
+# it later (dvals should be global). We take the characters that
+# are being replaced, make them capitals and then replicate them
+# some number of times (n_add). We do this at the beginning of the
+# data, at the middle and at the end. The parameters are:
+# db, txn, key -- as per usual. Data is the original data element
+# from which we are starting. n_replace is the number of characters
+# that we will replace. n_add is the number of times we will add
+# the replaced string back in.
+proc partial_put { method db txn gflags key data n_replace n_add } {
+ global dvals
+ source ./include.tcl
+
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+
+ eval {$db put} $txn {$key [chop_data $method $data]}
+
+ # Beginning change
+ set s [string range $data 0 [ expr $n_replace - 1 ] ]
+ set repl [ replicate [string toupper $s] $n_add ]
+
+ # This is gross, but necessary: if this is a fixed-length
+ # method, and the chopped length of $repl is zero,
+ # it's because the original string was zero-length and our data item
+ # is all nulls. Set repl to something non-NULL.
+ if { [is_fixed_length $method] && \
+ [string length [chop_data $method $repl]] == 0 } {
+ set repl [replicate "." $n_add]
+ }
+
+ set newstr [chop_data $method $repl[string range $data $n_replace end]]
+ set ret [eval {$db put} $txn {-partial [list 0 $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # End Change
+ set len [string length $newstr]
+ set spl [expr $len - $n_replace]
+ # Handle case where $n_replace > $len
+ if { $spl < 0 } {
+ set spl 0
+ }
+
+ set s [string range $newstr [ expr $len - $n_replace ] end ]
+ # Handle zero-length keys
+ if { [string length $s] == 0 } { set s "A" }
+
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method \
+ [string range $newstr 0 [expr $spl - 1 ] ]$repl]
+
+ set ret [eval {$db put} $txn \
+ {-partial [list $spl $n_replace] $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # Middle Change
+ set len [string length $newstr]
+ set mid [expr $len / 2 ]
+ set beg [expr $mid - [expr $n_replace / 2] ]
+ set end [expr $beg + $n_replace - 1]
+ set s [string range $newstr $beg $end]
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method [string range $newstr 0 \
+ [expr $beg - 1 ] ]$repl[string range $newstr [expr $end + 1] end]]
+
+ set ret [eval {$db put} $txn {-partial [list $beg $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ set dvals($key) [pad_data $method $newstr]
+}
+
+proc replicate { str times } {
+ set res $str
+ for { set i 1 } { $i < $times } { set i [expr $i * 2] } {
+ append res $res
+ }
+ return $res
+}
+
+proc repeat { str n } {
+ set ret ""
+ while { $n > 0 } {
+ set ret $str$ret
+ incr n -1
+ }
+ return $ret
+}
+
+proc isqrt { l } {
+ set s [expr sqrt($l)]
+ set ndx [expr [string first "." $s] - 1]
+ return [string range $s 0 $ndx]
+}
+
+# If we run watch_procs multiple times without an intervening
+# testdir cleanup, it's possible that old sentinel files will confuse
+# us. Make sure they're wiped out before we spawn any other processes.
+proc sentinel_init { } {
+ source ./include.tcl
+
+ set filelist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set filelist $result
+ }
+
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set filelist [concat $filelist $result]
+ }
+
+ foreach f $filelist {
+ fileremove $f
+ }
+}
+
+proc watch_procs { pidlist {delay 30} {max 3600} {quiet 0} } {
+ source ./include.tcl
+
+ set elapsed 0
+
+ # Don't start watching the processes until a sentinel
+ # file has been created for each one.
+ foreach pid $pidlist {
+ while { [file exists $testdir/begin.$pid] == 0 } {
+ tclsleep $delay
+ incr elapsed $delay
+ # If pids haven't been created in one-tenth
+ # of the time allowed for the whole test,
+ # there's a problem. Report an error and fail.
+ if { $elapsed > [expr {$max / 10}] } {
+ puts "FAIL: begin.pid not created"
+ break
+ }
+ }
+ }
+
+ while { 1 } {
+
+ tclsleep $delay
+ incr elapsed $delay
+
+ # Find the list of processes with outstanding sentinel
+ # files (i.e. a begin.pid and no end.pid).
+ set beginlist {}
+ set endlist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set beginlist $result
+ }
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set endlist $result
+ }
+
+ set bpids {}
+ catch {unset epids}
+ foreach begfile $beginlist {
+ lappend bpids [string range $begfile \
+ [string length $testdir/begin.] end]
+ }
+ foreach endfile $endlist {
+ set epids([string range $endfile \
+ [string length $testdir/end.] end]) 1
+ }
+
+ # The set of processes that we still want to watch, $l,
+ # is the set of pids that have begun but not ended
+ # according to their sentinel files.
+ set l {}
+ foreach p $bpids {
+ if { [info exists epids($p)] == 0 } {
+ lappend l $p
+ }
+ }
+
+ set rlist {}
+ foreach i $l {
+ set r [ catch { exec $KILL -0 $i } result ]
+ if { $r == 0 } {
+ lappend rlist $i
+ }
+ }
+ if { [ llength $rlist] == 0 } {
+ break
+ } else {
+ puts "[timestamp] processes running: $rlist"
+ }
+
+ if { $elapsed > $max } {
+ # We have exceeded the limit; kill processes
+ # and report an error
+ foreach i $l {
+ tclkill $i
+ }
+ }
+ }
+ if { $quiet == 0 } {
+ puts "All processes have exited."
+ }
+}
+
+# These routines are all used from within the dbscript.tcl tester.
+proc db_init { dbp do_data } {
+ global a_keys
+ global l_keys
+ source ./include.tcl
+
+ set txn ""
+ set nk 0
+ set lastkey ""
+
+ set a_keys() BLANK
+ set l_keys ""
+
+ set c [$dbp cursor]
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ incr nk
+ if { $do_data == 1 } {
+ if { [info exists a_keys($k)] } {
+ lappend a_keys($k) $d2]
+ } else {
+ set a_keys($k) $d2
+ }
+ }
+
+ lappend l_keys $k
+ }
+ error_check_good curs_close [$c close] 0
+
+ return $nk
+}
+
+proc pick_op { min max n } {
+ if { $n == 0 } {
+ return add
+ }
+
+ set x [berkdb random_int 1 12]
+ if {$n < $min} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8} {
+ return get
+ } else {
+ return add
+ }
+ } elseif {$n > $max} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8 } {
+ return get
+ } else {
+ return del
+ }
+
+ } elseif { $x <= 3 } {
+ return del
+ } elseif { $x <= 6 } {
+ return get
+ } elseif { $x <= 9 } {
+ return put
+ } else {
+ return add
+ }
+}
+
+# random_data: Generate a string of random characters.
+# If recno is 0 - Use average to pick a length between 1 and 2 * avg.
+# If recno is non-0, generate a number between 1 and 2 ^ (avg * 2),
+# that will fit into a 32-bit integer.
+# If the unique flag is 1, then make sure that the string is unique
+# in the array "where".
+proc random_data { avg unique where {recno 0} } {
+ upvar #0 $where arr
+ global debug_on
+ set min 1
+ set max [expr $avg+$avg-1]
+ if { $recno } {
+ #
+ # Tcl seems to have problems with values > 30.
+ #
+ if { $max > 30 } {
+ set max 30
+ }
+ set maxnum [expr int(pow(2, $max))]
+ }
+ while {1} {
+ set len [berkdb random_int $min $max]
+ set s ""
+ if {$recno} {
+ set s [berkdb random_int 1 $maxnum]
+ } else {
+ for {set i 0} {$i < $len} {incr i} {
+ append s [int_to_char [berkdb random_int 0 25]]
+ }
+ }
+
+ if { $unique == 0 || [info exists arr($s)] == 0 } {
+ break
+ }
+ }
+
+ return $s
+}
+
+proc random_key { } {
+ global l_keys
+ global nkeys
+ set x [berkdb random_int 0 [expr $nkeys - 1]]
+ return [lindex $l_keys $x]
+}
+
+proc is_err { desired } {
+ set x [berkdb random_int 1 100]
+ if { $x <= $desired } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc pick_cursput { } {
+ set x [berkdb random_int 1 4]
+ switch $x {
+ 1 { return "-keylast" }
+ 2 { return "-keyfirst" }
+ 3 { return "-before" }
+ 4 { return "-after" }
+ }
+}
+
+proc random_cursor { curslist } {
+ global l_keys
+ global nkeys
+
+ set x [berkdb random_int 0 [expr [llength $curslist] - 1]]
+ set dbc [lindex $curslist $x]
+
+ # We want to randomly set the cursor. Pick a key.
+ set k [random_key]
+ set r [$dbc get "-set" $k]
+ error_check_good cursor_get:$k [is_substr Error $r] 0
+
+ # Now move forward or backward some hops to randomly
+ # position the cursor.
+ set dist [berkdb random_int -10 10]
+
+ set dir "-next"
+ set boundary "-first"
+ if { $dist < 0 } {
+ set dir "-prev"
+ set boundary "-last"
+ set dist [expr 0 - $dist]
+ }
+
+ for { set i 0 } { $i < $dist } { incr i } {
+ set r [ record $dbc get $dir $k ]
+ if { [llength $d] == 0 } {
+ set r [ record $dbc get $k $boundary ]
+ }
+ error_check_bad dbcget [llength $r] 0
+ }
+ return { [linsert r 0 $dbc] }
+}
+
+proc record { args } {
+# Recording every operation makes tests ridiculously slow on
+# NT, so we are commenting this out; for debugging purposes,
+# it will undoubtedly be useful to uncomment this.
+# puts $args
+# flush stdout
+ return [eval $args]
+}
+
+proc newpair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+ lappend l_keys $k
+ incr nkeys
+}
+
+proc rempair { k } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ unset a_keys($k)
+ set n [lsearch $l_keys $k]
+ error_check_bad rempair:$k $n -1
+ set l_keys [lreplace $l_keys $n $n]
+ incr nkeys -1
+}
+
+proc changepair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+}
+
+proc changedup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ error_check_bad changedup:$k [llength $d] 0
+
+ set n [lsearch $d $olddata]
+ error_check_bad changedup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n $newdata]
+}
+
+# Insert a dup into the a_keys array with DB_KEYFIRST.
+proc adddup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ if { [llength $d] == 0 } {
+ lappend l_keys $k
+ incr nkeys
+ set a_keys($k) { $newdata }
+ }
+
+ set ndx 0
+
+ set d [linsert d $ndx $newdata]
+ set a_keys($k) $d
+}
+
+proc remdup { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d [$a_keys($k)]
+ error_check_bad changedup:$k [llength $d] 0
+
+ set n [lsearch $d $olddata]
+ error_check_bad changedup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n]
+}
+
+proc dump_full_file { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ set outf [open $outfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good dbcursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get $start] } { [string length $d] != 0 } {
+ set d [$c get $continue] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ $checkfunc $k $d2
+ puts $outf "$k\t$d2"
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+proc int_to_char { i } {
+ global alphabet
+
+ return [string index $alphabet $i]
+}
+
+proc dbcheck { key data } {
+ global l_keys
+ global a_keys
+ global nkeys
+ global check_array
+
+ if { [lsearch $l_keys $key] == -1 } {
+ error "FAIL: Key |$key| not in list of valid keys"
+ }
+
+ set d $a_keys($key)
+
+ if { [info exists check_array($key) ] } {
+ set check $check_array($key)
+ } else {
+ set check {}
+ }
+
+ if { [llength $d] > 1 } {
+ if { [llength $check] != [llength $d] } {
+ # Make the check array the right length
+ for { set i [llength $check] } { $i < [llength $d] } \
+ {incr i} {
+ lappend check 0
+ }
+ set check_array($key) $check
+ }
+
+ # Find this data's index
+ set ndx [lsearch $d $data]
+ if { $ndx == -1 } {
+ error "FAIL: \
+ Data |$data| not found for key $key. Found |$d|"
+ }
+
+ # Set the bit in the check array
+ set check_array($key) [lreplace $check_array($key) $ndx $ndx 1]
+ } elseif { [string compare $d $data] != 0 } {
+ error "FAIL: \
+ Invalid data |$data| for key |$key|. Expected |$d|."
+ } else {
+ set check_array($key) 1
+ }
+}
+
+# Dump out the file and verify it
+proc filecheck { file txn } {
+ global check_array
+ global l_keys
+ global nkeys
+ global a_keys
+ source ./include.tcl
+
+ if { [info exists check_array] == 1 } {
+ unset check_array
+ }
+
+ open_and_dump_file $file NULL $file.dump dbcheck dump_full_file \
+ "-first" "-next"
+
+ # Check that everything we checked had all its data
+ foreach i [array names check_array] {
+ set count 0
+ foreach j $check_array($i) {
+ if { $j != 1 } {
+ puts -nonewline "Key |$i| never found datum"
+ puts " [lindex $a_keys($i) $count]"
+ }
+ incr count
+ }
+ }
+
+ # Check that all keys appeared in the checked array
+ set count 0
+ foreach k $l_keys {
+ if { [info exists check_array($k)] == 0 } {
+ puts "filecheck: key |$k| not found. Data: $a_keys($k)"
+ }
+ incr count
+ }
+
+ if { $count != $nkeys } {
+ puts "filecheck: Got $count keys; expected $nkeys"
+ }
+}
+
+# cleanup --
+#	Remove test artifacts from $dir.  When generating upgrade test data
+#	(gen_upgrade == 1), each *.db file is first dumped (db_dump and a
+#	Tcl dump), renamed with an endianness tag, tarred and gzipped into
+#	$upgrade_dir before removal.  Databases are removed with 'berkdb
+#	dbremove' (retrying with crypto if the plain remove fails);
+#	env-region files (__db.*, log.*, DIR_*) are left alone when an env
+#	handle is supplied, otherwise deleted.  $quiet suppresses the
+#	dbremove failure message.
+proc cleanup { dir env { quiet 0 } } {
+ global gen_upgrade
+ global is_qnx_test
+ global old_encrypt
+ global passwd
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global upgrade_name
+ source ./include.tcl
+
+ if { $gen_upgrade == 1 } {
+ set vers [berkdb version]
+ set maj [lindex $vers 0]
+ set min [lindex $vers 1]
+
+ # Is this machine big or little endian? We want to mark
+ # the test directories appropriately, since testing
+ # little-endian databases generated by a big-endian machine,
+ # and/or vice versa, is interesting.
+ if { [big_endian] } {
+ set myendianness be
+ } else {
+ set myendianness le
+ }
+
+ if { $upgrade_be == 1 } {
+ set version_dir "$myendianness-$maj.${min}be"
+ set en be
+ } else {
+ set version_dir "$myendianness-$maj.${min}le"
+ set en le
+ }
+
+ set dest $upgrade_dir/$version_dir/$upgrade_method
+ exec mkdir -p $dest
+
+ set dbfiles [glob -nocomplain $dir/*.db]
+ foreach dbfile $dbfiles {
+ # Strip "$dir/" prefix and ".db" suffix to get the base name.
+ set basename [string range $dbfile \
+ [expr [string length $dir] + 1] end-3]
+
+ set newbasename $upgrade_name-$basename
+
+ # db_dump file
+ error_check_good db_dump($dbfile) \
+ [catch {exec $util_path/db_dump -k $dbfile > \
+ $dir/$newbasename.dump}] 0
+
+ # tcl_dump file
+ upgrade_dump $dbfile \
+ $dir/$newbasename.tcldump
+
+ # Rename dbfile and any dbq files.
+ file rename $dbfile $dir/$newbasename-$en.db
+ foreach dbq \
+ [glob -nocomplain $dir/__dbq.$basename.db.*] {
+ set s [string length $dir/__dbq.]
+ set newname [string replace $dbq $s \
+ [expr [string length $basename] + $s - 1] \
+ $newbasename-$en]
+ file rename $dbq $newname
+ }
+ set cwd [pwd]
+ cd $dir
+ catch {eval exec tar -cvf $dest/$newbasename.tar \
+ [glob $newbasename* __dbq.$newbasename-$en.db.*]}
+ catch {exec gzip -9v $dest/$newbasename.tar}
+ cd $cwd
+ }
+ }
+
+# check_handles
+ set remfiles {}
+ set ret [catch { glob $dir/* } result]
+ if { $ret == 0 } {
+ foreach fileorig $result {
+ #
+ # We:
+ # - Ignore any env-related files, which are
+ # those that have __db.* or log.* if we are
+ # running in an env. Also ignore files whose
+ # names start with REPDIR_; these are replication
+ # subdirectories.
+ # - Call 'dbremove' on any databases.
+ # Remove any remaining temp files.
+ #
+ switch -glob -- $fileorig {
+ */DIR_* -
+ */__db.* -
+ */log.* {
+ if { $env != "NULL" } {
+ continue
+ } else {
+ if { $is_qnx_test } {
+ catch {berkdb envremove -force \
+ -home $dir} r
+ }
+ lappend remfiles $fileorig
+ }
+ }
+ *.db {
+ set envargs ""
+ set encarg ""
+ #
+ # If in an env, it should be open crypto
+ # or not already.
+ #
+ if { $env != "NULL"} {
+ set file [file tail $fileorig]
+ set envargs " -env $env "
+ if { [is_txnenv $env] } {
+ append envargs " -auto_commit "
+ }
+ } else {
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set file $fileorig
+ }
+
+ # If a database is left in a corrupt
+ # state, dbremove might not be able to handle
+ # it (it does an open before the remove).
+ # Be prepared for this, and if necessary,
+ # just forcibly remove the file with a warning
+ # message.
+ set ret [catch \
+ {eval {berkdb dbremove} $envargs $encarg \
+ $file} res]
+ if { $ret != 0 } {
+ # If it failed, there is a chance
+ # that the previous run was using
+ # encryption and we cannot know about
+ # it (different tclsh instantiation).
+ # Try to remove it with crypto.
+ if { $env == "NULL" && \
+ $old_encrypt == 0} {
+ set ret [catch \
+ {eval {berkdb dbremove} \
+ -encryptany $passwd \
+ $envargs $file} res]
+ }
+ if { $ret != 0 } {
+ if { $quiet == 0 } {
+ puts \
+ "FAIL: dbremove in cleanup failed: $res"
+ }
+ set file $fileorig
+ lappend remfiles $file
+ }
+ }
+ }
+ default {
+ lappend remfiles $fileorig
+ }
+ }
+ }
+ if {[llength $remfiles] > 0} {
+ eval fileremove -f $remfiles
+ }
+ }
+}
+
+# log_cleanup --
+#	Delete every Berkeley DB log file (log.*) found in directory $dir.
+proc log_cleanup { dir } {
+	source ./include.tcl
+
+	foreach logfile [glob -nocomplain $dir/log.*] {
+		fileremove -f $logfile
+	}
+}
+
+# env_cleanup --
+#	Remove the environment in $dir (with crypto args if the previous
+#	run was encrypted), work around a QNX /dev/shmem zero-length region
+#	leftover, then delete log files and remaining test files.
+proc env_cleanup { dir } {
+ global old_encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set stat [catch {eval {berkdb envremove -home} $dir $encarg} ret]
+ #
+ # If something failed and we are left with a region entry
+ # in /dev/shmem that is zero-length, the envremove will
+ # succeed, and the shm_unlink will succeed, but it will not
+ # remove the zero-length entry from /dev/shmem. Remove it
+ # using fileremove or else all other tests using an env
+ # will immediately fail.
+ #
+ if { $is_qnx_test == 1 } {
+ set region_files [glob -nocomplain /dev/shmem/$dir*]
+ if { [llength $region_files] != 0 } {
+ foreach f $region_files {
+ fileremove -f $f
+ }
+ }
+ }
+ log_cleanup $dir
+ cleanup $dir NULL
+}
+
+# remote_cleanup --
+#	Remove an environment living on a remote RPC server, delete its
+#	files via rsh, then clean the corresponding local directory.
+proc remote_cleanup { server dir localdir } {
+ set home [file tail $dir]
+ error_check_good cleanup:remove [berkdb envremove -home $home \
+ -server $server] 0
+ catch {exec rsh $server rm -f $dir/*} ret
+ cleanup $localdir NULL
+}
+
+# help --
+#	Print a usage line for $cmd.  For Tcl procedures the argument list
+#	(with defaults) is introspected; for C commands we invoke the
+#	command with no arguments and rely on it printing its own usage.
+proc help { cmd } {
+ if { [info command $cmd] == $cmd } {
+ set is_proc [lsearch [info procs $cmd] $cmd]
+ if { $is_proc == -1 } {
+ # Not a procedure; must be a C command
+ # Let's hope that it takes some parameters
+ # and that it prints out a message
+ puts "Usage: [eval $cmd]"
+ } else {
+ # It is a tcl procedure
+ puts -nonewline "Usage: $cmd"
+ set args [info args $cmd]
+ foreach a $args {
+ set is_def [info default $cmd $a val]
+ if { $is_def != 0 } {
+ # Default value
+ puts -nonewline " $a=$val"
+ } elseif {$a == "args"} {
+ # Print out flag values
+ puts " options"
+ args
+ } else {
+ # No default value
+ puts -nonewline " $a"
+ }
+ }
+ puts ""
+ }
+ } else {
+ puts "$cmd is not a command"
+ }
+}
+
+# Run a recovery test for a particular operation
+# Notice that we catch the return from CP and do not do anything with it.
+# This is because Solaris CP seems to exit non-zero on occasion, but
+# everything else seems to run just fine.
+#
+# We split it into two functions so that the preparation and command
+# could be executed in a different process than the recovery.
+#
+# op_codeparse --
+#	Decode a recovery-test operation code ("abort", "commit",
+#	"prepare-abort", "prepare-commit", "prepare-discard") into its
+#	primary operation and its follow-up operation.  When $op is "op"
+#	the primary half is returned, otherwise the follow-up half
+#	(empty for plain abort/commit).
+proc op_codeparse { encodedop op } {
+	set primary ""
+	set followup ""
+	switch $encodedop {
+		"abort" -
+		"commit" {
+			set primary $encodedop
+			set followup ""
+		}
+		"prepare-abort" {
+			set primary "prepare"
+			set followup "abort"
+		}
+		"prepare-commit" {
+			set primary "prepare"
+			set followup "commit"
+		}
+		"prepare-discard" {
+			set primary "prepare"
+			set followup "discard"
+		}
+	}
+
+	if { $op == "op" } {
+		return $primary
+	}
+	return $followup
+}
+
+# op_recover --
+#	Run a recovery test for one operation.  For prepare* codes the
+#	preparation step runs in a forked child (via wrap.tcl/recdscript)
+#	so the prepared txn survives in the log when the child exits;
+#	otherwise op_recover_prep runs in-process.  Recovery itself is then
+#	checked by op_recover_rec.
+proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
+ source ./include.tcl
+
+ set op [op_codeparse $encodedop "op"]
+ set op2 [op_codeparse $encodedop "sub"]
+ puts "\t$msg $encodedop"
+ set gidf ""
+ if { $op == "prepare" } {
+ sentinel_init
+
+ # Fork off a child to run the cmd
+ # We append the gid, so start here making sure
+ # we don't have old gid's around.
+ set outfile $testdir/childlog
+ fileremove -f $testdir/gidfile
+ set gidf $testdir/gidfile
+ set pidlist {}
+ # puts "$tclsh_path $test_path/recdscript.tcl $testdir/recdout \
+ # $op $dir $env_cmd $dbfile $gidf $cmd"
+ set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \
+ $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts -nonewline $r
+ close $f1
+ fileremove -f $testdir/recdout
+ } else {
+ op_recover_prep $op $dir $env_cmd $dbfile $gidf $cmd
+ }
+ op_recover_rec $op $op2 $dir $env_cmd $dbfile $gidf
+}
+
+# op_recover_prep --
+#	Preparation half of a recovery test: snapshot the database before
+#	($dbfile.init), execute $cmd inside a transaction (substituting the
+#	ENV/TXNID/DB placeholders), snapshot after the operation
+#	($dbfile.afterop), resolve the txn via $op (commit/abort/prepare,
+#	writing the gid to $gidf for prepare), snapshot again
+#	($dbfile.final) and sanity-check the dumps.  On prepare, neither
+#	the db nor the env is closed, since a txn is still active.
+proc op_recover_prep { op dir env_cmd dbfile gidf cmd } {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ source ./include.tcl
+
+ #puts "op_recover: $op $dir $env $dbfile $cmd"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ # Save the initial file and open the environment and the file
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res
+ copy_extent_file $dir $dbfile init
+
+ convert_encrypt $env_cmd
+ set env [eval $env_cmd]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set db [berkdb open -auto_commit -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Dump out file contents for initial case
+ open_and_dump_file $dbfile $env $init_file nop \
+ dump_file_direction "-first" "-next"
+
+ set t [$env txn]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_substr $t "txn"] 1
+
+ # Now fill in the db, tmgr, and the txnid in the command
+ set exec_cmd $cmd
+
+ set i [lsearch $cmd ENV]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $env]
+ }
+
+ set i [lsearch $cmd TXNID]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $t]
+ }
+
+ set i [lsearch $exec_cmd DB]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $db]
+ }
+
+ # To test DB_CONSUME, we need to expect a record return, not "0".
+ set i [lsearch $exec_cmd "-consume"]
+ if { $i != -1 } {
+ set record_exec_cmd_ret 1
+ } else {
+ set record_exec_cmd_ret 0
+ }
+
+ # For the DB_APPEND test, we need to expect a return other than
+ # 0; set this flag to be more lenient in the error_check_good.
+ set i [lsearch $exec_cmd "-append"]
+ if { $i != -1 } {
+ set lenient_exec_cmd_ret 1
+ } else {
+ set lenient_exec_cmd_ret 0
+ }
+
+ # Execute command and commit/abort it.
+ set ret [eval $exec_cmd]
+ if { $record_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [llength [lindex $ret 0]] 2
+ } elseif { $lenient_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [expr $ret > 0] 1
+ } else {
+ error_check_good "\"$exec_cmd\"" $ret 0
+ }
+
+ set record_exec_cmd_ret 0
+ set lenient_exec_cmd_ret 0
+
+ # Sync the file so that we can capture a snapshot to test recovery.
+ error_check_good sync:$db [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+ open_and_dump_file $dir/$dbfile.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next"
+
+ #puts "\t\t\tExecuting txn_$op:$t"
+ if { $op == "prepare" } {
+ set gid [make_gid global:$t]
+ set gfd [open $gidf w+]
+ puts $gfd $gid
+ close $gfd
+ error_check_good txn_$op:$t [$t $op $gid] 0
+ } else {
+ error_check_good txn_$op:$t [$t $op] 0
+ }
+
+ switch $op {
+ "commit" { puts "\t\tCommand executed and committed." }
+ "abort" { puts "\t\tCommand executed and aborted." }
+ "prepare" { puts "\t\tCommand executed and prepared." }
+ }
+
+ # Sync the file so that we can capture a snapshot to test recovery.
+ error_check_good sync:$db [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res
+ copy_extent_file $dir $dbfile final
+ open_and_dump_file $dir/$dbfile.final NULL \
+ $final_file nop dump_file_direction "-first" "-next"
+
+ # If this is an abort or prepare-abort, it should match the
+ # original file.
+ # If this was a commit or prepare-commit, then this file should
+ # match the afterop file.
+ # If this was a prepare without an abort or commit, we still
+ # have transactions active, and peering at the database from
+ # another environment will show data from uncommitted transactions.
+ # Thus we just skip this in the prepare-only case; what
+ # we care about are the results of a prepare followed by a
+ # recovery, which we test later.
+ if { $op == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } elseif { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ # Make sure this really is one of the prepare tests
+ error_check_good assert:prepare-test $op "prepare"
+ }
+
+ # Running recovery on this database should not do anything.
+ # Flush all data to disk, close the environment and save the
+ # file.
+ # XXX DO NOT CLOSE FILE ON PREPARE -- if you are prepared,
+ # you really have an active transaction and you're not allowed
+ # to close files that are being acted upon by in-process
+ # transactions.
+ if { $op != "prepare" } {
+ error_check_good close:$db [$db close] 0
+ }
+
+ #
+ # If we are running 'prepare' don't close the env with an
+ # active transaction. Leave it alone so the close won't
+ # quietly abort it on us.
+ if { [is_substr $op "prepare"] != 1 } {
+ error_check_good envclose [$env close] 0
+ }
+ return
+}
+
+# op_recover_rec --
+#	Recovery half of a recovery test: run db_recover (catastrophic,
+#	retaining the env), verify the database, resolve any prepared txn
+#	left behind (via txn_recover and $op2, aborting after a discard),
+#	and compare the recovered dump against the afterop/init snapshots.
+#	Then substitute a pre-op copy of the file, recover again, and
+#	re-verify that recovery reproduces the committed state.
+proc op_recover_rec { op op2 dir env_cmd dbfile gidf} {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ #puts "op_recover_rec: $op $op2 $dir $env_cmd $dbfile $gidf"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\top_recover_rec: Running recovery ... "
+ flush stdout
+
+ set recargs "-h $dir -c "
+ if { $encrypt > 0 } {
+ append recargs " -P $passwd "
+ }
+ set stat [catch {eval exec $util_path/db_recover -e $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ #
+ # We cannot run db_recover here because that will open an env, run
+ # recovery, then close it, which will abort the outstanding txns.
+ # We want to do it ourselves.
+ #
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
+ puts "verified"
+
+ # If we left a txn as prepared, but not aborted or committed,
+ # we need to do a txn_recover. Make sure we have the same
+ # number of txns we want.
+ if { $op == "prepare"} {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set gfd [open $gidf r]
+ set origgid [read -nonewline $gfd]
+ close $gfd
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_$op2:$t"
+ error_check_good txn_$op2:$t [$t $op2] 0
+ #
+ # If we are testing discard, we do need to resolve
+ # the txn, so get the list again and now abort it.
+ #
+ if { $op2 == "discard" } {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_abort:$t"
+ error_check_good disc_txn_abort:$t [$t abort] 0
+ }
+ }
+
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # Now close the environment, substitute a file that will need
+ # recovery and try running recovery again.
+ reset_env $env
+ if { $op == "commit" || $op2 == "commit" } {
+ catch { file copy -force $dir/$dbfile.init $dir/$dbfile } res
+ move_file_extent $dir $dbfile init copy
+ } else {
+ catch { file copy -force $dir/$dbfile.afterop $dir/$dbfile } res
+ move_file_extent $dir $dbfile afterop copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\tRunning recovery on pre-op database ... "
+ flush stdout
+
+ set stat [catch {eval exec $util_path/db_recover $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ error_check_good db_verify_preop [verify_dir $testdir "\t\t" 0 1] 0
+
+ puts "verified"
+
+ set env [eval $env_cmd]
+
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $final_file $final_file.sort
+ filesort $afterop_file $afterop_file.sort
+ error_check_good \
+ diff(post-$op,recovered):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # This should just close the environment, not blow it away.
+ reset_env $env
+}
+
+# populate --
+#	Insert up to $n records from the dictionary into $db under txn
+#	$txn.  Record-based methods use 1-based record numbers as keys;
+#	$dups == 1 stores everything under the single key "duplicate_key";
+#	$bigdata == 1 randomly inflates ~1/3 of the data items 1000x.
+#	Returns 0.
+proc populate { db method txn n dups bigdata } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } elseif { $dups == 1 } {
+ set key duplicate_key
+ } else {
+ set key $str
+ }
+ if { $bigdata == 1 && [berkdb random_int 1 3] == 1} {
+ set str [replicate $str 1000]
+ }
+
+ set ret [$db put -txn $txn $key $str]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
+# big_populate --
+#	Insert up to $n records whose keys are dictionary words replicated
+#	50 times (large keys), with the plain word as data.  Returns 0.
+proc big_populate { db txn n } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ set key [replicate $str 50]
+ set ret [$db put -txn $txn $key $str]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
+# unpopulate --
+#	Walk $db with a cursor inside txn $txn, deleting records as we go.
+#	Deletes everything when $num is 0, otherwise stops after $num
+#	deletions.  Returns 0.
+proc unpopulate { db txn num } {
+	source ./include.tcl
+
+	set c [eval {$db cursor} "-txn $txn"]
+	error_check_bad $db:cursor $c NULL
+	error_check_good $db:cursor [is_substr $c $db] 1
+
+	set i 0
+	for {set d [$c get -first] } { [llength $d] != 0 } {
+	    set d [$c get -next] } {
+		$c del
+		incr i
+		# Bug fix: the bound check read "$ >= $num" -- a bare "$"
+		# is a Tcl syntax error as soon as $num != 0; the deletion
+		# counter $i was intended.
+		if { $num != 0 && $i >= $num } {
+			break
+		}
+	}
+	error_check_good cursor_close [$c close] 0
+	return 0
+}
+
+# reset_env --
+#	Close environment handle $env; fail the test if the close fails.
+proc reset_env { env } {
+ error_check_good env_close [$env close] 0
+}
+
+# minlocks --
+#	Deadlock-detector policy variant; body is identical to maxlocks/
+#	minwrites -- presumably the proc NAME selects the detector policy
+#	in the spawning test scripts (NOTE(review): confirm with callers).
+proc minlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+# maxlocks --
+#	Deadlock-detector policy variant; see countlocks for the actual
+#	locking work.
+proc maxlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+# minwrites --
+#	Deadlock-detector policy variant; see countlocks for the actual
+#	locking work.
+proc minwrites { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+# countlocks --
+#	Acquire obj_id*4 read locks (and, except for obj_id 1, one write
+#	lock) for $locker_id, run the ring deadlock scenario, then release
+#	everything.  Returns ring's result (1/DEADLOCK) or ERROR on a
+#	lock_get failure.
+proc countlocks { myenv locker_id obj_id num } {
+ set locklist ""
+ for { set i 0} {$i < [expr $obj_id * 4]} { incr i } {
+ set r [catch {$myenv lock_get read $locker_id \
+ [expr $obj_id * 1000 + $i]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ # Now acquire a write lock
+ if { $obj_id != 1 } {
+ set r [catch {$myenv lock_get write $locker_id \
+ [expr $obj_id * 1000 + 10]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ set ret [ring $myenv $locker_id $obj_id $num]
+
+ foreach l $locklist {
+ error_check_good lockput:$l [$l put] 0
+ }
+
+ return $ret
+}
+
+# This routine will let us obtain a ring of deadlocks.
+# Each locker will get a lock on obj_id, then sleep, and
+# then try to lock (obj_id + 1) % num.
+# When the lock is finally granted, we release our locks and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker deadlocks and the
+# rest all finish successfully.
+# ring --
+#	One participant in a ring-of-deadlocks scenario: write-lock
+#	$obj_id, sleep, then try to write-lock (obj_id + 1) % num.
+#	Returns 1 when both locks were granted, DEADLOCK when the second
+#	lock_get was rejected by the deadlock detector, ERROR otherwise.
+proc ring { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} {
+ puts $lock1
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1
+ }
+
+ # Give every participant time to take its first lock.
+ tclsleep 30
+ set nextobj [expr ($obj_id + 1) % $num]
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ puts $lock2
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$obj_id $lock2 NULL
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+}
+
+# This routine will create massive deadlocks.
+# Each locker will get a readlock on obj_id, then sleep, and
+# then try to upgrade the readlock to a write lock.
+# When the lock is finally granted, we release our first lock and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker succeeds in getting all
+# the locks and everyone else deadlocks.
+# clump --
+#	One participant in a massive-deadlock scenario: read-lock object
+#	10 (the passed obj_id is deliberately overridden), sleep, then try
+#	to upgrade to a write lock.  Returns 1 on success, DEADLOCK if the
+#	upgrade was rejected, ERROR otherwise.
+proc clump { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ # Everyone contends on the same object.
+ set obj_id 10
+ if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} {
+ puts $lock1
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id \
+ [is_valid_lock $lock1 $myenv] TRUE
+ }
+
+ tclsleep 30
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+ }
+
+# dead_check --
+#	Validate the outcome of a deadlock test of type $t run with $procs
+#	participants: checks the expected split between deadlocked ($dead)
+#	and clean ($clean) finishers, and that no process hit an
+#	unexpected error ($other must be 0).
+proc dead_check { t procs timeout dead clean other } {
+ error_check_good $t:$procs:other $other 0
+ switch $t {
+ ring {
+ # with timeouts the number of deadlocks is unpredictable
+ if { $timeout != 0 && $dead > 1 } {
+ set clean [ expr $clean + $dead - 1]
+ set dead 1
+ }
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ clump {
+ error_check_good $t:$procs:deadlocks $dead \
+ [expr $procs - 1]
+ error_check_good $t:$procs:success $clean 1
+ }
+ oldyoung {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ maxlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minwrites {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ default {
+ error "Test $t not implemented"
+ }
+ }
+}
+
+# rdebug --
+#	Record recovery-test debug state in the recd_* globals consulted
+#	elsewhere in the test suite.
+proc rdebug { id op where } {
+ global recd_debug
+ global recd_id
+ global recd_op
+
+ set recd_debug $where
+ set recd_id $id
+ set recd_op $op
+}
+
+# rtag --
+#	Check whether the tag character -- the second-to-last character of
+#	the first list element of $msg -- matches $id.  Returns 1/0.
+proc rtag { msg id } {
+	set word [lindex $msg 0]
+	set pos [expr [string length $word] - 2]
+	set tagchar [string range $word $pos $pos]
+	if { $id == $tagchar } {
+		return 1
+	}
+	return 0
+}
+
+# zero_list --
+#	Return a list of $n zeros (empty list when $n <= 0).
+proc zero_list { n } {
+	set result {}
+	for { set i 0 } { $i < $n } { incr i } {
+		lappend result 0
+	}
+	return $result
+}
+
+# check_dump --
+#	Dump callback: print one key/data pair in the standard format.
+proc check_dump { k d } {
+ puts "key: $k data: $d"
+}
+
+# reverse --
+#	Return string $s with its characters in reverse order.
+proc reverse { s } {
+	set out ""
+	foreach ch [split $s ""] {
+		set out "$ch$out"
+	}
+	return $out
+}
+
+#
+# This is a internal only proc. All tests should use 'is_valid_db' etc.
+#
+#
+# This is a internal only proc. All tests should use 'is_valid_db' etc.
+#
+# is_valid_widget --
+#	A handle is valid when it is the $expected prefix followed only by
+#	digits.  Returns TRUE when valid, otherwise returns $w itself.
+proc is_valid_widget { w expected } {
+	set plen [string length $expected]
+	set prefix [string range $w 0 [expr $plen - 1]]
+	if { [string compare $prefix $expected] != 0 } {
+		return $w
+	}
+
+	# Everything after the prefix must be a decimal digit.
+	foreach ch [split [string range $w $plen end] ""] {
+		if { $ch < "0" || $ch > "9" } {
+			return $w
+		}
+	}
+
+	return TRUE
+}
+
+# is_valid_db -- TRUE when $db looks like a db handle (db<digits>).
+proc is_valid_db { db } {
+ return [is_valid_widget $db db]
+}
+
+# is_valid_env -- TRUE when $env looks like an env handle (env<digits>).
+proc is_valid_env { env } {
+ return [is_valid_widget $env env]
+}
+
+# is_valid_cursor -- TRUE when $dbc is a cursor handle of $db ($db.c<digits>).
+proc is_valid_cursor { dbc db } {
+ return [is_valid_widget $dbc $db.c]
+}
+
+# is_valid_lock -- TRUE when $lock is a lock handle of $env.
+# NOTE(review): redefined verbatim further below; one copy should go.
+proc is_valid_lock { lock env } {
+ return [is_valid_widget $lock $env.lock]
+}
+
+# is_valid_logc -- TRUE when $logc is a log cursor handle of $env.
+proc is_valid_logc { logc env } {
+ return [is_valid_widget $logc $env.logc]
+}
+
+# is_valid_mpool -- TRUE when $mpool is a memory pool handle of $env.
+proc is_valid_mpool { mpool env } {
+ return [is_valid_widget $mpool $env.mp]
+}
+
+# is_valid_page -- TRUE when $page is a page handle of $mpool.
+proc is_valid_page { page mpool } {
+ return [is_valid_widget $page $mpool.pg]
+}
+
+# is_valid_txn -- TRUE when $txn is a transaction handle of $env.
+proc is_valid_txn { txn env } {
+ return [is_valid_widget $txn $env.txn]
+}
+
+# is_valid_mutex -- TRUE when $m is a mutex handle of $env.
+proc is_valid_mutex { m env } {
+ return [is_valid_widget $m $env.mutex]
+}
+
+# NOTE(review): duplicate definition of is_valid_lock (also defined
+# above); Tcl keeps this later one.  Behavior is identical -- the
+# earlier copy is dead code and could be dropped.
+proc is_valid_lock {l env} {
+ return [is_valid_widget $l $env.lock]
+}
+
+# is_valid_locker -- TRUE when $l is a bare locker id (digits only).
+proc is_valid_locker {l } {
+ return [is_valid_widget $l ""]
+}
+
+# send_cmd --
+#	Send $cmd to a child tclsh over pipe $fd (wrapped so failures are
+#	reported as "FAIL: ..."), wait $sleep seconds, then read back and
+#	return the child's result line.
+proc send_cmd { fd cmd {sleep 2}} {
+ source ./include.tcl
+
+ puts $fd "if \[catch {set v \[$cmd\] ; puts \$v} ret\] { \
+ puts \"FAIL: \$ret\" \
+ }"
+ puts $fd "flush stdout"
+ flush $fd
+ berkdb debug_check
+ tclsleep $sleep
+
+ set r [rcv_result $fd]
+ return $r
+}
+
+# rcv_result --
+#	Read one result line from child pipe $fd; fails the test on EOF.
+proc rcv_result { fd } {
+ set r [gets $fd result]
+ error_check_bad remote_read $r -1
+
+ return $result
+}
+
+# send_timed_cmd --
+#	Send $cmd to a child tclsh wrapped in timestamp bookkeeping so the
+#	child prints the elapsed time.  Does not read the reply.
+#	NOTE(review): $rcv_too is accepted but unused here -- confirm.
+proc send_timed_cmd { fd rcv_too cmd } {
+ set c1 "set start \[timestamp -r\]; "
+ set c2 "puts \[expr \[timestamp -r\] - \$start\]"
+ set full_cmd [concat $c1 $cmd ";" $c2]
+
+ puts $fd $full_cmd
+ puts $fd "flush stdout"
+ flush $fd
+ return 0
+}
+
+#
+# The rationale behind why we have *two* "data padding" routines is outlined
+# below:
+#
+# Both pad_data and chop_data truncate data that is too long. However,
+# pad_data also adds the pad character to pad data out to the fixed length
+# record length.
+#
+# Which routine you call does not depend on the length of the data you're
+# using, but on whether you're doing a put or a get. When we do a put, we
+# have to make sure the data isn't longer than the size of a record because
+# otherwise we'll get an error (use chop_data). When we do a get, we want to
+# check that db padded everything correctly (use pad_data on the value against
+# which we are comparing).
+#
+# We don't want to just use the pad_data routine for both purposes, because
+# we want to be able to test whether or not db is padding correctly. For
+# example, the queue access method had a bug where when a record was
+# overwritten (*not* a partial put), only the first n bytes of the new entry
+# were written, n being the new entry's (unpadded) length. So, if we did
+# a put with key,value pair (1, "abcdef") and then a put (1, "z"), we'd get
+# back (1,"zbcdef"). If we had used pad_data instead of chop_data, we would
+# have gotten the "correct" result, but we wouldn't have found this bug.
+# chop_data --
+#	For fixed-length methods, truncate $data to $fixed_len bytes (no
+#	padding) -- use before a put.  See the rationale comment above.
+proc chop_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1 && \
+ [string length $data] > $fixed_len} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
+
+# pad_data --
+#	For fixed-length methods, truncate/NUL-pad $data to exactly
+#	$fixed_len bytes -- use on the expected value when checking a get.
+proc pad_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
+
+# make_fixed_length --
+#	Pad $data with $fixed_pad characters up to $fixed_len for fixed-
+#	length methods; fails the test if $data is already too long.
+#	NOTE(review): the $pad parameter is unused; $fixed_pad is used.
+proc make_fixed_length {method data {pad 0}} {
+ global fixed_len
+ global fixed_pad
+
+ if {[is_fixed_length $method] == 1} {
+ if {[string length $data] > $fixed_len } {
+ error_check_bad make_fixed_len:TOO_LONG 1 1
+ }
+ while { [string length $data] < $fixed_len } {
+ set data [format $data%c $fixed_pad]
+ }
+ }
+ return $data
+}
+
+# NOTE(review): this definition (pad to 127) is immediately overridden
+# by the next make_gid definition (pad to 128); Tcl keeps the later
+# one, so this copy is dead code and could be dropped.
+proc make_gid {data} {
+ while { [string length $data] < 127 } {
+ set data [format ${data}0]
+ }
+ return $data
+}
+
+# make_gid --
+#	Pad $data with trailing '0' characters to the 128-byte global
+#	transaction id length.  Uses append rather than "format ${data}0"
+#	so that a '%' character in $data is not interpreted as a format
+#	specifier (which would error or mangle the gid).
+proc make_gid {data} {
+	while { [string length $data] < 128 } {
+		append data "0"
+	}
+	return $data
+}
+
+# shift data for partial
+# pad with fixed pad (which is NULL)
+# shift data for partial
+# pad with fixed pad (which is NULL)
+# partial_shift --
+#	Shift $data by $offset bytes "right" (prepend NULs) or "left"
+#	(drop leading bytes, NUL-pad the tail), clamping the result to
+#	fixed_len - 1 bytes each step.
+proc partial_shift { data offset direction} {
+ global fixed_len
+
+ set len [expr $fixed_len - 1]
+
+ if { [string compare $direction "right"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [binary format x1a$len $data]
+ }
+ } elseif { [string compare $direction "left"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [string range $data 1 end]
+ set data [binary format a$len $data]
+ }
+ }
+ return $data
+}
+
+# string compare does not always work to compare
+# this data, nor does expr (==)
+# specialized routine for comparison
+# (for use in fixed len recno and q)
+# string compare does not always work to compare
+# this data, nor does expr (==)
+# specialized routine for comparison
+# (for use in fixed len recno and q)
+# binary_compare --
+#	Byte-for-byte comparison of two (possibly binary) strings.
+#	Returns 0 when identical, 1 otherwise.
+proc binary_compare { data1 data2 } {
+	set len1 [string length $data1]
+	if { $len1 != [string length $data2] } {
+		return 1
+	}
+	if { [string compare -length $len1 $data1 $data2] == 0 } {
+		return 0
+	}
+	return 1
+}
+
+# convert_method --
+#	Canonicalize the many accepted spellings of an access method into
+#	one of the flags -btree, -hash, -queue or -recno (queue-extent
+#	variants also map to -queue).  Errors out on an unknown method.
+proc convert_method { method } {
+ switch -- $method {
+ -btree -
+ -dbtree -
+ dbtree -
+ -ddbtree -
+ ddbtree -
+ -rbtree -
+ BTREE -
+ DB_BTREE -
+ DB_RBTREE -
+ RBTREE -
+ bt -
+ btree -
+ db_btree -
+ db_rbtree -
+ rbt -
+ rbtree { return "-btree" }
+
+ -dhash -
+ -ddhash -
+ -hash -
+ DB_HASH -
+ HASH -
+ dhash -
+ ddhash -
+ db_hash -
+ h -
+ hash { return "-hash" }
+
+ -queue -
+ DB_QUEUE -
+ QUEUE -
+ db_queue -
+ q -
+ qam -
+ queue { return "-queue" }
+
+ -queueextent -
+ QUEUEEXTENT -
+ qe -
+ qamext -
+ -queueext -
+ queueextent -
+ queueext { return "-queue" }
+
+ -frecno -
+ -recno -
+ -rrecno -
+ DB_FRECNO -
+ DB_RECNO -
+ DB_RRECNO -
+ FRECNO -
+ RECNO -
+ RRECNO -
+ db_frecno -
+ db_recno -
+ db_rrecno -
+ frec -
+ frecno -
+ rec -
+ recno -
+ rrec -
+ rrecno { return "-recno" }
+
+ default { error "FAIL:[timestamp] $method: unknown method" }
+ }
+}
+
+# split_encargs --
+#	Split any "-encrypta* <passwd>" pair out of $largs into the
+#	caller's variable named by $encargsp (replacing it with a bare
+#	"-encrypt" in the returned arg list).  Returns the modified list.
+#	NOTE(review): the 'global encrypt' declaration is unused here.
+proc split_encargs { largs encargsp } {
+ global encrypt
+ upvar $encargsp e
+ set eindex [lsearch $largs "-encrypta*"]
+ if { $eindex == -1 } {
+ set e ""
+ set newl $largs
+ } else {
+ set eend [expr $eindex + 1]
+ set e [lrange $largs $eindex $eend]
+ set newl [lreplace $largs $eindex $eend "-encrypt"]
+ }
+ return $newl
+}
+
+# convert_encrypt --
+#	Update the global encrypt flag from the presence of any -encrypt*
+#	option in $largs, saving the previous value in old_encrypt.
+proc convert_encrypt { largs } {
+ global encrypt
+ global old_encrypt
+
+ set old_encrypt $encrypt
+ set encrypt 0
+ if { [lsearch $largs "-encrypt*"] != -1 } {
+ set encrypt 1
+ }
+}
+
+# If recno-with-renumbering or btree-with-renumbering is specified, then
+# fix the arguments to specify the DB_RENUMBER/DB_RECNUM option for the
+# -flags argument.
+proc convert_args { method {largs ""} } {
+ global fixed_len
+ global fixed_pad
+ global gen_upgrade
+ global upgrade_be
+ source ./include.tcl
+
+ if { [string first - $largs] == -1 &&\
+ [string compare $largs ""] != 0 &&\
+ [string compare $largs {{}}] != 0 } {
+ set errstring "args must contain a hyphen; does this test\
+ have no numeric args?"
+ puts "FAIL:[timestamp] $errstring (largs was $largs)"
+ return -code return
+ }
+
+ convert_encrypt $largs
+ if { $gen_upgrade == 1 && $upgrade_be == 1 } {
+ append largs " -lorder 4321 "
+ } elseif { $gen_upgrade == 1 && $upgrade_be != 1 } {
+ append largs " -lorder 1234 "
+ }
+
+ if { [is_rrecno $method] == 1 } {
+ append largs " -renumber "
+ } elseif { [is_rbtree $method] == 1 } {
+ append largs " -recnum "
+ } elseif { [is_dbtree $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_ddbtree $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
+ } elseif { [is_dhash $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_ddhash $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
+ } elseif { [is_queueext $method] == 1 } {
+ append largs " -extent 2 "
+ }
+
+ if {[is_fixed_length $method] == 1} {
+ append largs " -len $fixed_len -pad $fixed_pad "
+ }
+ return $largs
+}
+
+# is_btree --
+#	Return 1 when $method is one of the plain-btree spellings.
+proc is_btree { method } {
+	set names { -btree BTREE DB_BTREE bt btree }
+	if { [lsearch $names $method] < 0 } {
+		return 0
+	}
+	return 1
+}
+
+# is_dbtree -- 1 when $method names btree-with-duplicates.
+proc is_dbtree { method } {
+ set names { -dbtree dbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# is_ddbtree -- 1 when $method names btree-with-sorted-duplicates.
+proc is_ddbtree { method } {
+ set names { -ddbtree ddbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# is_rbtree -- 1 when $method names btree-with-record-numbers.
+proc is_rbtree { method } {
+ set names { -rbtree rbtree RBTREE db_rbtree DB_RBTREE rbt }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# is_recno -- 1 when $method names plain recno.
+proc is_recno { method } {
+ set names { -recno DB_RECNO RECNO db_recno rec recno}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# is_rrecno -- 1 when $method names recno-with-renumbering.
+proc is_rrecno { method } {
+ set names { -rrecno rrecno RRECNO db_rrecno DB_RRECNO rrec }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_frecno { method } {
+ set names { -frecno frecno frec FRECNO db_frecno DB_FRECNO}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_hash { method } {
+ set names { -hash DB_HASH HASH db_hash h hash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_dhash { method } {
+ set names { -dhash dhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddhash { method } {
+ set names { -ddhash ddhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queue { method } {
+ if { [is_queueext $method] == 1 } {
+ return 1
+ }
+
+ set names { -queue DB_QUEUE QUEUE db_queue q queue qam }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queueext { method } {
+ set names { -queueextent queueextent QUEUEEXTENT qe qamext \
+ queueext -queueext }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_record_based { method } {
+ if { [is_recno $method] || [is_frecno $method] ||
+ [is_rrecno $method] || [is_queue $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_fixed_length { method } {
+ if { [is_queue $method] || [is_frecno $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# Sort lines in file $in and write results to file $out.
+# This is a more portable alternative to execing the sort command,
+# which has assorted issues on NT [#1576].
+# The addition of a "-n" argument will sort numerically.
+proc filesort { in out { arg "" } } {
+ set i [open $in r]
+
+ set ilines {}
+ while { [gets $i line] >= 0 } {
+ lappend ilines $line
+ }
+
+ if { [string compare $arg "-n"] == 0 } {
+ set olines [lsort -integer $ilines]
+ } else {
+ set olines [lsort $ilines]
+ }
+
+ close $i
+
+ set o [open $out w]
+ foreach line $olines {
+ puts $o $line
+ }
+
+ close $o
+}
+
+# Print lines up to the nth line of infile out to outfile, inclusive.
+# The optional beg argument tells us where to start.
+proc filehead { n infile outfile { beg 0 } } {
+ set in [open $infile r]
+ set out [open $outfile w]
+
+ # Sed uses 1-based line numbers, and so we do too.
+ for { set i 1 } { $i < $beg } { incr i } {
+ if { [gets $in junk] < 0 } {
+ break
+ }
+ }
+
+ for { } { $i <= $n } { incr i } {
+ if { [gets $in line] < 0 } {
+ break
+ }
+ puts $out $line
+ }
+
+ close $in
+ close $out
+}
+
+# Remove file (this replaces $RM).
+# Usage: fileremove filenames =~ rm; fileremove -f filenames =~ rm -rf.
+proc fileremove { args } {
+ set forceflag ""
+ foreach a $args {
+ if { [string first - $a] == 0 } {
+ # It's a flag. Better be f.
+ if { [string first f $a] != 1 } {
+ return -code error "bad flag to fileremove"
+ } else {
+ set forceflag "-force"
+ }
+ } else {
+ eval {file delete $forceflag $a}
+ }
+ }
+}
+
+proc findfail { args } {
+ foreach a $args {
+ if { [file exists $a] == 0 } {
+ continue
+ }
+ set f [open $a r]
+ while { [gets $f line] >= 0 } {
+ if { [string first FAIL $line] == 0 } {
+ close $f
+ return 1
+ }
+ }
+ close $f
+ }
+ return 0
+}
+
+# Sleep for s seconds.
+proc tclsleep { s } {
+ # On Windows, the system time-of-day clock may update as much
+ # as 55 ms late due to interrupt timing. Don't take any
+ # chances; sleep extra-long so that when tclsleep 1 returns,
+ # it's guaranteed to be a new second.
+ after [expr $s * 1000 + 56]
+}
+
+# Kill a process.
+proc tclkill { id } {
+ source ./include.tcl
+
+ while { [ catch {exec $KILL -0 $id} ] == 0 } {
+ catch {exec $KILL -9 $id}
+ tclsleep 5
+ }
+}
+
+# Compare two files, a la diff. Returns 1 if non-identical, 0 if identical.
+proc filecmp { file_a file_b } {
+ set fda [open $file_a r]
+ set fdb [open $file_b r]
+
+ set nra 0
+ set nrb 0
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ while { $nra >= 0 && $nrb >= 0 } {
+ set nra [gets $fda aline]
+ set nrb [gets $fdb bline]
+
+ if { $nra != $nrb || [string compare $aline $bline] != 0} {
+ close $fda
+ close $fdb
+ return 1
+ }
+ }
+
+ close $fda
+ close $fdb
+ return 0
+}
+
+# Given two SORTED files, one of which is a complete superset of the other,
+# extract out the unique portions of the superset and put them in
+# the given outfile.
+proc fileextract { superset subset outfile } {
+ set sup [open $superset r]
+ set sub [open $subset r]
+ set outf [open $outfile w]
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ set nrp [gets $sup pline]
+ set nrb [gets $sub bline]
+ while { $nrp >= 0 } {
+ if { $nrp != $nrb || [string compare $pline $bline] != 0} {
+ puts $outf $pline
+ } else {
+ set nrb [gets $sub bline]
+ }
+ set nrp [gets $sup pline]
+ }
+
+ close $sup
+ close $sub
+ close $outf
+ return 0
+}
+
+# Verify all .db files in the specified directory.
+proc verify_dir { {directory $testdir} \
+ { pref "" } { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } } {
+ global encrypt
+ global passwd
+
+ # If we're doing database verification between tests, we don't
+ # want to do verification twice without an intervening cleanup--some
+ # test was skipped. Always verify by default (noredo == 0) so
+ # that explicit calls to verify_dir during tests don't require
+ # cleanup commands.
+ if { $noredo == 1 } {
+ if { [file exists $directory/NOREVERIFY] == 1 } {
+ if { $quiet == 0 } {
+ puts "Skipping verification."
+ }
+ return
+ }
+ set f [open $directory/NOREVERIFY w]
+ close $f
+ }
+
+ if { [catch {glob $directory/*.db} dbs] != 0 } {
+ # No files matched
+ return
+ }
+ if { [file exists /dev/stderr] == 1 } {
+ set errfilearg "-errfile /dev/stderr "
+ } else {
+ set errfilearg ""
+ }
+ set errpfxarg {-errpfx "FAIL: verify" }
+ set errarg $errfilearg$errpfxarg
+ set ret 0
+
+ # Open an env, so that we have a large enough cache. Pick
+ # a fairly generous default if we haven't specified something else.
+
+ if { $cachesize == 0 } {
+ set cachesize [expr 1024 * 1024]
+ }
+ set encarg ""
+ if { $encrypt != 0 } {
+ set encarg "-encryptaes $passwd"
+ }
+
+ set env [eval {berkdb_env -create -private} $encarg \
+ {-cachesize [list 0 $cachesize 0]}]
+ set earg " -env $env $errarg "
+
+ foreach db $dbs {
+ if { [catch {eval {berkdb dbverify} $earg $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Verification of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good verify:$db $res 0
+ if { $quiet == 0 } {
+ puts "${pref}Verification of $db succeeded."
+ }
+ }
+
+ # Skip the dump if it's dangerous to do it.
+ if { $nodump == 0 } {
+ if { [catch {eval dumploadtest $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Dump/load of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good dumpload:$db $res 0
+ if { $quiet == 0 } {
+ puts \
+ "${pref}Dump/load of $db succeeded."
+ }
+ }
+ }
+ }
+
+ error_check_good vrfyenv_close [$env close] 0
+
+ return $ret
+}
+
+# Is the database handle in $db a master database containing subdbs?
+proc check_for_subdbs { db } {
+ set stat [$db stat]
+ for { set i 0 } { [string length [lindex $stat $i]] > 0 } { incr i } {
+ set elem [lindex $stat $i]
+ if { [string compare [lindex $elem 0] Flags] == 0 } {
+ # This is the list of flags; look for
+ # "subdatabases".
+ if { [is_substr [lindex $elem 1] subdatabases] } {
+ return 1
+ }
+ }
+ }
+ return 0
+}
+
+proc dumploadtest { db {subdb ""} } {
+ global util_path
+ global encrypt
+ global passwd
+
+ set newdbname $db-dumpload.db
+
+ # Open original database, or subdb if we have one.
+ set dbarg ""
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set dbarg "-encryptany $passwd"
+ set utilflag "-P $passwd"
+ }
+ set max_size [expr 15 * 1024]
+ if { [string length $subdb] == 0 } {
+ set olddb [eval {berkdb_open -rdonly} $dbarg $db]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ if { [check_for_subdbs $olddb] } {
+ # If $db has subdatabases, dumploadtest each one
+ # separately.
+ set oc [$olddb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+
+ for { set dbt [$oc get -first] } \
+ { [llength $dbt] > 0 } \
+ { set dbt [$oc get -next] } {
+ set subdb [lindex [lindex $dbt 0] 0]
+
+ # Skip any files over this size. The problem is
+				# that when we dump/load it, files that are
+ # too big result in E2BIG errors because the
+ # arguments to db_dump are too long. 64K seems
+				# to be the limit (on FreeBSD); cut it to 15K
+ # just to be safe.
+ if {[string length $subdb] < $max_size && \
+ [string length $subdb] != 0} {
+ dumploadtest $db $subdb
+ }
+ }
+ error_check_good oldcclose [$oc close] 0
+ error_check_good olddbclose [$olddb close] 0
+ return 0
+ }
+ # No subdatabase
+ set have_subdb 0
+ } else {
+ set olddb [eval {berkdb_open -rdonly} $dbarg {$db $subdb}]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ set have_subdb 1
+ }
+
+ # Do a db_dump test. Dump/load each file.
+ if { $have_subdb } {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ -s {$subdb} $db | \
+ $util_path/db_load $utilflag $newdbname} res]
+ } else {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ $db | $util_path/db_load $utilflag $newdbname} res]
+ }
+ error_check_good db_dump/db_load($db:$res) $rval 0
+
+ # Now open new database.
+ set newdb [eval {berkdb_open -rdonly} $dbarg $newdbname]
+ error_check_good newdb($db) [is_valid_db $newdb] TRUE
+
+ # Walk through olddb and newdb and make sure their contents
+ # are identical.
+ set oc [$olddb cursor]
+ set nc [$newdb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+ error_check_good new_cursor($db) \
+ [is_valid_cursor $nc $newdb] TRUE
+
+ for { set odbt [$oc get -first] } { [llength $odbt] > 0 } \
+ { set odbt [$oc get -next] } {
+ set ndbt [$nc get -get_both \
+ [lindex [lindex $odbt 0] 0] [lindex [lindex $odbt 0] 1]]
+ error_check_good db_compare($db/$newdbname) $ndbt $odbt
+ }
+
+ for { set ndbt [$nc get -first] } { [llength $ndbt] > 0 } \
+ { set ndbt [$nc get -next] } {
+ set odbt [$oc get -get_both \
+ [lindex [lindex $ndbt 0] 0] [lindex [lindex $ndbt 0] 1]]
+ error_check_good db_compare_back($db) $odbt $ndbt
+ }
+
+ error_check_good orig_cursor_close($db) [$oc close] 0
+ error_check_good new_cursor_close($db) [$nc close] 0
+
+ error_check_good orig_db_close($db) [$olddb close] 0
+ error_check_good new_db_close($db) [$newdb close] 0
+
+ eval berkdb dbremove $dbarg $newdbname
+
+ return 0
+}
+
+# Generate randomly ordered, guaranteed-unique four-character strings that can
+# be used to differentiate duplicates without creating duplicate duplicates.
+# (test031 & test032) randstring_init is required before the first call to
+# randstring and initializes things for up to $i distinct strings; randstring
+# gets the next string.
+proc randstring_init { i } {
+ global rs_int_list alphabet
+
+ # Fail if we can't generate sufficient unique strings.
+ if { $i > [expr 26 * 26 * 26 * 26] } {
+ set errstring\
+ "Duplicate set too large for random string generator"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set rs_int_list {}
+
+ # generate alphabet array
+ for { set j 0 } { $j < 26 } { incr j } {
+ set a($j) [string index $alphabet $j]
+ }
+
+ # Generate a list with $i elements, { aaaa, aaab, ... aaaz, aaba ...}
+ for { set d1 0 ; set j 0 } { $d1 < 26 && $j < $i } { incr d1 } {
+ for { set d2 0 } { $d2 < 26 && $j < $i } { incr d2 } {
+ for { set d3 0 } { $d3 < 26 && $j < $i } { incr d3 } {
+ for { set d4 0 } { $d4 < 26 && $j < $i } \
+ { incr d4 } {
+ lappend rs_int_list \
+ $a($d1)$a($d2)$a($d3)$a($d4)
+ incr j
+ }
+ }
+ }
+ }
+
+ # Randomize the list.
+ set rs_int_list [randomize_list $rs_int_list]
+}
+
+# Randomize a list. Returns a randomly-reordered copy of l.
+proc randomize_list { l } {
+ set i [llength $l]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ # Pick a random element from $j to the end
+ set k [berkdb random_int $j [expr $i - 1]]
+
+ # Swap it with element $j
+ set t1 [lindex $l $j]
+ set t2 [lindex $l $k]
+
+ set l [lreplace $l $j $j $t2]
+ set l [lreplace $l $k $k $t1]
+ }
+
+ return $l
+}
+
+proc randstring {} {
+ global rs_int_list
+
+ if { [info exists rs_int_list] == 0 || [llength $rs_int_list] == 0 } {
+ set errstring "randstring uninitialized or used too often"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set item [lindex $rs_int_list 0]
+ set rs_int_list [lreplace $rs_int_list 0 0]
+
+ return $item
+}
+
+# Takes a variable-length arg list, and returns a list containing the list of
+# the non-hyphenated-flag arguments, followed by a list of each alphanumeric
+# flag it finds.
+proc extractflags { args } {
+ set inflags 1
+ set flags {}
+ while { $inflags == 1 } {
+ set curarg [lindex $args 0]
+ if { [string first "-" $curarg] == 0 } {
+ set i 1
+ while {[string length [set f \
+ [string index $curarg $i]]] > 0 } {
+ incr i
+ if { [string compare $f "-"] == 0 } {
+ set inflags 0
+ break
+ } else {
+ lappend flags $f
+ }
+ }
+ set args [lrange $args 1 end]
+ } else {
+ set inflags 0
+ }
+ }
+ return [list $args $flags]
+}
+
+# Wrapper for berkdb open, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_open { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb open} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_open_noerr { args } {
+ eval {berkdb open} $args
+}
+
+# Wrapper for berkdb env, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_env { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb env} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_env_noerr { args } {
+ eval {berkdb env} $args
+}
+
+proc check_handles { {outf stdout} } {
+ global ohandles
+
+ set handles [berkdb handles]
+ if {[llength $handles] != [llength $ohandles]} {
+ puts $outf "WARNING: Open handles during cleanup: $handles"
+ }
+ set ohandles $handles
+}
+
+proc open_handles { } {
+ return [llength [berkdb handles]]
+}
+
+proc move_file_extent { dir dbfile tag op } {
+ set curfiles [get_extfiles $dir $dbfile ""]
+ set tagfiles [get_extfiles $dir $dbfile $tag]
+ #
+ # We want to copy or rename only those that have been saved,
+ # so delete all the current extent files so that we don't
+ # end up with extra ones we didn't restore from our saved ones.
+ foreach extfile $curfiles {
+ file delete -force $extfile
+ }
+ foreach extfile $tagfiles {
+ set i [string last "." $extfile]
+ incr i
+ set extnum [string range $extfile $i end]
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ #
+ # We can either copy or rename
+ #
+ file $op -force $extfile $dbq
+ }
+}
+
+proc copy_extent_file { dir dbfile tag { op copy } } {
+ set files [get_extfiles $dir $dbfile ""]
+ foreach extfile $files {
+ set i [string last "." $extfile]
+ incr i
+ set extnum [string range $extfile $i end]
+ file $op -force $extfile $dir/__dbq.$dbfile.$tag.$extnum
+ }
+}
+
+proc get_extfiles { dir dbfile tag } {
+ if { $tag == "" } {
+ set filepat $dir/__dbq.$dbfile.\[0-9\]*
+ } else {
+ set filepat $dir/__dbq.$dbfile.$tag.\[0-9\]*
+ }
+ return [glob -nocomplain -- $filepat]
+}
+
+proc make_ext_filename { dir dbfile extnum } {
+ return $dir/__dbq.$dbfile.$extnum
+}
+
+# All pids for Windows 9X are negative values. When we want to have
+# unsigned int values, unique to the process, we'll take the absolute
+# value of the pid. This avoids unsigned/signed mistakes, yet
+# guarantees uniqueness, since each system has pids that are all
+# either positive or negative.
+#
+proc sanitized_pid { } {
+ set mypid [pid]
+ if { $mypid < 0 } {
+ set mypid [expr - $mypid]
+ }
+ puts "PID: [pid] $mypid\n"
+ return $mypid
+}
+
+#
+# Extract the page size field from a stat record. Return -1 if
+# none is found.
+#
+proc get_pagesize { stat } {
+ foreach field $stat {
+ set title [lindex $field 0]
+ if {[string compare $title "Page size"] == 0} {
+ return [lindex $field 1]
+ }
+ }
+ return -1
+}
+
+# Get a globbed list of source files and executables to use as large
+# data items in overflow page tests.
+proc get_file_list { {small 0} } {
+ global is_windows_test
+ global is_qnx_test
+ global src_root
+
+ if { $is_qnx_test } {
+ set small 1
+ }
+ if { $small && $is_windows_test } {
+ return [glob $src_root/*/*.c */env*.obj]
+ } elseif { $small } {
+ return [glob $src_root/*/*.c ./env*.o]
+ } elseif { $is_windows_test } {
+ return \
+ [glob $src_root/*/*.c */*.obj */libdb??.dll */libdb??d.dll]
+ } else {
+ return [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?]
+ }
+}
+
+proc is_cdbenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -cdb] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_lockenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -lock] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_logenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -log] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_mpoolenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -mpool] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rpcenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -rpc] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_secenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -crypto] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_txnenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -txn] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc get_home { env } {
+ set sys [$env attributes]
+ set h [lsearch $sys -home]
+ if { $h == -1 } {
+ return NULL
+ }
+ incr h
+ return [lindex $sys $h]
+}
+
+proc reduce_dups { nent ndp } {
+ upvar $nent nentries
+ upvar $ndp ndups
+
+ # If we are using a txnenv, assume it is using
+ # the default maximum number of locks, cut back
+ # so that we don't run out of locks. Reduce
+ # by 25% until we fit.
+ #
+ while { [expr $nentries * $ndups] > 5000 } {
+ set nentries [expr ($nentries / 4) * 3]
+ set ndups [expr ($ndups / 4) * 3]
+ }
+}
+
+proc getstats { statlist field } {
+ foreach pair $statlist {
+ set txt [lindex $pair 0]
+ if { [string equal $txt $field] == 1 } {
+ return [lindex $pair 1]
+ }
+ }
+ return -1
+}
+
+proc big_endian { } {
+ global tcl_platform
+ set e $tcl_platform(byteOrder)
+ if { [string compare $e littleEndian] == 0 } {
+ return 0
+ } elseif { [string compare $e bigEndian] == 0 } {
+ return 1
+ } else {
+ error "FAIL: Unknown endianness $e"
+ }
+}
diff --git a/storage/bdb/test/txn001.tcl b/storage/bdb/test/txn001.tcl
new file mode 100644
index 00000000000..406ef35751c
--- /dev/null
+++ b/storage/bdb/test/txn001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn001.tcl,v 11.35 2002/05/10 17:44:28 sue Exp $
+#
+
+# TEST txn001
+# TEST Begin, commit, abort testing.
+proc txn001 { {tnum "01"} { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Basic begin, commit, abort"
+
+ if { $tnum != "01"} {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ # Open environment
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -mode 0644 -txn \
+ -txn_max $max -home $testdir}]
+ error_check_good evn_open [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [ $env txn_id_set $txn_curid $txn_maxid ] 0
+ txn001_suba $ntxns $env $tnum
+ txn001_subb $ntxns $env $tnum
+ txn001_subc $ntxns $env $tnum
+ # Close and unlink the file
+ error_check_good env_close:$env [$env close] 0
+}
+
+proc txn001_suba { ntxns env tnum } {
+ source ./include.tcl
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing $ntxns Transactions in $env"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+}
+
+proc txn001_subb { ntxns env tnum } {
+ # We will create a bunch of transactions and abort them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.b: Beginning/Aborting Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now abort them all
+ foreach t $txn_list {
+ error_check_good txn_abort:$t [$t abort] 0
+ }
+}
+
+proc txn001_subc { ntxns env tnum } {
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.c: Beginning/Prepare/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now prepare them all
+ foreach t $txn_list {
+ error_check_good txn_prepare:$t \
+ [$t prepare [make_gid global:$t]] 0
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+}
+
diff --git a/storage/bdb/test/txn002.tcl b/storage/bdb/test/txn002.tcl
new file mode 100644
index 00000000000..5107472644d
--- /dev/null
+++ b/storage/bdb/test/txn002.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn002.tcl,v 11.38 2002/05/10 17:44:29 sue Exp $
+#
+
+# TEST txn002
+# TEST Verify that read-only transactions do not write log records.
+proc txn002 { {tnum "02" } { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Read-only transaction test ($max) ($ntxns)"
+
+ if { $tnum != "02" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ env_cleanup $testdir
+ set env [berkdb \
+ env -create -mode 0644 -txn -txn_max $max -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid ] 0
+
+ # Save the current bytes in the log.
+ set off_start [txn002_logoff $env]
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Make sure we haven't written any new log records except
+ # potentially some recycle records if we were wrapping txnids.
+ set off_stop [txn002_logoff $env]
+ if { $off_stop != $off_start } {
+ txn002_recycle_only $testdir
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc txn002_logoff { env } {
+ set stat [$env log_stat]
+ foreach i $stat {
+ foreach {txt val} $i {break}
+ if { [string compare \
+ $txt {Current log file offset}] == 0 } {
+ return $val
+ }
+ }
+}
+
+# Make sure that the only log records found are txn_recycle records
+proc txn002_recycle_only { dir } {
+ global util_path
+
+ set tmpfile $dir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $dir > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+
+ set f [open $tmpfile r]
+ while { [gets $f record] >= 0 } {
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good record_type __txn_recycle $name
+ }
+ }
+ close $f
+ fileremove $tmpfile
+}
diff --git a/storage/bdb/test/txn003.tcl b/storage/bdb/test/txn003.tcl
new file mode 100644
index 00000000000..71e450cf9ce
--- /dev/null
+++ b/storage/bdb/test/txn003.tcl
@@ -0,0 +1,238 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn003.tcl,v 11.40 2002/09/05 17:23:08 sandstro Exp $
+#
+
+# TEST txn003
+# TEST Test abort/commit/prepare of txns with outstanding child txns.
+proc txn003 { {tnum "03"} } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Outstanding child transaction test"
+
+ if { $tnum != "03" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+ env_cleanup $testdir
+ set testfile txn003.db
+
+ set env_cmd "berkdb_env_noerr -create -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid] 0
+
+ set oflags {-auto_commit -create -btree -mode 0644 -env $env $testfile}
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Put some data so that we can check commit or abort of child
+ #
+ set key 1
+ set origdata some_data
+ set newdata this_is_new_data
+ set newdata2 some_other_new_data
+
+ error_check_good db_put [$db put -auto_commit $key $origdata] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ txn003_check $db $key "Origdata" $origdata
+
+ puts "\tTxn0$tnum.a: Parent abort"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_abort [$parent abort] 0
+ txn003_check $db $key "parent_abort" $origdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+ puts "\tTxn0$tnum.b: Parent commit"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_commit [$parent commit] 0
+ txn003_check $db $key "parent_commit" $newdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Since the data check assumes what has come before, the 'commit'
+ # operation must be last.
+ #
+ set hdr "\tTxn0$tnum"
+ set rlist {
+ {begin ".c"}
+ {prepare ".d"}
+ {abort ".e"}
+ {commit ".f"}
+ }
+ set count 0
+ foreach pair $rlist {
+ incr count
+ set op [lindex $pair 0]
+ set msg [lindex $pair 1]
+ set msg $hdr$msg
+ txn003_body $env_cmd $testfile $testdir $key $newdata2 $msg $op
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ berkdb debug_check
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ #
+ # For prepare we'll then just
+ # end up aborting after we test what we need to.
+ # So set gooddata to the same as abort.
+ switch $op {
+ abort {
+ set gooddata $newdata
+ }
+ begin {
+ set gooddata $newdata
+ }
+ commit {
+ set gooddata $newdata2
+ }
+ prepare {
+ set gooddata $newdata
+ }
+ }
+ txn003_check $db $key "parent_$op" $gooddata
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
+
+ # We can't do the attempted child discard on Windows
+ # because it will leave open files that can't be removed.
+ # Skip the remainder of the test for Windows.
+ if { $is_windows_test == 1 } {
+ puts "Skipping remainder of test for Windows"
+ return
+ }
+ puts "\tTxn0$tnum.g: Attempt child prepare"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ berkdb debug_check
+ set db [eval {berkdb_open_noerr} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ set gid [make_gid child_prepare:$child]
+ set stat [catch {$child prepare $gid} ret]
+ error_check_good child_prepare $stat 1
+ error_check_good child_prep_err [is_substr $ret "txn prepare"] 1
+
+ puts "\tTxn0$tnum.h: Attempt child discard"
+ set stat [catch {$child discard} ret]
+ error_check_good child_discard $stat 1
+
+ # We just panic'd the region, so the next operations will fail.
+ # No matter, we still have to clean up all the handles.
+
+ set stat [catch {$parent commit} ret]
+ error_check_good parent_commit $stat 1
+ error_check_good parent_commit:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 1
+ error_check_good db_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+}
+
+proc txn003_body { env_cmd testfile dir key newdata2 msg op } {
+ source ./include.tcl
+
+ berkdb debug_check
+ sentinel_init
+ set gidf $dir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl txnscript.tcl \
+ $testdir/txnout $env_cmd $testfile $gidf $key $newdata2 &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/txnout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/txnout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd "-recover"]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] 1
+ set tpair [lindex $txnlist 0]
+
+ set gfd [open $gidf r]
+ set ret [gets $gfd parentgid]
+ close $gfd
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ if { $op == "begin" } {
+ puts "$msg.2: $op new txn"
+ } else {
+ puts "$msg.2: $op parent"
+ }
+ error_check_good gidcompare $gid $parentgid
+ if { $op == "prepare" } {
+ set gid [make_gid prepare_recover:$txn]
+ set stat [catch {$txn $op $gid} ret]
+ error_check_good prep_error $stat 1
+ error_check_good prep_err \
+ [is_substr $ret "transaction already prepared"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } elseif { $op == "begin" } {
+ set stat [catch {$env txn} ret]
+ error_check_good begin_error $stat 1
+ error_check_good begin_err \
+ [is_substr $ret "not yet committed transactions is incomplete"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } else {
+ error_check_good txn:$op [$txn $op] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+proc txn003_check { db key msg gooddata } {
+ set kd [$db get $key]
+ set data [lindex [lindex $kd 0] 1]
+ error_check_good $msg $data $gooddata
+}
diff --git a/storage/bdb/test/txn004.tcl b/storage/bdb/test/txn004.tcl
new file mode 100644
index 00000000000..75e1b40043f
--- /dev/null
+++ b/storage/bdb/test/txn004.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn004.tcl,v 11.39 2002/05/15 17:14:06 sandstro Exp $
+#
+
+# TEST txn004
+# TEST Test of wraparound txnids (txn001)
+proc txn004 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn004.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn001 "04.1"
+ puts "\tTxn004.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn001 "04.2"
+
+ puts "\tTxn004.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
+proc txn_idwrap_check { testdir } {
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ # txn3 will require a wraparound txnid
+ # XXX How can we test it has a wrapped id?
+ set txn3 [$e txn]
+ error_check_good wrap_txn3 [is_valid_txn $txn3 $e] TRUE
+
+ error_check_good free_txn1 [$txn1 commit] 0
+ error_check_good free_txn2 [$txn2 commit] 0
+ error_check_good free_txn3 [$txn3 commit] 0
+
+ error_check_good close [$e close] 0
+}
+
diff --git a/storage/bdb/test/txn005.tcl b/storage/bdb/test/txn005.tcl
new file mode 100644
index 00000000000..604f3ad7de4
--- /dev/null
+++ b/storage/bdb/test/txn005.tcl
@@ -0,0 +1,75 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn005.tcl,v 11.35 2002/08/08 15:38:14 bostic Exp $
+#
+
+# TEST txn005
+# TEST Test transaction ID wraparound and recovery.
+proc txn005 {} {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+ puts "Txn005: Test transaction wraparound recovery"
+
+ # Open/create the txn region
+ puts "\tTxn005.a: Create environment"
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn1 -create -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good txn1_commit [$txn1 commit] 0
+
+ puts "\tTxn005.b: Set txn ids"
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ # txn2 and txn3 will require a wraparound txnid
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ error_check_good put [$db put -txn $txn2 "a" ""] 0
+ error_check_good txn2_commit [$txn2 commit] 0
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+
+ error_check_good close [$db close] 0
+
+ set txn3 [$e txn]
+ error_check_good txn3 [is_valid_txn $txn3 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn3 -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good put2 [$db put -txn $txn3 "b" ""] 0
+ error_check_good sync [$db sync] 0
+ error_check_good txn3_abort [$txn3 abort] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+
+ puts "\tTxn005.c: Run recovery"
+ set stat [catch {exec $util_path/db_recover -h $testdir -e -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tTxn005.d: Check data"
+ set e [berkdb_env -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set db [berkdb_open -env $e -auto_commit -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+ error_check_bad get_b [$db get "b"] "{b {}}"
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+}
diff --git a/storage/bdb/test/txn006.tcl b/storage/bdb/test/txn006.tcl
new file mode 100644
index 00000000000..7bf37d34dfc
--- /dev/null
+++ b/storage/bdb/test/txn006.tcl
@@ -0,0 +1,47 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn006.tcl,v 1.5 2002/08/01 19:59:19 sue Exp $
+#
+#
+#TEST txn006
+#TEST Test dump/load in transactional environment.
+proc txn006 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn006.db
+
+ puts "Txn006: Test dump/load in transaction environment"
+ env_cleanup $testdir
+
+ puts "\tTxn006.a: Create environment and database"
+ # Open/create the txn region
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb_open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+
+ puts "\tTxn006.b: Put data"
+ # Put some data
+ for { set i 1 } { $i < $iter } { incr i } {
+ error_check_good put [$db put -txn $txn key$i data$i] 0
+ }
+
+ # End transaction, close db
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+
+ puts "\tTxn006.c: dump/load"
+ # Dump and load
+ exec $util_path/db_dump -p -h $testdir $testfile | \
+ $util_path/db_load -h $testdir $testfile
+}
diff --git a/storage/bdb/test/txn007.tcl b/storage/bdb/test/txn007.tcl
new file mode 100644
index 00000000000..f67dc209f92
--- /dev/null
+++ b/storage/bdb/test/txn007.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn007.tcl,v 11.3 2002/08/08 15:38:14 bostic Exp $
+#
+#TEST txn007
+#TEST Test of DB_TXN_WRITE_NOSYNC
+proc txn007 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn007.db
+
+ puts "Txn007: DB_TXN_WRITE_NOSYNC"
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ puts "\tTxn007.a: Create env and database with -wrnosync"
+ set e [berkdb_env -create -home $testdir -txn -wrnosync]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Put some data
+ puts "\tTxn007.b: Put $iter data items in individual transactions"
+ for { set i 1 } { $i < $iter } { incr i } {
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+ $db put -txn $txn key$i data$i
+ error_check_good txn_commit [$txn commit] 0
+ }
+ set stat [$e log_stat]
+ puts "\tTxn007.c: Check log stats"
+ foreach i $stat {
+ set txt [lindex $i 0]
+ if { [string equal $txt {Times log written}] == 1 } {
+ set wrval [lindex $i 1]
+ }
+ if { [string equal $txt {Times log flushed}] == 1 } {
+ set syncval [lindex $i 1]
+ }
+ }
+ error_check_good wrval [expr $wrval >= $iter] 1
+ #
+ # We should have written at least 'iter' number of times,
+ # but not synced on any of those.
+ #
+ set val [expr $wrval - $iter]
+ error_check_good syncval [expr $syncval <= $val] 1
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
diff --git a/storage/bdb/test/txn008.tcl b/storage/bdb/test/txn008.tcl
new file mode 100644
index 00000000000..ad57ea0eeaa
--- /dev/null
+++ b/storage/bdb/test/txn008.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn008.tcl,v 11.3 2002/05/10 17:55:54 sue Exp $
+#
+
+# TEST txn008
+# TEST Test of wraparound txnids (txn002)
+proc txn008 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn008.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn002 "08.1"
+ puts "\tTxn008.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn002 "08.2"
+
+ puts "\tTxn008.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/storage/bdb/test/txn009.tcl b/storage/bdb/test/txn009.tcl
new file mode 100644
index 00000000000..784c0068a41
--- /dev/null
+++ b/storage/bdb/test/txn009.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn009.tcl,v 11.3 2002/05/10 17:55:55 sue Exp $
+#
+
+# TEST txn009
+# TEST Test of wraparound txnids (txn003)
+proc txn009 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn009.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn003 "09.1"
+ puts "\tTxn009.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn003 "09.2"
+
+ puts "\tTxn009.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/storage/bdb/test/txnscript.tcl b/storage/bdb/test/txnscript.tcl
new file mode 100644
index 00000000000..1a4a1b6f2ec
--- /dev/null
+++ b/storage/bdb/test/txnscript.tcl
@@ -0,0 +1,67 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txnscript.tcl,v 11.3 2002/01/23 15:33:40 bostic Exp $
+#
+# Txn003 script - outstanding child prepare script
+# Usage: txnscript envcmd dbcmd gidf key data
+# envcmd: command to open env
+# dbfile: name of database file
+# gidf: name of global id file
+# key: key to use
+# data: new data to use
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "txnscript envcmd dbfile gidfile key data"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set key [ lindex $argv 3 ]
+set data [ lindex $argv 4 ]
+
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 1
+set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+puts "\tTxnscript.a: begin parent and child txn"
+set parent [$dbenv txn]
+error_check_good parent [is_valid_txn $parent $dbenv] TRUE
+set child [$dbenv txn -parent $parent]
+error_check_good parent [is_valid_txn $child $dbenv] TRUE
+
+puts "\tTxnscript.b: Modify data"
+error_check_good db_put [$db put -txn $child $key $data] 0
+
+set gfd [open $gidfile w+]
+set gid [make_gid txnscript:$parent]
+puts $gfd $gid
+puts "\tTxnscript.c: Prepare parent only"
+error_check_good txn_prepare:$parent [$parent prepare $gid] 0
+close $gfd
+
+puts "\tTxnscript.d: Check child handle"
+set stat [catch {$child abort} ret]
+error_check_good child_handle $stat 1
+error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tTxnscript completed successfully"
+flush stdout
diff --git a/storage/bdb/test/update.tcl b/storage/bdb/test/update.tcl
new file mode 100644
index 00000000000..2bedfacc793
--- /dev/null
+++ b/storage/bdb/test/update.tcl
@@ -0,0 +1,93 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: update.tcl,v 11.11 2002/01/11 15:53:58 bostic Exp $
+
+source ./include.tcl
+global update_dir
+set update_dir "$test_path/update_test"
+
+proc update { } {
+ source ./include.tcl
+ global update_dir
+
+ foreach version [glob $update_dir/*] {
+ regexp \[^\/\]*$ $version version
+ foreach method [glob $update_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ foreach file [glob $update_dir/$version/$method/*] {
+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
+ foreach endianness {"le" "be"} {
+ puts "Update:\
+ $version $method $name $endianness"
+ set ret [catch {_update $update_dir $testdir $version $method $name $endianness 1 1} message]
+ if { $ret != 0 } {
+ puts $message
+ }
+ }
+ }
+ }
+ }
+}
+
+proc _update { source_dir temp_dir \
+ version method file endianness do_db_load_test do_update_test } {
+ source include.tcl
+ global errorInfo
+
+ cleanup $temp_dir NULL
+
+ exec sh -c \
+"gzcat $source_dir/$version/$method/$file.tar.gz | (cd $temp_dir && tar xf -)"
+
+ if { $do_db_load_test } {
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update load: $version $method $file $message" $ret 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f "$temp_dir/update.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update dump: $version $method $file $message" $ret 0
+
+ error_check_good "Update diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.1.2: $version $method $file" $ret ""
+ }
+
+ if { $do_update_test } {
+ set ret [catch \
+ {berkdb open -update "$temp_dir/$file-$endianness.db"} db]
+ if { $ret == 1 } {
+ if { ![is_substr $errorInfo "version upgrade"] } {
+ set fnl [string first "\n" $errorInfo]
+ set theError \
+ [string range $errorInfo 0 [expr $fnl - 1]]
+ error $theError
+ }
+ } else {
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f \
+ "$temp_dir/update.dump" \
+ "$temp_dir/$file-$endianness.db"} message]
+ error_check_good "Update\
+ dump: $version $method $file $message" $ret 0
+
+ error_check_good \
+ "Update diff.2: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" \
+ "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.2: $version $method $file" $ret ""
+ }
+ }
+}
diff --git a/storage/bdb/test/upgrade.tcl b/storage/bdb/test/upgrade.tcl
new file mode 100644
index 00000000000..1c0ffc5461a
--- /dev/null
+++ b/storage/bdb/test/upgrade.tcl
@@ -0,0 +1,294 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: upgrade.tcl,v 11.22 2002/07/28 03:22:41 krinsky Exp $
+
+source ./include.tcl
+
+global upgrade_dir
+# set upgrade_dir "$test_path/upgrade_test"
+set upgrade_dir "$test_path/upgrade/databases"
+
+global gen_upgrade
+set gen_upgrade 0
+
+global upgrade_dir
+global upgrade_be
+global upgrade_method
+global upgrade_name
+
+proc upgrade { { archived_test_loc "DEFAULT" } } {
+ source ./include.tcl
+ global upgrade_dir
+
+ set saved_upgrade_dir $upgrade_dir
+
+ puts -nonewline "Upgrade test: "
+ if { $archived_test_loc == "DEFAULT" } {
+ puts "using default archived databases in $upgrade_dir."
+ } else {
+ set upgrade_dir $archived_test_loc
+ puts "using archived databases in $upgrade_dir."
+ }
+
+ foreach version [glob $upgrade_dir/*] {
+ if { [string first CVS $version] != -1 } { continue }
+ regexp \[^\/\]*$ $version version
+ foreach method [glob $upgrade_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ foreach file [glob $upgrade_dir/$version/$method/*] {
+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
+
+ cleanup $testdir NULL 1
+ #puts "$upgrade_dir/$version/$method/$name.tar.gz"
+ set curdir [pwd]
+ cd $testdir
+ set tarfd [open "|tar xf -" w]
+ cd $curdir
+
+ catch {exec gunzip -c "$upgrade_dir/$version/$method/$name.tar.gz" >@$tarfd}
+ close $tarfd
+
+ set f [open $testdir/$name.tcldump {RDWR CREAT}]
+ close $f
+
+ # It may seem suboptimal to exec a separate
+ # tclsh for each subtest, but this is
+ # necessary to keep the testing process
+ # from consuming a tremendous amount of
+ # memory.
+ if { [file exists $testdir/$name-le.db] } {
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _upgrade_test $testdir $version\
+ $method\
+ $name le"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+ }
+
+ if { [file exists $testdir/$name-be.db] } {
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _upgrade_test $testdir $version\
+ $method\
+ $name be"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+ }
+
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _db_load_test $testdir $version $method\
+ $name"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+
+ }
+ }
+ }
+ set upgrade_dir $saved_upgrade_dir
+
+ # Don't provide a return value.
+ return
+}
+
+proc _upgrade_test { temp_dir version method file endianness } {
+ source include.tcl
+ global errorInfo
+
+ puts "Upgrade: $version $method $file $endianness"
+
+ set ret [berkdb upgrade "$temp_dir/$file-$endianness.db"]
+ error_check_good dbupgrade $ret 0
+
+ error_check_good dbupgrade_verify [verify_dir $temp_dir "" 0 0 1] 0
+
+ upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.$endianness: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc _db_load_test { temp_dir version method file } {
+ source include.tcl
+ global errorInfo
+
+ puts "db_load: $version $method $file"
+
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/upgrade.db"} message]
+ error_check_good \
+ "Upgrade load: $version $method $file $message" $ret 0
+
+ upgrade_dump "$temp_dir/upgrade.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc gen_upgrade { dir } {
+ global gen_upgrade
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global upgrade_name
+ global num_test
+ global parms
+ source ./include.tcl
+
+ set gen_upgrade 1
+ set upgrade_dir $dir
+
+ foreach i "btree rbtree hash recno rrecno frecno queue queueext" {
+ puts "Running $i tests"
+ set upgrade_method $i
+ set start 1
+ for { set j $start } { $j <= $num_test(test) } { incr j } {
+ set upgrade_name [format "test%03d" $j]
+ if { [info exists parms($upgrade_name)] != 1 } {
+ continue
+ }
+
+ foreach upgrade_be { 0 1 } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl;\
+ global gen_upgrade upgrade_be;\
+ global upgrade_method upgrade_name;\
+ set gen_upgrade 1;\
+ set upgrade_be $upgrade_be;\
+ set upgrade_method $upgrade_method;\
+ set upgrade_name $upgrade_name;\
+ run_method -$i $j $j"} res] {
+ puts "FAIL: $upgrade_name $i"
+ }
+ puts $res
+ cleanup $testdir NULL 1
+ }
+ }
+ }
+ set gen_upgrade 0
+}
+
+proc upgrade_dump { database file {stripnulls 0} } {
+ global errorInfo
+
+ set db [berkdb open $database]
+ set dbc [$db cursor]
+
+ set f [open $file w+]
+ fconfigure $f -encoding binary -translation binary
+
+ #
+ # Get a sorted list of keys
+ #
+ set key_list ""
+ set pair [$dbc get -first]
+
+ while { 1 } {
+ if { [llength $pair] == 0 } {
+ break
+ }
+ set k [lindex [lindex $pair 0] 0]
+ lappend key_list $k
+ set pair [$dbc get -next]
+ }
+
+ # Discard duplicated keys; we now have a key for each
+ # duplicate, not each unique key, and we don't want to get each
+ # duplicate multiple times when we iterate over key_list.
+ set uniq_keys ""
+ foreach key $key_list {
+ if { [info exists existence_list($key)] == 0 } {
+ lappend uniq_keys $key
+ }
+ set existence_list($key) 1
+ }
+ set key_list $uniq_keys
+
+ set key_list [lsort -command _comp $key_list]
+
+ #
+ # Get the data for each key
+ #
+ set i 0
+ foreach key $key_list {
+ set pair [$dbc get -set $key]
+ if { $stripnulls != 0 } {
+ # the Tcl interface to db versions before 3.X
+ # added nulls at the end of all keys and data, so
+ # we provide functionality to strip that out.
+ set key [strip_null $key]
+ }
+ set data_list {}
+ catch { while { [llength $pair] != 0 } {
+ set data [lindex [lindex $pair 0] 1]
+ if { $stripnulls != 0 } {
+ set data [strip_null $data]
+ }
+ lappend data_list [list $data]
+ set pair [$dbc get -nextdup]
+ } }
+ #lsort -command _comp data_list
+ set data_list [lsort -command _comp $data_list]
+ puts -nonewline $f [binary format i [string length $key]]
+ puts -nonewline $f $key
+ puts -nonewline $f [binary format i [llength $data_list]]
+ for { set j 0 } { $j < [llength $data_list] } { incr j } {
+ puts -nonewline $f [binary format i [string length [concat [lindex $data_list $j]]]]
+ puts -nonewline $f [concat [lindex $data_list $j]]
+ }
+ if { [llength $data_list] == 0 } {
+ puts "WARNING: zero-length data list"
+ }
+ incr i
+ }
+
+ close $f
+ error_check_good upgrade_dump_c_close [$dbc close] 0
+ error_check_good upgrade_dump_db_close [$db close] 0
+}
+
+proc _comp { a b } {
+ if { 0 } {
+ # XXX
+ set a [strip_null [concat $a]]
+ set b [strip_null [concat $b]]
+ #return [expr [concat $a] < [concat $b]]
+ } else {
+ set an [string first "\0" $a]
+ set bn [string first "\0" $b]
+
+ if { $an != -1 } {
+ set a [string range $a 0 [expr $an - 1]]
+ }
+ if { $bn != -1 } {
+ set b [string range $b 0 [expr $bn - 1]]
+ }
+ }
+ #puts "$a $b"
+ return [string compare $a $b]
+}
+
+proc strip_null { str } {
+ set len [string length $str]
+ set last [expr $len - 1]
+
+ set termchar [string range $str $last $last]
+ if { [string compare $termchar \0] == 0 } {
+ set ret [string range $str 0 [expr $last - 1]]
+ } else {
+ set ret $str
+ }
+
+ return $ret
+}
diff --git a/storage/bdb/test/wordlist b/storage/bdb/test/wordlist
new file mode 100644
index 00000000000..03ea15f7277
--- /dev/null
+++ b/storage/bdb/test/wordlist
@@ -0,0 +1,10001 @@
+cooperate
+benighted
+apologist's
+addresser
+cataract
+colonially
+atoned
+avow
+bathroom
+anaesthesia
+columnated
+bogs
+astral
+barbed
+captives
+acclaims
+adjutants
+affidavits
+baptisms
+bubbling
+classic
+allaying
+component
+battlement
+backtrack
+
+courage
+bore
+advertisement
+attests
+bunny's
+airlifts
+cajole
+cataloging
+airily
+collected
+abridged
+compel
+aftermath
+barrow
+approve
+chillier
+bequest
+attendant
+abjures
+adjudication
+banished
+asymptotes
+borrower
+caustic
+claim
+cohabitation
+corporacies
+buoy
+benchmark's
+averting
+anecdote's
+caress
+annihilate
+cajoles
+anywhere
+apparitions
+coves
+bribed
+casually
+clue's
+asserted
+architects
+abstained
+attitude
+accumulating
+coalesced
+angelic
+agnostic
+breathed
+bother
+congregating
+amatory
+caging
+countryside
+chapel
+buttonhole
+bartenders
+bridging
+bombardment
+accurately
+confirmed
+alleviated
+acquiring
+bruise
+antelope
+albums
+allusive
+corker
+cavity's
+compliment
+climb
+caterpillar
+almond
+authenticated
+balkan
+assembly's
+acidity
+abases
+bonny
+been
+abbots
+abductor's
+aerials
+cancels
+chalked
+beeps
+affirms
+contrariness
+clearest
+appropriations
+critiquing
+affluence
+bouts
+abiding
+comprises
+brunches
+biology
+conceptualization's
+assaying
+abutter
+adorable
+beatable
+appenders
+aggressors
+agrarian
+bottleneck
+angled
+beholds
+bereaved
+creation
+animated
+candied
+bar
+aeronautics
+cousin's
+cleaver
+alienation
+billet
+bungler
+contention
+businessman
+braids
+assert
+boisterous
+consolidate
+breathing
+ballot
+averted
+conscientiously
+bellow
+brazenness
+coaches
+bulldog
+classify
+checksum
+almond's
+cornered
+caskets
+capacitors
+beefer
+connoisseurs
+consisted
+adore
+circumvented
+colonels
+addenda
+boost
+compatibility's
+bumblebee
+commonest
+containment
+active
+absorption's
+creaks
+administer
+beset
+aborted
+aforesaid
+aridity
+broken
+azimuths
+aerial
+addition's
+aggrieve
+anthology
+circuitous
+checks
+alley's
+beam
+boss
+corrupting
+absolutes
+asteroid's
+bandstands
+beatitude's
+analogue's
+busts
+confession
+bedstead
+affairs
+blackmailers
+collared
+buckboard
+assassin
+accessor
+adjudging
+binders
+constituent's
+blister
+aromas
+approved
+absorbent
+barbarously
+cat's
+builder
+brandish
+assailing
+constitute
+christening
+acutely
+amount
+blurry
+blocks
+advertise
+chain
+brigade's
+confusion
+beds
+arrangers
+colonizers
+beautifying
+bankruptcy
+bedazzles
+candidates
+clearness
+admonishment's
+behind
+abbreviations
+basting
+ballasts
+amateurism
+celled
+constituted
+bonfire
+bugled
+advisee's
+battled
+budded
+burners
+causeway's
+calibrate
+brambly
+befuddles
+azure
+busiest
+admiringly
+appropriator
+accumulator
+cables
+abhor
+civil
+botulinus
+creaked
+bismuth
+astronomical
+abscissas
+bodice
+aunt
+cascades
+cares
+comradeship
+assemblages
+boater
+bellmen
+admission's
+ambitious
+baldness
+abortive
+controlled
+chinked
+coded
+courtrooms
+arteriolar
+cooler's
+cared
+brewer
+christians
+barbecues
+contacts
+blackjack's
+buzzing
+blasters
+accords
+braziers
+allegretto
+catered
+breveting
+cleaning
+amicably
+bummed
+consulted
+allegro's
+accumulator's
+compartmented
+condemned
+concludes
+bitwise
+cheered
+appropriator's
+accessors
+casting
+carolina's
+accompanying
+budding
+correspond
+bach's
+angel's
+bearing
+arresters
+biweekly
+character
+badgering
+cantankerous
+avalanching
+adjudges
+barometer
+append
+continuations
+burped
+boxtop's
+abstention
+amp
+axiomatized
+bimonthlies
+aghast
+arresting
+breakwater's
+continuing
+bridle
+bobbin's
+antagonistically
+blindly
+biochemical
+biologically
+antifundamentalist
+confer
+cloudiness
+bonded
+comfortingly
+caption
+blackmailed
+bidders
+breakpoint
+brigadier
+criminals
+coyotes
+casserole's
+annex
+cereals
+breadboxes
+belgian
+conductivity
+counterexample
+anarchist
+couches
+atavistic
+clipped
+button
+axiomatic
+capping
+correcting
+chase
+chastise
+angle
+burnished
+beauteously
+antipodes
+crippling
+crowns
+amends
+bah
+brigadiers
+alleged
+correctives
+bristles
+buzzards
+barbs
+bagel
+adaptation
+caliber
+browner
+apprehensions
+bonnet
+anachronistically
+composites
+bothered
+assurer
+arc
+chaser
+bastards
+calmed
+bunches
+apocalypse
+countably
+crowned
+contrivance
+boomerang's
+airplane's
+boarded
+consumption
+attuning
+blamed
+cooing
+annihilation
+abused
+absence
+coin
+coronaries
+applicatively
+binomial
+ablates
+banishes
+boating
+companions
+bilking
+captivate
+comment
+claimants
+admonish
+ameliorated
+bankruptcies
+author
+cheat
+chocolates
+botch
+averring
+beneath
+crudely
+creeping
+acolytes
+ass's
+cheese's
+checksum's
+chillers
+bracelet
+archenemy
+assistantship
+baroque
+butterfly
+coolie's
+anecdote
+coring
+cleansing
+accreditation
+ceaselessly
+attitudes
+bag
+belong
+assented
+aped
+constrains
+balalaikas
+consent
+carpeting
+conspiracy
+allude
+contradictory
+adverb's
+constitutive
+arterial
+admirable
+begot
+affectation
+antiquate
+attribution
+competition's
+bovine
+commodores
+alerters
+abatements
+corks
+battlements
+cave
+buoys
+credible
+bowdlerizes
+connector
+amorphously
+boredom
+bashing
+creams
+arthropods
+amalgamated
+ballets
+chafe
+autograph
+age
+aid
+colleague's
+atrocious
+carbonizing
+chutes
+barbecued
+circuits
+bandages
+corporations
+beehive
+bandwagon
+accommodated
+councillor's
+belted
+airdrop
+confrontations
+chieftain's
+canonicalization
+amyl
+abjectness
+choke
+consider
+adjuster
+crossover's
+agreeing
+consolations
+capitalizers
+binges
+annihilating
+callers
+coordinate
+banshees
+biscuits
+absorbency
+corollary
+corresponded
+aristocrat's
+banally
+cruiser
+bathtub's
+abbreviated
+balkiness
+crew
+acidulous
+air
+birdies
+canvassing
+concretion
+blackjacks
+controller's
+aquarius
+charm
+clip
+awarder
+consistently
+calibrated
+bushwhacking
+avaricious
+ceaselessness
+basically
+accolades
+adduction
+commending
+consulates
+certifiable
+admire
+bankers
+appropriateness
+bandlimits
+chill
+adds
+constable
+chirping
+cologne
+cowardice
+baklava
+amusedly
+blackberry
+crises
+bedeviling
+botching
+backbend
+attaining
+continuity
+artistry
+beginner
+cleaner's
+adores
+commemorating
+amusement
+burial
+bungalow's
+abstinence
+contractually
+advancement's
+conjecture
+buckling
+conferrer
+cherub's
+belonged
+classifications
+baseball
+carbonation
+craved
+bans
+aphid
+arbor
+ague
+acropolis
+applied
+aspired
+calibrating
+abundance
+appeased
+chanted
+ascent
+convenes
+beep
+bottles
+aborigines
+clips
+acquainting
+aiming
+creditor's
+abolitionists
+cloves
+containments
+bungling
+bunt
+anchors
+brazed
+communicator's
+brew
+accumulate
+addicting
+actively
+befog
+anachronisms
+bumblers
+closest
+calculators
+absurdity
+colleagues
+college
+assesses
+conflicted
+associational
+betide
+conceptualization
+adjutant
+alliances
+corresponding
+barometers
+cot
+brooch's
+coiled
+arboreal
+convicted
+artless
+certificates
+bourbon
+astonish
+bust
+correlate
+amounts
+anal
+abstraction's
+corns
+conqueror's
+boldly
+bob's
+beer
+blanks
+corpses
+contingent
+blackly
+backed
+appearances
+cancers
+actuating
+apprehension's
+colorings
+anglicanism
+armament
+armer
+bizarre
+begotten
+actions
+archly
+capriciously
+clue
+contractor
+contributions
+agendas
+coached
+blamable
+annoyers
+coupons
+brooked
+assortment
+axes
+celebrates
+courageously
+baroqueness
+blasphemous
+asserter
+contents
+correctly
+challenged
+bulldoze
+casement
+acknowledge
+bitterness
+belongs
+allotments
+chalice's
+bequest's
+adjacent
+consumer's
+conservatively
+coalition
+background's
+backache
+befouls
+brushfire's
+analysts
+branch
+airways
+awaiting
+breakfast
+anoints
+baying
+contrary
+bilge
+chasm's
+babes
+afresh
+centerpiece's
+barked
+coffin
+assumed
+actresses
+accentuating
+aching
+abet
+balancers
+consumptively
+cagers
+backing
+angiography
+chord's
+cheapened
+bewailed
+arson
+begged
+convergent
+bowlers
+conflicting
+confiscated
+bitch
+bloody
+brushfires
+bleach
+computation's
+choppers
+circuitously
+chancing
+bunker
+concept's
+alacrity
+boyhood
+ammo
+bobwhites
+carter
+ardent
+bier
+airway's
+brownies
+aura
+cannibalizing
+confirms
+australian
+barrage
+closures
+assertive
+abstainer
+bicarbonate
+clone
+back
+cipher
+crown
+cannibalizes
+away
+crafty
+airings
+amtrak
+comical
+burnish
+continuum
+apparition
+apologizing
+blot
+blacker
+characters
+built
+apparent
+applicative
+assiduous
+attorneys
+affectionately
+bobbing
+baggy
+comic's
+attempt
+appealers
+amortize
+bonanza
+backwards
+bowers
+anemometer
+ambulance's
+creeps
+abduction's
+coal
+chiller
+adjudications
+clogging
+ascending
+bookkeeper
+crawlers
+battery's
+artifacts
+attributions
+amusements
+aftermost
+allophones
+bemoaned
+comptroller
+bugger's
+buoyancy
+booboo
+award
+amplifying
+certify
+bivariate
+attunes
+asteroidal
+chant
+collectively
+chasteness
+chapels
+copiousness
+benign
+armies
+competing
+buss
+awakened
+breakpoint's
+conceptualizing
+cleansers
+acorns
+conveyance's
+bluer
+battle
+budges
+characteristically
+be
+contour
+beguiling
+awarding
+armhole
+airship's
+bathtub
+breathable
+crowded
+compiles
+certain
+brutalizing
+bacteria
+baronies
+abode
+blacksmith
+brinkmanship
+capitalizations
+cousin
+botany
+avionic
+companion
+consists
+connoisseur's
+avalanched
+claimant's
+backstitches
+affixes
+bikes
+atomically
+cowed
+asleep
+becomingly
+acorn's
+complainers
+appreciated
+cross
+cringed
+booting
+attitudinal
+broadcasting
+childishly
+breeze's
+craven
+boll
+clause's
+burden
+appendages
+atemporal
+allah
+carnival's
+anchorage
+adjures
+besought
+abounding
+crucifying
+arrangements
+antiquarians
+burrows
+antipode
+canvas
+constable's
+coopers
+ascended
+companionship
+bakery's
+bayonets
+conclusively
+boasters
+beneficiaries
+conspicuous
+contriver
+architecture
+breakthroughs
+brownie's
+blur
+academics
+antagonist
+contemplates
+arena
+caravan's
+administers
+comprehensively
+convey
+bigot
+blitz
+bibliography's
+coerced
+assail
+amazons
+banned
+alabaster
+concluding
+bouquet
+barks
+acquaintances
+astonishment
+constraint
+backpack's
+breakthroughes
+blocking
+accomplishers
+catastrophe
+bushels
+algae
+ailment's
+anemometers
+beginning's
+chefs
+converse
+cornerstone
+astound
+assuring
+adornment
+anyone
+alumni
+club
+bestselling
+businessmen
+constructed
+attendee's
+cooped
+ablute
+chronicler
+alaska
+clam
+canonicals
+concerned
+aligned
+creek
+burrow
+allay
+admirals
+blackens
+compressing
+confirm
+cows
+battleship's
+belched
+affixing
+chalices
+choirs
+absentee's
+baseboard's
+apportionment
+adheres
+accounts
+chef
+access
+clearings
+accompanists
+concentrating
+ado
+bathos
+bailiff
+continuance
+ball
+bearer
+congress
+cites
+can't
+balloon
+crams
+consults
+bungled
+bike's
+apes
+assassinations
+colt's
+consecrate
+ancients
+chick
+analyst
+adsorbing
+burntly
+accompanist's
+apprehensive
+bengal
+boughs
+ankles
+anchored
+benefits
+accommodation
+amiss
+brink
+chewers
+blueberry's
+chairs
+adjoin
+bivalve
+autobiography's
+automated
+comparisons
+climbed
+artists
+congruent
+cold
+atonement
+cashier
+armageddon
+allocations
+bereavements
+bumblebees
+blew
+busboys
+bottoming
+alternations
+apprenticed
+bestial
+cinder's
+consumption's
+abbey's
+amended
+continued
+birefringent
+barbados
+ability's
+compulsory
+antler
+centerpieces
+accountant's
+arrogant
+ballads
+ascenders
+appliers
+adjustment's
+blabbed
+baits
+activity's
+clod's
+adjudicating
+bleak
+commutes
+bumming
+beating
+cohesiveness
+branded
+acknowledger
+communications
+blockhouses
+booklets
+consenters
+creek's
+consulting
+binary
+coaster
+ascription
+bushwhack
+boggles
+affidavit's
+arrangement's
+congressionally
+convenient
+avoider
+abaft
+bootlegger's
+befriending
+ceases
+carbonizes
+clumps
+commented
+competence
+conversing
+butting
+astonishing
+armful
+allegory's
+crisis
+critiques
+concurred
+conservative
+aristotelian
+blizzard's
+corner
+amateur's
+compare
+affiliations
+bestseller
+batch
+cleanly
+assayed
+bravos
+bowls
+conceptualized
+babe's
+algorithm's
+baptist
+cheeks
+conquerer
+bidder's
+behaving
+briefcase's
+analogues
+amply
+attitude's
+apple
+crossable
+ambushed
+besmirches
+creditors
+bandwagons
+continentally
+adjuncts
+concerns
+agers
+cop
+amoebas
+bisected
+bombing
+appendices
+cocking
+bused
+babied
+compounds
+asserts
+believably
+alert
+apostate
+catalysts
+aureomycin
+convex
+beetle's
+banishing
+agitating
+bystanders
+bow
+connotes
+blanch
+charmingly
+animal's
+baritones
+brier
+astronomer
+company's
+balding
+actually
+aunt's
+avalanches
+acquisition
+base
+compilations
+bathtubs
+actualization
+chanced
+atom
+banged
+befuddled
+apologized
+componentwise
+britisher
+began
+conservationist
+actuate
+crosser
+appended
+bitten
+ambivalence
+acetate
+conversions
+buzzwords
+askance
+abolishing
+birdied
+creeds
+anglers
+colossal
+bereft
+chock
+apprentice
+cooper
+besmirching
+allocating
+antiques
+bikini's
+bonders
+afflictive
+augmentation
+atheist
+bucket
+bibliophile
+annexes
+beguiles
+birdbaths
+amendments
+animators
+asymptotically
+communally
+barber
+biographers
+arguable
+confidant
+apologies
+adorns
+contacting
+coarsest
+artichokes
+arraign
+absorbing
+alden
+commercially
+cabbage's
+coincides
+clumping
+cents
+alleviater
+buzzard
+braked
+anesthetized
+bugling
+capitalist
+befriended
+appreciatively
+boomtown's
+cozier
+critic's
+correspondent
+bard
+attenuator
+bake
+brings
+chews
+anechoic
+brutal
+colder
+buckshot
+canvassers
+analytic
+allies
+alloys
+awake
+alienates
+bin's
+crimes
+constructible
+classifiers
+bulb
+cream
+banquet
+axiomatize
+adjourn
+converted
+auditioned
+comfortably
+bandwidth
+cannibalize
+ascensions
+bussing
+balloons
+contenders
+commemoration
+aspersions
+consultation
+cashes
+belting
+augurs
+architectural
+bluebird's
+breastworks
+absconded
+bullets
+bloodstain's
+blunder
+astronautics
+coo
+approves
+authority
+assure
+amsterdam
+acquitted
+adversity
+celebrate
+bred
+bridged
+bloc's
+bullied
+affinity
+breezes
+baptistry's
+constitutions
+avouch
+amazingly
+consolation
+abnormality
+clashes
+buttes
+buzzard's
+breathers
+chipmunk
+contented
+carol's
+armers
+amazedly
+comprehends
+canonicalize
+breakthrough
+arbitrator
+butterfat
+cases
+besiegers
+affianced
+amelia
+bush
+airplane
+annulled
+bike
+alternated
+attackers
+convene
+aficionado
+anachronism's
+crude
+carelessness
+akin
+combated
+assisting
+clocker
+attacked
+briefed
+antic's
+attendants
+attracting
+cope
+allotting
+bandwidths
+add
+assaulting
+breakage
+climes
+arrival's
+burp
+accelerator
+capacitance
+arabians
+bankruptcy's
+archeological
+coins
+browbeating
+chasm
+cardinalities
+compartmentalize
+courter
+assess
+abreaction
+brakes
+compatibly
+compression
+characterizable
+briefing's
+alto's
+classifiable
+contrast
+correlation
+colonial
+applying
+authorizers
+contesters
+basely
+cherries
+clicking
+cornfield's
+alarmingly
+conferences
+business's
+banker
+bloomed
+airfield
+attracts
+building
+commutative
+atomization
+competitions
+boatsmen
+acquirable
+arkansas
+command
+beings
+compactors
+anodize
+arguments
+conforming
+adsorption
+accustomed
+blends
+bowstring's
+blackout
+appender
+buggy
+bricklaying
+chart
+calmer
+cage
+attractive
+causation's
+athenian
+advise
+cranks
+containers
+besotter
+beret
+attender
+cone
+bills
+aligns
+brushlike
+brownest
+bosom's
+berth
+accountably
+bequeathed
+affirmatively
+boundless
+alleyways
+commute
+bendable
+abhors
+calculation
+affidavit
+answerable
+bellicose
+counterfeiting
+admiral's
+chisel
+bridesmaids
+believers
+aggregated
+conspicuously
+abased
+armenian
+conspirator
+canonical
+assignable
+barrage's
+clearance's
+casts
+administratively
+befoul
+chaffer
+amazer
+colorer
+broaching
+crevice
+aniline
+coursing
+compassionate
+adhesive
+bibliographies
+corrects
+augments
+between
+causer
+amorist
+cellist's
+acoustical
+baseless
+cigarettes
+astuteness
+appropriators
+convincing
+bellhop's
+bemoaning
+calmingly
+chronologically
+castles
+algebraically
+appointees
+academic
+blunderings
+assassins
+barrel
+accuracy
+amortized
+ballpark
+acrobat's
+brazier's
+abortively
+coarser
+airfields
+contester
+circus's
+creased
+amorphous
+accomplisher
+blabs
+butchers
+crackles
+bachelor
+aviators
+chariot's
+circumflex
+binocular
+alienating
+artificially
+agreement's
+aglow
+afghan
+abrupt
+annihilates
+apologetic
+barge
+betters
+algorithms
+conjurer
+chargeable
+brindle
+alphabetizes
+coder
+availing
+bandpass
+arrogance
+convent's
+advertiser
+connected
+basso
+breakfaster
+comic
+congenial
+beau
+courters
+adapters
+abruptly
+chemicals
+bringed
+creaming
+butterer
+attained
+actuals
+averred
+brainwash
+centerpiece
+blabbermouth
+byproduct's
+adaptable
+automata
+art
+cheery
+beheld
+beehive's
+claimed
+crucial
+brokenness
+agility
+combating
+cleft
+amenity
+after
+configuration
+contrasting
+coarsely
+brass
+barnstormed
+bowel
+bridesmaid's
+cornfield
+crazing
+autocracies
+adult
+conceptualizations
+corroboration
+bedders
+arroyo
+alarmist
+boatman
+chests
+burglary
+budgets
+canary's
+arraigning
+chin
+barnstorms
+blamers
+brimful
+calculate
+cellular
+contended
+challenges
+brusque
+bikinis
+arithmetics
+chairpersons
+class
+aircraft
+capably
+centralize
+awhile
+compacting
+courteous
+archaeologist's
+cram
+adagio
+affronts
+amplitude's
+bureau's
+audaciously
+autism
+blueberries
+an
+chips
+confiner
+chopper's
+chronology
+breaching
+bead
+amass
+camouflage
+compensation
+aspect
+broker
+atrophy
+balk
+bloodless
+barnyard
+benefactor's
+airdrops
+caused
+anthem
+activist's
+bottomless
+arrogates
+avoided
+bouncy
+clarified
+articulate
+almoner
+communists
+blokes
+butternut
+clockings
+barium
+blows
+criticism's
+associations
+brute
+bleeds
+alliteration's
+bluestocking
+boxwood
+clearer
+allegiance
+conceptualizes
+captivating
+bolshevik's
+belabored
+biographic
+contaminates
+chanticleer's
+adjusted
+childhood
+arguing
+cape
+conversantly
+compensating
+collaborations
+arraignment's
+blasted
+charging
+aggregation
+apprentices
+bird
+codifiers
+ballistic
+breve
+bells
+carolina
+chalk
+buckles
+boyfriend's
+adorn
+accoutrements
+availability
+antisymmetry
+blades
+alluded
+asterisks
+bookcases
+additive
+consents
+advanced
+balalaika
+coders
+caliph
+alundum
+are
+controllable
+blazing
+clattered
+asiatic
+axiomatizes
+ace
+coining
+column
+auditor's
+carol
+concatenated
+arrayed
+capital
+cautioner
+clan
+beauteous
+abbreviate
+asteroids
+canal's
+consolidation
+closets
+concealer
+crevices
+abed
+complex
+conviction's
+abide
+arrests
+begrudges
+adolescent
+conceals
+cells
+circles
+bravest
+compromiser
+bagels
+areas
+afore
+allergies
+arrangement
+attraction's
+amulets
+abstraction
+captured
+crouched
+brothers
+cash
+achieving
+bastard
+compete
+boiling
+beaching
+amphetamines
+clerking
+congestion
+alleviates
+angry
+bared
+comprehended
+bloodstain
+constituency's
+automating
+aerial's
+counterfeit
+besotted
+basses
+biofeedback
+compilation's
+band
+consulate
+appellant
+cough
+antennae
+contend
+anniversary
+boor
+artifactually
+aerobics
+booths
+chubbiest
+consumable
+assignments
+bromide's
+confined
+breakers
+alongside
+courtier
+boisterously
+bilaterally
+alternation
+auspiciously
+arbitrated
+condemning
+burns
+correspondents
+composition
+cavalierly
+coverlets
+capacities
+clatter
+apotheoses
+cartography
+ceased
+capitalized
+auditor
+appendicitis
+chops
+barony
+anemometry
+befouled
+briefer
+chest
+begetting
+bloats
+bookseller's
+commitment
+confides
+carcass's
+battering
+altruistically
+ballots
+adornments
+broaden
+angularly
+coefficient
+cataloged
+brae
+advantage
+anthems
+calculated
+counseling
+agitate
+accentuated
+camel
+ambivalent
+bedposts
+beacons
+chubbier
+cheerer
+assumes
+concord
+autumns
+convention's
+alpha
+adulterates
+arbiters
+archaically
+criteria
+achilles
+cheaper
+bulling
+associators
+bloater
+brawler
+ability
+adherents
+commonwealth
+coyote's
+centrally
+bequeathing
+abandonment
+circumstantially
+courteously
+borrow
+countermeasure's
+capricious
+allied
+anagram's
+absorptive
+assuage
+asset
+booked
+aspects
+commits
+crates
+capacitive
+condones
+assimilates
+carriage
+competitor's
+cocoons
+aggravated
+caravans
+arbitrator's
+baked
+balanced
+annihilated
+addressable
+autonomous
+bandwagon's
+contesting
+burrowing
+coroutines
+abjection
+correctable
+applauded
+bragged
+code
+aggressiveness
+cluttered
+attacking
+chide
+am
+coasters
+blizzard
+contentment
+altruism
+certifier
+capturing
+combinators
+carefree
+activate
+blindfolding
+assassinating
+approximate
+biplane's
+aplenty
+arteriosclerosis
+concentrates
+antisymmetric
+assurances
+anarchist's
+ascend
+advancing
+atrocities
+butt's
+bearable
+craftiness
+categorized
+barn
+contributor's
+arises
+bushy
+bisque
+coasted
+bargaining
+area's
+couples
+cabs
+barter
+bulletin
+chisels
+broadcasters
+contingency
+bywords
+antimicrobial
+coexisted
+blinding
+arithmetize
+coweringly
+convince
+competed
+bauble's
+crab
+boggling
+advocacy
+atlas
+assembled
+ancient
+bloodstream
+balking
+bin
+bully
+affirm
+cruelest
+atone
+conserved
+confession's
+bat
+captive
+aster
+blames
+colonel's
+bones
+borderline
+cleanses
+classified
+crudest
+contiguity
+bailing
+ablaze
+bender
+attendee
+clobbers
+aliasing
+autopilot
+coolers
+cache
+allayed
+barnyards
+britons
+appointment
+adaptor
+blockers
+abridges
+bloodiest
+betrothal
+bombards
+bony
+bus
+canary
+antinomy
+awash
+comrades
+ablating
+collectible
+boats
+brand
+church
+bandy
+adhering
+barred
+ammunition
+chime
+accompaniment's
+battleground's
+composing
+caveats
+armor
+amoeba
+composure
+collides
+avowed
+banding
+counsels
+asymmetric
+abbreviates
+balky
+adjudicates
+anointing
+accursed
+copse
+action
+construction's
+accents
+ambition's
+caressing
+cosmetic
+accession
+clutters
+censures
+allusions
+belittled
+armchair
+abode's
+conception's
+ascribe
+aliases
+ancestry
+ax
+companionable
+aright
+boxed
+brighteners
+alloy's
+checkable
+arraignments
+bed
+bunkhouses
+abbeys
+ceasing
+companies
+cherishing
+chunk's
+barony's
+chinning
+burdens
+briskness
+beggarly
+beloved
+clambered
+constitutionality
+beguiled
+archers
+alleyway
+apostle's
+consulate's
+antiformant
+categories
+construct
+aliments
+acquired
+blotted
+alterations
+adolescent's
+cranes
+bluntest
+accusation
+chafer
+airstrips
+abolished
+bothersome
+churchly
+airy
+bedded
+awareness
+alliterative
+arose
+amputates
+civilization's
+arenas
+certifying
+aspirators
+carbon's
+bunching
+aerates
+bilked
+checking
+cloned
+administrations
+canvasses
+colorless
+chamber
+circumspectly
+benedictine
+advisedly
+classifier
+approachable
+banners
+concurrently
+chores
+agape
+convention
+bindings
+budget
+comedies
+ants
+ambassadors
+chroniclers
+carrots
+colorful
+bulkhead's
+coherence
+buyer
+aggressions
+congressional
+commoners
+cheapen
+concealed
+columnates
+anarchy
+actress's
+baseboards
+creature's
+centuries
+barbarian
+concrete
+bicycles
+acceptably
+acclimating
+biceps
+bloodhound's
+becalmed
+apostle
+bible
+conjunctive
+comb
+ballers
+bickering
+adulterous
+austrian
+applicable
+blackberries
+creasing
+catalogs
+avert
+asparagus
+cambridge
+bird's
+belgians
+admonished
+admirations
+conscientious
+crescent's
+connectives
+blissful
+commenting
+bagged
+assimilate
+abounded
+copyright's
+advancement
+axiom's
+compilation
+circumlocution's
+catheter
+chances
+concretely
+codification
+browned
+clustering
+bum's
+clauses
+boundlessness
+arteriole's
+alfresco
+begrudged
+blustered
+anglican
+adjoined
+bamboo
+bathed
+consortium
+carrot's
+cloak
+album
+bunglers
+approbate
+colored
+aim
+cowboy
+alienate
+cleverest
+ambiguous
+confrontation's
+clear
+africa
+bowline's
+astronauts
+belayed
+censorship
+animation
+bedrooms
+chasms
+compared
+cogitated
+barbarians
+accomplices
+columnizes
+beaming
+busied
+counterpointing
+aluminum
+coconut's
+acclamation
+chokers
+biomedicine
+basalt
+buckwheat
+cardinality's
+bafflers
+arid
+chap's
+abound
+biblical
+backbone
+anticipation
+condemner
+angular
+advisability
+believing
+boiler
+arclike
+abetter
+bespeaks
+axiomatically
+coarse
+auditions
+bludgeoning
+clam's
+chief
+arrow
+cementing
+anxiety
+aberrations
+brushes
+cherub
+corollary's
+bunters
+beefers
+barbiturate
+circumlocution
+conjoined
+charities
+coverage
+campaigner
+burrowed
+barracks
+bristling
+accomplice
+abandoned
+bull
+caked
+century's
+bantu
+bristled
+airer
+bench
+bevy
+chamberlain's
+attention
+cloning
+camouflaging
+alder
+counter
+credibly
+approvingly
+breakup
+artillery
+celestially
+bail
+baker
+bullish
+canvass
+conversationally
+bringers
+augment
+creditably
+butterers
+botswana
+contemptible
+bribing
+adumbrate
+barb
+calico
+alludes
+amplified
+chills
+cloak's
+aver
+arthropod's
+budgeter
+bereavement
+cellars
+crewing
+blackmailer
+ayes
+bedsteads
+breachers
+bazaar
+centered
+celebrity
+blameless
+abscissa
+aerators
+awaited
+british
+adversary
+cowslip
+buttons
+confusing
+buggy's
+belts
+canceled
+addresses
+bribes
+condoning
+bonneted
+coarsen
+amazement
+angels
+chemise
+carbonates
+apostolic
+bandit's
+contending
+consummate
+counterclockwise
+beneficence
+benefitted
+contradicts
+comfortabilities
+anemone
+conductive
+articles
+bookcase
+burst
+baptizes
+countless
+costs
+agonizes
+byte
+creeper
+begs
+bunnies
+attract
+able
+calories
+baskets
+american
+brunt
+cognition
+closing
+chef's
+backbone's
+complicates
+cloister
+bedsprings
+arrays
+brigs
+archbishop
+buckler
+clove
+catholic's
+bellboys
+chairmen
+clap
+clarifications
+ambuscade
+bight
+bellyfull
+allowance's
+academy's
+acquiescence
+ambush
+catches
+at
+billion
+contact
+bees
+adopters
+approximately
+chiseled
+attributively
+criers
+codification's
+cowslips
+contradictions
+buttock's
+categorically
+counterpart's
+confessor
+appreciably
+adjusts
+altitude
+construe
+cancer
+bay
+aristocratic
+alleviaters
+binoculars
+axiomatizing
+changer
+bustle
+civic
+bostonians
+crops
+authorizations
+cogitation
+baptize
+caressed
+abase
+ariser
+axiomatization
+aggravates
+confiscation
+bowdlerize
+backspaced
+alters
+clarity
+blots
+bland
+belligerent's
+burgher
+cardinally
+bookcase's
+buggers
+byte's
+avarice
+crowding
+beriberi
+allegories
+coronets
+cell
+calculative
+adduce
+amperes
+bladders
+adages
+contests
+cognizant
+actuates
+ambiguity
+brighten
+concert
+conviction
+booty
+ashtray
+braves
+blouses
+avoiders
+confederate
+bombings
+couplings
+convictions
+attractiveness
+chronicled
+corers
+anger
+covertly
+aural
+asynchrony
+arrowheads
+breakdown's
+bulletins
+ceremonialness
+clipper
+bracelets
+anthropomorphically
+benedict
+connecting
+bacterium
+achievers
+abutter's
+autocorrelate
+coupling
+blanketer
+continental
+assignment
+conundrum
+arab
+besides
+cheerful
+blowup
+bastion
+arrive
+combines
+agar
+cookie
+astronaut's
+constraint's
+article's
+confiscations
+bounded
+adjudicate
+belligerently
+boron
+brownness
+adept
+creep
+abduction
+accosting
+asylum
+autographed
+clash
+chiseler
+clumsily
+capitally
+braking
+absenting
+bagatelle's
+comet
+basked
+anything
+buffeted
+absentia
+bounty
+carols
+characteristic's
+constructive
+comforting
+aflame
+brainwashed
+booby
+aspirations
+adjudge
+behaviorism
+computability
+assessment
+consultations
+bowstring
+acknowledgment
+arranger
+chancellor
+attest
+compresses
+concessions
+asymmetrically
+administering
+clamoring
+arraigned
+archived
+admonition
+actor's
+aimers
+colorers
+booklet
+calibers
+affix
+bushel's
+atomizes
+creeks
+bleedings
+casuals
+archives
+certainly
+animate
+cons
+affiliate
+answered
+coyote
+coughed
+alligator's
+antagonized
+arousal
+assisted
+aerated
+competently
+conquering
+acclaimed
+assign
+announcer
+controllers
+amalgamation
+comfort
+antihistorical
+availed
+balsa
+annoyed
+basted
+asymptomatically
+cropped
+combinational
+barging
+conversant
+causality
+botches
+bedspread
+considerately
+bookstores
+climate
+blessing
+accordion's
+cdr
+bonanza's
+construing
+bearings
+bluster
+backspaces
+babyish
+countermeasure
+crime
+battered
+audit
+associating
+corps
+application
+archangel's
+aided
+breasted
+compelled
+acrobats
+breakfasts
+chronologies
+beet's
+averts
+convergence
+attributable
+adverbial
+churns
+arrest
+breastwork
+beefs
+brownie
+create
+contradistinctions
+coordinators
+abandoning
+byline
+beatitude
+autosuggestibility
+bipartite
+annals
+assents
+conceives
+amalgams
+cleft's
+clicked
+appointers
+bible's
+boots
+caret
+attaches
+controversy's
+combinatorial
+bazaars
+cardinals
+bored
+catering
+christian's
+ashman
+consequence's
+austere
+clay
+birthday's
+amongst
+arbitrariness
+brainstorms
+chateaus
+coaxer
+applause
+cautiousness
+adorned
+compromises
+creatures
+compliance
+apartheid
+archiving
+amoeba's
+communal
+comedian's
+aggressive
+crop
+ante
+better
+chalice
+aristocrats
+circling
+belittle
+abortion's
+coldly
+certification
+befriends
+courthouse
+anesthesia
+accorder
+athletic
+blithe
+bedder
+abasements
+councils
+beware
+abductor
+assonant
+clench
+aspersion
+abortion
+abating
+birches
+breakpoints
+acyclic
+ablate
+canners
+cistern
+boxtop
+composite
+cloudless
+computation
+chastely
+abusing
+bunker's
+compounding
+alveolar
+chaplains
+bias
+audiological
+capability's
+bangle
+barren
+antidote's
+cranking
+baptizing
+bond
+borders
+automobile's
+allegoric
+chargers
+baltic
+autumn
+columns
+absolute
+connoisseur
+cranberry
+contiguous
+consoled
+confirmations
+argot
+blouse
+annotated
+callous
+astounded
+crashed
+autonavigators
+chivalry
+columnating
+beefed
+convincer
+allegorical
+bagger
+assume
+containable
+artistically
+calibration
+architectonic
+campaigns
+addressability
+crazier
+buy
+brightener
+bastion's
+blurb
+awaits
+commands
+chocolate
+bleaching
+antenna
+blowers
+chorused
+composers
+assigners
+aspires
+coils
+bid
+application's
+clamped
+bedding
+awkwardly
+coppers
+costumes
+borax
+caged
+candler
+badges
+clutches
+consign
+apprised
+buys
+adiabatically
+aggregately
+canned
+abstract
+acrimony
+coax
+analytically
+absurd
+alluring
+contradicted
+aspersion's
+bribe
+boos
+chattererz
+backache's
+complying
+continent
+cohabitate
+causation
+astronomer's
+cities
+bookie
+bleating
+cracking
+bicameral
+convoluted
+adjustable
+ambulance
+can
+boulders
+consideration
+announces
+briars
+antipode's
+bartered
+ancestor
+biplanes
+characterize
+crested
+bum
+bridling
+consolable
+bungles
+coffee
+buffets
+congratulation
+commitment's
+adequately
+clown
+capacitor's
+broomsticks
+agglutinate
+activations
+asians
+canon's
+authenticity
+complexities
+cripple
+bracket
+counselor's
+beatably
+bounced
+baton's
+crankiest
+barbell's
+caster
+casseroles
+ballad's
+bob
+batched
+attenuated
+beakers
+biologist
+bleary
+condescend
+blondes
+augustness
+boldface
+battlefronts
+acumen
+bolting
+articulatory
+butyrate
+bowel's
+backwater's
+colonel
+creating
+authorized
+bijection
+accruing
+admirably
+correctness
+citadels
+clasps
+bandlimit
+bib
+appalachia
+contrives
+bundle
+audiology
+circumventing
+blinker
+choked
+bilks
+clears
+affirmations
+arbitrating
+bites
+bootstraps
+capitals
+commuters
+billeted
+authentication
+choice
+attentively
+aggressor
+arterioles
+crowds
+chestnut
+backstitched
+attachments
+assimilating
+bewilderment
+atrophied
+chintz
+blackjack
+armadillos
+bonfire's
+ballast
+agonies
+busier
+coefficient's
+adventurous
+ballet's
+coil
+chewed
+come
+bonder
+catalogue
+coursed
+arise
+biennium
+ceremony's
+blanching
+appraisers
+acolyte
+argues
+beholden
+appanage
+astatine
+banana's
+coons
+civilians
+bodyguard
+archipelago
+bug's
+candles
+antique's
+accidently
+blighted
+belgium
+besieged
+burned
+abuse
+asian
+chute
+awkwardness
+abasing
+bottler
+ardently
+blab
+breakwater
+cavity
+cheated
+befall
+according
+chronicle
+airframes
+bats
+choring
+authorize
+consumed
+chatter
+annunciated
+capers
+anomalous
+clustered
+burner
+acquaintance's
+badger's
+basic
+affectations
+buzzy
+coast
+attendances
+activating
+beams
+cohesive
+attainable
+barbecueing
+beautiful
+acronyms
+communion
+client
+atypical
+antagonists
+conservations
+arguers
+agglomerate
+antigen
+battalion
+ambition
+countered
+assistant
+classed
+arming
+alveoli
+buff's
+backplanes
+busted
+bermuda
+converting
+brutish
+boot
+acidities
+confrontation
+chapel's
+berlin
+ascender
+behead
+buddy's
+commandment
+actuated
+brilliancy
+chance
+bedrock's
+bridgeheads
+arable
+avid
+arteries
+caresser
+ballyhoo
+attested
+african
+comradely
+consciences
+commencing
+antennas
+annulments
+bobolink's
+advisee
+acceptance
+crack
+ascendent
+appendage's
+accommodates
+accumulated
+clones
+apocryphal
+ages
+cluster
+capitols
+camper
+beading
+amble
+buffeting
+circumspect
+advances
+analyzes
+courier's
+aperiodic
+appealer
+atonally
+attentive
+conspire
+appropriating
+armed
+allergic
+agglomeration
+consternation
+blinks
+audibly
+aspirins
+bunions
+adverbs
+armload
+bet's
+caring
+carryover
+coordinator's
+afterthoughts
+allays
+abided
+brownish
+baiting
+capitalism
+coined
+conspirators
+automatic
+contradistinction
+conductor's
+backstitching
+conjure
+casings
+accountant
+clinched
+constrain
+alcohol
+bee
+anticompetitive
+britain
+bade
+camera's
+antimony
+activated
+burglarizes
+compatible
+cotyledon's
+artificiality
+bath
+citadel
+archivist
+chandelier
+addiction
+ampersand
+bitterer
+constructively
+afield
+bing
+attractor's
+cringe
+allergy's
+bigots
+assimilation
+ate
+capitalization
+abridge
+buzzword
+befit
+bandlimited
+commandant
+alabama
+acculturated
+brightening
+bulldozing
+cooky
+bunks
+centers
+bespectacled
+adherent's
+abducts
+another's
+condensation
+billeting
+bye
+chess
+craziest
+ballgown's
+archaism
+consorted
+chinned
+cowl
+beat
+bootlegger
+bravado
+classically
+bulging
+browbeat
+accommodate
+borne
+bronzed
+artifice
+arcade
+become
+backlog
+addressers
+amphitheaters
+befogging
+crochet
+aiding
+celebrated
+conversational
+backbends
+authentications
+advertisement's
+blockade
+bulldozes
+contraction's
+bricklayer's
+brain
+conveying
+anemia
+chronology's
+channeling
+caution
+commanding
+crosses
+artisan
+conditions
+admired
+authenticator
+airships
+blunter
+bridesmaid
+counseled
+cheeriness
+chiefs
+boils
+clerical
+atrocity's
+balls
+ambled
+canvases
+consoles
+abscessed
+abetting
+blitzkrieg
+bottlers
+beveled
+condemn
+alumna
+cords
+admittance
+annotates
+citing
+corrector
+appreciative
+branching
+betrays
+buttoned
+ailment
+boulevards
+bottlenecks
+chamberlains
+bedbug
+covenant's
+crispness
+considering
+broadcasts
+audubon
+arousing
+correction
+barrack
+closure
+contrastingly
+brittleness
+assassin's
+bursa
+bungalows
+balked
+conceptual
+carcasses
+arabia
+blueprint's
+affectingly
+consorting
+buses
+auger
+appointed
+brute's
+bosoms
+anyway
+arrowed
+anaphorically
+clarify
+approachability
+assistance
+buzzes
+commonplace
+bluebonnet's
+adroitness
+availers
+aquifers
+architecture's
+action's
+backgrounds
+abduct
+attired
+briber
+admissibility
+cease
+beck
+auctioneers
+birdbath's
+atomic
+crossing
+considerate
+biconvex
+bulge
+bedridden
+arising
+aggression's
+cherish
+bureaucratic
+abater
+amputating
+atop
+climber
+clutched
+afford
+bisections
+bonnets
+commendations
+bloke
+abundant
+clamp
+aloes
+aboard
+atheistic
+advantageously
+buffs
+chimney's
+cheerily
+benefactor
+ample
+bushwhacked
+captain
+buckskins
+contextually
+antiquarian's
+browns
+bubble
+ban's
+brine
+acculturates
+anhydrously
+beaver's
+advantaged
+bibliographic
+clasping
+clattering
+coerce
+colorado
+airmen
+bandlimiting
+balks
+boners
+attached
+chosen
+convened
+bordello
+composer
+botanist
+backtracks
+civilization
+commutativity
+bloodshed
+cohere
+bunkhouse
+archdiocese
+boycotted
+crosswords
+bedspread's
+anteaters
+cove
+apothecary
+chute's
+addressee
+climatically
+blower
+bane
+cask's
+beetling
+ambiguities
+before
+abstain
+arachnids
+bucket's
+amateurs
+blackouts
+adverb
+butchery
+conjunction's
+barricade
+audiologists
+aphorism
+complete
+butts
+bishops
+allotment's
+confusingly
+channeller's
+blanches
+bragging
+bathe
+comedians
+celestial
+citizens
+couple
+backpack
+aphasic
+brothels
+axles
+cancellations
+bonus's
+consolidates
+authoritative
+axle's
+acclimatization
+carolinas
+chime's
+antibiotic
+bisons
+biographically
+achieve
+bleachers
+bicentennial
+behavioral
+accomplish
+concealment
+biddies
+antitoxins
+arriving
+apprehend
+affluent
+cliffs
+bleached
+astronomers
+connection
+bride
+backs
+bog's
+casket's
+continual
+ampere
+cat
+alternator
+cotton
+athletes
+communicant's
+best
+befuddling
+benefactors
+appease
+annoyingly
+context
+astonished
+cracked
+amnesty
+autumn's
+binder
+babying
+contributory
+assumption
+cowls
+cocks
+airless
+consummated
+atypically
+beneficially
+chairing
+accusative
+commanded
+bufferrer's
+alerter
+arbiter
+civilly
+charms
+backscattering
+cheater
+bushes
+caverns
+chieftain
+calf
+comparing
+aurora
+butyl
+cower
+bemoans
+baptistry
+carpenter's
+capes
+bordered
+arrows
+blocker
+crest
+appeal
+arabic
+conventions
+axis
+brains
+bookkeeper's
+circle
+cooks
+circumlocutions
+adventists
+barringer
+affording
+anatomically
+basements
+barbarities
+configuration's
+contributes
+collaborating
+beach
+comet's
+bakes
+assigns
+ballerina
+cheapens
+clinging
+conquered
+bisecting
+closenesses
+bugle
+boatmen
+beatings
+complicator
+bight's
+banister's
+archaic
+anthropologists
+clams
+beginners
+committee's
+communicants
+alone
+bounteously
+bastes
+ascertain
+alphabetical
+bringing
+batters
+amazon's
+constituent
+benders
+being
+constitutionally
+audiometric
+blast
+copings
+bailiffs
+colts
+coolies
+airlift's
+boomerang
+bifocal
+clothes
+cashiers
+congenially
+billows
+boilerplate
+biochemistry
+betting
+brimmed
+complementers
+breading
+bragger
+adducting
+bisectors
+abrogates
+criticized
+comrade
+bucolic
+birthright
+blurs
+challenger
+complicated
+bluebonnet
+biscuit's
+classmates
+campus's
+boundary
+bedbug's
+adjustor's
+acre
+bicycling
+awe
+additions
+baiter
+authorizes
+beautify
+copier
+buffet
+belfries
+acquisitions
+brooch
+crickets
+caterpillars
+beefsteak
+complicating
+bedpost
+criminal
+celebrity's
+bookseller
+christened
+coerces
+clamors
+all
+boatyard's
+canoe's
+begin
+anaerobic
+bushing
+agreers
+concedes
+countermeasures
+beg
+agglutinin
+bunted
+ammonium
+aspiration's
+bathrobes
+changeable
+beached
+bestowal
+beaner
+catsup
+admires
+clockwise
+agile
+alarms
+ached
+chinks
+buffer's
+cartesian
+annunciate
+chanticleer
+avenue
+anchor
+alliterations
+blanking
+bargained
+breathtaking
+crime's
+assiduity
+argentina
+contiguously
+aqua
+bested
+borderlands
+appetite
+captive's
+bipolar
+conceal
+counters
+costumed
+arrestingly
+bunting
+blight
+champagne
+brusquely
+address
+bloodhounds
+associative
+creed
+arithmetical
+balustrade's
+belabors
+complementing
+checkout
+archivers
+badlands
+behaviors
+ampoules
+bridgehead's
+antiquarian
+clumsiness
+considerable
+apportions
+anglicans
+appealingly
+barfly's
+absorptions
+awards
+congregates
+cloister's
+armour
+avoid
+correctively
+chucks
+burps
+bums
+berry
+batches
+administration
+atones
+bishop's
+blonde's
+casualty's
+cores
+bodied
+alter
+assonance
+apprise
+antitoxin
+avariciously
+checkpoint's
+affirmative
+conjures
+angstrom
+aesthetically
+canyon
+binge
+crazed
+breastwork's
+aids
+boston
+conceits
+announcement's
+beechen
+accessory
+authorities
+constrained
+automation
+anaplasmosis
+commander
+commendation's
+belabor
+cornfields
+artemis
+asphalt
+contracted
+brochure
+crafted
+allegedly
+alien's
+auditory
+blowfish
+adducible
+confederations
+annuals
+britches
+acquaintance
+appallingly
+abounds
+burglarproof
+crossers
+bayous
+brisk
+authority's
+covetousness
+averse
+accomplished
+aromatic
+admiral
+bijective
+avenging
+bran
+boatyards
+beseeching
+challenging
+bares
+acts
+abductions
+compendium
+compulsion's
+calendar's
+clad
+blockage
+conventional
+craze
+cajoling
+acceptability
+bungalow
+buff
+cramps
+attackable
+calculator's
+asp
+braved
+colors
+balling
+contaminate
+crackling
+comes
+complimenters
+across
+astronomy
+aborigine
+bobwhite's
+autopilot's
+chattered
+appall
+autonavigator
+bashed
+acoustics
+beachhead's
+apartments
+convenience
+blackout's
+bands
+autonomously
+amounters
+centripetal
+achievable
+astringency
+attuned
+concatenating
+copyright
+coding
+assumption's
+anastomoses
+confiscate
+asking
+beneficial
+adhesions
+busboy
+bronzes
+audacity
+bruises
+crash
+beau's
+circuit's
+aborts
+baubles
+beliefs
+assuaged
+costed
+blinking
+characterized
+bowled
+block
+conquests
+confesses
+amusers
+ceiling
+berets
+berliner
+abstentions
+child
+authoritatively
+closeness
+bushel
+considered
+communicates
+cheerlessly
+autofluorescence
+aquarium
+affects
+appurtenances
+airbag
+approaches
+admonishments
+bets
+bounden
+courtly
+bodybuilder's
+campus
+brainstorm
+americans
+chairperson's
+botanical
+askew
+amazon
+bleed
+clime's
+cooperations
+commonness
+boatloads
+blinked
+courtyard
+adapted
+aforethought
+backwater
+burr
+cathode
+awaking
+buzzed
+bridgeable
+arrives
+adventuring
+beseech
+attrition
+copied
+colon
+client's
+bandstand's
+advice
+baptistries
+antithetical
+alcohol's
+contradicting
+ambidextrous
+belches
+category
+bluntness
+coupon's
+assimilations
+comfortable
+caller
+affliction's
+attends
+compactest
+baler
+beacon
+blind
+bleakness
+beseeches
+courts
+couch
+consequential
+adulterers
+craving
+biggest
+astray
+bigoted
+barfly
+charges
+ambiguity's
+commentary
+crankily
+cowerer
+carnival
+bachelor's
+bituminous
+continuance's
+calamities
+claws
+apiece
+century
+ascendancy
+charts
+animations
+aggression
+chickadee's
+carve
+confidence
+actor
+bubbled
+becalming
+convulsion
+chivalrous
+brightest
+centralized
+beautifies
+amateurishness
+birthrights
+alligator
+circumstantial
+constructors
+conceptions
+arranging
+cart
+cent
+ager
+congruence
+carrot
+chariots
+cloudier
+captivity
+conquerers
+compartmentalizes
+condensing
+celebrities
+chalks
+accordance
+chilled
+conversations
+apples
+conceiving
+average
+blessed
+creator
+ant
+cling
+annoyer
+aviation
+cohesively
+correspondences
+boor's
+apprehended
+bessel
+both
+characterizes
+bards
+cots
+acculturating
+cemeteries
+carting
+alcohols
+bitterest
+ascetic's
+conducts
+caking
+airspace
+autocrats
+ashes
+chimes
+broadcaster
+commuter
+basket
+borderland's
+broadened
+boyish
+allegretto's
+ban
+bidder
+christen
+blessings
+bury
+arranged
+choir's
+apathetic
+boring
+aryan
+appearing
+binds
+cooperates
+bounces
+airspeed
+complicators
+adapting
+babbled
+agglomerates
+bedraggled
+addictions
+bolt
+calmly
+blur's
+boatload's
+anesthetic
+bugs
+colt
+completing
+boxer
+billers
+affronting
+absurdity's
+chides
+comparatively
+braided
+clipper's
+cot's
+calves
+articulations
+branchings
+attraction
+concatenates
+alligators
+cake
+boom
+crashing
+afar
+abler
+beamed
+adverse
+adrenaline
+agriculture
+beehives
+crankier
+courthouses
+advises
+consigns
+bisect
+azimuth's
+carpets
+arthropod
+brewery's
+commonalities
+altruist
+astride
+appreciate
+carved
+briefs
+admitter
+celery
+congregate
+clocking
+assassinated
+adding
+canvasser
+civics
+contemptuously
+calculates
+advisees
+bumbling
+algorithmically
+cloudy
+algebras
+addiction's
+cop's
+assurers
+confidently
+affector
+analyzers
+chimneys
+burdening
+antitrust
+admix
+avoidance
+choking
+coexists
+accustoms
+cellar
+anchovy
+constructor's
+confinements
+consequently
+accelerations
+accoutrement
+churchman
+biller
+affected
+brigades
+cremating
+corridor's
+bagging
+ah
+berating
+collective
+acuteness
+arrestors
+cab's
+border
+agitation
+animism
+arches
+alveolus
+cessation's
+averrer
+abash
+counterrevolution
+attesting
+animateness
+bawdy
+americana
+bloodstained
+applicator
+annotating
+annunciator
+clamored
+acting
+aerosols
+axiomatization's
+brags
+coalesces
+avocation
+combining
+crazily
+bravery
+burying
+adored
+airfield's
+accounting
+broadeners
+anise
+chimney
+added
+avenges
+bellicosity
+cranberries
+arsenic
+communities
+comparable
+bunkered
+architect
+alphabetically
+beautified
+apogees
+communist
+anatomical
+complexity
+accost
+autographing
+browsing
+ameliorate
+bookers
+bandaging
+clinical
+appellants
+counteract
+clairvoyantly
+bootstrap's
+canner
+boastful
+attainer
+ash
+beaded
+brake
+barest
+befriend
+burglarproofing
+allegorically
+bunts
+believes
+accession's
+buck
+boathouse's
+byword's
+anthracite
+accuse
+conjunction
+burping
+commandant's
+creativity
+affirming
+bark
+amuses
+balcony's
+auditors
+counsel
+clamber
+borates
+cowboy's
+bickered
+boors
+combing
+biting
+breeze
+crowder
+corn
+bloke's
+bombast
+bookstore
+blared
+bedlam
+carbohydrate
+coops
+bundles
+blistering
+antarctic
+anterior
+bilinear
+chocolate's
+context's
+alternating
+annoyance
+constancy
+ambivalently
+buddy
+brutalize
+bobbin
+alleles
+commotion
+attributes
+airborne
+creed's
+bolstering
+coaxed
+airframe
+breaker
+accept
+abashes
+attentional
+contributor
+comparability
+auscultating
+cocked
+computationally
+buffered
+career's
+analyzable
+absently
+courtyard's
+buildups
+apportioned
+balkanized
+annulling
+cremation
+buffetings
+conditional
+confided
+airliner
+bulldozer
+approaching
+anagram
+apollonian
+canaries
+bloat
+bluebird
+collision
+cool
+connectedness
+abasement
+artisan's
+avoidably
+clerks
+afflict
+briton
+corroborates
+cameras
+counted
+boldest
+burglars
+brutes
+brows
+abhorrent
+configuring
+averaged
+ace's
+buying
+abandon
+bayou
+cottons
+auditioning
+amplifies
+clippers
+brainstorm's
+alto
+brutalities
+bunch
+agricultural
+bursts
+blunting
+archer
+activity
+carefulness
+bedroom's
+concomitant
+balm's
+artificer
+barking
+breathy
+babies
+acacia
+bodies
+cap's
+criticised
+conversed
+crewed
+ascendant
+budgeting
+coroutine's
+charmed
+bellboy's
+conservatism
+butler
+acculturation
+conclusion's
+adapt
+cellist
+contempt
+adumbrates
+borrowed
+confounds
+allegiance's
+blabbermouths
+accrues
+captor
+coop
+baseballs
+cottages
+apartment's
+assertiveness
+assent
+artfully
+bagger's
+abolishment
+acetylene
+accessory's
+blackbird
+baptist's
+consist
+cavern
+buttock
+corporal's
+autoregressive
+bailiff's
+birds
+corder
+bracketing
+antlered
+barbiturates
+county's
+addicted
+agglutinated
+abashed
+competitively
+captains
+bloating
+accepts
+choose
+ashamed
+backyard's
+apiary
+contradiction
+balalaika's
+arctic
+broom
+anvils
+coffee's
+alliance's
+agitator's
+change
+adjusters
+cremates
+complexes
+bodyguard's
+burl
+antithyroid
+ambient
+airfoil
+apricots
+athleticism
+abjectly
+bankrupts
+answerers
+alternatively
+confronter
+breaking
+baronial
+cannibalized
+appetites
+breaded
+blackboard's
+battlegrounds
+cosine
+barrenness
+abbreviation
+budging
+boolean
+acrobatics
+again
+ashtrays
+clashed
+contingent's
+compulsion
+bedazzled
+collapsing
+comparison's
+businesses
+compassionately
+achievement
+buffering
+candlesticks
+austerely
+awls
+associate
+absolved
+annexed
+airway
+clipping
+counselors
+conscience
+attempters
+constructing
+biases
+cautioners
+comma's
+cosines
+char
+auscultates
+afire
+comely
+amity
+beverage's
+anew
+ballplayer's
+adulterated
+authorship
+alterers
+burdened
+attributive
+afflictions
+blinded
+barrier's
+attachment
+brotherhood
+bridegroom
+atoms
+cobweb's
+copes
+controversies
+complexion
+crawling
+atomized
+adjust
+accuracies
+concern
+cinders
+authorization
+appraisingly
+bladder's
+cooked
+cowers
+batter
+commissioner
+close
+burglar's
+allocated
+anvil
+aftershock
+abrogating
+chemistries
+advisable
+conduct
+committee
+blaring
+appalling
+braveness
+alertly
+artificialities
+brevet
+collision's
+arizona
+bower
+creamers
+awnings
+arsenals
+crane
+city
+contemplative
+catheters
+administrators
+attorney
+churned
+attractions
+columnation
+bobbed
+centipedes
+bostonian's
+apprises
+buries
+allege
+botulism
+adobe
+ambassador's
+covenants
+boon
+asynchronously
+bigness
+axial
+chaffing
+battleships
+ant's
+anthropological
+accent
+brushing
+brassy
+consumptions
+battleship
+absorb
+beckons
+brook
+connectors
+clinches
+accesses
+beaters
+archaicness
+bursitis
+chided
+bomb
+assimilated
+addicts
+convening
+arianists
+counting
+altar's
+confusions
+attachment's
+clipping's
+amazing
+corset
+bossed
+attach
+commandingly
+animatedly
+allegations
+assuages
+annulment
+compress
+aptitude
+absurdities
+autobiographic
+aspect's
+concentrator
+burgesses
+anagrams
+bedeviled
+assemblers
+convinced
+commentary's
+agglomerated
+biological
+callousness
+axolotl's
+atmospheres
+authoritarian
+cancer's
+above
+charting
+aldermen
+battler
+cistern's
+bouncer
+amassed
+conquest
+altering
+arrogantly
+brokenly
+comparator
+counsellor's
+attenders
+cackle
+criticize
+authored
+ably
+believed
+compelling
+accepter
+cleansed
+afflicted
+backslash
+computed
+almighty
+attache
+braes
+carriage's
+benediction
+brigadier's
+contemporariness
+boomtown
+amplitudes
+breakwaters
+clod
+catch
+bar's
+activist
+caves
+assenting
+camp
+attainments
+brotherliness
+continuances
+appearance
+applicator's
+browbeats
+banjos
+addendum
+became
+adduces
+armadillo
+brothel
+almanac
+courageous
+assault
+chunk
+coaching
+atheist's
+blunted
+aperiodicity
+congresses
+boastfully
+burglarproofed
+broadest
+bashfulness
+affect
+acne
+bottleneck's
+criticisms
+corrupts
+colonized
+closeted
+canonicalizing
+auditorium
+antenna's
+awfully
+anti
+consumes
+agonize
+algebra's
+championing
+blush
+bugger
+antagonize
+beethoven
+blase
+boycotts
+compensatory
+bugged
+boroughs
+anatomic
+batons
+arguably
+affricates
+appreciations
+cavalry
+alumna's
+arcing
+backpacks
+braces
+contextual
+coupon
+chillingly
+allocates
+abuts
+contribution
+commodity
+admonishing
+coolly
+cabinet's
+collapsed
+confessions
+adjured
+capriciousness
+chastising
+babe
+aerodynamics
+accepting
+concept
+contour's
+consequentialities
+birthday
+bankrupted
+birthed
+benefit
+concentrations
+azalea
+channels
+chestnuts
+contenting
+antedate
+censors
+contagious
+abbot's
+channellers
+apt
+commend
+avocation's
+admonition's
+abolition
+confederation
+carried
+clumsy
+coincidences
+bumper
+burr's
+bugles
+bribers
+attainably
+consume
+comma
+creativeness
+accuser
+bombs
+abbey
+baffled
+aside
+clip's
+appeases
+compass
+bundling
+abstractionism
+confide
+creases
+apropos
+confronted
+corrective
+concurrencies
+autocratic
+alien
+attending
+antagonistic
+broadcast
+asymptote's
+belied
+breasts
+contrapositives
+coiner
+accordingly
+cohering
+computers
+cow
+bibs
+ancestral
+controller
+attacker
+alerts
+coconut
+agency
+alerted
+alcoholism
+ammoniac
+actinometers
+acquitter
+bud
+cessation
+alleging
+centralizes
+articulators
+council's
+carvings
+arduously
+blown
+anode's
+arrogate
+bisects
+centimeters
+burgeoning
+course
+appointee's
+ascribable
+communicate
+contrivance's
+adoptions
+attune
+acres
+abyss's
+corporal
+certifiers
+analyze
+augusta
+bestseller's
+checkpoint
+coexist
+attainers
+argon
+bearded
+crudeness
+averaging
+brick
+adducing
+annulment's
+chicks
+blocked
+cisterns
+afoul
+affiliates
+briskly
+adhesion
+ascertainable
+appeasement
+blueprints
+agreements
+blindfolds
+communicator
+characterization
+annoyances
+breeches
+brushed
+clinic
+competes
+chuckled
+cradled
+balmy
+antisubmarine
+alternate
+armpits
+barn's
+conjuncts
+adhere
+allows
+counteracted
+appetizer
+capturers
+cleanse
+avant
+abbe
+corpse's
+arduousness
+badge
+begets
+contemplated
+caveat
+copiously
+athena
+aggrieving
+alibi
+accumulation
+basket's
+aftershocks
+bass
+conjuncted
+chaps
+brunch
+colonials
+bibbed
+clusters
+antagonizing
+constituencies
+combings
+bearish
+continuously
+adequacy
+brow's
+catalog
+alderman
+comedic
+chemists
+concernedly
+conceded
+alarm
+arced
+buckle
+confidingly
+coherent
+closes
+buffoon
+brace
+adjustably
+crackers
+contamination
+burgess's
+aerobic
+constitutes
+baptismal
+broadness
+blimps
+concatenation
+claiming
+bard's
+aerosolize
+adjoins
+copies
+coats
+boggle
+corroborated
+concreteness
+bill
+cautions
+bantam
+bearably
+armchair's
+birthright's
+cravat's
+cone's
+courtiers
+asunder
+bulletin's
+biopsies
+alley
+contrive
+blasphemies
+amuser
+ballerinas
+blushed
+causticly
+brandy
+blinkers
+complimenting
+crimsoning
+angola
+apprehensiveness
+bolster
+columnate
+byproducts
+berths
+accusal
+chubby
+arrived
+camps
+blemish's
+anaconda
+cook
+airfoils
+atlantic
+boosted
+converge
+availer
+appalachians
+coffin's
+boarding
+alga
+crouch
+columnizing
+consul's
+chastises
+angling
+apple's
+billiard
+attentiveness
+adroit
+apprehensible
+cereal
+blouse's
+browning
+bodybuilder
+coaxing
+assertion's
+connective's
+commemorated
+accountability
+crooked
+blips
+chandeliers
+aristocracy
+bangs
+coke
+abutment
+community
+calculus
+congregated
+crepe
+compromised
+airlines
+contributing
+contingencies
+coordinated
+alginate
+batted
+contender
+alma
+antagonisms
+accompanied
+airport
+administrator's
+appraisal
+breadbox
+condemnation
+backlog's
+available
+consequents
+crooks
+commonwealths
+barring
+channeller
+crucially
+archaeological
+charming
+adventist
+credits
+appetizing
+breads
+clients
+climbing
+aloneness
+abstractness
+appearer
+astute
+clockers
+antagonizes
+agonized
+bastard's
+conjectured
+aqueducts
+aureole
+boatswains
+conjured
+chauffeur
+complementer
+behold
+bustards
+bivouac
+cluck
+anus
+bless
+catastrophic
+bounty's
+allowed
+answer
+concealers
+brainchild's
+coercion
+buzzword's
+bordellos
+appertain
+applier
+couriers
+aesthetic's
+craft
+capacitances
+capped
+coupler
+category's
+anvil's
+conquest's
+checksums
+clucking
+bronchus
+acrimonious
+changeably
+accenting
+argued
+conditioning
+brewing
+backwardness
+cascaded
+atomize
+contours
+arianist
+apart
+conflict
+carefully
+banshee's
+conveys
+arbitrates
+amphitheater's
+amen
+alimony
+bound
+buzz
+courtroom
+apparently
+coalescing
+circulating
+amounter
+bypasses
+breadth
+choral
+completion
+arisen
+anticipating
+bilges
+contractions
+bedspring
+commune
+blacklisted
+beagle
+alkaline
+atolls
+carelessly
+blimp
+corking
+brevity
+alterable
+canada
+bear
+bluntly
+cartridges
+connoted
+countries
+corroborate
+consecration
+corrupted
+appreciating
+combatant's
+alkalis
+affecting
+blues
+casserole
+ballad
+bewitches
+common
+as
+because
+bathroom's
+anchorages
+beguile
+connect
+convenience's
+counteracting
+assorted
+care
+contains
+centimeter
+ancestors
+briefings
+busses
+churchyards
+breakable
+amortizing
+courthouse's
+click
+courses
+ajar
+county
+covet
+confidences
+capitalizer
+agog
+backtracking
+copious
+bestsellers
+chilliness
+bringer
+browse
+centipede
+bawled
+bricklayer
+breath
+assailants
+abysses
+command's
+characterizer
+calculating
+america's
+aurally
+contain
+alias
+commentators
+confounded
+appending
+accidents
+chatters
+coordinates
+bleeder
+blueness
+badger
+bolsters
+astounding
+capitalist's
+conservation's
+commences
+aimed
+bun
+comparators
+competition
+bauble
+backbend's
+bled
+assassinate
+chop
+anemometer's
+cobbler
+coldness
+audiometry
+affinity's
+amalgamates
+cowardly
+consolidating
+beads
+brackish
+bookings
+accuses
+bog
+compartmentalizing
+clutching
+calming
+collars
+clambers
+banqueting
+beaked
+authoring
+correspondence
+apostrophes
+affirmation's
+bespeak
+costing
+brought
+complainer
+battalions
+asymmetry
+boathouse
+canyon's
+awarded
+amplitude
+anarchical
+anticipatory
+bolder
+cooperatives
+caterer
+adviser
+balkanizing
+augur
+cannibal's
+balustrades
+attaching
+collector's
+commercials
+capaciously
+coincidence's
+bumps
+ascot
+bale
+blackmail
+baby
+aftereffect
+bloomers
+buttresses
+avenues
+climaxes
+aqueduct
+cater
+brainchild
+avail
+bypassed
+bowl
+california
+cements
+boxes
+brained
+bedevils
+captors
+acuity
+ascends
+breakthrough's
+assigner
+caner
+bequests
+ceilings
+axers
+bookshelf
+autistic
+celebrations
+axons
+chiding
+asterisk
+allophonic
+blindingly
+cherubim
+boaster
+confining
+anxious
+clowning
+advisement
+approach
+anesthetic's
+crescent
+alertedly
+birdbath
+beardless
+bras
+auspices
+choosers
+approval's
+afflicts
+corrosion
+arpeggio's
+bodyweight
+cranky
+battlefront
+affirmation
+churchyard's
+aeroacoustic
+anders
+adjustment
+baneful
+citation's
+acetone
+blend
+binuclear
+boner
+annotation
+announce
+claimable
+contemporary
+clothing
+acquitting
+choosing
+attacher
+bananas
+binaural
+arrestor's
+aches
+conclude
+collaborators
+await
+blaspheme
+bequeaths
+crows
+balconies
+begging
+conducting
+abstracts
+assignee's
+causations
+approximation
+articulated
+considerably
+apricot's
+afferent
+assertively
+bonding
+calms
+cranberry's
+cost
+captaining
+agenda
+corridors
+complaint
+christens
+aggravate
+countess
+arbitrators
+ascribing
+breech's
+bellwether's
+burglarized
+confinement's
+animating
+adjectives
+cannister's
+bemoan
+cleanest
+acme
+cheapest
+activities
+allophone
+boy
+belaboring
+captions
+compactor's
+actuator's
+befouling
+arachnid's
+computerizes
+compile
+absorption
+bridled
+absorber
+convicts
+birch
+alkaloid's
+cannot
+bacilli
+charitableness
+abated
+ceaseless
+beavers
+bookshelves
+commensurate
+appreciates
+basil
+cartoons
+aides
+buxom
+cages
+cantor's
+acceptances
+antiquated
+amalgamate
+babyhood
+beers
+conforms
+bouquets
+canner's
+baste
+cashed
+argue
+butcher
+backbones
+absolve
+crib's
+cafes
+abstracted
+book
+committees
+authentically
+conference
+antisera
+bourgeoisie
+attribute
+biddy
+autobiographies
+chivalrousness
+coverlet
+ambiguously
+calorie
+anhydrous
+alignments
+around
+archfool
+advance
+bedpost's
+affective
+contained
+amain
+bromides
+clogs
+bricker
+arduous
+consistent
+amidst
+confess
+complain
+anniversaries
+coasting
+cobwebs
+aries
+benchmark
+aviaries
+bombard
+boxers
+ashtray's
+assyriology
+blaze
+ablative
+chaos
+burro
+arguer
+ashamedly
+crier
+allocator's
+aggressively
+carts
+advisory
+airship
+alkali's
+backup
+chaining
+continue
+cartoon
+circumference
+breadwinners
+autonomy
+banking
+armored
+cabin
+chunks
+antigens
+blistered
+airers
+breakaway
+belief's
+belays
+coveting
+auburn
+careful
+anybody
+bumbled
+cautious
+adopter
+ballplayers
+anteater
+citadel's
+avails
+agent's
+caliphs
+bridgehead
+already
+caterpillar's
+coachman
+centralizing
+alphabet
+concede
+barbell
+breadboard
+ballast's
+activators
+attendance
+blandly
+calculator
+codeword
+addressee's
+avenue's
+alcoves
+alternately
+admonishes
+concentrate
+crossbars
+adjoining
+basset
+carbons
+beast
+blonde
+castle
+clarification
+bitch's
+abrasion's
+books
+amputate
+bicycler
+aphonic
+arraigns
+acquiesce
+buster
+chaperon
+advisements
+buyer's
+attack
+birthdays
+blazed
+confuser
+crag
+ballet
+airports
+bison
+counterexamples
+arteriole
+colony's
+adamantly
+blunders
+chivalrously
+adult's
+authors
+amplifiers
+counterfeited
+complicity
+astrophysical
+axolotl
+bash
+battleground
+butterfly's
+axioms
+allegory
+blitzes
+blindfold
+bufferrers
+approximating
+byways
+computations
+alight
+avoiding
+assurance's
+barrages
+canonicalized
+callously
+auditing
+authenticating
+bag's
+asters
+artistic
+bonanzas
+applaud
+certainties
+auto's
+concession's
+cascade
+chubbiness
+churchyard
+afternoons
+antigen's
+baron's
+amphibian
+banister
+capitalize
+approval
+appropriated
+bureaucrat's
+covets
+cloisters
+circulate
+bivalve's
+beta
+collector
+among
+cane
+birdlike
+attenuating
+conjunctions
+appliance's
+coral
+crucify
+abnormal
+combined
+classroom
+buckskin
+commissions
+abolishments
+arching
+croak
+americium
+associates
+car's
+assuringly
+agreer
+anticoagulation
+closure's
+corkers
+attend
+alphabet's
+awakening
+composedly
+attracted
+construed
+cricket's
+applicability
+autonavigator's
+chloroplast's
+ashen
+beggars
+corporation
+another
+conflicts
+bootlegs
+archeologist
+alcove's
+agitates
+cargoes
+creditor
+cops
+advisably
+coronation
+bourgeois
+crochets
+cropper's
+cramp's
+adulterer's
+corroborations
+changing
+combinatorics
+calm
+comprehensible
+blooms
+coolness
+copying
+blacksmiths
+commodore
+compulsions
+clump
+afterward
+crucified
+brooder
+buckets
+accelerating
+accented
+boat
+adventitious
+baseline's
+courier
+calamity's
+atoll's
+brutalizes
+bundled
+chairperson
+cheeses
+continuation
+celebrating
+apologists
+behest
+bumpers
+consonants
+circulation
+betraying
+commuting
+breezily
+circumstance
+coughing
+benefiting
+conquerors
+chemically
+commencement
+adjustors
+angel
+congratulate
+conspired
+causally
+bud's
+conquers
+augmented
+bereaving
+advisor
+articulation
+angler
+admission
+bide
+competitors
+amusement's
+collecting
+adder
+arithmetized
+cheek's
+apostrophe
+blockages
+clockwork
+bubbly
+apricot
+adjudicated
+banter
+amused
+breacher
+bracketed
+aimer
+comprehending
+bunkers
+canton
+arcane
+absent
+capitol
+consequence
+cognitive
+abjuring
+clever
+coronet
+anathema
+artichoke
+controls
+credulous
+acid
+crawled
+coupled
+boomtowns
+aspen
+acted
+anyhow
+burdensome
+backdrop's
+apocalyptic
+cornerstone's
+cautiously
+blisters
+conveniences
+arbor's
+accessories
+alleges
+clubs
+accompaniment
+blazes
+annually
+clique's
+beamers
+ballgown
+autumnal
+acreage
+conjunct
+balances
+consoling
+canvas's
+competent
+aggrieves
+although
+afraid
+clearly
+cognizance
+acoustic
+colleague
+causing
+absences
+closers
+airs
+cinder
+adversaries
+altruistic
+brews
+ceremonially
+appraisal's
+commissioners
+army's
+assists
+acceptor
+comparison
+cooling
+conveniently
+couching
+changes
+clinic's
+confronting
+adjunct's
+blandness
+alternates
+bunter
+consequent
+clean
+autos
+accumulators
+carver
+aprons
+awful
+bobbins
+blasphemy
+assuming
+abscess
+assemble
+cabinet
+atomics
+blacklists
+audacious
+assay
+anthropology
+barnstorm
+awl
+bumping
+assembles
+capture
+compensates
+coverable
+amend
+array
+continually
+absented
+cigarette
+antiresonance
+backspace
+branched
+appellate
+courtroom's
+alienated
+austerity
+cement
+asked
+antelopes
+cottager
+bluebonnets
+booze
+amendment's
+backslashes
+begun
+bijections
+cafe's
+boatload
+collect
+appeals
+belittles
+befit's
+beauty
+arrogated
+academia
+contagion
+blemishes
+coverlet's
+comfortability
+antecedent
+controllably
+congressman
+complicate
+coincide
+arrears
+clumped
+credited
+buffoon's
+catholic
+accompanist
+beauty's
+aster's
+blatantly
+bothering
+bewilder
+canceling
+carbonizer
+accentuation
+backstairs
+anticipations
+bestowed
+civilian
+blooming
+blunts
+airlocks
+argo
+blueprint
+aristocrat
+cakes
+complements
+ale
+camping
+army
+adrift
+bengali
+barely
+blasphemes
+briefcase
+brooches
+ailments
+blazers
+crevice's
+bankrupt
+archiver
+articulator
+alphabets
+bonds
+colliding
+candidate
+cashier's
+bellwethers
+airstrip
+announcers
+calendars
+corrupter
+aqueduct's
+axiom
+bathing
+blusters
+ascribed
+admittedly
+angrily
+analytical
+contraption
+convertibility
+abysmal
+cathedral's
+aversion's
+algol
+articulately
+breveted
+bickers
+chatterer
+adoptive
+bijectively
+cloudiest
+coarseness
+carted
+cocktail's
+capacious
+anion
+buffoons
+bleeding
+bedrock
+adventurer
+compositions
+camouflages
+brittle
+chip's
+aloe
+chorus
+cargo
+critical
+biographer's
+abject
+blasphemousness
+charmer
+betray
+blacking
+awoke
+allele
+bags
+claimant
+clover
+biographies
+confound
+advertises
+crafter
+cripples
+bygone
+concentric
+couldn't
+contentions
+acrid
+costume
+aft
+aesthetic
+bandits
+adducts
+constellations
+coffer's
+created
+commercial
+art's
+cookie's
+ammonia
+adjunct
+articulateness
+congratulated
+crags
+brandishes
+annual
+byword
+affection's
+college's
+aboriginal
+bikini
+buttering
+allotter
+console
+advent
+activates
+beverage
+april
+acceptable
+barrel's
+boys
+attractor
+azimuth
+critics
+ballooner
+aren't
+adulterating
+criticise
+abeyance
+automatically
+collaborative
+capabilities
+crawls
+anomaly's
+climaxed
+animately
+aroma
+belie
+attires
+argumentation
+baseboard
+bluebirds
+cactus
+byproduct
+balancer
+beholder
+conservationist's
+betrayer
+agony
+accusingly
+convict
+coaxes
+breeds
+agitated
+championship
+brevets
+auscultate
+counselling
+cornerstones
+america
+canoes
+aspirator
+compensate
+antiseptic
+bereave
+absinthe
+compose
+collide
+alabamian
+candid
+civilized
+clamps
+authoritarianism
+colonist
+bugging
+bins
+abashing
+battlers
+canning
+berate
+assembler
+amateurish
+boasted
+angriest
+bluffs
+colonize
+balcony
+bleat
+bustard's
+attenuate
+contagiously
+bicep
+babel
+beatniks
+brush
+analogy's
+audiologist
+assessment's
+camera
+arbitrary
+alleyway's
+concession
+constructions
+accompanies
+accretion's
+aroused
+charcoaled
+belated
+bottom
+bloodshot
+bisques
+advocate
+arabs
+cathodes
+adamant
+challenge
+absurdly
+abolitionist
+cleavers
+bludgeons
+bassinet
+clause
+coiling
+cask
+boob
+azalea's
+afghanistan
+carriages
+blade's
+bobby
+asinine
+acclaiming
+absorbed
+blacken
+cheating
+bootleg
+anonymous
+addict
+astonishes
+awry
+adequate
+categorization
+casks
+blaster
+aspirants
+abscesses
+airing
+assumptions
+capitalists
+board
+asynchronism
+body
+aye
+contraction
+athens
+arsine
+cohabitations
+below
+bows
+aviator's
+ampoule
+connective
+adapter
+authenticate
+blackboard
+brilliant
+appoints
+attics
+conquer
+boning
+comestible
+camped
+blonds
+aisle
+coals
+billboards
+characterizers
+crow
+clout
+admirer
+actuarially
+abstruse
+accessing
+bonfires
+clenched
+characteristic
+catching
+chars
+canons
+barrier
+championed
+butterflies
+completely
+calendar
+artwork
+abjections
+burgher's
+correlates
+arrivals
+accepters
+circuses
+breadboards
+accomplishment
+analyzed
+appropriates
+cancel
+bordering
+aperture
+civilizing
+assortments
+blackest
+blitz's
+copy
+commenced
+admirers
+cheers
+croppers
+cliff's
+circumstance's
+bibles
+buttressed
+consecutively
+birefringence
+automaton
+cheerless
+chopping
+ballooned
+convent
+acknowledgers
+appointing
+belies
+comeliness
+bangle's
+communication
+bisector
+avocations
+clique
+brainstem
+campusses
+allocators
+bramble's
+assaults
+commemorate
+appendix
+agent
+apportioning
+bottled
+artifact's
+block's
+archery
+bagatelles
+candies
+catched
+cognitively
+creepers
+concentrated
+bout
+balustrade
+abodes
+carrying
+confirming
+cannibal
+chinners
+carbonate
+anguish
+butt
+colons
+ablated
+corporation's
+cock
+convincers
+beret's
+bluish
+compressive
+authenticates
+commemorative
+bureaucracies
+coinage
+coach
+assigning
+concentrators
+capitalizing
+appraisals
+belaying
+candy
+blossomed
+bricks
+atonal
+analogue
+caters
+barbaric
+applique
+clink
+audio
+actress
+assyrian
+apprehension
+conversation
+apsis
+bedevil
+comics
+affricate
+comings
+buttress
+angering
+buckboards
+bombed
+adversely
+adequacies
+commended
+causeways
+adherers
+codes
+aquaria
+ape
+bulks
+compactly
+brainwashes
+bleats
+commandants
+conditionally
+adjourns
+clobbering
+allowances
+buildings
+complemented
+blanker
+algeria
+brief
+creak
+adductor
+categorizer
+approacher
+argument's
+clocked
+bedazzle
+cause
+coordinator
+buildup
+countenance
+abhorrer
+backtracked
+bogus
+closer
+broilers
+chirps
+adjournment
+belles
+bitingly
+befogged
+contexts
+amorous
+breeding
+abortions
+blockage's
+alternatives
+bouncing
+beryl
+ballistics
+banters
+carpenters
+auction
+bowdlerizing
+brazen
+bonuses
+circulated
+adultery
+archival
+bears
+baptized
+burglaries
+borrowing
+barbarous
+casher
+adolescents
+atrophic
+busily
+aerating
+coatings
+athenians
+casing
+consuming
+alphanumeric
+beaches
+bisection's
+conjecturing
+aspirate
+biography's
+accompany
+bureaucrat
+broomstick's
+colony
+coalesce
+clock
+bequeath
+collaborates
+belonging
+configured
+burlesques
+anode
+consenter
+bug
+counterpoint
+counts
+bangladesh
+analogical
+accident
+bulky
+affinities
+abysmally
+boorish
+assiduously
+cannisters
+autocollimator
+bassinet's
+barrelling
+blurts
+carbonize
+candle
+act
+addressees
+constraints
+boast
+complaining
+coziness
+avocado
+coolest
+blank
+beadles
+anytime
+covetous
+appellant's
+angers
+academies
+ageless
+chased
+constitution
+consonant's
+boosting
+ascetics
+aerosol
+apse
+blushes
+clang
+confers
+confidentiality
+coolie
+colon's
+chickadees
+badminton
+argonaut
+constituting
+aloha
+contracts
+broomstick
+brackets
+attendant's
+connection's
+conciseness
+abstractor's
+composes
+chaste
+assures
+conjuring
+barbital
+bunion
+bases
+clowns
+barrelled
+audience
+auctioneer
+complexly
+aviator
+conjectures
+backscatters
+cheerfulness
+communicating
+agreement
+bricklayers
+bilabial
+abstruseness
+cobol
+cooperating
+admit
+blundering
+accelerates
+assaulted
+concealing
+anachronism
+bowels
+butane
+anniversary's
+converts
+convoyed
+climates
+barriers
+clubbing
+additives
+bask
+confessing
+caravan
+colonizes
+continuous
+cheerlessness
+boggled
+armpit's
+bridgework
+allegro
+cricket
+cannon
+adoption
+clanging
+auscultations
+billowed
+alphabetize
+airlift
+appointee
+boyfriend
+chaotic
+corrections
+bonus
+contrasted
+convulsion's
+confessors
+adumbrating
+autocrat's
+coronary
+authentic
+barley
+brawling
+aegis
+appends
+bolshevism
+charted
+applicant
+aileron
+considers
+chin's
+alkyl
+amendment
+boulevard's
+avian
+breather
+canyons
+cannon's
+apportion
+badgered
+augers
+advisers
+censuses
+beveling
+aught
+arthogram
+anonymity
+appliance
+atmospheric
+anesthetizing
+ambulances
+blustering
+burnt
+chestnut's
+collects
+aliment
+anxieties
+championship's
+channeled
+arrival
+amassing
+corpse
+bedtime
+blackbirds
+cats
+constants
+chemistry
+brewery
+brother's
+boasts
+accentual
+bellwether
+bely
+courted
+baroness
+configure
+collection
+aviary
+achieves
+belfry's
+beech
+baseman
+bacterial
+contestable
+blond
+contracting
+comparably
+consultation's
+booster
+conspiracies
+belief
+candidate's
+boardinghouses
+connectivity
+check
+crazy
+collided
+assistant's
+critic
+bilateral
+cheapening
+appalled
+autopsy
+balled
+abnormally
+acquires
+aloofness
+backwaters
+combative
+computerizing
+craters
+contributorily
+behaved
+comers
+axiomatizations
+analogously
+banjo's
+cleanser
+capitalizes
+chamberlain
+aggregates
+amenorrhea
+begins
+condone
+cleaved
+bustard
+adsorb
+airedale
+bridles
+audited
+could
+amour
+checkbooks
+admiring
+arrested
+commerce
+asbestos
+can's
+clamping
+bathers
+acknowledgments
+census
+acrobat
+bargains
+apogee
+creaking
+busboy's
+additional
+chants
+circumvents
+afloat
+anyplace
+alumnae
+anions
+classroom's
+ballerina's
+convents
+angered
+climbers
+citation
+cools
+clamor
+capaciousness
+beatific
+abrades
+advocating
+coverings
+claims
+brethren
+advertised
+atrophies
+coffer
+beagle's
+brazenly
+bitterly
+clergyman
+braiding
+compressible
+convicting
+agreeableness
+antithesis
+cogently
+botanist's
+bidirectional
+bewilders
+airlock
+costumer
+blamelessness
+agglutinins
+catalyst's
+allocation
+annunciates
+borderings
+accomplishes
+confronters
+clinically
+breadbox's
+canvassed
+communicative
+coercing
+backpointer's
+bramble
+congregations
+crave
+courtesy's
+cocoon's
+admitting
+chieftains
+acclimate
+consequences
+cones
+contradict
+axolotls
+contractual
+artist
+atrociously
+consecutive
+berated
+bluing
+attacks
+choruses
+blatant
+balance
+amplifier
+assist
+analyst's
+ambler
+conveyance
+compromising
+baffler
+corridor
+bed's
+condoned
+boulevard
+anomie
+averages
+basics
+apologia
+cabbages
+concretes
+alcoholic
+aliased
+chocks
+balsam
+collies
+censor
+arouses
+conundrum's
+academically
+bent
+codings
+coastal
+allots
+acclaim
+citations
+cantor
+circularly
+boarder
+caribou
+biologist's
+cowling
+connects
+chasing
+bootstrap
+backscatter
+abstractly
+corrupt
+alleviating
+biasing
+abrade
+arraignment
+beaten
+blanketing
+compactness
+adage
+coincided
+borate
+bra's
+concepts
+bootleger
+christian
+argos
+basal
+abate
+campuses
+abridging
+confusers
+cabin's
+audition's
+amphibians
+attractively
+adhesive's
+ascendency
+beforehand
+ache
+brokers
+bowler
+criminally
+american's
+chock's
+artillerist
+appropriation
+characterization's
+artifices
+annoys
+constituents
+bottle
+beaned
+consisting
+beholding
+ceremony
+carpeted
+absolutely
+anorexia
+accredited
+azaleas
+amaze
+commit
+afflicting
+contriving
+adventure
+blood
+blabbing
+absoluteness
+appreciable
+approachers
+bumptious
+behavioristic
+anticipates
+adults
+barnyard's
+banging
+banana
+bilge's
+aware
+coheres
+bronchi
+commissioned
+arrogation
+confines
+core
+attenuation
+afterwards
+clearing
+applies
+alphabetized
+cemetery's
+campaigning
+abolishes
+brig
+cheer
+combers
+backtracker
+clinker
+clouds
+clog
+berries
+advising
+childish
+clobbered
+bride's
+astrophysics
+canker
+concatenate
+bite
+chagrin
+bodybuilders
+calamity
+admiralty
+councillors
+competitive
+assessments
+copper's
+cabling
+casket
+conducted
+backplane
+boyfriends
+bingo
+broader
+confiscates
+communicated
+baton
+cocktails
+albanians
+boardinghouse's
+brats
+akimbo
+categorizers
+comparator's
+blackbird's
+accidentally
+companion's
+clippings
+accosted
+bell's
+burly
+aggregations
+boathouses
+airmails
+abreactions
+changers
+carbon
+cleaners
+bookkeeping
+correlations
+backer
+conclusions
+brainstem's
+anecdotes
+chateau
+cogitating
+amphibious
+compounded
+completeness
+comptroller's
+boatswain's
+bolstered
+acquiescing
+actors
+calorie's
+adaptability
+abstractor
+bimolecular
+belly's
+automobile
+automotive
+analyticities
+awesome
+colonizer
+approximated
+chemist
+coronet's
+classmate
+anteater's
+altars
+adulthood
+amid
+assails
+blizzards
+corroborative
+biographer
+compartment
+blooded
+bipartisan
+bluff
+aloof
+bronchiole
+clincher
+congratulations
+ablation
+caught
+collier
+chooses
+antidotes
+artery
+clearance
+civility
+basketball
+auscultated
+behaviorally
+crowning
+autobiographical
+cheaply
+brutally
+agonizing
+clerk
+comprising
+baller
+confuses
+acquiesced
+astonishingly
+birthplace
+covered
+chopper
+combinator
+benignly
+bedside
+blasts
+billboard
+appraise
+aboveground
+comforter
+credulousness
+battlefield
+barefoot
+cleverness
+apparatus
+bartering
+bromine
+aerodynamic
+crabs
+chains
+airflow
+allegrettos
+armchairs
+blacklist
+approvals
+bait
+collections
+antecedent's
+airbags
+casted
+content
+conferrer's
+crouching
+coughs
+canal
+amphetamine
+augustly
+bedraggle
+arithmetic
+cataloger
+alluding
+credulity
+coffees
+crueler
+beautifully
+caresses
+correlative
+consul
+criticizing
+couched
+baths
+alchemy
+bargain
+accomplishments
+conveyer
+benevolence
+broil
+chilling
+axed
+attire
+collisions
+categorizes
+cited
+aeration
+accommodating
+coordinations
+boxcar
+cattle
+bullion
+afternoon's
+captures
+afghans
+comets
+component's
+ark
+bounds
+adjusting
+bravely
+capability
+chap
+absolving
+aspirating
+arcs
+conspires
+collaborated
+admonishment
+astounds
+brasses
+compromise
+changed
+consumers
+connoting
+buttonholes
+cordial
+anionic
+chastisers
+archive
+alleviate
+burglarize
+acquainted
+copiers
+cashers
+antisocial
+creations
+bookie's
+censure
+beadle's
+banded
+circled
+bulged
+cheapness
+attorney's
+chewer
+bookshelf's
+councillor
+assertion
+broom's
+contemplations
+club's
+balkans
+cherubs
+alas
+chair
+apologizes
+compartments
+beyond
+aptly
+censured
+allegros
+boosts
+card
+arithmetizes
+attainment's
+arrester
+anding
+asker
+compatibilities
+confidentially
+commissioning
+cleaner
+aversion
+cooperative
+battalion's
+cemented
+charity's
+conceited
+capable
+anymore
+computing
+aping
+chiefly
+affair
+beaners
+allying
+caption's
+antipathy
+causal
+abyss
+botchers
+burglarizing
+confidant's
+activator
+continent's
+census's
+brat's
+antagonism
+bedspring's
+antiserum
+charge
+connector's
+alike
+believable
+belfry
+cast's
+bureaus
+beneficiary
+abolisher
+artichoke's
+broadly
+concurrent
+alteration
+bookies
+crafts
+bays
+ass
+bouquet's
+ave
+chords
+crazes
+anemic
+appoint
+beets
+billing
+contest
+assassination
+allot
+brindled
+acute
+absolves
+adsorbed
+auxiliaries
+belatedly
+businesslike
+assassinates
+bookkeepers
+bevel
+adders
+automate
+archangels
+breakfasted
+changeability
+contested
+cradles
+combatants
+besieging
+certainty
+attempts
+bankrupting
+compiler's
+complications
+banquets
+ancestor's
+ail
+abbreviating
+compacter
+approvers
+acknowledges
+comically
+almonds
+counsellors
+calmness
+assailed
+crane's
+baser
+big
+corruption
+circuitry
+briefness
+community's
+banquetings
+alms
+bass's
+bellowing
+adoption's
+blockading
+compellingly
+builders
+befallen
+bombproof
+cartons
+chore
+crimson
+anther
+clucks
+assemblies
+beatitudes
+aspiration
+compels
+angst
+balancing
+bowstrings
+bayonet's
+butte
+biomedical
+casualness
+accolade
+blackberry's
+bunched
+affright
+clung
+burlesque
+bare
+corrected
+arbitrate
+cropping
+coherently
+bloodhound
+circularity
+courtesies
+articulating
+concluded
+analogy
+brutalized
+airmail
+cooperator
+cousins
+centralization
+bibbing
+beside
+bravo
+abductors
+cars
+bovines
+bump
+absconding
+chins
+chasers
+boundary's
+antecedents
+awed
+counselled
+aback
+attenuator's
+blazer
+bettered
+awaken
+abreast
+beagles
+artisans
+buckled
+credence
+control's
+bewhiskered
+calloused
+breathe
+collaring
+blossoms
+bring
+actualities
+bivalves
+animals
+cowboys
+constituency
+affordable
+acrobatic
+attiring
+boatswain
+concurrence
+abrasions
+babel's
+cowerers
+chiffon
+bostonian
+criterion
+blinds
+cased
+affections
+conditioners
+clutter
+accrued
+attractors
+botcher
+compunction
+bludgeoned
+censored
+allah's
+chronic
+burrs
+commodity's
+appraiser
+asserters
+cheaters
+besting
+anchorite
+combine
+afforded
+cigarette's
+bathrooms
+apostles
+chloroplast
+bootlegging
+bibliographical
+beans
+bylaw
+benefited
+brochure's
+cordially
+brashly
+beastly
+bologna
+alderman's
+burning
+billow
+convert
+buffaloes
+comparatives
+assistances
+camouflaged
+announcement
+bobwhite
+brawl
+adducted
+cavern's
+affectation's
+bandying
+brunette
+architect's
+aphorisms
+cremate
+bray
+billed
+conception
+battlefield's
+bandaged
+broaches
+bazaar's
+beatification
+bigotry
+clergy
+abstains
+befits
+bantering
+conceivable
+attachers
+analogies
+bimonthly
+august
+additionally
+confirmation's
+ballooning
+cardboard
+belle's
+counterparts
+candor
+bishop
+comprehension
+affronted
+bravura
+courting
+antidote
+buggies
+arisings
+appendix's
+bright
+categorize
+cooking
+agnostic's
+billets
+amok
+bewitching
+audiograms
+column's
+bussed
+checkbook
+alteration's
+atherosclerosis
+broached
+based
+cacti
+boardinghouse
+bowdlerized
+anchoritism
+achievement's
+bald
+cover
+codifications
+capacitor
+brashness
+causes
+acyclically
+argument
+boarders
+audiometer
+compute
+contribute
+crisply
+bitters
+circumvent
+assailant
+bosun
+buyers
+alibis
+blurting
+coasts
+bivouacs
+arrogating
+albanian
+attempted
+acquisitiveness
+applauding
+alfalfa
+cantors
+canonicalizes
+alkaloid
+bruising
+associativity
+budgetary
+carbolic
+clashing
+buffalo
+acorn
+analyzing
+backyards
+comedian
+betwixt
+aces
+chartered
+additivity
+becalm
+combat
+characterizations
+clinics
+bulbs
+bloc
+amenable
+civilian's
+breech
+attainment
+bounding
+compiler
+cotyledons
+billboard's
+caper
+aphasia
+chester
+combats
+biddable
+articulates
+caps
+assignees
+bifocals
+beady
+chinese
+assertions
+allegation
+championships
+accrue
+containment's
+croaking
+classifying
+annum
+brightened
+bits
+appointer
+besieger
+citizen's
+cerebral
+canto
+bakers
+capitol's
+authorizer
+blockaded
+anodizes
+alarmed
+buttressing
+attenuates
+bumptiously
+chronological
+colleges
+coward
+contraption's
+abstractions
+controversial
+boric
+bids
+agents
+backpointer
+bumped
+bottoms
+bowlines
+captivated
+article
+cliche's
+chases
+choker
+bremsstrahlung
+consult
+adjudged
+auctioneer's
+covers
+accurateness
+clues
+bugler
+bareness
+cedar
+alleviation
+anesthetically
+backpointers
+arched
+administered
+arrowhead
+continues
+asks
+confessor's
+allure
+backlogs
+childishness
+appointive
+covering
+conscience's
+bellows
+blanked
+considerations
+appalachian
+aerate
+budged
+city's
+accordion
+cliche
+collectors
+comprehensive
+boomed
+chariot
+baffling
+bunkmate's
+bumbles
+contaminating
+corroborating
+applications
+bursting
+cabbage
+befalling
+acquittal
+compromisers
+components
+arpeggio
+brothel's
+credibility
+begrudge
+confirmation
+academy
+appertains
+calibrates
+bureaucrats
+bawl
+costuming
+biography
+adoration
+cloaks
+aggregating
+business
+aphorism's
+carters
+admixture
+coexistence
+anomalously
+adapts
+amide
+affiliation
+capillary
+biscuit
+brainy
+bellhops
+chartings
+cohered
+austria
+champions
+basin's
+cascading
+consultants
+bison's
+admixed
+arithmetically
+clothed
+betterments
+conspirator's
+addition
+adolescence
+bolsheviks
+abominable
+breathless
+cozy
+arouse
+bumble
+about
+apace
+astronaut
+asteroid
+cable
+crab's
+beachhead
+assets
+analyses
+bisection
+coconuts
+alleys
+armament's
+bloodstains
+arpeggios
+apologist
+blithely
+anabaptist's
+beadle
+channelled
+confuse
+annoy
+beautifiers
+cheats
+clenches
+amuse
+bewail
+constitutional
+birth
+appendixes
+amazed
+berry's
+bilingual
+blustery
+amplification
+clogged
+blackmailing
+breakables
+adduct
+bondsmen
+conferred
+codewords
+bequeathal
+abundantly
+banner's
+atrocity
+congested
+closely
+absolution
+concatenations
+anarchic
+crag's
+communicators
+cavities
+comptrollers
+backstage
+bewailing
+charcoal
+conveyances
+collar
+bores
+briefest
+comments
+awning's
+associator's
+antarctica
+correspondingly
+bidden
+ad
+clings
+bit's
+apollo
+bulldogs
+chateau's
+amounting
+cogitates
+bellhop
+bookish
+bout's
+cannister
+bicep's
+asses
+beef
+battlefields
+consort
+auspicious
+breezy
+buried
+beverages
+approximates
+conduction
+bleakly
+blanketers
+ascertained
+absentminded
+bolivia
+births
+behave
+bilk
+breaths
+charter
+abstaining
+appareled
+boulder's
+breadwinner's
+correct
+accessed
+befitted
+adulterer
+axe
+activation
+betrothed
+asymptote
+bullet's
+clusterings
+baud
+bustling
+ballplayer
+constraining
+cleared
+brown
+affirmed
+agencies
+churches
+backyard
+burntness
+bronchioles
+charmers
+backscattered
+abridgment
+claw
+blow
+adjourning
+constantly
+brightens
+autobiography
+cards
+bypassing
+alcibiades
+concurrency
+chuckles
+bests
+belligerents
+adjustments
+bolshevik
+cabins
+astronomically
+cartridge
+boxcars
+boned
+bottomed
+burgeoned
+adjourned
+apprenticeship
+chastiser
+breached
+boycott
+butchered
+coordinating
+cottage
+brainwashing
+confinement
+bandies
+absentee
+collapses
+cruel
+along
+alloy
+convoying
+assignment's
+crisp
+ambidextrously
+blindfolded
+chilly
+condenses
+avers
+broiler
+anesthetics
+beaker
+cholera
+brag
+coffins
+cranked
+allocator
+brutality
+acquire
+blushing
+briar
+abolish
+crossovers
+broiling
+consolers
+beatify
+almanac's
+cooled
+commencements
+clasp
+committing
+condemnations
+altar
+by
+bombastic
+confederates
+bong
+concerted
+compilers
+counterproductive
+brig's
+accurate
+avidity
+cleavage
+blame
+conceive
+assessor
+consolingly
+concise
+computes
+alliance
+clucked
+axon's
+annunciating
+baseball's
+allusion
+brays
+auras
+blond's
+bronchitis
+ciphers
+blowing
+broth
+canonically
+baseness
+byline's
+appetite's
+colonists
+condensed
+cawing
+beaning
+broadening
+colonist's
+apocrypha
+chauffeured
+cored
+branding
+carrier
+assessed
+collegiate
+chirped
+accounted
+clubbed
+antibodies
+behalf
+alphabetizing
+conqueror
+alpine
+budgeters
+casements
+appropriate
+compliments
+cast
+accountancy
+cathedral
+conserve
+accorders
+arbitrarily
+cowing
+bars
+bagel's
+climax
+attention's
+cautioning
+centipede's
+almost
+abstractionist
+carpenter
+containing
+arab's
+courtesy
+carton
+accelerated
+bowman
+boastings
+banal
+bucking
+accomplishment's
+classification
+baldly
+abruptness
+calibrations
+blocs
+biking
+assenter
+adversities
+compartmentalized
+chemical
+attic
+audiogram's
+applauds
+crests
+bad
+bounce
+accelerators
+contemptuous
+attentions
+cancellation
+battles
+aging
+advantages
+anthologies
+answers
+bruised
+castes
+any
+coped
+arcade's
+adaptively
+arsenal's
+confessed
+controllability
+acceptor's
+abrogated
+abutted
+amusingly
+apology
+broils
+court
+boundaries
+bode
+collie
+adiabatic
+ambitions
+charged
+awfulness
+consorts
+botanists
+blurring
+absents
+batten
+backwoods
+breaks
+certified
+chattering
+admitted
+bathrobe's
+analogous
+corporacy
+bijection's
+combatant
+checked
+condition
+amoral
+bayed
+bedroom
+chanting
+antics
+charity
+blip's
+biped
+brilliance
+catchers
+booted
+anabaptist
+clothe
+comforted
+complaints
+coacher
+admissible
+bang
+concisely
+cookery
+capita
+assurance
+codifying
+benchmarks
+aunts
+commentaries
+anon
+applicators
+constructor
+associated
+abuses
+choicest
+confiding
+antislavery
+apron
+ashore
+cheerfully
+betterment
+administration's
+campaign
+cremated
+ambulatory
+bleacher
+afterthought
+barkers
+choir
+crossly
+conducive
+cache's
+battery
+actinium
+countryman
+cajoled
+appeasing
+beamer
+cleaves
+anthem's
+clearing's
+cooperated
+barker
+crowing
+apprising
+accusation's
+beginning
+associator
+booking
+caved
+amicable
+codify
+clairvoyant
+bevels
+becalms
+brawn
+bunkhouse's
+arms
+antiredeposition
+belt
+antiphonal
+cried
+brae's
+bridal
+acronym
+clay's
+checkers
+auxiliary
+bind
+compares
+agilely
+askers
+blankly
+antagonist's
+bimodal
+captivation
+creditable
+concentration
+calling
+bartender's
+autopsied
+correspondent's
+carnivals
+abjure
+bystander's
+bungle
+chanticleers
+conceding
+burghers
+boards
+accessions
+compensations
+arabian
+churn
+crowed
+centering
+abnormalities
+courtier's
+congregation
+aberrant
+annexing
+blockhouse
+anthropomorphic
+bedder's
+abutting
+conundrums
+affiliated
+cancellation's
+bolts
+ballgowns
+augmenting
+bureaucracy's
+bootlegged
+audiometers
+blueberry
+affliction
+appreciation
+codifier
+amasses
+countering
+crackle
+canoe
+consuls
+breathes
+broiled
+amalgam's
+bodes
+ballooners
+coating
+corollaries
+amphibology
+agenda's
+chafing
+alcoholics
+accredit
+anisotropy
+anchovies
+carriers
+acceptors
+betrayed
+buttocks
+busy
+bunny
+cropper
+accreditations
+bumblebee's
+adhesives
+civilize
+accedes
+abroad
+arch
+crept
+cotyledon
+alphabetic
+braille
+amateur
+adjure
+ascertaining
+budge
+adulterate
+additive's
+cardiac
+born
+brewed
+borneo
+bun's
+blue
+cackled
+acclimates
+airline
+blinder
+brokerage
+communicant
+central
+aggrieved
+asynchronous
+bough's
+acidly
+archaeology
+complementary
+animator's
+bodyguards
+climbs
+apathy
+constellation's
+acculturate
+archaeologists
+contingents
+control
+anglophilia
+billings
+corporate
+athlete
+accusing
+appear
+announcing
+accordions
+computerize
+combinations
+bile
+abut
+charger
+columnize
+computer
+blacks
+converges
+blamer
+bulked
+convincingly
+checker
+correspondence's
+accelerate
+accessible
+conceivably
+abscissa's
+adsorbs
+anglophobia
+anomic
+casters
+churning
+crease
+brood
+appendage
+bulwark
+bombers
+arcaded
+breadboard's
+aphrodite
+color
+commodore's
+answerer
+bobolink
+cloth
+conversion
+clime
+artery's
+birthplaces
+compiled
+arrack
+beetles
+bobs
+compatibility
+cocoon
+counterpart
+audible
+colonies
+airport's
+beige
+cogent
+bromide
+begrudging
+acids
+crucifies
+beggary
+archipelagoes
+availably
+counterfeiter
+blanketed
+amending
+accelerometer's
+advisors
+byway
+alignment
+amber
+austin
+copyrights
+beaus
+brigantine
+comforts
+appointment's
+crawler
+bangles
+contemplation
+concur
+characterizing
+censoring
+charters
+catalogues
+appropriately
+builds
+aeronautic
+confused
+comber
+axially
+cackler
+coercive
+ambassador
+arcades
+brash
+amorality
+belittling
+battling
+bloodied
+acrylic
+bantered
+clasped
+carcass
+archangel
+annunciators
+aristotle
+boulder
+burglarproofs
+chooser
+abilities
+calmest
+bach
+always
+blaspheming
+crossover
+bakeries
+clocks
+ankle's
+accidental
+arbitration
+chirp
+aeronautical
+boy's
+acidic
+bowline
+anonymously
+cod
+couplers
+beautifications
+bluffing
+backarrows
+brow
+covenant
+acronym's
+banning
+albeit
+ascetic
+burn
+animator
+beatnik's
+coveted
+cipher's
+broke
+cap
+bellman
+bulldozed
+clarifies
+bathes
+blip
+availabilities
+booth
+clangs
+audiences
+cathedrals
+confounding
+bigot's
+beecher
+arts
+company
+attributed
+avenged
+bawling
+caustics
+alee
+bordello's
+banks
+affords
+complied
+commas
+collaborate
+aquatic
+ambitiously
+burro's
+beard
+bittersweet
+candlestick
+bylaws
+broadcastings
+believe
+barrels
+braying
+certifications
+contrasts
+crashes
+audition
+confine
+bucks
+abates
+bureaucracy
+ambles
+besiege
+broccoli
+antibiotics
+attenuators
+accelerometer
+caste
+bib's
+browbeaten
+appurtenance
+bauxite
+asceticism
+case
+chewing
+aerator
+achievements
+barricade's
+agglutinates
+bewildering
+cartridge's
+children
+bufferrer
+actuator
+converging
+bolted
+chat
+combs
+chemist's
+adduced
+algebraic
+circular
+bloated
+conclusion
+burgess
+certifies
+absconds
+comprise
+benzedrine
+bumbler
+banjo
+allow
+appealing
+cooperation
+abraded
+chaperoned
+biracial
+braced
+censurer
+acoustician
+appraised
+benefitting
+constructs
+convertible
+administrative
+asocial
+area
+creature
+besetting
+crater
+begrudgingly
+blanket
+ablest
+alba
+airplanes
+allowing
+briefly
+beneficences
+concurring
+adjective's
+cork
+aerospace
+anomalies
+asher
+auger's
+boilers
+abhorring
+broadenings
+bladder
+belay
+approver
+abdominal
+commends
+cringing
+billiards
+beater
+auspice
+contrasters
+bights
+absentees
+atoll
+cooler
+activator's
+basement
+burgeon
+allusiveness
+codeword's
+bandage
+contemplate
+adopted
+coping
+carving
+baptism
+colds
+altos
+background
+closet
+commuted
+acre's
+aliens
+council
+cans
+cheese
+ally
+aseptic
+belgian's
+crossbar
+addressed
+commons
+call
+careers
+breakfasting
+brazilian
+catholics
+bachelors
+consultant
+brighter
+crossword's
+burglar
+avoidable
+batting
+cigar
+amps
+axiological
+combed
+comforters
+albumin
+cookies
+booming
+archaize
+canton's
+bunkmate
+combination
+bondsman
+anxiously
+affixed
+associatively
+cigar's
+backstitch
+calls
+captivates
+commodities
+atmosphere's
+asserting
+beaver
+beatnik
+container
+activists
+consoler
+commoner
+buttonhole's
+abhorred
+aggregate
+cliff
+antidisestablishmentarianism
+broach
+ambling
+comer
+bited
+advocated
+behaves
+bosom
+continents
+conserves
+bashful
+ago
+backarrow
+circumventable
+avocados
+briar's
+annuls
+barnstorming
+aired
+carry
+crossbar's
+aspire
+beards
+abides
+cliques
+completes
+brassiere
+absorbs
+annul
+chairman
+baron
+battens
+africans
+abatement
+colonization
+carries
+borough
+allurement
+breakfasters
+alkali
+acoustically
+corners
+capturer
+casualties
+asphyxia
+animized
+administrator
+belying
+basketballs
+bylines
+bandit
+autopsies
+braining
+contradiction's
+antic
+butted
+bacillus
+blurt
+conditioned
+backers
+agreeable
+almanacs
+cider
+chicken
+chambers
+clutch
+assailant's
+conveyers
+amazers
+beribboned
+breeder
+caveat's
+buffers
+combination's
+ampersand's
+crafting
+clanged
+caving
+aspirant
+butlers
+adjective
+auckland
+announced
+creators
+caches
+baseline
+codifies
+baptism's
+coarsened
+cohesion
+airman
+avenge
+backaches
+budgeted
+armpit
+bicycled
+converged
+besmirched
+autonomic
+coming
+assemblage's
+chained
+admissions
+alcoholic's
+branches
+bunk
+anciently
+bloods
+adventurers
+amazes
+coloring
+abstractors
+adaptation's
+boar
+amulet
+agglutination
+conquerable
+booker
+confronts
+barometer's
+bedbugs
+barricades
+cheap
+bewitch
+circus
+backward
+archeology
+automobiles
+bending
+amino
+beckoning
+admits
+berliners
+borer
+clambering
+atomizing
+banner
+blissfully
+catchable
+breakdown
+abjured
+computerized
+chaplain's
+amphitheater
+ballot's
+craziness
+croaks
+counties
+adopting
+breast
+airstrip's
+basin
+contemplating
+commitments
+critique
+appears
+bellies
+baccalaureate
+abducted
+blackened
+animosity
+appraising
+antiquity
+assistants
+asthma
+bootstrapping
+bounties
+agleam
+advertisements
+benches
+artful
+broadens
+chuck's
+betrayal
+blasphemed
+brooms
+castled
+coroutine
+conscious
+beetle
+banshee
+advertising
+baring
+awakens
+balm
+billions
+compromisingly
+ballroom's
+burrower
+bayou's
+ambiance
+beheading
+bought
+adagios
+adornment's
+anointed
+abolishment's
+anesthetizes
+badly
+boyishness
+consultant's
+cheek
+cannibals
+breakdowns
+assured
+agates
+bicker
+appliances
+cafe
+bagpipes
+adrenal
+combinatorially
+belligerence
+bricked
+adjacency
+aimless
+crook
+cherry's
+assessing
+brushfire
+cormorant
+captained
+blundered
+conceptually
+congress's
+contraster
+ambushes
+bronze
+autotransformer
+corded
+brisker
+contently
+announcements
+bullet
+apportionments
+columnized
+canon
+conservation
+algaecide
+blackening
+compassion
+beaks
+constructibility
+chapter
+abscond
+costly
+bacon
+coldest
+aptness
+billionth
+altercation
+approbation
+alternator's
+criticizes
+befell
+canopy
+buoyant
+brazil
+anticipate
+absenteeism
+champion
+aesthetics
+cadence
+betroth
+confidants
+bean
+braid
+aphids
+cluttering
+cantankerously
+bloom
+barbarity
+clawing
+bogged
+agreed
+asia
+abrasion
+corporals
+baselines
+box
+chartering
+apotheosis
+ampersands
+conceit
+creamer
+adhered
+circuit
+carpet
+accompaniments
+boomerangs
+blindness
+chipmunks
+bewitched
+allocate
+bicycle
+compacted
+cab
+calcium
+cellists
+apex
+borrows
+completed
+brightly
+constables
+ascertains
+conspiracy's
+badgers
+bunion's
+anabaptists
+broadband
+clefts
+accepted
+benched
+catalogued
+cadenced
+alliteration
+acquiesces
+boxcar's
+athlete's
+bracing
+cremations
+analysis
+crossings
+assorts
+apologize
+brazier
+configurable
+basking
+craves
+belle
+conversation's
+belligerent
+anesthetize
+brewers
+cackles
+adventures
+airlock's
+booklet's
+apply
+anecdotal
+bewails
+computer's
+autographs
+acclimated
+coefficients
+avidly
+beckoned
+broadener
+bulk
+blacklisting
+belly
+acquit
+convoy
+achiever
+aversions
+advisor's
+captor's
+camel's
+asset's
+advantageous
+basement's
+confident
+crescents
+compiling
+butler's
+cartoon's
+adaptive
+chlorine
+abets
+cruelly
+amiable
+baleful
+ceiling's
+adumbrated
+cherry
+aspirant's
+cashing
+candidly
+chaff
+bitter
+brim
+alcove
+bulb's
+carbonizers
+citizen
+attic's
+breed
+consumer
+conferrers
+accommodations
+contrapositive
+beget
+brilliantly
+attentionality
+continuation's
+bosses
+brave
+configurations
+benediction's
+conferring
+accessor's
+bobolinks
+bulled
+cleanness
+algorithm
+advancements
+altogether
+accumulations
+albacore
+bowing
+belching
+apical
+consequentiality
+bagpipe's
+ambrosial
+bullying
+cleans
+attendance's
+complimenter
+blink
+cager
+assembling
+coat
+allowable
+astringent
+antiresonator
+cardinal
+clicks
+commentator's
+blossom
+categorizing
+amphibian's
+commonality
+consonant
+classics
+affable
+accorded
+aimlessly
+archetype
+administerings
+boldness
+anatomy
+apprehensively
+absence's
+actuality
+attempting
+categorical
+checkpoints
+allemande
+corer
+behoove
+bleaches
+bough
+blended
+blotting
+baptists
+courtship
+benevolent
+bumptiousness
+chum
+anguished
+auto
+career
+bookstore's
+carbonized
+autocratically
+cherishes
+attendees
+contends
+anastomotic
+attributing
+abbot
+came
+blunt
+battlement's
+affection
+coordination
+annotate
+besets
+bucked
+boasting
+benedictions
+adherent
+blimp's
+acknowledging
+cleverly
+applejack
+annexation
+bat's
+cantons
+beetled
+closed
+country
+creatively
+bakery
+blasphemously
+chalking
+bold
+attended
+crasher
+backtrackers
+artist's
+bracelet's
+allowably
+affiliating
+arrant
+brayed
+barbells
+consigned
+abolishers
+climatic
+atrophying
+amigo
+arsenal
+ascribes
+converses
+aura's
+allotted
+bliss
+classical
+bigger
+ahead
+chopped
+blade
+casualty
+acceded
+bottling
+axon
+casement's
+battlefront's
+convinces
+alerting
+advertisers
+blemish
+agglutinating
+commonplaces
+autocorrelation
+armistice
+crediting
+besmirch
+amplify
+auscultation
+befalls
+called
+alnico
+arbiter's
+abort
+argonauts
+cessations
+cribs
+blare
+aforementioned
+condemners
+contaminated
+complained
+bootstrapped
+criticism
+cooperatively
+binding
+bullies
+basins
+contrived
+assort
+adulterously
+booms
+abandons
+also
+appealed
+count
+contributed
+beet
+crashers
+carryovers
+clays
+blackness
+cosmetics
+awkward
+blurted
+bothers
+analyzer
+backups
+alarming
+bicyclers
+credit
+abrogate
+audience's
+architecturally
+alibi's
+complicator's
+chuckle
+corporately
+banishment
+communist's
+birdie
+asymptotic
+break
+braze
+benzene
+bridgework's
+beak
+agitators
+collateral
+arranges
+bayonet
+breathlessly
+counsellor
+creates
+convulsions
+backdrops
+applicants
+altercation's
+commission
+breathtakingly
+corresponds
+backdrop
+armaments
+build
+biannual
+buttoning
+computational
+chaired
+bather
+critically
+amanuensis
+bantus
+confidential
+annoyance's
+carder
+authorizing
+acquits
+bipeds
+cocktail
+cinnamon
+burros
+brocade
+abdomen's
+creative
+acquisition's
+abdomen
+baited
+aristocratically
+alive
+committed
+arrestor
+cleaving
+comedy's
+baggage
+bra
+adaptors
+afoot
+bulls
+contoured
+amalgam
+comprehensibility
+amortizes
+biographical
+confront
+covert
+cravat
+animates
+booksellers
+bypass
+bootleggers
+bedfast
+affair's
+buzzer
+bellowed
+aligning
+bystander
+acclimatized
+accomplishing
+against
+blankness
+adopt
+addressing
+croaked
+boaters
+behooves
+audits
+boatyard
+cruise
+agnostics
+ailing
+anchorage's
+adaptations
+conceptualize
+advised
+cries
+bank
+actuators
+brazing
+catalyst
+beachheads
+aplomb
+compressed
+amputated
+contractor's
+bedspreads
+bowed
+coon
+chaplain
+cannons
+coffers
+assembly
+bouffant
+converters
+ampoule's
+borderland
+archaeologist
+blankets
+conserving
+avalanche
+assortment's
+aspic
+axle
+bereaves
+allowance
+carbonization
+bartender
+clawed
+coincidental
+appeared
+chipmunk's
+countable
+authenticators
+bestow
+alps
+caw
+aniseikonic
+avows
+blackmails
+controlling
+correlating
+audiologist's
+bit
+approving
+collapse
+coon's
+cleave
+atheists
+brigade
+autopilots
+bounteous
+commercialness
+accede
+cavalierness
+accustoming
+burnishing
+clobber
+aspirates
+brochures
+cellar's
+communes
+berkelium
+chickadee
+cobweb
+circumstances
+chose
+comprehend
+baritone's
+aggravation
+adopts
+cruelty
+and
+axer
+cautioned
+carbonic
+babbles
+bet
+charitable
+computable
+cardinality
+amenities
+confiscating
+catcher
+audaciousness
+complaint's
+cooperator's
+buddies
+baking
+constant
+classmate's
+accentuate
+choices
+crop's
+authorization's
+comedy
+brushy
+brotherly
+canals
+ads
+causeway
+abrading
+cemetery
+autocrat
+briefing
+abdomens
+apparition's
+consummately
+alkaloids
+bulkheads
+cravats
+bales
+campaigners
+bagpipe
+accentuates
+arm
+barometric
+bas
+agitator
+behavior
+abutters
+blockades
+alertness
+civilizes
+chinner
+anthropologist
+artificialness
+balkanize
+automates
+cackling
+anarchists
+amounted
+cereal's
+anodized
+cobblers
+acknowledgment's
+blear
+copper
+alphabetics
+blackboards
+apish
+answering
+afternoon
+arbors
+accused
+chickens
+agency's
+contractors
+contraptions
+cosmology
+anomaly
+bandstand
+attempter
+account
+challengers
+admiration
+calculations
+autocracy
+analyticity
+accord
+buildup's
+commonly
+babbling
+adjudication's
+attain
+ameliorating
+candlestick's
+chronicles
+align
+consensus
+agate
+adulation
+aspirated
+conclusive
+biologists
+cracks
+conform
+chambered
+beryllium
+connote
+amusing
+aquifer
+ankle
+batteries
+conservationists
+accountants
+apiaries
+actinometer
+beckon
+clearances
+clouded
+antitoxin's
+consolation's
+collectives
+boxtops
+bombarded
+bombarding
+bluest
+allusion's
+construction
+ballpark's
+codified
+coincidence
+celebration
+chip
+beginner's
+algerian
+boo
+athletics
+condenser
+bytes
+beauties
+concerts
+conductors
+awl's
+agitations
+buttered
+codifier's
+armory
+ascii
+aspirin
+arthritis
+bylaw's
+conformity
+blasting
+coinciding
+aphid's
+ceremonial
+banisters
+bristle
+bid's
+buckboard's
+bandied
+biopsy
+ballrooms
+chloroplasts
+bidding
+boil
+algebra
+constellation
+chuck
+cringes
+cleanliness
+apron's
+cosmopolitan
+bashes
+abusive
+believer
+conductor
+butters
+breweries
+allotment
+artfulness
+bunkmates
+blares
+connections
+anticipated
+classifies
+commandments
+beginnings
+bend
+brambles
+blacked
+basketball's
+affectionate
+cocoa
+anacondas
+busing
+bone
+birchen
+creamed
+aged
+commemorates
+brother
+aberration
+crawl
+actuarial
+apology's
+alumnus
+adversary's
+anaphoric
+aspiring
+consciousness
+cokes
+assignee
+boxing
+blanched
+camels
+contemporaries
+carnivorous
+assigned
+apologetically
+corpus
+accusations
+beefing
+champaign
+claps
+adherence
+aloft
+complication
+citizenship
+becomes
+compound
+arabesque
+bronchiole's
+appraises
+breach
+collection's
+botched
+bitches
+biblically
+bronchial
+amalgamating
+commoner's
+barbarian's
+arrange
+cradle
+conformed
+complimentary
+anodes
+cowering
+anoint
+brocaded
+bedazzling
+avionics
+burnishes
+bulkhead
+chink
+consciously
+contract
+clinch
+applicant's
+awning
+aloud
+chandelier's
+cathode's
+babble
+arachnid
+biplane
+clamorous
+assuredly
+consented
+axing
+avenger
+commence
+braving
+brandishing
+careless
+burningly
+boatsman
+channelling
+clarifying
+beggar
+berates
+cite
+cowered
+buffer
+condescending
+admixes
+bettering
+bedazzlement
+cord
+burglary's
+characteristics
+aptitudes
+adieu
+agree
+bends
+ceremonies
+accustom
+accessibly
+commanders
+ask
+cavalier
+brayer
+affront
+courser
+becoming
+carves
+configures
+beasts
+biters
+conditionals
+bodybuilding
+accretions
+chapter's
+cleverer
+corning
+brat
+classes
+almsman
+consumptive
+antique
+comprised
+beholders
+anthropologically
+buns
+bridge
+accretion
+acceptance's
+confederacy
+armorer
+argumentative
+crossword
+cowslip's
+analog
+counselor
+chastised
+barters
+clerked
+americas
+cloud
+aide
+alternators
+admitters
+bagatelle
+bridges
+civilizations
+anion's
+briton's
+apartment
+acquaints
+consummation
+chord
+coated
+barer
+carnivorously
+cheering
+allergy
+capacity
+classrooms
+assistantships
+complimented
+amphibiously
+commandment's
+audiogram
+corked
+badness
+bewildered
+assemblage
+backplane's
+asterisk's
+blob
+coexisting
+approximations
+counteractive
+barns
+adherer
+aborigine's
+brooding
+conceived
+adjustor
+cabled
+belongings
+breadwinner
+blot's
+brightness
+consigning
+barflies
+bisector's
+basing
+complement
+conditioner
+brazes
+crank
+antinomian
+crowd
+accelerometers
+befitting
+backlash
+bastions
+acceleration
+briefcases
+correlated
+baffle
+chew
+accosts
+agreeably
+bassinets
+cogitate
+concerning
+contouring
+broadside
+compact
+brainstems
+atom's
+bondage
+biter
+archdioceses
+basis
+bellboy
+blobs
+barons
+clods
+campaigned
+assessors
+bubbles
+annal
+casual
+altercations
+clog's
+biased
+arianism
+ancillary
+collaborator
+butter
+bureau
+blending
+antiquities
+brands
+activism
+crews
+beats
+broad
+buds
+baggers
+cobbler's
+condemns
+cabinets
+bomber
+blinders
+center
+contacted
+bewilderingly
+circulates
+burnings
+achieved
+belch
+barbecue
+angles
+comparative
+befuddle
+cherished
+chapters
+chanter
+allegation's
+armstrong
+converter
+combinatoric
+angrier
+brooks
+clinked
+blubber
+appointments
+compactor
+cleaned
+car
+contention's
+artificial
+cramp
+consistency
+aborting
+collaboration
+awarders
+crippled
+anaphora
+creamy
+buoyed
+baptistery
+altered
+anchoring
+alterer
+adjuring
+beacon's
+commencement's
+ascension
+candidness
+clouding
+cigars
+boiled
+christmas
+contingency's
+alum
+apparel
+contributors
+anisotropic
+annotations
+bushwhacks
+brides
+continuities
+carton's
+blurred
+antibody
+aorta
+blankest
+combinator's
+banish
+breaches
+accumulates
+bowling
+braver
+antibacterial
+cooperators
+banked
+compensated
+chartable
+conjunctively
+antelope's
+bluefish
+annoying
+composed
+barges
+biconcave
+australia
+ballparks
+bearers
+acknowledged
+advocates
+crossed
+competitor
+blaming
+andorra
+baritone
+collaborator's
+accessibility
+complains
+commentator
+bibliography
+conference's
+atmosphere
+agrees
+bedstead's
+ardor
+character's
+conventionally
+arena's
+chokes
+channel
+bludgeon
+convoys
+condense
+beautifier
+ailerons
+compacts
+black
+bell
+completions
+ballroom
+besotting
+conservatives
+adventured
+bulldog's
+conversely
+arroyos
+compositional
+alternative
+association
+broods
+beefy
+consolidated
+balms
+acquaint
+animal
+certificate
+combustion
+aims
+cracker
+abetted
+cautionings
+bread
+attains
+agriculturally
+courtyards
+bawls
+country's
+creator's
+checkbook's
+cliches
+colonizing
+biennial
+aqueous
+craftsman
+contrivances
+algorithmic
+crate
+barefooted
+bodily
+anthropologist's
+but
+climate's
+campers
+crackled
+awakes
+conveyed
+borrowers
+approached
+avoids
+crib
+albania
+bathrobe
+admonitions
+architectures
+consenting
+anastomosis
+blob's
+actual
+arrowhead's
+accountable
+allegiances
+commendation
+appearers
+comply
+concurs
+controversy
+abstracting
+artifact
diff --git a/storage/bdb/test/wrap.tcl b/storage/bdb/test/wrap.tcl
new file mode 100644
index 00000000000..aaceb4f74e6
--- /dev/null
+++ b/storage/bdb/test/wrap.tcl
@@ -0,0 +1,71 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: wrap.tcl,v 11.6 2002/04/25 13:35:02 bostic Exp $
+#
+# Sentinel file wrapper for multi-process tests. This is designed to avoid a
+# set of nasty bugs, primarily on Windows, where pid reuse causes watch_procs
+# to sit around waiting for some random process that's not DB's and is not
+# exiting.
+
+source ./include.tcl
+source $test_path/testutils.tcl
+
+# Arguments:
+if { $argc < 3 } {
+ puts "FAIL: wrap.tcl: Usage: wrap.tcl script log scriptargs"
+ exit
+}
+
+set script [lindex $argv 0]
+set logfile [lindex $argv 1]
+set args [lrange $argv 2 end]
+
+# Create a sentinel file to mark our creation and signal that watch_procs
+# should look for us.
+set parentpid [pid]
+set parentsentinel $testdir/begin.$parentpid
+set f [open $parentsentinel w]
+close $f
+
+# Create a Tcl subprocess that will actually run the test.
+set t [open "|$tclsh_path >& $logfile" w]
+
+# Create a sentinel for the subprocess.
+set childpid [pid $t]
+puts "Script watcher process $parentpid launching $script process $childpid."
+set childsentinel $testdir/begin.$childpid
+set f [open $childsentinel w]
+close $f
+
+puts $t "source $test_path/test.tcl"
+puts $t "set script $script"
+
+# Set up argv for the subprocess, since the args aren't passed in as true
+# arguments thanks to the pipe structure.
+puts $t "set argc [llength $args]"
+puts $t "set argv [list $args]"
+
+puts $t {set ret [catch { source $test_path/$script } result]}
+puts $t {if { [string length $result] > 0 } { puts $result }}
+puts $t {error_check_good "$test_path/$script run: pid [pid]" $ret 0}
+
+# Close the pipe. This will flush the above commands and actually run the
+# test, and will also return an error a la exec if anything bad happens
+# to the subprocess. The magic here is that closing a pipe blocks
+# and waits for the exit of processes in the pipeline, at least according
+# to Ousterhout (p. 115).
+
+set ret [catch {close $t} res]
+
+# Write ending sentinel files--we're done.
+set f [open $testdir/end.$childpid w]
+close $f
+set f [open $testdir/end.$parentpid w]
+close $f
+
+error_check_good "Pipe close ($childpid: $script $argv: logfile $logfile)"\
+ $ret 0
+exit $ret
diff --git a/storage/bdb/txn/txn.c b/storage/bdb/txn/txn.c
new file mode 100644
index 00000000000..78c54791d06
--- /dev/null
+++ b/storage/bdb/txn/txn.c
@@ -0,0 +1,1428 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn.c,v 11.179 2002/08/29 17:41:17 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <stdlib.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+/*
+ * SET_LOG_FLAGS --
+ * Compute the flags for a commit/abort log write: always
+ * DB_COMMIT | DB_PERMANENT, plus DB_FLUSH when the handle requests
+ * synchronous commit, or DB_WRNOSYNC/DB_FLUSH based on the environment's
+ * durability settings when neither TXN_SYNC nor TXN_NOSYNC is set.
+ */
+#define SET_LOG_FLAGS(dbenv, txnp, lflags) \
+ do { \
+ lflags = DB_COMMIT | DB_PERMANENT; \
+ if (F_ISSET(txnp, TXN_SYNC)) \
+ lflags |= DB_FLUSH; \
+ else if (!F_ISSET(txnp, TXN_NOSYNC) && \
+ !F_ISSET(dbenv, DB_ENV_TXN_NOSYNC)) { \
+ if (F_ISSET(dbenv, DB_ENV_TXN_WRITE_NOSYNC)) \
+ lflags |= DB_WRNOSYNC; \
+ else \
+ lflags |= DB_FLUSH; \
+ } \
+ } while (0)
+
+/*
+ * __txn_isvalid enumerated types. We cannot simply use the transaction
+ * statuses, because different statuses need to be handled differently
+ * depending on the caller.
+ */
+typedef enum {
+ TXN_OP_ABORT,
+ TXN_OP_COMMIT,
+ TXN_OP_DISCARD,
+ TXN_OP_PREPARE
+} txnop_t;
+
+/* Internal helpers shared by the public DB_TXN methods below. */
+static int __txn_begin_int __P((DB_TXN *, int));
+static int __txn_end __P((DB_TXN *, int));
+static int __txn_isvalid __P((const DB_TXN *, TXN_DETAIL **, txnop_t));
+static int __txn_set_timeout __P(( DB_TXN *, db_timeout_t, u_int32_t));
+static int __txn_undo __P((DB_TXN *));
+
+#ifndef db_create
+/*
+ * txn_abort --
+ * txn_begin --
+ * txn_commit --
+ *
+ * When we switched to methods in 4.0, we guessed txn_{abort,begin,commit}
+ * were the interfaces applications would likely use and not be willing to
+ * change, due to the sheer volume of the calls. Provide wrappers -- we
+ * could do txn_abort and txn_commit using macros, but not txn_begin, as
+ * the name of the field is txn_begin, we didn't want to modify it.
+ *
+ * The issue with txn_begin hits us in another way. If configured with the
+ * --with-uniquename option, we use #defines to re-define DB's interfaces
+ * to unique names. We can't do that for these functions because txn_begin
+ * is also a field name in the DB_ENV structure, and the #defines we use go
+ * at the end of the db.h file -- we get control too late to #define a field
+ * name. So, modify the script that generates the unique names #defines to
+ * not generate them for these three functions, and don't include the three
+ * functions in libraries built with that configuration option.
+ *
+ * EXTERN: int txn_abort __P((DB_TXN *));
+ * EXTERN: int txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ * EXTERN: int txn_commit __P((DB_TXN *, u_int32_t));
+ */
+
+/* Backward-compatible wrapper: forwards to the DB_TXN->abort method. */
+int
+txn_abort(txnp)
+ DB_TXN *txnp;
+{
+ return (txnp->abort(txnp));
+}
+
+/* Backward-compatible wrapper: forwards to the DB_ENV->txn_begin method. */
+int
+txn_begin(dbenv, parent, txnpp, flags)
+ DB_ENV *dbenv;
+ DB_TXN *parent, **txnpp;
+ u_int32_t flags;
+{
+ return (dbenv->txn_begin(dbenv, parent, txnpp, flags));
+}
+
+/* Backward-compatible wrapper: forwards to the DB_TXN->commit method. */
+int
+txn_commit(txnp, flags)
+ DB_TXN *txnp;
+ u_int32_t flags;
+{
+ return (txnp->commit(txnp, flags));
+}
+#endif /* !db_create */
+
+/*
+ * __txn_begin --
+ * This is a wrapper to the actual begin process. Normal transaction
+ * begin allocates a DB_TXN structure for the caller, while XA transaction
+ * begin does not. Other than that, both call into common __txn_begin_int
+ * code.
+ *
+ * Internally, we use TXN_DETAIL structures, but the DB_TXN structure
+ * provides access to the transaction ID and the offset in the transaction
+ * region of the TXN_DETAIL structure.
+ *
+ * PUBLIC: int __txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ */
+int
+__txn_begin(dbenv, parent, txnpp, flags)
+ DB_ENV *dbenv;
+ DB_TXN *parent, **txnpp;
+ u_int32_t flags;
+{
+ DB_LOCKREGION *region;
+ DB_TXN *txn;
+ int ret;
+
+ *txnpp = NULL;
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_begin", DB_INIT_TXN);
+
+ /* Reject unknown flags and the NOSYNC/SYNC combination. */
+ if ((ret = __db_fchk(dbenv,
+ "txn_begin", flags,
+ DB_DIRTY_READ | DB_TXN_NOWAIT |
+ DB_TXN_NOSYNC | DB_TXN_SYNC)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv,
+ "txn_begin", flags, DB_TXN_NOSYNC, DB_TXN_SYNC)) != 0)
+ return (ret);
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), &txn)) != 0)
+ return (ret);
+
+ /* Map the caller's flags onto the handle's internal TXN_* flags. */
+ txn->mgrp = dbenv->tx_handle;
+ txn->parent = parent;
+ TAILQ_INIT(&txn->kids);
+ TAILQ_INIT(&txn->events);
+ txn->flags = TXN_MALLOC;
+ if (LF_ISSET(DB_DIRTY_READ))
+ F_SET(txn, TXN_DIRTY_READ);
+ if (LF_ISSET(DB_TXN_NOSYNC))
+ F_SET(txn, TXN_NOSYNC);
+ if (LF_ISSET(DB_TXN_SYNC))
+ F_SET(txn, TXN_SYNC);
+ if (LF_ISSET(DB_TXN_NOWAIT))
+ F_SET(txn, TXN_NOWAIT);
+
+ if ((ret = __txn_begin_int(txn, 0)) != 0)
+ goto err;
+
+ if (parent != NULL)
+ TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
+
+ if (LOCKING_ON(dbenv)) {
+ region = ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary;
+ if (parent != NULL) {
+ ret = __lock_inherit_timeout(dbenv,
+ parent->txnid, txn->txnid);
+ /* No parent locker set yet. */
+ if (ret == EINVAL) {
+ parent = NULL;
+ ret = 0;
+ }
+ if (ret != 0)
+ goto err;
+ }
+
+ /*
+ * Parent is NULL if we have no parent
+ * or it has no timeouts set.
+ */
+ if (parent == NULL && region->tx_timeout != 0)
+ if ((ret = __lock_set_timeout(dbenv, txn->txnid,
+ region->tx_timeout, DB_SET_TXN_TIMEOUT)) != 0)
+ goto err;
+ }
+
+ *txnpp = txn;
+ return (0);
+
+err:
+ /*
+ * NOTE(review): if we fail after __txn_begin_int has succeeded (e.g.
+ * in the lock-timeout calls above), only the DB_TXN handle is freed
+ * here; the TXN_DETAIL left in the region is not released on this
+ * path -- confirm it is reclaimed elsewhere.
+ */
+ __os_free(dbenv, txn);
+ return (ret);
+}
+
+/*
+ * __txn_xa_begin --
+ * XA version of txn_begin. Unlike __txn_begin, the DB_TXN structure
+ * is allocated by the caller; we clear it completely before handing it
+ * to the common begin code, which fills in the method pointers and ID.
+ *
+ * PUBLIC: int __txn_xa_begin __P((DB_ENV *, DB_TXN *));
+ */
+int
+__txn_xa_begin(dbenv, txn)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+{
+ PANIC_CHECK(dbenv);
+
+ /* Caller-allocated handle: note TXN_MALLOC is deliberately not set. */
+ memset(txn, 0, sizeof(DB_TXN));
+
+ txn->mgrp = dbenv->tx_handle;
+ TAILQ_INIT(&txn->kids);
+ TAILQ_INIT(&txn->events);
+
+ return (__txn_begin_int(txn, 0));
+}
+
+/*
+ * __txn_compensate_begin
+ * Begin an compensation transaction. This is a special interface
+ * that is used only for transactions that must be started to compensate
+ * for actions during an abort. Currently only used for allocations.
+ *
+ * PUBLIC: int __txn_compensate_begin __P((DB_ENV *, DB_TXN **txnp));
+ */
+int
+__txn_compensate_begin(dbenv, txnpp)
+ DB_ENV *dbenv;
+ DB_TXN **txnpp;
+{
+ DB_TXN *txn;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), &txn)) != 0)
+ return (ret);
+
+ txn->mgrp = dbenv->tx_handle;
+ TAILQ_INIT(&txn->kids);
+ TAILQ_INIT(&txn->events);
+ txn->flags = TXN_MALLOC;
+ /* TXN_COMPENSATE also lets this txn start while in recovery. */
+ F_SET(txn, TXN_COMPENSATE);
+
+ *txnpp = txn;
+ /*
+ * internal == 1: skip the "prepared transactions still being
+ * recovered" check in __txn_begin_int.
+ */
+ return (__txn_begin_int(txn, 1));
+}
+
+/*
+ * __txn_begin_int --
+ * Normal DB version of txn_begin. Allocates a transaction ID and a
+ * TXN_DETAIL in the shared region, links the detail onto the active
+ * list, and fills in the DB_TXN handle's method pointers.
+ */
+static int
+__txn_begin_int(txn, internal)
+ DB_TXN *txn;
+ int internal;
+{
+ DB_ENV *dbenv;
+ DB_LSN begin_lsn, null_lsn;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ TXN_DETAIL *td;
+ size_t off;
+ u_int32_t id, *ids;
+ int nids, ret;
+
+ mgr = txn->mgrp;
+ dbenv = mgr->dbenv;
+ region = mgr->reginfo.primary;
+
+ /*
+ * We do not have to write begin records (and if we do not, then we
+ * need never write records for read-only transactions). However,
+ * we do need to find the current LSN so that we can store it in the
+ * transaction structure, so we can know where to take checkpoints.
+ *
+ * XXX
+ * We should set this value when we write the first log record, not
+ * here.
+ */
+ if (DBENV_LOGGING(dbenv))
+ __log_txn_lsn(dbenv, &begin_lsn, NULL, NULL);
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (!F_ISSET(txn, TXN_COMPENSATE) && F_ISSET(region, TXN_IN_RECOVERY)) {
+ __db_err(dbenv, "operation not permitted during recovery");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /* Make sure that we aren't still recovering prepared transactions. */
+ if (!internal && region->stat.st_nrestores != 0) {
+ __db_err(dbenv,
+ "recovery of prepared but not yet committed transactions is incomplete");
+ ret = EINVAL;
+ goto err;
+ }
+
+ /*
+ * Allocate a new transaction id. Our current valid range can span
+ * the maximum valid value, so check for it and wrap manually.
+ */
+ if (region->last_txnid == TXN_MAXIMUM &&
+ region->cur_maxid != TXN_MAXIMUM)
+ region->last_txnid = TXN_MINIMUM - 1;
+
+ /*
+ * ID space exhausted: collect the IDs still in use, pick a fresh
+ * usable range around them, and log a recycle record so recovery
+ * knows the range was reused.
+ */
+ if (region->last_txnid == region->cur_maxid) {
+ if ((ret = __os_malloc(dbenv,
+ sizeof(u_int32_t) * region->maxtxns, &ids)) != 0)
+ goto err;
+ nids = 0;
+ for (td = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ td != NULL;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail))
+ ids[nids++] = td->txnid;
+ region->last_txnid = TXN_MINIMUM - 1;
+ region->cur_maxid = TXN_MAXIMUM;
+ if (nids != 0)
+ __db_idspace(ids, nids,
+ &region->last_txnid, &region->cur_maxid);
+ __os_free(dbenv, ids);
+ if (DBENV_LOGGING(dbenv) &&
+ (ret = __txn_recycle_log(dbenv, NULL,
+ &null_lsn, 0, region->last_txnid, region->cur_maxid)) != 0)
+ goto err;
+ }
+
+ /* Allocate a new transaction detail structure. */
+ if ((ret =
+ __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory for transaction detail");
+ goto err;
+ }
+
+ /* Place transaction on active transaction list. */
+ SH_TAILQ_INSERT_HEAD(&region->active_txn, td, links, __txn_detail);
+
+ id = ++region->last_txnid;
+ ++region->stat.st_nbegins;
+ if (++region->stat.st_nactive > region->stat.st_maxnactive)
+ region->stat.st_maxnactive = region->stat.st_nactive;
+
+ td->txnid = id;
+ td->begin_lsn = begin_lsn;
+ ZERO_LSN(td->last_lsn);
+ td->status = TXN_RUNNING;
+ if (txn->parent != NULL)
+ td->parent = txn->parent->off;
+ else
+ td->parent = INVALID_ROFF;
+
+ td->flags = 0;
+ off = R_OFFSET(&mgr->reginfo, td);
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /* Publish region offset and ID, and wire up the method table. */
+ ZERO_LSN(txn->last_lsn);
+ txn->txnid = id;
+ txn->off = (u_int32_t)off;
+
+ txn->abort = __txn_abort;
+ txn->commit = __txn_commit;
+ txn->discard = __txn_discard;
+ txn->id = __txn_id;
+ txn->prepare = __txn_prepare;
+ txn->set_timeout = __txn_set_timeout;
+
+ /*
+ * If this is a transaction family, we must link the child to the
+ * maximal grandparent in the lock table for deadlock detection.
+ */
+ if (txn->parent != NULL && LOCKING_ON(dbenv))
+ if ((ret = __lock_addfamilylocker(dbenv,
+ txn->parent->txnid, txn->txnid)) != 0)
+ /*
+ * NOTE(review): this return leaves td on the active
+ * list -- confirm the caller's error path accounts
+ * for it.
+ */
+ return (ret);
+
+ if (F_ISSET(txn, TXN_MALLOC)) {
+ MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
+ TAILQ_INSERT_TAIL(&mgr->txn_chain, txn, links);
+ MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
+ }
+
+ return (0);
+
+err: R_UNLOCK(dbenv, &mgr->reginfo);
+ return (ret);
+}
+
+/*
+ * __txn_commit --
+ * Commit a transaction. Resolves any unresolved children first,
+ * processes limbo pages, writes the commit (or child) log record, and
+ * finishes via __txn_end. On failure the transaction is aborted, so
+ * the handle is dead on return either way.
+ *
+ * PUBLIC: int __txn_commit __P((DB_TXN *, u_int32_t));
+ */
+int
+__txn_commit(txnp, flags)
+ DB_TXN *txnp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_LOCKREQ request;
+ DB_TXN *kid;
+ TXN_DETAIL *td;
+ u_int32_t lflags;
+ int ret, t_ret;
+
+ dbenv = txnp->mgrp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_COMMIT)) != 0)
+ return (ret);
+
+ /*
+ * We clear flags that are incorrect, ignoring any flag errors, and
+ * default to synchronous operations. By definition, transaction
+ * handles are dead when we return, and this error should never
+ * happen, but we don't want to fail in the field 'cause the app is
+ * specifying the wrong flag for some reason.
+ */
+ if (__db_fchk(dbenv,
+ "DB_TXN->commit", flags, DB_TXN_NOSYNC | DB_TXN_SYNC) != 0)
+ flags = DB_TXN_SYNC;
+ if (__db_fcchk(dbenv,
+ "DB_TXN->commit", flags, DB_TXN_NOSYNC, DB_TXN_SYNC) != 0)
+ flags = DB_TXN_SYNC;
+ if (LF_ISSET(DB_TXN_NOSYNC)) {
+ F_CLR(txnp, TXN_SYNC);
+ F_SET(txnp, TXN_NOSYNC);
+ }
+ if (LF_ISSET(DB_TXN_SYNC)) {
+ F_CLR(txnp, TXN_NOSYNC);
+ F_SET(txnp, TXN_SYNC);
+ }
+
+ /*
+ * Commit any unresolved children. If anyone fails to commit,
+ * then try to abort the rest of the kids and then abort the parent.
+ * Abort should never fail; if it does, we bail out immediately.
+ */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((ret = kid->commit(kid, flags)) != 0)
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((t_ret = kid->abort(kid)) != 0)
+ return (__db_panic(dbenv, t_ret));
+
+ /*
+ * Process any aborted pages from our children.
+ * We delay putting pages on the free list that are newly
+ * allocated and then aborted so that we can undo other
+ * allocations, if necessary, without worrying about
+ * these pages which were not on the free list before.
+ */
+ if (txnp->txn_list != NULL) {
+ t_ret = __db_do_the_limbo(dbenv, NULL, txnp, txnp->txn_list);
+ __db_txnlist_end(dbenv, txnp->txn_list);
+ txnp->txn_list = NULL;
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /* ret may still hold a child-commit or limbo failure; abort below. */
+ if (ret != 0)
+ goto err;
+
+ /*
+ * If there are any log records, write a log record and sync the log,
+ * else do no log writes. If the commit is for a child transaction,
+ * we do not need to commit the child synchronously since it may still
+ * abort (if its parent aborts), and otherwise its parent or ultimate
+ * ancestor will write synchronously.
+ */
+ if (DBENV_LOGGING(dbenv) && !IS_ZERO_LSN(txnp->last_lsn)) {
+ if (txnp->parent == NULL) {
+ /*
+ * We are about to free all the read locks
+ * for this transaction below. Some of those
+ * locks might be handle locks which should
+ * not be freed, because they will be freed
+ * when the handle is closed. Check the
+ * events and preprocess any trades now so
+ * that we don't release the locks below.
+ */
+ if ((ret = __txn_doevents(dbenv, txnp, 0, 1)) != 0)
+ goto err;
+ request.op = DB_LOCK_PUT_READ;
+ if (LOCKING_ON(dbenv) && (ret = dbenv->lock_vec(
+ dbenv, txnp->txnid, 0, &request, 1, NULL)) != 0)
+ goto err;
+
+ /* Choose sync/nosync flags for the commit record. */
+ SET_LOG_FLAGS(dbenv, txnp, lflags);
+ if ((ret = __txn_regop_log(dbenv,
+ txnp, &txnp->last_lsn, lflags,
+ TXN_COMMIT, (int32_t)time(NULL))) != 0)
+ goto err;
+ } else {
+ /* Log the commit in the parent! */
+ if ((ret = __txn_child_log(dbenv,
+ txnp->parent, &txnp->parent->last_lsn,
+ 0, txnp->txnid, &txnp->last_lsn)) != 0) {
+ goto err;
+ }
+
+ F_SET(txnp->parent, TXN_CHILDCOMMIT);
+ }
+ }
+
+ /* This is OK because __txn_end can only fail with a panic. */
+ return (__txn_end(txnp, 1));
+
+err: /*
+ * If we are prepared, then we "must" be able to commit. We
+ * panic here because even though the coordinator might be
+ * able to retry it is not clear it would know to do that.
+ * Otherwise we'll try to abort. If that is successful,
+ * then we return whatever was in ret (i.e., the reason we failed).
+ * If the abort was unsuccessful, then abort probably returned
+ * DB_RUNRECOVERY and we need to propagate that up.
+ */
+ if (td->status == TXN_PREPARED)
+ return (__db_panic(dbenv, ret));
+
+ if ((t_ret = txnp->abort(txnp)) != 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __txn_abort --
+ * Abort a transaction. Any failure inside abort panics the
+ * environment (DB_RUNRECOVERY), since a half-aborted transaction
+ * cannot be recovered from at runtime.
+ *
+ * PUBLIC: int __txn_abort __P((DB_TXN *));
+ */
+int
+__txn_abort(txnp)
+ DB_TXN *txnp;
+{
+ DB_ENV *dbenv;
+ DB_LOCKREQ request;
+ DB_TXN *kid;
+ TXN_DETAIL *td;
+ u_int32_t lflags;
+ int ret;
+
+ dbenv = txnp->mgrp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Ensure that abort always fails fatally. */
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_ABORT)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /*
+ * Try to abort any unresolved children.
+ *
+ * Abort either succeeds or panics the region. As soon as we
+ * see any failure, we just get out of here and return the panic
+ * up.
+ */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((ret = kid->abort(kid)) != 0)
+ return (ret);
+
+ if (LOCKING_ON(dbenv)) {
+ /*
+ * We are about to free all the read locks for this transaction
+ * below. Some of those locks might be handle locks which
+ * should not be freed, because they will be freed when the
+ * handle is closed. Check the events and preprocess any
+ * trades now so that we don't release the locks below.
+ */
+ if ((ret = __txn_doevents(dbenv, txnp, 0, 1)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /* Turn off timeouts. */
+ if ((ret = __lock_set_timeout(dbenv,
+ txnp->txnid, 0, DB_SET_TXN_TIMEOUT)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ if ((ret = __lock_set_timeout(dbenv,
+ txnp->txnid, 0, DB_SET_LOCK_TIMEOUT)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /*
+ * NOTE(review): presumably this re-upgrades locks that were
+ * downgraded for dirty readers, so the undo below can write
+ * those pages -- confirm against the lock subsystem.
+ */
+ request.op = DB_LOCK_UPGRADE_WRITE;
+ if ((ret = dbenv->lock_vec(
+ dbenv, txnp->txnid, 0, &request, 1, NULL)) != 0)
+ return (__db_panic(dbenv, ret));
+ }
+ /* Roll back this transaction's log records. */
+ if ((ret = __txn_undo(txnp)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /*
+ * Normally, we do not need to log aborts. However, if we
+ * are a distributed transaction (i.e., we have a prepare),
+ * then we log the abort so we know that this transaction
+ * was actually completed.
+ */
+ SET_LOG_FLAGS(dbenv, txnp, lflags);
+ if (DBENV_LOGGING(dbenv) && td->status == TXN_PREPARED &&
+ (ret = __txn_regop_log(dbenv, txnp, &txnp->last_lsn,
+ lflags, TXN_ABORT, (int32_t)time(NULL))) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /* __txn_end always panics if it errors, so pass the return along. */
+ return (__txn_end(txnp, 0));
+}
+
+/*
+ * __txn_discard --
+ * Free the per-process resources associated with this txn handle.
+ * Only valid for transactions restored by recovery (TXN_RESTORED);
+ * the shared-region TXN_DETAIL is left untouched.
+ *
+ * PUBLIC: int __txn_discard __P((DB_TXN *, u_int32_t flags));
+ */
+int
+__txn_discard(txnp, flags)
+ DB_TXN *txnp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_TXN *freep;
+ TXN_DETAIL *td;
+ int ret;
+
+ /* flags is currently unused. */
+ COMPQUIET(flags, 0);
+
+ dbenv = txnp->mgrp->dbenv;
+ freep = NULL;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_DISCARD)) != 0)
+ return (ret);
+
+ /* Should be no children. */
+ DB_ASSERT(TAILQ_FIRST(&txnp->kids) == NULL);
+ DB_ASSERT(F_ISSET(td, TXN_RESTORED));
+
+ /* Free the space. */
+ MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp);
+ txnp->mgrp->n_discards++;
+ if (F_ISSET(txnp, TXN_MALLOC)) {
+ TAILQ_REMOVE(&txnp->mgrp->txn_chain, txnp, links);
+ freep = txnp;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, txnp->mgrp->mutexp);
+ /* Free outside the mutex; only handles we allocated are freed. */
+ if (freep != NULL)
+ __os_free(dbenv, freep);
+
+ return (0);
+}
+
+/*
+ * __txn_prepare --
+ * Flush the log so a future commit is guaranteed to succeed.
+ * Writes the prepare record (including the global transaction ID)
+ * and marks the transaction TXN_PREPARED.
+ *
+ * PUBLIC: int __txn_prepare __P((DB_TXN *, u_int8_t *));
+ */
+int
+__txn_prepare(txnp, gid)
+ DB_TXN *txnp;
+ u_int8_t *gid;
+{
+ DBT xid;
+ DB_ENV *dbenv;
+ DB_TXN *kid;
+ TXN_DETAIL *td;
+ u_int32_t lflags;
+ int ret;
+
+ dbenv = txnp->mgrp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_PREPARE)) != 0)
+ return (ret);
+
+ /* Commit any unresolved children. */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((ret = kid->commit(kid, DB_TXN_NOSYNC)) != 0)
+ return (ret);
+
+ /*
+ * In XA, the global transaction ID in the txn_detail structure is
+ * already set; in a non-XA environment, we must set it here. XA
+ * requires that the transaction be either ENDED or SUSPENDED when
+ * prepare is called, so we know that if the xa_status isn't in one
+ * of those states, then we are calling prepare directly and we need
+ * to fill in the td->xid.
+ */
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&xid, 0, sizeof(xid));
+ if (td->xa_status != TXN_XA_ENDED &&
+ td->xa_status != TXN_XA_SUSPENDED)
+ /* Regular prepare; fill in the gid. */
+ memcpy(td->xid, gid, sizeof(td->xid));
+
+ /* The DBT points at the region copy of the XID. */
+ xid.size = sizeof(td->xid);
+ xid.data = td->xid;
+
+ SET_LOG_FLAGS(dbenv, txnp, lflags);
+ if ((ret = __txn_xa_regop_log(dbenv, txnp, &txnp->last_lsn,
+ lflags, TXN_PREPARE, &xid, td->format, td->gtrid, td->bqual,
+ &td->begin_lsn)) != 0) {
+ __db_err(dbenv, "DB_TXN->prepare: log_write failed %s",
+ db_strerror(ret));
+ return (ret);
+ }
+ }
+
+ /* Publish the state change under the manager's mutex. */
+ MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp);
+ td->status = TXN_PREPARED;
+ MUTEX_THREAD_UNLOCK(dbenv, txnp->mgrp->mutexp);
+ return (0);
+}
+
+/*
+ * __txn_id --
+ * Return the transaction ID.
+ *
+ * PUBLIC: u_int32_t __txn_id __P((DB_TXN *));
+ */
+u_int32_t
+__txn_id(txnp)
+ DB_TXN *txnp;
+{
+ /* Simple accessor for the ID assigned in __txn_begin_int. */
+ return (txnp->txnid);
+}
+
+/*
+ * __txn_set_timeout --
+ * Set timeout values in the txn structure. Only the transaction
+ * and lock timeout operations are accepted; the actual value is
+ * stored by the lock subsystem.
+ */
+static int
+__txn_set_timeout(txnp, timeout, op)
+ DB_TXN *txnp;
+ db_timeout_t timeout;
+ u_int32_t op;
+{
+ if (op != DB_SET_TXN_TIMEOUT && op != DB_SET_LOCK_TIMEOUT)
+ return (__db_ferr(txnp->mgrp->dbenv, "DB_TXN->set_timeout", 0));
+
+ return (__lock_set_timeout(
+ txnp->mgrp->dbenv, txnp->txnid, timeout, op));
+}
+
+/*
+ * __txn_isvalid --
+ * Return 0 if the txnp is reasonable, otherwise panic.
+ * On success, *tdp (if non-NULL) is set to the region TXN_DETAIL
+ * for the handle; the allowed states depend on the operation.
+ */
+static int
+__txn_isvalid(txnp, tdp, op)
+ const DB_TXN *txnp;
+ TXN_DETAIL **tdp;
+ txnop_t op;
+{
+ DB_TXNMGR *mgrp;
+ DB_TXNREGION *region;
+ TXN_DETAIL *tp;
+
+ mgrp = txnp->mgrp;
+ region = mgrp->reginfo.primary;
+
+ /* Check for recovery. */
+ if (!F_ISSET(txnp, TXN_COMPENSATE) &&
+ F_ISSET(region, TXN_IN_RECOVERY)) {
+ __db_err(mgrp->dbenv,
+ "operation not permitted during recovery");
+ goto err;
+ }
+
+ /* Check for live cursors. */
+ if (txnp->cursors != 0) {
+ __db_err(mgrp->dbenv, "transaction has active cursors");
+ goto err;
+ }
+
+ /* Check transaction's state. */
+ /* Map the handle's region offset back to its TXN_DETAIL. */
+ tp = (TXN_DETAIL *)R_ADDR(&mgrp->reginfo, txnp->off);
+ if (tdp != NULL)
+ *tdp = tp;
+
+ /* Handle any operation specific checks. */
+ switch (op) {
+ case TXN_OP_DISCARD:
+ /*
+ * Since we're just tossing the per-process space; there are
+ * a lot of problems with the transaction that we can tolerate.
+ */
+
+ /* Transaction is already been reused. */
+ if (txnp->txnid != tp->txnid)
+ return (0);
+
+ /* What we've got had better be a restored transaction. */
+ if (!F_ISSET(tp, TXN_RESTORED)) {
+ __db_err(mgrp->dbenv, "not a restored transaction");
+ return (__db_panic(mgrp->dbenv, EINVAL));
+ }
+
+ return (0);
+ case TXN_OP_PREPARE:
+ if (txnp->parent != NULL) {
+ /*
+ * This is not fatal, because you could imagine an
+ * application that simply prepares everybody because
+ * it doesn't distinguish between children and parents.
+ * I'm not arguing this is good, but I could imagine
+ * someone doing it.
+ */
+ __db_err(mgrp->dbenv,
+ "Prepare disallowed on child transactions");
+ return (EINVAL);
+ }
+ break;
+ case TXN_OP_ABORT:
+ case TXN_OP_COMMIT:
+ default:
+ break;
+ }
+
+ /* Any remaining op requires a RUNNING (or, for non-prepare,
+ * PREPARED) transaction; anything else is fatal. */
+ switch (tp->status) {
+ case TXN_PREPARED:
+ if (op == TXN_OP_PREPARE) {
+ __db_err(mgrp->dbenv, "transaction already prepared");
+ /*
+ * Txn_prepare doesn't blow away the user handle, so
+ * in this case, give the user the opportunity to
+ * abort or commit.
+ */
+ return (EINVAL);
+ }
+ break;
+ case TXN_RUNNING:
+ break;
+ case TXN_ABORTED:
+ case TXN_COMMITTED:
+ default:
+ __db_err(mgrp->dbenv, "transaction already %s",
+ tp->status == TXN_COMMITTED ? "committed" : "aborted");
+ goto err;
+ }
+
+ return (0);
+
+err: /*
+ * If there's a serious problem with the transaction, panic. TXN
+ * handles are dead by definition when we return, and if you use
+ * a cursor you forgot to close, we have no idea what will happen.
+ */
+ return (__db_panic(mgrp->dbenv, EINVAL));
+}
+
+/*
+ * __txn_end --
+ * Internal transaction end routine. Releases or inherits locks,
+ * removes the TXN_DETAIL from the active list, updates statistics,
+ * and frees the per-process handle. Shared by commit and abort.
+ */
+static int
+__txn_end(txnp, is_commit)
+ DB_TXN *txnp;
+ int is_commit;
+{
+ DB_ENV *dbenv;
+ DB_LOCKREQ request;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ TXN_DETAIL *tp;
+ int do_closefiles, ret;
+
+ mgr = txnp->mgrp;
+ dbenv = mgr->dbenv;
+ region = mgr->reginfo.primary;
+ do_closefiles = 0;
+
+ /* Process commit events. */
+ if ((ret = __txn_doevents(dbenv, txnp, is_commit, 0)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /*
+ * Release the locks: a committing child hands its locks to its
+ * parent (DB_LOCK_INHERIT), everyone else drops them all.
+ */
+ request.op = txnp->parent == NULL ||
+ is_commit == 0 ? DB_LOCK_PUT_ALL : DB_LOCK_INHERIT;
+
+ /*
+ * __txn_end cannot return an simple error, we MUST return
+ * success/failure from commit or abort, ignoring any internal
+ * errors. So, we panic if something goes wrong. We can't
+ * deadlock here because we're not acquiring any new locks,
+ * so DB_LOCK_DEADLOCK is just as fatal as any other error.
+ */
+ if (LOCKING_ON(dbenv) && (ret = dbenv->lock_vec(
+ dbenv, txnp->txnid, DB_LOCK_FREE_LOCKER, &request, 1, NULL)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /* End the transaction. */
+ R_LOCK(dbenv, &mgr->reginfo);
+
+ tp = (TXN_DETAIL *)R_ADDR(&mgr->reginfo, txnp->off);
+ SH_TAILQ_REMOVE(&region->active_txn, tp, links, __txn_detail);
+ /* Resolving the last restored prepared txn triggers file closes. */
+ if (F_ISSET(tp, TXN_RESTORED)) {
+ region->stat.st_nrestores--;
+ do_closefiles = region->stat.st_nrestores == 0;
+ }
+
+ __db_shalloc_free(mgr->reginfo.addr, tp);
+
+ if (is_commit)
+ region->stat.st_ncommits++;
+ else
+ region->stat.st_naborts++;
+ --region->stat.st_nactive;
+
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * The transaction cannot get more locks, remove its locker info,
+ * if any.
+ */
+ if (LOCKING_ON(dbenv) && (ret =
+ __lock_freefamilylocker(dbenv->lk_handle, txnp->txnid)) != 0)
+ return (__db_panic(dbenv, ret));
+ if (txnp->parent != NULL)
+ TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks);
+
+ /* Free the space. */
+ if (F_ISSET(txnp, TXN_MALLOC)) {
+ MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
+ TAILQ_REMOVE(&mgr->txn_chain, txnp, links);
+ MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
+
+ __os_free(dbenv, txnp);
+ }
+
+ if (do_closefiles) {
+ /* Recovery-style close, then force a checkpoint. */
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ (void)__dbreg_close_files(dbenv);
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ mgr->n_discards = 0;
+ (void)dbenv->txn_checkpoint(dbenv, 0, 0, DB_FORCE);
+ }
+ return (0);
+}
+
+/*
+ * __txn_undo --
+ * Undo the transaction with id txnid. Returns 0 on success and
+ * errno on failure. Walks the transaction's log records backward
+ * from last_lsn, dispatching each with DB_TXN_ABORT.
+ */
+static int
+__txn_undo(txnp)
+ DB_TXN *txnp;
+{
+ DBT rdbt;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ DB_LSN key_lsn;
+ DB_TXN *ptxn;
+ DB_TXNMGR *mgr;
+ int ret, t_ret;
+ void *txnlist;
+
+ mgr = txnp->mgrp;
+ dbenv = mgr->dbenv;
+ logc = NULL;
+ txnlist = NULL;
+
+ /* Nothing to undo if we never logged anything. */
+ if (!DBENV_LOGGING(dbenv))
+ return (0);
+
+ /*
+ * This is the simplest way to code this, but if the mallocs during
+ * recovery turn out to be a performance issue, we can do the
+ * allocation here and use DB_DBT_USERMEM.
+ */
+ memset(&rdbt, 0, sizeof(rdbt));
+
+ key_lsn = txnp->last_lsn;
+
+ /*
+ * Allocate a txnlist for children and aborted page allocs.
+ * We need to associate the list with the maximal parent
+ * so that aborted pages are recovered when that transaction
+ * is commited or aborted.
+ */
+ for (ptxn = txnp->parent; ptxn != NULL && ptxn->parent != NULL;)
+ ptxn = ptxn->parent;
+
+ if (ptxn != NULL && ptxn->txn_list != NULL)
+ txnlist = ptxn->txn_list;
+ else if (txnp->txn_list != NULL)
+ txnlist = txnp->txn_list;
+ else if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txnlist)) != 0)
+ return (ret);
+ else if (ptxn != NULL)
+ ptxn->txn_list = txnlist;
+
+ /*
+ * NOTE(review): if this lsninit fails and txnlist was freshly
+ * allocated above with ptxn == NULL, the list is not freed on
+ * this return path -- confirm whether that leak matters here.
+ */
+ if (F_ISSET(txnp, TXN_CHILDCOMMIT) &&
+ (ret = __db_txnlist_lsninit(dbenv, txnlist, &txnp->last_lsn)) != 0)
+ return (ret);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ while (ret == 0 && !IS_ZERO_LSN(key_lsn)) {
+ /*
+ * The dispatch routine returns the lsn of the record
+ * before the current one in the key_lsn argument.
+ */
+ if ((ret = logc->get(logc, &key_lsn, &rdbt, DB_SET)) == 0) {
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &rdbt, &key_lsn,
+ DB_TXN_ABORT, txnlist);
+ if (F_ISSET(txnp, TXN_CHILDCOMMIT))
+ (void)__db_txnlist_lsnadd(dbenv,
+ txnlist, &key_lsn, 0);
+ }
+ /* A child commit record was found mid-walk: start tracking. */
+ if (ret == DB_SURPRISE_KID) {
+ if ((ret = __db_txnlist_lsninit(
+ dbenv, txnlist, &key_lsn)) == 0)
+ F_SET(txnp, TXN_CHILDCOMMIT);
+ } else if (ret != 0) {
+ __db_err(txnp->mgrp->dbenv,
+ "DB_TXN->abort: Log undo failed for LSN: %lu %lu: %s",
+ (u_long)key_lsn.file, (u_long)key_lsn.offset,
+ db_strerror(ret));
+ goto err;
+ }
+ }
+
+ /* Resolve any limbo pages accumulated during the undo. */
+ ret = __db_do_the_limbo(dbenv, ptxn, txnp, txnlist);
+
+err: if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Only free the list if it wasn't handed off to a parent. */
+ if (ptxn == NULL && txnlist != NULL)
+ __db_txnlist_end(dbenv, txnlist);
+ return (ret);
+}
+
+/*
+ * Transaction checkpoint.
+ * If either kbytes or minutes is non-zero, then we only take the checkpoint
+ * if more than "minutes" minutes have passed since the last checkpoint or if
+ * more than "kbytes" of log data have been written since the last checkpoint.
+ * When taking a checkpoint, find the oldest active transaction and figure out
+ * its first LSN. This is the lowest LSN we can checkpoint, since any record
+ * written after that point may be involved in a transaction and may
+ * therefore need to be undone in the case of an abort.
+ *
+ * PUBLIC: int __txn_checkpoint
+ * PUBLIC: __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+ */
+int
+__txn_checkpoint(dbenv, kbytes, minutes, flags)
+ DB_ENV *dbenv;
+ u_int32_t kbytes, minutes, flags;
+{
+ DB_LSN ckp_lsn, last_ckp;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ TXN_DETAIL *txnp;
+ time_t last_ckp_time, now;
+ u_int32_t bytes, mbytes;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->tx_handle, "txn_checkpoint", DB_INIT_TXN);
+
+ /*
+ * On a replication client, all transactions are read-only; therefore,
+ * a checkpoint is a null-op.
+ *
+ * We permit txn_checkpoint, instead of just rendering it illegal,
+ * so that an application can just let a checkpoint thread continue
+ * to operate as it gets promoted or demoted between being a
+ * master and a client.
+ */
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ /*
+ * The checkpoint LSN is an LSN such that all transactions begun before
+ * it are complete. Our first guess (corrected below based on the list
+ * of active transactions) is the last-written LSN.
+ */
+ __log_txn_lsn(dbenv, &ckp_lsn, &mbytes, &bytes);
+
+ if (!LF_ISSET(DB_FORCE)) {
+ /* Don't checkpoint a quiescent database. */
+ if (bytes == 0 && mbytes == 0)
+ return (0);
+
+ /* Check the log-data threshold, in kilobytes. */
+ if (kbytes != 0 &&
+ mbytes * 1024 + bytes / 1024 >= (u_int32_t)kbytes)
+ goto do_ckp;
+
+ /* Check the elapsed-time threshold, in minutes. */
+ if (minutes != 0) {
+ (void)time(&now);
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ last_ckp_time = region->time_ckp;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (now - last_ckp_time >= (time_t)(minutes * 60))
+ goto do_ckp;
+ }
+
+ /*
+ * If we checked time and data and didn't go to checkpoint,
+ * we're done.
+ */
+ if (minutes != 0 || kbytes != 0)
+ return (0);
+ }
+
+do_ckp: /* Look through the active transactions for the lowest begin LSN. */
+ R_LOCK(dbenv, &mgr->reginfo);
+ for (txnp = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ txnp != NULL;
+ txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail))
+ if (!IS_ZERO_LSN(txnp->begin_lsn) &&
+ log_compare(&txnp->begin_lsn, &ckp_lsn) < 0)
+ ckp_lsn = txnp->begin_lsn;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /* Flush dirty pages so the checkpoint LSN is durable on disk. */
+ if (MPOOL_ON(dbenv) && (ret = dbenv->memp_sync(dbenv, NULL)) != 0) {
+ __db_err(dbenv,
+ "txn_checkpoint: failed to flush the buffer cache %s",
+ db_strerror(ret));
+ return (ret);
+ }
+
+ /*
+ * Because we can't be a replication client here, and because
+ * recovery (somewhat unusually) calls txn_checkpoint and expects
+ * it to write a log message, LOGGING_ON is the correct macro here.
+ */
+ if (LOGGING_ON(dbenv)) {
+ R_LOCK(dbenv, &mgr->reginfo);
+ last_ckp = region->last_ckp;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * Put out records for the open files before we log
+ * the checkpoint. The records are certain to be at
+ * or after ckp_lsn, but before the checkpoint record
+ * itself, so they're sure to be included if we start
+ * recovery from the ckp_lsn contained in this
+ * checkpoint.
+ */
+ if ((ret = __dbreg_open_files(dbenv)) != 0 ||
+ (ret = __txn_ckp_log(dbenv,
+ NULL, &ckp_lsn, DB_PERMANENT | DB_FLUSH, &ckp_lsn,
+ &last_ckp, (int32_t)time(NULL))) != 0) {
+ __db_err(dbenv,
+ "txn_checkpoint: log failed at LSN [%ld %ld] %s",
+ (long)ckp_lsn.file, (long)ckp_lsn.offset,
+ db_strerror(ret));
+ return (ret);
+ }
+
+ /* Record the new checkpoint LSN in the region. */
+ __txn_updateckp(dbenv, &ckp_lsn);
+ }
+ return (0);
+}
+
+/*
+ * __txn_getckp --
+ * Get the LSN of the last transaction checkpoint.
+ *
+ * PUBLIC: int __txn_getckp __P((DB_ENV *, DB_LSN *));
+ */
+int
+__txn_getckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_LSN lsn;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ lsn = region->last_ckp;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (IS_ZERO_LSN(lsn))
+ return (DB_NOTFOUND);
+
+ *lsnp = lsn;
+ return (0);
+}
+
+/*
+ * __txn_activekids --
+ * Return if this transaction has any active children.
+ *
+ * PUBLIC: int __txn_activekids __P((DB_ENV *, u_int32_t, DB_TXN *));
+ */
+int
+__txn_activekids(dbenv, rectype, txnp)
+ DB_ENV *dbenv;
+ u_int32_t rectype;
+ DB_TXN *txnp;
+{
+ /*
+ * On a child commit, we know that there are children (i.e., the
+ * committing child, at the least). In that case, skip this check.
+ */
+ if (F_ISSET(txnp, TXN_COMPENSATE) || rectype == DB___txn_child)
+ return (0);
+
+ if (TAILQ_FIRST(&txnp->kids) != NULL) {
+ __db_err(dbenv, "Child transaction is active");
+ return (EPERM);
+ }
+ return (0);
+}
+
+/*
+ * __txn_force_abort --
+ * Force an abort record into the log if the commit record
+ * failed to get to disk.
+ *
+ * PUBLIC: int __txn_force_abort __P((DB_ENV *, u_int8_t *));
+ */
+int
+__txn_force_abort(dbenv, buffer)
+ DB_ENV *dbenv;
+ u_int8_t *buffer;
+{
+ DB_CIPHER *db_cipher;
+ HDR *hdr;
+ u_int32_t offset, opcode, rec_len, rec_type, sum_len;
+ u_int8_t *bp, *key, chksum[DB_MAC_KEY];
+ size_t hdrsize;
+ int ret;
+
+ db_cipher = dbenv->crypto_handle;
+
+ /*
+ * This routine depends on the layout of HDR and the __txn_regop
+ * record in txn.src. We are passed the beginning of the commit
+ * record in the log buffer and overwrite the commit with an abort
+ * and recalculate the checksum. We may be passed a txn_xa_regop
+ * (that is, an XA prepare); there's no need to overwrite that one.
+ */
+ hdr = (HDR *)buffer;
+ memcpy(&rec_type, hdr, sizeof(rec_type));
+ if (rec_type == DB___txn_xa_regop)
+ return (0);
+
+ offset = sizeof(u_int32_t) + sizeof(u_int32_t) + sizeof(DB_LSN);
+ rec_len = offset + sizeof(u_int32_t) + sizeof(int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ key = db_cipher->mac_key;
+ hdrsize = HDR_CRYPTO_SZ;
+ sum_len = DB_MAC_KEY;
+ if ((ret = db_cipher->decrypt(dbenv, db_cipher->data,
+ &hdr->iv[0], buffer + hdrsize, rec_len)) != 0)
+ return (__db_panic(dbenv, ret));
+ } else {
+ key = NULL;
+ hdrsize = HDR_NORMAL_SZ;
+ sum_len = sizeof(u_int32_t);
+ }
+ bp = buffer + hdrsize + offset;
+ opcode = TXN_ABORT;
+ memcpy(bp, &opcode, sizeof(opcode));
+
+ if (CRYPTO_ON(dbenv) &&
+ (ret = db_cipher->encrypt(dbenv,
+ db_cipher->data, &hdr->iv[0], buffer + hdrsize, rec_len)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ __db_chksum(buffer + hdrsize, rec_len, key, chksum);
+ memcpy(buffer + SSZ(HDR, chksum), &chksum, sum_len);
+
+ return (0);
+}
+
+/*
+ * __txn_preclose
+ * Before we can close an environment, we need to check if we
+ * were in the midst of taking care of restored transactions. If
+ * so, then we need to close the files that we opened.
+ *
+ * PUBLIC: int __txn_preclose __P((DB_ENV *));
+ */
+int
+__txn_preclose(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ int do_closefiles, ret;
+
+ mgr = (DB_TXNMGR *)dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+ do_closefiles = 0;
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (region != NULL &&
+ region->stat.st_nrestores
+ <= mgr->n_discards && mgr->n_discards != 0)
+ do_closefiles = 1;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (do_closefiles) {
+ /*
+ * Set the DBLOG_RECOVER flag while closing these
+ * files so they do not create additional log records
+ * that will confuse future recoveries.
+ */
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ ret = __dbreg_close_files(dbenv);
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ } else
+ ret = 0;
+
+ return (ret);
+}
+
+/*
+ * __txn_reset --
+ * Reset the last txnid to its minimum value, and log the reset.
+ *
+ * PUBLIC: int __txn_reset __P((DB_ENV *));
+ */
+int
+__txn_reset(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LSN scrap;
+ DB_TXNREGION *region;
+
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ region->last_txnid = TXN_MINIMUM;
+
+ DB_ASSERT(LOGGING_ON(dbenv));
+ return (__txn_recycle_log(dbenv,
+ NULL, &scrap, 0, TXN_MINIMUM, TXN_MAXIMUM));
+}
+
+/*
+ * __txn_updateckp --
+ * Update the last_ckp field in the transaction region. This happens
+ * at the end of a normal checkpoint and also when a replication client
+ * receives a checkpoint record.
+ *
+ * PUBLIC: void __txn_updateckp __P((DB_ENV *, DB_LSN *));
+ */
+void
+__txn_updateckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ /*
+ * We want to make sure last_ckp only moves forward; since
+ * we drop locks above and in log_put, it's possible
+ * for two calls to __txn_ckp_log to finish in a different
+ * order from how they were called.
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (log_compare(&region->last_ckp, lsnp) < 0) {
+ region->last_ckp = *lsnp;
+ (void)time(&region->time_ckp);
+ }
+ R_UNLOCK(dbenv, &mgr->reginfo);
+}
diff --git a/storage/bdb/txn/txn.src b/storage/bdb/txn/txn.src
new file mode 100644
index 00000000000..3f69b29e3ff
--- /dev/null
+++ b/storage/bdb/txn/txn.src
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: txn.src,v 11.19 2002/03/27 04:33:15 bostic Exp $
+ */
+
+PREFIX __txn
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * This is the standard log operation for commit.
+ * Note that we are using an int32_t for the timestamp. This means that
+ * in 2039 we will need to deprecate this log record and create one that
+ * either changes the Epoch or has a 64-bit offset.
+ */
+BEGIN regop 10
+ARG opcode u_int32_t lu
+ARG timestamp int32_t ld
+END
+
+/*
+ * This is the checkpoint record. It contains the lsn that the checkpoint
+ * guarantees and a pointer to the last checkpoint so we can walk backwards
+ * by checkpoint.
+ *
+ * ckp_lsn:
+ * The lsn in the log of the most recent point at which all begun
+ * transactions have been aborted. This is the point for which
+ * the checkpoint is relevant.
+ * last_ckp:
+ * The previous checkpoint.
+ * timestamp:
+ * See comment in commit about timestamps.
+ */
+BEGIN ckp 11
+POINTER ckp_lsn DB_LSN * lu
+POINTER last_ckp DB_LSN * lu
+ARG timestamp int32_t ld
+END
+
+/*
+ * This is the (new) log operation for a child commit. It is
+ * logged as a record in the PARENT. The child field contains
+ * the transaction ID of the child committing and the c_lsn is
+ * the last LSN of the child's log trail.
+ */
+BEGIN child 12
+ARG child u_int32_t lx
+POINTER c_lsn DB_LSN * lu
+END
+
+
+/*
+ * This is the standard log operation for prepare.
+ */
+BEGIN xa_regop 13
+ARG opcode u_int32_t lu
+DBT xid DBT s
+ARG formatID int32_t ld
+ARG gtrid u_int32_t u
+ARG bqual u_int32_t u
+POINTER begin_lsn DB_LSN * lu
+END
+
+/*
+ * Log the fact that we are recycling txnids.
+ */
+BEGIN recycle 14
+ARG min u_int32_t u
+ARG max u_int32_t u
+END
diff --git a/storage/bdb/txn/txn_method.c b/storage/bdb/txn/txn_method.c
new file mode 100644
index 00000000000..60fdf30583e
--- /dev/null
+++ b/storage/bdb/txn/txn_method.c
@@ -0,0 +1,105 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_method.c,v 11.62 2002/05/09 20:09:35 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __txn_set_tx_max __P((DB_ENV *, u_int32_t));
+static int __txn_set_tx_timestamp __P((DB_ENV *, time_t *));
+
+/*
+ * __txn_dbenv_create --
+ * Transaction specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: void __txn_dbenv_create __P((DB_ENV *));
+ */
+void
+__txn_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ */
+
+ dbenv->tx_max = DEF_MAX_TXNS;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_tx_max = __dbcl_set_tx_max;
+ dbenv->set_tx_timestamp = __dbcl_set_tx_timestamp;
+ dbenv->txn_checkpoint = __dbcl_txn_checkpoint;
+ dbenv->txn_recover = __dbcl_txn_recover;
+ dbenv->txn_stat = __dbcl_txn_stat;
+ dbenv->txn_begin = __dbcl_txn_begin;
+ } else
+#endif
+ {
+ dbenv->set_tx_max = __txn_set_tx_max;
+ dbenv->set_tx_timestamp = __txn_set_tx_timestamp;
+ dbenv->txn_checkpoint = __txn_checkpoint;
+#ifdef CONFIG_TEST
+ dbenv->txn_id_set = __txn_id_set;
+#endif
+ dbenv->txn_recover = __txn_recover;
+ dbenv->txn_stat = __txn_stat;
+ dbenv->txn_begin = __txn_begin;
+ }
+}
+
+/*
+ * __txn_set_tx_max --
+ * Set the size of the transaction table.
+ */
+static int
+__txn_set_tx_max(dbenv, tx_max)
+ DB_ENV *dbenv;
+ u_int32_t tx_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_max");
+
+ dbenv->tx_max = tx_max;
+ return (0);
+}
+
+/*
+ * __txn_set_tx_timestamp --
+ * Set the transaction recovery timestamp.
+ */
+static int
+__txn_set_tx_timestamp(dbenv, timestamp)
+ DB_ENV *dbenv;
+ time_t *timestamp;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_timestamp");
+
+ dbenv->tx_timestamp = *timestamp;
+ return (0);
+}
diff --git a/storage/bdb/txn/txn_rec.c b/storage/bdb/txn/txn_rec.c
new file mode 100644
index 00000000000..69af6a1f907
--- /dev/null
+++ b/storage/bdb/txn/txn_rec.c
@@ -0,0 +1,436 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_rec.c,v 11.41 2002/08/06 04:42:37 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_dispatch.h"
+
+#define IS_XA_TXN(R) (R->xid.size != 0)
+
+/*
+ * PUBLIC: int __txn_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ *
+ * These records are only ever written for commits. Normally, we redo any
+ * committed transaction, however if we are doing recovery to a timestamp, then
+ * we may treat transactions that committed after the timestamp as aborted.
+ */
+int
+__txn_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB_TXNHEAD *headp;
+ __txn_regop_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_regop_print(dbenv, dbtp, lsnp, op, info);
+#endif
+
+ if ((ret = __txn_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ headp = info;
+ /*
+ * We are only ever called during FORWARD_ROLL or BACKWARD_ROLL.
+ * We check for the former explicitly and the last two clauses
+ * apply to the BACKWARD_ROLL case.
+ */
+
+ if (op == DB_TXN_FORWARD_ROLL)
+ /*
+ * If this was a 2-phase-commit transaction, then it
+ * might already have been removed from the list, and
+ * that's OK. Ignore the return code from remove.
+ */
+ (void)__db_txnlist_remove(dbenv, info, argp->txnid->txnid);
+ else if ((dbenv->tx_timestamp != 0 &&
+ argp->timestamp > (int32_t)dbenv->tx_timestamp) ||
+ (!IS_ZERO_LSN(headp->trunc_lsn) &&
+ log_compare(&headp->trunc_lsn, lsnp) < 0)) {
+ /*
+ * We failed either the timestamp check or the trunc_lsn check,
+ * so we treat this as an abort even if it was a commit record.
+ */
+ ret = __db_txnlist_update(dbenv,
+ info, argp->txnid->txnid, TXN_ABORT, NULL);
+
+ if (ret == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid, TXN_IGNORE, NULL);
+ else if (ret != TXN_OK)
+ goto err;
+ /* else ret = 0; Not necessary because TXN_OK == 0 */
+ } else {
+ /* This is a normal commit; mark it appropriately. */
+ ret = __db_txnlist_update(dbenv,
+ info, argp->txnid->txnid, argp->opcode, lsnp);
+
+ if (ret == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid,
+ argp->opcode == TXN_ABORT ?
+ TXN_IGNORE : argp->opcode, lsnp);
+ else if (ret != TXN_OK)
+ goto err;
+ /* else ret = 0; Not necessary because TXN_OK == 0 */
+ }
+
+ if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+ if (0) {
+err: __db_err(dbenv,
+ "txnid %lx commit record found, already on commit list",
+ argp->txnid->txnid);
+ ret = EINVAL;
+ }
+ __os_free(dbenv, argp);
+
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __txn_xa_regop_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ *
+ * These records are only ever written for prepares.
+ */
+int
+__txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_xa_regop_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_xa_regop_print(dbenv, dbtp, lsnp, op, info);
+#endif
+
+ if ((ret = __txn_xa_regop_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ if (argp->opcode != TXN_PREPARE) {
+ ret = EINVAL;
+ goto err;
+ }
+
+ ret = __db_txnlist_find(dbenv, info, argp->txnid->txnid);
+
+ /*
+ * If we are rolling forward, then an aborted prepare
+ * indicates that this may be the last record we'll see for
+ * this transaction ID, so we should remove it from the
+ * list.
+ */
+
+ if (op == DB_TXN_FORWARD_ROLL) {
+ if ((ret = __db_txnlist_remove(dbenv,
+ info, argp->txnid->txnid)) != TXN_OK)
+ goto txn_err;
+ } else if (op == DB_TXN_BACKWARD_ROLL && ret == TXN_PREPARE) {
+ /*
+ * On the backward pass, we have three possibilities:
+ * 1. The transaction is already committed, no-op.
+ * 2. The transaction is already aborted, no-op.
+ * 3. The transaction is neither committed nor aborted.
+ * Treat this like a commit and roll forward so that
+ * the transaction can be resurrected in the region.
+ * We handle case 3 here; cases 1 and 2 are the final clause
+ * below.
+ * This is a prepared, but not yet committed, transaction. We
+ * need to add it to the transaction list, so that it gets
+ * rolled forward. We also have to add it to the region's
+ * internal state so it can be properly aborted or committed
+ * after recovery (see txn_recover).
+ */
+ if ((ret = __db_txnlist_remove(dbenv,
+ info, argp->txnid->txnid)) != TXN_OK) {
+txn_err: __db_err(dbenv,
+ "Transaction not in list %x", argp->txnid->txnid);
+ ret = DB_NOTFOUND;
+ } else if ((ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid, TXN_COMMIT, lsnp)) == 0)
+ ret = __txn_restore_txn(dbenv, lsnp, argp);
+ } else
+ ret = 0;
+
+ if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+err: __os_free(dbenv, argp);
+
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __txn_ckp_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_ckp_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_ckp_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ __txn_ckp_print(dbenv, dbtp, lsnp, op, info);
+#endif
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = __txn_ckp_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ if (op == DB_TXN_BACKWARD_ROLL)
+ __db_txnlist_ckp(dbenv, info, lsnp);
+
+ *lsnp = argp->last_ckp;
+ __os_free(dbenv, argp);
+ return (DB_TXN_CKP);
+}
+
+/*
+ * __txn_child_recover
+ * Recover a commit record for a child transaction.
+ *
+ * PUBLIC: int __txn_child_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_child_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_child_args *argp;
+ int c_stat, p_stat, ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_child_print(dbenv, dbtp, lsnp, op, info);
+#endif
+ if ((ret = __txn_child_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ /*
+ * This is a record in a PARENT's log trail indicating that a
+ * child committed. If we are aborting, we need to update the
+ * parent's LSN array. If we are in recovery, then if the
+ * parent is committing, we set ourselves up to commit, else
+ * we do nothing.
+ */
+ if (op == DB_TXN_ABORT) {
+ /* Note that __db_txnlist_lsnadd rewrites its LSN
+ * parameter, so you cannot reuse the argp->c_lsn field.
+ */
+ ret = __db_txnlist_lsnadd(dbenv,
+ info, &argp->c_lsn, TXNLIST_NEW);
+ } else if (op == DB_TXN_BACKWARD_ROLL) {
+ /* Child might exist -- look for it. */
+ c_stat = __db_txnlist_find(dbenv, info, argp->child);
+ p_stat = __db_txnlist_find(dbenv, info, argp->txnid->txnid);
+
+ if (c_stat == TXN_EXPECTED) {
+ /*
+ * The open after this create succeeded. If the
+ * parent succeeded, we don't want to redo; if the
+ * parent aborted, we do want to undo.
+ */
+ ret = __db_txnlist_update(dbenv,
+ info, argp->child,
+ p_stat == TXN_COMMIT ? TXN_IGNORE : TXN_ABORT,
+ NULL);
+ if (ret > 0)
+ ret = 0;
+ } else if (c_stat == TXN_UNEXPECTED) {
+ /*
+ * The open after this create failed. If the parent
+ * is rolling forward, we need to roll forward. If
+ * the parent failed, then we do not want to abort
+ * (because the file may not be the one in which we
+ * are interested).
+ */
+ ret = __db_txnlist_update(dbenv, info, argp->child,
+ p_stat == TXN_COMMIT ? TXN_COMMIT : TXN_IGNORE,
+ NULL);
+ if (ret > 0)
+ ret = 0;
+ } else if (c_stat != TXN_IGNORE) {
+ ret = __db_txnlist_add(dbenv, info, argp->child,
+ p_stat == TXN_COMMIT ? TXN_COMMIT : TXN_ABORT,
+ NULL);
+ }
+ } else {
+ /* Forward Roll */
+ if ((ret =
+ __db_txnlist_remove(dbenv, info, argp->child)) != TXN_OK) {
+ __db_err(dbenv,
+ "Transaction not in list %x", argp->txnid->txnid);
+ ret = DB_NOTFOUND;
+ }
+ }
+
+ if (ret == 0)
+ *lsnp = argp->prev_lsn;
+
+ __os_free(dbenv, argp);
+
+ return (ret);
+}
+
+/*
+ * __txn_restore_txn --
+ * Used only during XA recovery. If we find any transactions that are
+ * prepared, but not yet committed, then we need to restore the transaction's
+ * state into the shared region, because the TM is going to issue an abort
+ * or commit and we need to respond correctly.
+ *
+ * lsnp is the LSN of the returned LSN
+ * argp is the prepare record (in an appropriate structure)
+ *
+ * PUBLIC: int __txn_restore_txn __P((DB_ENV *,
+ * PUBLIC: DB_LSN *, __txn_xa_regop_args *));
+ */
+int
+__txn_restore_txn(dbenv, lsnp, argp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ __txn_xa_regop_args *argp;
+{
+ DB_TXNMGR *mgr;
+ TXN_DETAIL *td;
+ DB_TXNREGION *region;
+ int ret;
+
+ if (argp->xid.size == 0)
+ return (0);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+ R_LOCK(dbenv, &mgr->reginfo);
+
+ /* Allocate a new transaction detail structure. */
+ if ((ret =
+ __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0) {
+ R_UNLOCK(dbenv, &mgr->reginfo);
+ return (ret);
+ }
+
+ /* Place transaction on active transaction list. */
+ SH_TAILQ_INSERT_HEAD(&region->active_txn, td, links, __txn_detail);
+
+ td->txnid = argp->txnid->txnid;
+ td->begin_lsn = argp->begin_lsn;
+ td->last_lsn = *lsnp;
+ td->parent = 0;
+ td->status = TXN_PREPARED;
+ td->xa_status = TXN_XA_PREPARED;
+ memcpy(td->xid, argp->xid.data, argp->xid.size);
+ td->bqual = argp->bqual;
+ td->gtrid = argp->gtrid;
+ td->format = argp->formatID;
+ td->flags = 0;
+ F_SET(td, TXN_RESTORED);
+
+ region->stat.st_nrestores++;
+ region->stat.st_nactive++;
+ if (region->stat.st_nactive > region->stat.st_maxnactive)
+ region->stat.st_maxnactive = region->stat.st_nactive;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+ return (0);
+}
+
+/*
+ * __txn_recycle_recover --
+ * Recovery function for recycle.
+ *
+ * PUBLIC: int __txn_recycle_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_recycle_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_recycle_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_child_print(dbenv, dbtp, lsnp, op, info);
+#endif
+ if ((ret = __txn_recycle_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ COMPQUIET(lsnp, NULL);
+
+ if ((ret = __db_txnlist_gen(dbenv, info,
+ DB_UNDO(op) ? -1 : 1, argp->min, argp->max)) != 0)
+ return (ret);
+
+ __os_free(dbenv, argp);
+
+ return (0);
+}
diff --git a/storage/bdb/txn/txn_recover.c b/storage/bdb/txn/txn_recover.c
new file mode 100644
index 00000000000..732a82e5030
--- /dev/null
+++ b/storage/bdb/txn/txn_recover.c
@@ -0,0 +1,306 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_recover.c,v 1.36 2002/08/19 16:59:15 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_page.h"
+#include "dbinc/log.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc_auto/db_ext.h"
+
+/*
+ * __txn_continue
+ * Fill in the fields of the local transaction structure given
+ * the detail transaction structure.
+ *
+ * XXX
+ * I'm not sure that we work correctly with nested txns.
+ *
+ * PUBLIC: void __txn_continue __P((DB_ENV *, DB_TXN *, TXN_DETAIL *, size_t));
+ */
+void
+__txn_continue(env, txnp, td, off)
+ DB_ENV *env;
+ DB_TXN *txnp;
+ TXN_DETAIL *td;
+ size_t off;
+{
+ txnp->mgrp = env->tx_handle;
+ txnp->parent = NULL;
+ txnp->last_lsn = td->last_lsn;
+ txnp->txnid = td->txnid;
+ txnp->off = (roff_t)off;
+
+ txnp->abort = __txn_abort;
+ txnp->commit = __txn_commit;
+ txnp->discard = __txn_discard;
+ txnp->id = __txn_id;
+ txnp->prepare = __txn_prepare;
+
+ txnp->flags = 0;
+}
+
+/*
+ * __txn_map_gid
+ * Return the txn that corresponds to this global ID.
+ *
+ * PUBLIC: int __txn_map_gid __P((DB_ENV *,
+ * PUBLIC: u_int8_t *, TXN_DETAIL **, size_t *));
+ */
+int
+__txn_map_gid(dbenv, gid, tdp, offp)
+ DB_ENV *dbenv;
+ u_int8_t *gid;
+ TXN_DETAIL **tdp;
+ size_t *offp;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *tmr;
+
+ mgr = dbenv->tx_handle;
+ tmr = mgr->reginfo.primary;
+
+ /*
+ * Search the internal active transaction table to find the
+ * matching xid. If this is a performance hit, then we
+ * can create a hash table, but I doubt it's worth it.
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+ for (*tdp = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ *tdp != NULL;
+ *tdp = SH_TAILQ_NEXT(*tdp, links, __txn_detail))
+ if (memcmp(gid, (*tdp)->xid, sizeof((*tdp)->xid)) == 0)
+ break;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (*tdp == NULL)
+ return (EINVAL);
+
+ *offp = R_OFFSET(&mgr->reginfo, *tdp);
+ return (0);
+}
+
+/*
+ * __txn_recover --
+ * Public interface to retrieve the list of prepared, but not yet
+ * committed transactions. See __txn_get_prepared for details. This
+ * function and __db_xa_recover both wrap that one.
+ *
+ * PUBLIC: int __txn_recover
+ * PUBLIC: __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t));
+ */
+int
+__txn_recover(dbenv, preplist, count, retp, flags)
+ DB_ENV *dbenv;
+ DB_PREPLIST *preplist;
+ long count, *retp;
+ u_int32_t flags;
+{
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(
+ dbenv, dbenv->tx_handle, "txn_recover", DB_INIT_TXN);
+
+ if (F_ISSET((DB_TXNREGION *)
+ ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary,
+ TXN_IN_RECOVERY)) {
+ __db_err(dbenv, "operation not permitted while in recovery");
+ return (EINVAL);
+ }
+ return (__txn_get_prepared(dbenv, NULL, preplist, count, retp, flags));
+}
+
+/*
+ * __txn_get_prepared --
+ * Returns a list of prepared (and for XA, heuristically completed)
+ * transactions (less than or equal to the count parameter). One of
+ * xids or txns must be set to point to an array of the appropriate type.
+ * The count parameter indicates the number of entries in the xids and/or
+ * txns array. The retp parameter will be set to indicate the number of
+ * entries returned in the xids/txns array. Flags indicates the operation,
+ * one of DB_FIRST or DB_NEXT.
+ *
+ * PUBLIC: int __txn_get_prepared __P((DB_ENV *,
+ * PUBLIC: XID *, DB_PREPLIST *, long, long *, u_int32_t));
+ */
+int
+__txn_get_prepared(dbenv, xids, txns, count, retp, flags)
+ DB_ENV *dbenv;
+ XID *xids;
+ DB_PREPLIST *txns;
+ long count; /* This is long for XA compatibility. */
+ long *retp;
+ u_int32_t flags;
+{
+ DBT data;
+ DB_LOGC *logc;
+ DB_LSN min, open_lsn;
+ DB_PREPLIST *prepp;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *tmr;
+ TXN_DETAIL *td;
+ XID *xidp;
+ __txn_ckp_args *ckp_args;
+ long i;
+ int nrestores, open_files, ret, t_ret;
+ void *txninfo;
+
+ *retp = 0;
+
+ logc = NULL;
+ MAX_LSN(min);
+ prepp = txns;
+ xidp = xids;
+ nrestores = ret = 0;
+ open_files = 1;
+
+ /*
+ * If we are starting a scan, then we traverse the active transaction
+ * list once making sure that all transactions are marked as not having
+ * been collected. Then on each pass, we mark the ones we collected
+ * so that if we cannot collect them all at once, we can finish up
+ * next time with a continue.
+ */
+
+ mgr = dbenv->tx_handle;
+ tmr = mgr->reginfo.primary;
+
+ /*
+ * During this pass we need to figure out if we are going to need
+ * to open files. We need to open files if we've never collected
+ * before (in which case, none of the COLLECTED bits will be set)
+ * and the ones that we are collecting are restored (if they aren't
+ * restored, then we never crashed; just the main server did).
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (flags == DB_FIRST) {
+ for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ td != NULL;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail)) {
+ if (F_ISSET(td, TXN_RESTORED))
+ nrestores++;
+ if (F_ISSET(td, TXN_COLLECTED))
+ open_files = 0;
+ F_CLR(td, TXN_COLLECTED);
+ }
+ mgr->n_discards = 0;
+ } else
+ open_files = 0;
+
+ /* Now begin collecting active transactions. */
+ for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ td != NULL && *retp < count;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail)) {
+ if (td->status != TXN_PREPARED || F_ISSET(td, TXN_COLLECTED))
+ continue;
+
+ if (xids != NULL) {
+ xidp->formatID = td->format;
+ xidp->gtrid_length = td->gtrid;
+ xidp->bqual_length = td->bqual;
+ memcpy(xidp->data, td->xid, sizeof(td->xid));
+ xidp++;
+ }
+
+ if (txns != NULL) {
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(DB_TXN), &prepp->txn)) != 0)
+ goto err;
+ __txn_continue(dbenv,
+ prepp->txn, td, R_OFFSET(&mgr->reginfo, td));
+ F_SET(prepp->txn, TXN_MALLOC);
+ memcpy(prepp->gid, td->xid, sizeof(td->xid));
+ prepp++;
+ }
+
+ if (log_compare(&td->begin_lsn, &min) < 0)
+ min = td->begin_lsn;
+
+ (*retp)++;
+ F_SET(td, TXN_COLLECTED);
+ }
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * Now link all the transactions into the transaction manager's list.
+ */
+ if (txns != NULL) {
+ MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
+ for (i = 0; i < *retp; i++)
+ TAILQ_INSERT_TAIL(&mgr->txn_chain, txns[i].txn, links);
+ MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
+ }
+
+ if (open_files && nrestores && *retp != 0 && !IS_MAX_LSN(min)) {
+ /*
+ * Figure out the last checkpoint before the smallest
+ * start_lsn in the region.
+ */
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+
+ memset(&data, 0, sizeof(data));
+ if ((ret = __txn_getckp(dbenv, &open_lsn)) == 0)
+ while (!IS_ZERO_LSN(open_lsn) && (ret =
+ logc->get(logc, &open_lsn, &data, DB_SET)) == 0 &&
+ log_compare(&min, &open_lsn) < 0) {
+ /* Format the log record. */
+ if ((ret = __txn_ckp_read(dbenv,
+ data.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%lu][%lu]",
+ (u_long)open_lsn.file,
+ (u_long)open_lsn.offset);
+ goto err;
+ }
+ open_lsn = ckp_args->last_ckp;
+ __os_free(dbenv, ckp_args);
+ }
+
+ /*
+ * There are three ways by which we may have gotten here.
+ * - We got a DB_NOTFOUND -- we need to read the first
+ * log record.
+ * - We found a checkpoint before min. We're done.
+ * - We found a checkpoint after min whose last_ckp is 0. We
+ * need to start at the beginning of the log.
+ */
+ if ((ret == DB_NOTFOUND || IS_ZERO_LSN(open_lsn)) &&
+ (ret = logc->get(logc, &open_lsn, &data, DB_FIRST)) != 0) {
+ __db_err(dbenv, "No log records");
+ goto err;
+ }
+
+ if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txninfo)) != 0)
+ goto err;
+ ret = __env_openfiles(dbenv, logc,
+ txninfo, &data, &open_lsn, NULL, 0, 0);
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+ }
+
+err: F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+
+ if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/storage/bdb/txn/txn_region.c b/storage/bdb/txn/txn_region.c
new file mode 100644
index 00000000000..bf72d4f1d2c
--- /dev/null
+++ b/storage/bdb/txn/txn_region.c
@@ -0,0 +1,374 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_region.c,v 11.73 2002/08/06 04:42:37 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __txn_findlastckp __P((DB_ENV *, DB_LSN *));
+static int __txn_init __P((DB_ENV *, DB_TXNMGR *));
+static size_t __txn_region_size __P((DB_ENV *));
+
+/*
+ * __txn_open --
+ * Open a transaction region.
+ *
+ * PUBLIC: int __txn_open __P((DB_ENV *));
+ */
+int
+__txn_open(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXNMGR *tmgrp;
+ int ret;
+
+ /* Create/initialize the transaction manager structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXNMGR), &tmgrp)) != 0)
+ return (ret);
+ TAILQ_INIT(&tmgrp->txn_chain);
+ tmgrp->dbenv = dbenv;
+
+ /* Join/create the txn region. */
+ tmgrp->reginfo.type = REGION_TYPE_TXN;
+ tmgrp->reginfo.id = INVALID_REGION_ID;
+ tmgrp->reginfo.mode = dbenv->db_mode;
+ tmgrp->reginfo.flags = REGION_JOIN_OK;
+ if (F_ISSET(dbenv, DB_ENV_CREATE))
+ F_SET(&tmgrp->reginfo, REGION_CREATE_OK);
+ if ((ret = __db_r_attach(dbenv,
+ &tmgrp->reginfo, __txn_region_size(dbenv))) != 0)
+ goto err;
+
+ /* If we created the region, initialize it. */
+ if (F_ISSET(&tmgrp->reginfo, REGION_CREATE))
+ if ((ret = __txn_init(dbenv, tmgrp)) != 0)
+ goto err;
+
+ /* Set the local addresses. */
+ tmgrp->reginfo.primary =
+ R_ADDR(&tmgrp->reginfo, tmgrp->reginfo.rp->primary);
+
+ /* Acquire a mutex to protect the active TXN list. */
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, &tmgrp->reginfo, &tmgrp->mutexp,
+ MUTEX_ALLOC | MUTEX_NO_RLOCK | MUTEX_THREAD)) != 0)
+ goto err;
+
+ R_UNLOCK(dbenv, &tmgrp->reginfo);
+
+ /* Success: publish the manager handle on the environment. */
+ dbenv->tx_handle = tmgrp;
+ return (0);
+
+ /*
+ * Error path: if we created the region but failed to initialize it,
+ * panic the environment; otherwise unlock, detach, free the per-thread
+ * mutex (if allocated) and the manager structure, and return the error.
+ */
+err: if (tmgrp->reginfo.addr != NULL) {
+ if (F_ISSET(&tmgrp->reginfo, REGION_CREATE))
+ ret = __db_panic(dbenv, ret);
+ R_UNLOCK(dbenv, &tmgrp->reginfo);
+
+ (void)__db_r_detach(dbenv, &tmgrp->reginfo, 0);
+ }
+ if (tmgrp->mutexp != NULL)
+ __db_mutex_free(dbenv, &tmgrp->reginfo, tmgrp->mutexp);
+ __os_free(dbenv, tmgrp);
+ return (ret);
+}
+
+/*
+ * __txn_init --
+ * Initialize a transaction region in shared memory.
+ */
+static int
+__txn_init(dbenv, tmgrp)
+ DB_ENV *dbenv;
+ DB_TXNMGR *tmgrp;
+{
+ DB_LSN last_ckp;
+ DB_TXNREGION *region;
+ int ret;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ u_int8_t *addr;
+#endif
+
+ /*
+ * Find the last checkpoint in the log.
+ */
+ ZERO_LSN(last_ckp);
+ if (LOGGING_ON(dbenv)) {
+ /*
+ * The log system has already walked through the last
+ * file. Get the LSN of a checkpoint it may have found.
+ */
+ __log_get_cached_ckp_lsn(dbenv, &last_ckp);
+
+ /*
+ * If that didn't work, look backwards from the beginning of
+ * the last log file until we find the last checkpoint.
+ */
+ if (IS_ZERO_LSN(last_ckp) &&
+ (ret = __txn_findlastckp(dbenv, &last_ckp)) != 0)
+ return (ret);
+ }
+
+ /* Allocate the primary region structure from shared memory. */
+ if ((ret = __db_shalloc(tmgrp->reginfo.addr,
+ sizeof(DB_TXNREGION), 0, &tmgrp->reginfo.primary)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory for the transaction region");
+ return (ret);
+ }
+ tmgrp->reginfo.rp->primary =
+ R_OFFSET(&tmgrp->reginfo, tmgrp->reginfo.primary);
+ region = tmgrp->reginfo.primary;
+ memset(region, 0, sizeof(*region));
+
+ /* Seed region fields from the environment configuration. */
+ region->maxtxns = dbenv->tx_max;
+ region->last_txnid = TXN_MINIMUM;
+ region->cur_maxid = TXN_MAXIMUM;
+ region->last_ckp = last_ckp;
+ region->time_ckp = time(NULL);
+
+ /*
+ * XXX
+ * If we ever do more types of locking and logging, this changes.
+ */
+ region->logtype = 0;
+ region->locktype = 0;
+
+ memset(&region->stat, 0, sizeof(region->stat));
+ region->stat.st_maxtxns = region->maxtxns;
+
+ SH_TAILQ_INIT(&region->active_txn);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ /* Allocate room for the txn maintenance info and initialize it. */
+ if ((ret = __db_shalloc(tmgrp->reginfo.addr,
+ sizeof(REGMAINT) + TXN_MAINT_SIZE, 0, &addr)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory for mutex maintenance");
+ return (ret);
+ }
+ __db_maintinit(&tmgrp->reginfo, addr, TXN_MAINT_SIZE);
+ region->maint_off = R_OFFSET(&tmgrp->reginfo, addr);
+#endif
+ return (0);
+}
+
+/*
+ * __txn_findlastckp --
+ * Find the last checkpoint in the log, walking backwards from the
+ * beginning of the last log file. (The log system looked through
+ * the last log file when it started up.)
+ */
+static int
+__txn_findlastckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_LOGC *logc;
+ DB_LSN lsn;
+ DBT dbt;
+ int ret, t_ret;
+ u_int32_t rectype;
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ /* Get the last LSN. */
+ memset(&dbt, 0, sizeof(dbt));
+ if ((ret = logc->get(logc, &lsn, &dbt, DB_LAST)) != 0)
+ goto err;
+
+ /*
+ * Twiddle the last LSN so it points to the beginning of the last
+ * file; we know there's no checkpoint after that, since the log
+ * system already looked there.
+ */
+ lsn.offset = 0;
+
+ /* Read backwards, looking for checkpoints. */
+ while ((ret = logc->get(logc, &lsn, &dbt, DB_PREV)) == 0) {
+ /* Skip records too short to carry a record type. */
+ if (dbt.size < sizeof(u_int32_t))
+ continue;
+ memcpy(&rectype, dbt.data, sizeof(u_int32_t));
+ if (rectype == DB___txn_ckp) {
+ /* Found the most recent checkpoint; report its LSN. */
+ *lsnp = lsn;
+ break;
+ }
+ }
+
+ /* Close the cursor, preserving the first error encountered. */
+err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * Not finding a checkpoint is not an error; there may not exist
+ * one in the log.
+ */
+ return ((ret == 0 || ret == DB_NOTFOUND) ? 0 : ret);
+}
+
+/*
+ * __txn_dbenv_refresh --
+ * Clean up after the transaction system on a close or failed open.
+ * Called only from __dbenv_refresh. (Formerly called __txn_close.)
+ *
+ * PUBLIC: int __txn_dbenv_refresh __P((DB_ENV *));
+ */
+int
+__txn_dbenv_refresh(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXN *txnp;
+ DB_TXNMGR *tmgrp;
+ u_int32_t txnid;
+ int ret, t_ret;
+
+ ret = 0;
+ tmgrp = dbenv->tx_handle;
+
+ /*
+ * This function can only be called once per process (i.e., not
+ * once per thread), so no synchronization is required.
+ *
+ * The caller is doing something wrong if close is called with
+ * active transactions. Try and abort any active transactions,
+ * but it's quite likely the aborts will fail because recovery
+ * won't find open files. If we can't abort any transaction,
+ * panic, we have to run recovery to get back to a known state.
+ */
+ if (TAILQ_FIRST(&tmgrp->txn_chain) != NULL) {
+ __db_err(dbenv,
+ "Error: closing the transaction region with active transactions");
+ /* Report EINVAL even if every abort succeeds. */
+ ret = EINVAL;
+ while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL) {
+ txnid = txnp->txnid;
+ if ((t_ret = txnp->abort(txnp)) != 0) {
+ __db_err(dbenv,
+ "Unable to abort transaction 0x%x: %s",
+ txnid, db_strerror(t_ret));
+ /* Abort failed: panic and stop trying. */
+ ret = __db_panic(dbenv, t_ret);
+ break;
+ }
+ }
+ }
+
+ /* Flush the log. */
+ if (LOGGING_ON(dbenv) &&
+ (t_ret = dbenv->log_flush(dbenv, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Discard the per-thread lock. */
+ if (tmgrp->mutexp != NULL)
+ __db_mutex_free(dbenv, &tmgrp->reginfo, tmgrp->mutexp);
+
+ /* Detach from the region. */
+ if ((t_ret = __db_r_detach(dbenv, &tmgrp->reginfo, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ __os_free(dbenv, tmgrp);
+
+ /* The handle is gone; clear it so later calls fail cleanly. */
+ dbenv->tx_handle = NULL;
+ return (ret);
+}
+
+/*
+ * __txn_region_size --
+ * Return the amount of space needed for the txn region. Make the
+ * region large enough to hold txn_max transaction detail structures
+ * plus some space to hold thread handles and the beginning of the
+ * shalloc region and anything we need for mutex system resource
+ * recording.
+ */
+static size_t
+__txn_region_size(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t s;
+
+ /* Region struct + one TXN_DETAIL per transaction + 10KB of slop. */
+ s = sizeof(DB_TXNREGION) +
+ dbenv->tx_max * sizeof(TXN_DETAIL) + 10 * 1024;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ /* Reserve mutex maintenance space when threading is configured. */
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ s += sizeof(REGMAINT) + TXN_MAINT_SIZE;
+#endif
+ return (s);
+}
+
+/*
+ * __txn_region_destroy
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __txn_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__txn_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ /* Tear down shared-memory mutexes recorded in the maint area. */
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((DB_TXNREGION *)R_ADDR(infop, infop->rp->primary))->maint_off));
+
+ /* Quiet unused-parameter warnings on builds without maint info. */
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+}
+
+#ifdef CONFIG_TEST
+/*
+ * __txn_id_set --
+ * Set the current transaction ID and current maximum unused ID (for
+ * testing purposes only).
+ *
+ * PUBLIC: int __txn_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__txn_id_set(dbenv, cur_txnid, max_txnid)
+ DB_ENV *dbenv;
+ u_int32_t cur_txnid, max_txnid;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ int ret;
+
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_id_set", DB_INIT_TXN);
+
+ /* Install the IDs unconditionally; validation below only reports. */
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+ region->last_txnid = cur_txnid;
+ region->cur_maxid = max_txnid;
+
+ /*
+ * Validate after the fact: return EINVAL for out-of-range values.
+ * Cast the u_int32_t arguments to u_long to match the %lu format,
+ * as done elsewhere in this file; passing them uncast is undefined
+ * behavior on platforms where u_int32_t is not unsigned long.
+ */
+ ret = 0;
+ if (cur_txnid < TXN_MINIMUM) {
+ __db_err(dbenv, "Current ID value %lu below minimum",
+ (u_long)cur_txnid);
+ ret = EINVAL;
+ }
+ if (max_txnid < TXN_MINIMUM) {
+ __db_err(dbenv, "Maximum ID value %lu below minimum",
+ (u_long)max_txnid);
+ ret = EINVAL;
+ }
+ return (ret);
+}
+#endif
diff --git a/storage/bdb/txn/txn_stat.c b/storage/bdb/txn/txn_stat.c
new file mode 100644
index 00000000000..f7d84e8f4c6
--- /dev/null
+++ b/storage/bdb/txn/txn_stat.c
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_stat.c,v 11.15 2002/04/26 23:00:36 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+/*
+ * __txn_stat --
+ *
+ * PUBLIC: int __txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+ */
+int
+__txn_stat(dbenv, statp, flags)
+ DB_ENV *dbenv;
+ DB_TXN_STAT **statp;
+ u_int32_t flags;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ DB_TXN_STAT *stats;
+ TXN_DETAIL *txnp;
+ size_t nbytes;
+ u_int32_t ndx;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_stat", DB_INIT_TXN);
+
+ /* Only DB_STAT_CLEAR is a legal flag. */
+ *statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->txn_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ /*
+ * Allocate for the maximum active transactions -- the DB_TXN_ACTIVE
+ * struct is small and the maximum number of active transactions is
+ * not going to be that large. Don't have to lock anything to look
+ * at the region's maximum active transactions value, it's read-only
+ * and never changes after the region is created.
+ */
+ nbytes = sizeof(DB_TXN_STAT) + sizeof(DB_TXN_ACTIVE) * region->maxtxns;
+ if ((ret = __os_umalloc(dbenv, nbytes, &stats)) != 0)
+ return (ret);
+
+ /* Copy the region statistics under the region lock. */
+ R_LOCK(dbenv, &mgr->reginfo);
+ memcpy(stats, &region->stat, sizeof(*stats));
+ stats->st_last_txnid = region->last_txnid;
+ stats->st_last_ckp = region->last_ckp;
+ stats->st_time_ckp = region->time_ckp;
+ /* The active-transaction array lives just past the stat struct. */
+ stats->st_txnarray = (DB_TXN_ACTIVE *)&stats[1];
+
+ /* Fill one DB_TXN_ACTIVE entry per active transaction. */
+ ndx = 0;
+ for (txnp = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ txnp != NULL;
+ txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) {
+ stats->st_txnarray[ndx].txnid = txnp->txnid;
+ if (txnp->parent == INVALID_ROFF)
+ stats->st_txnarray[ndx].parentid = TXN_INVALID;
+ else
+ stats->st_txnarray[ndx].parentid =
+ ((TXN_DETAIL *)R_ADDR(&mgr->reginfo,
+ txnp->parent))->txnid;
+ stats->st_txnarray[ndx].lsn = txnp->begin_lsn;
+ ndx++;
+ }
+
+ stats->st_region_wait = mgr->reginfo.rp->mutex.mutex_set_wait;
+ stats->st_region_nowait = mgr->reginfo.rp->mutex.mutex_set_nowait;
+ stats->st_regsize = mgr->reginfo.rp->size;
+ /* On DB_STAT_CLEAR, reset counters but preserve configuration. */
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ mgr->reginfo.rp->mutex.mutex_set_wait = 0;
+ mgr->reginfo.rp->mutex.mutex_set_nowait = 0;
+ memset(&region->stat, 0, sizeof(region->stat));
+ region->stat.st_maxtxns = region->maxtxns;
+ region->stat.st_maxnactive =
+ region->stat.st_nactive = stats->st_nactive;
+ }
+
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /* Caller owns the malloc'd stats block and must free it. */
+ *statp = stats;
+ return (0);
+}
diff --git a/storage/bdb/txn/txn_util.c b/storage/bdb/txn/txn_util.c
new file mode 100644
index 00000000000..cbfbc419615
--- /dev/null
+++ b/storage/bdb/txn/txn_util.c
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_util.c,v 11.18 2002/08/06 06:25:12 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+
+typedef struct __txn_event TXN_EVENT;
+/*
+ * A deferred action attached to a transaction, processed at commit
+ * (or discarded at abort) by __txn_doevents.  The union arm in use
+ * is selected by the op field: TXN_REMOVE uses u.r, lock trades use u.t.
+ */
+struct __txn_event {
+ TXN_EVENT_T op;
+ TAILQ_ENTRY(__txn_event) links;
+ union {
+ struct {
+ /* Delayed remove. */
+ char *name;
+ u_int8_t *fileid;
+ } r;
+ struct {
+ /* Lock event. */
+ DB_LOCK lock;
+ u_int32_t locker;
+ DB *dbp;
+ } t;
+ } u;
+};
+
+/*
+ * __txn_remevent --
+ *
+ * Creates a remove event that can be added to the commit list.
+ *
+ * PUBLIC: int __txn_remevent __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, u_int8_t*));
+ */
+int
+__txn_remevent(dbenv, txn, name, fileid)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name;
+ u_int8_t *fileid;
+{
+ int ret;
+ TXN_EVENT *e;
+
+ /* calloc zeroes the event, so u.r.name/u.r.fileid start NULL. */
+ e = NULL;
+ if ((ret = __os_calloc(dbenv, 1, sizeof(TXN_EVENT), &e)) != 0)
+ return (ret);
+
+ if ((ret = __os_strdup(dbenv, name, &e->u.r.name)) != 0)
+ goto err;
+
+ if (fileid != NULL) {
+ /*
+ * Go through the error path on failure; the old code
+ * returned directly here, leaking the event and the
+ * strdup'd name.
+ */
+ if ((ret = __os_calloc(dbenv,
+ 1, DB_FILE_ID_LEN, &e->u.r.fileid)) != 0)
+ goto err;
+ memcpy(e->u.r.fileid, fileid, DB_FILE_ID_LEN);
+ }
+
+ /* Queue the remove for processing at commit time. */
+ e->op = TXN_REMOVE;
+ TAILQ_INSERT_TAIL(&txn->events, e, links);
+
+ return (0);
+
+ /* Free any partially-built event, including the copied name. */
+err: if (e != NULL) {
+ if (e->u.r.name != NULL)
+ __os_free(dbenv, e->u.r.name);
+ __os_free(dbenv, e);
+ }
+
+ return (ret);
+}
+
+/*
+ * __txn_lockevent --
+ *
+ * Add a lockevent to the commit-queue. The lock event indicates a locker
+ * trade.
+ *
+ * PUBLIC: int __txn_lockevent __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB *, DB_LOCK *, u_int32_t));
+ */
+int
+__txn_lockevent(dbenv, txn, dbp, lock, locker)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB *dbp;
+ DB_LOCK *lock;
+ u_int32_t locker;
+{
+ int ret;
+ TXN_EVENT *e;
+
+ /* Lock trades are meaningless without the lock subsystem. */
+ if (!LOCKING_ON(dbenv))
+ return (0);
+
+ e = NULL;
+ if ((ret = __os_calloc(dbenv, 1, sizeof(TXN_EVENT), &e)) != 0)
+ return (ret);
+
+ /* Record the lock (by value), target locker, and owning DB handle. */
+ e->u.t.locker = locker;
+ e->u.t.lock = *lock;
+ e->u.t.dbp = dbp;
+ e->op = TXN_TRADE;
+ TAILQ_INSERT_TAIL(&txn->events, e, links);
+
+ return (0);
+}
+
+/*
+ * __txn_remlock --
+ * Remove a lock event because the locker is going away. We can remove
+ * by lock (using offset) or by locker_id (or by both).
+ *
+ * PUBLIC: void __txn_remlock __P((DB_ENV *, DB_TXN *, DB_LOCK *, u_int32_t));
+ */
+void
+__txn_remlock(dbenv, txn, lock, locker)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_LOCK *lock;
+ u_int32_t locker;
+{
+ TXN_EVENT *e, *next_e;
+
+ /* Save the next pointer before freeing the current event. */
+ for (e = TAILQ_FIRST(&txn->events); e != NULL; e = next_e) {
+ next_e = TAILQ_NEXT(e, links);
+ /* Keep events that are not trades, or match neither key. */
+ if ((e->op != TXN_TRADE && e->op != TXN_TRADED) ||
+ (e->u.t.lock.off != lock->off && e->u.t.locker != locker))
+ continue;
+ TAILQ_REMOVE(&txn->events, e, links);
+ __os_free(dbenv, e);
+ }
+
+ return;
+}
+
+/*
+ * __txn_doevents --
+ * Process the list of events associated with a transaction. On commit,
+ * apply the events; on abort, just toss the entries.
+ *
+ * PUBLIC: int __txn_doevents __P((DB_ENV *, DB_TXN *, int, int));
+ */
+#define DO_TRADE do { \
+ memset(&req, 0, sizeof(req)); \
+ req.lock = e->u.t.lock; \
+ req.op = DB_LOCK_TRADE; \
+ t_ret = __lock_vec(dbenv, e->u.t.locker, 0, &req, 1, NULL); \
+ if (t_ret == 0) \
+ e->u.t.dbp->cur_lid = e->u.t.locker; \
+ else if (t_ret == DB_NOTFOUND) \
+ t_ret = 0; \
+ if (t_ret != 0 && ret == 0) \
+ ret = t_ret; \
+ e->op = TXN_TRADED; \
+} while (0)
+
+int
+__txn_doevents(dbenv, txn, is_commit, preprocess)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int is_commit, preprocess;
+{
+ DB_LOCKREQ req;
+ TXN_EVENT *e;
+ int ret, t_ret;
+
+ ret = 0;
+
+ /*
+ * This phase only gets called if we have a phase where we
+ * release read locks. Since not all paths will call this
+ * phase, we have to check for it below as well. So, when
+ * we do the trade, we update the opcode of the entry so that
+ * we don't try the trade again.
+ */
+ if (preprocess) {
+ for (e = TAILQ_FIRST(&txn->events);
+ e != NULL; e = TAILQ_NEXT(e, links)) {
+ if (e->op != TXN_TRADE)
+ continue;
+ DO_TRADE;
+ }
+ return (ret);
+ }
+
+ /* Final pass: drain the queue, freeing every event. */
+ while ((e = TAILQ_FIRST(&txn->events)) != NULL) {
+ TAILQ_REMOVE(&txn->events, e, links);
+ /* On abort, events are discarded without being applied. */
+ if (!is_commit)
+ goto dofree;
+ switch (e->op) {
+ case TXN_REMOVE:
+ /* Apply the deferred file remove. */
+ if (e->u.r.fileid != NULL) {
+ if ((t_ret = dbenv->memp_nameop(dbenv,
+ e->u.r.fileid,
+ NULL, e->u.r.name, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, e->u.r.fileid);
+ } else if ((t_ret =
+ __os_unlink(dbenv, e->u.r.name)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, e->u.r.name);
+ break;
+ case TXN_TRADE:
+ DO_TRADE;
+ /* Fall through */
+ case TXN_TRADED:
+ /* Downgrade the lock. */
+ if ((t_ret = __lock_downgrade(dbenv,
+ &e->u.t.lock, DB_LOCK_READ, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ break;
+ default:
+ /* This had better never happen. */
+ DB_ASSERT(0);
+ }
+dofree: __os_free(dbenv, e);
+ }
+
+ return (ret);
+}
diff --git a/storage/bdb/xa/xa.c b/storage/bdb/xa/xa.c
new file mode 100644
index 00000000000..6667d14c2bf
--- /dev/null
+++ b/storage/bdb/xa/xa.c
@@ -0,0 +1,539 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: xa.c,v 11.23 2002/08/29 14:22:25 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+static int __db_xa_close __P((char *, int, long));
+static int __db_xa_commit __P((XID *, int, long));
+static int __db_xa_complete __P((int *, int *, int, long));
+static int __db_xa_end __P((XID *, int, long));
+static int __db_xa_forget __P((XID *, int, long));
+static int __db_xa_open __P((char *, int, long));
+static int __db_xa_prepare __P((XID *, int, long));
+static int __db_xa_recover __P((XID *, long, int, long));
+static int __db_xa_rollback __P((XID *, int, long));
+static int __db_xa_start __P((XID *, int, long));
+static void __xa_txn_end __P((DB_TXN *));
+
+/*
+ * Possible flag values:
+ * Dynamic registration 0 => no dynamic registration
+ * TMREGISTER => dynamic registration
+ * Asynchronous operation 0 => no support for asynchrony
+ * TMUSEASYNC => async support
+ * Migration support 0 => migration of transactions across
+ * threads is possible
+ * TMNOMIGRATE => no migration across threads
+ */
+/* XA switch structure handed to the transaction manager. */
+const struct xa_switch_t db_xa_switch = {
+ "Berkeley DB", /* name[RMNAMESZ] */
+ TMNOMIGRATE, /* flags */
+ 0, /* version */
+ __db_xa_open, /* xa_open_entry */
+ __db_xa_close, /* xa_close_entry */
+ __db_xa_start, /* xa_start_entry */
+ __db_xa_end, /* xa_end_entry */
+ __db_xa_rollback, /* xa_rollback_entry */
+ __db_xa_prepare, /* xa_prepare_entry */
+ __db_xa_commit, /* xa_commit_entry */
+ __db_xa_recover, /* xa_recover_entry */
+ __db_xa_forget, /* xa_forget_entry */
+ __db_xa_complete /* xa_complete_entry */
+};
+
+/*
+ * __db_xa_open --
+ * The open call in the XA protocol. The rmid field is an id number
+ * that the TM assigned us and will pass us on every xa call. We need to
+ * map that rmid number into a dbenv structure that we create during
+ * initialization. Since this id number is thread specific, we do not
+ * need to store it in shared memory. The file xa_map.c implements all
+ * such xa->db mappings.
+ * The xa_info field is instance specific information. We require
+ * that the value of DB_HOME be passed in xa_info. Since xa_info is the
+ * only thing that we get to pass to db_env_create, any config information
+ * will have to be done via a config file instead of via the db_env_create
+ * call.
+ */
+static int
+__db_xa_open(xa_info, rmid, flags)
+ char *xa_info;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ /* Verify if we already have this environment open. */
+ if (__db_rmid_to_env(rmid, &env) == 0)
+ return (XA_OK);
+
+ /*
+ * Open a new environment. db_env_create allocates the handle;
+ * the old __os_calloc here passed the still-uninitialized env
+ * pointer as its dbenv argument (undefined behavior) and leaked
+ * the allocation when db_env_create overwrote env.
+ */
+#define XA_FLAGS \
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN
+ if (db_env_create(&env, 0) != 0)
+ return (XAER_RMERR);
+ if (env->open(env, xa_info, XA_FLAGS, 0) != 0)
+ goto err;
+
+ /* Create the mapping. */
+ if (__db_map_rmid(rmid, env) != 0)
+ goto err;
+
+ /* Allocate space for the current transaction. */
+ if (__os_calloc(env, 1, sizeof(DB_TXN), &env->xa_txn) != 0)
+ goto err;
+ env->xa_txn->txnid = TXN_INVALID;
+
+ return (XA_OK);
+
+ /* Any failure after creation closes (and frees) the environment. */
+err: (void)env->close(env, 0);
+
+ return (XAER_RMERR);
+}
+
+/*
+ * __db_xa_close --
+ * The close call of the XA protocol. The only trickiness here
+ * is that if there are any active transactions, we must fail. It is
+ * *not* an error to call close on an environment that has already been
+ * closed (I am interpreting that to mean it's OK to call close on an
+ * environment that has never been opened).
+ */
+static int
+__db_xa_close(xa_info, rmid, flags)
+ char *xa_info;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ int ret, t_ret;
+
+ COMPQUIET(xa_info, NULL);
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ /* If the environment is closed, then we're done. */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XA_OK);
+
+ /* Check if there are any pending transactions. */
+ if (env->xa_txn != NULL && env->xa_txn->txnid != TXN_INVALID)
+ return (XAER_PROTO);
+
+ /* Destroy the mapping. */
+ ret = __db_unmap_rmid(rmid);
+
+ /* Discard space held for the current transaction. */
+ if (env->xa_txn != NULL)
+ __os_free(env, env->xa_txn);
+
+ /* Close the environment; keep the first error encountered. */
+ if ((t_ret = env->close(env, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret == 0 ? XA_OK : XAER_RMERR);
+}
+
+/*
+ * __db_xa_start --
+ * Begin a transaction for the current resource manager.
+ */
+static int
+__db_xa_start(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+ int is_known;
+
+#define OK_FLAGS (TMJOIN | TMRESUME | TMNOWAIT | TMASYNC | TMNOFLAGS)
+ if (LF_ISSET(~OK_FLAGS))
+ return (XAER_INVAL);
+
+ /* JOIN and RESUME are mutually exclusive. */
+ if (LF_ISSET(TMJOIN) && LF_ISSET(TMRESUME))
+ return (XAER_INVAL);
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ /* Does this XID already map to a transaction in the region? */
+ is_known = __db_xid_to_txn(env, xid, &off) == 0;
+
+ if (is_known && !LF_ISSET(TMRESUME) && !LF_ISSET(TMJOIN))
+ return (XAER_DUPID);
+
+ if (!is_known && LF_ISSET(TMRESUME | TMJOIN))
+ return (XAER_NOTA);
+
+ /*
+ * This can't block, so we can ignore TMNOWAIT.
+ *
+ * Other error conditions: RMERR, RMFAIL, OUTSIDE, PROTO, RB*
+ */
+ if (is_known) {
+ td = (TXN_DETAIL *)
+ R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_SUSPENDED &&
+ !LF_ISSET(TMRESUME | TMJOIN))
+ return (XAER_PROTO);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+ if (td->xa_status == TXN_XA_ABORTED)
+ return (XA_RBOTHER);
+
+ /* Now, fill in the global transaction structure. */
+ __txn_continue(env, env->xa_txn, td, off);
+ td->xa_status = TXN_XA_STARTED;
+ } else {
+ /* Unknown XID: begin a new XA transaction and map it. */
+ if (__txn_xa_begin(env, env->xa_txn) != 0)
+ return (XAER_RMERR);
+ (void)__db_map_xid(env, xid, env->xa_txn->off);
+ td = (TXN_DETAIL *)
+ R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo,
+ env->xa_txn->off);
+ td->xa_status = TXN_XA_STARTED;
+ }
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_end --
+ * Disassociate the current transaction from the current process.
+ */
+static int
+__db_xa_end(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ DB_TXN *txn;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (flags != TMNOFLAGS && !LF_ISSET(TMSUSPEND | TMSUCCESS | TMFAIL))
+ return (XAER_INVAL);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ /* The XID must match the transaction currently associated. */
+ txn = env->xa_txn;
+ if (off != txn->off)
+ return (XAER_PROTO);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->status == TXN_ABORTED)
+ return (XA_RBOTHER);
+
+ if (td->xa_status != TXN_XA_STARTED)
+ return (XAER_PROTO);
+
+ /* Update the shared memory last_lsn field */
+ td->last_lsn = txn->last_lsn;
+
+ /*
+ * If we ever support XA migration, we cannot keep SUSPEND/END
+ * status in the shared region; it would have to be process local.
+ */
+ if (LF_ISSET(TMSUSPEND))
+ td->xa_status = TXN_XA_SUSPENDED;
+ else
+ td->xa_status = TXN_XA_ENDED;
+
+ /* Detach the process-local handle from the transaction. */
+ txn->txnid = TXN_INVALID;
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_prepare --
+ * Sync the log to disk so we can guarantee recoverability.
+ */
+static int
+__db_xa_prepare(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ /*
+ * We need to know if we've ever called prepare on this.
+ * As part of the prepare, we set the xa_status field to
+ * reflect that fact that prepare has been called, and if
+ * it's ever called again, it's an error.
+ */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ /* Prepare is only legal after xa_end (ended or suspended). */
+ if (td->xa_status != TXN_XA_ENDED && td->xa_status != TXN_XA_SUSPENDED)
+ return (XAER_PROTO);
+
+ /* Now, fill in the global transaction structure. */
+ __txn_continue(env, env->xa_txn, td, off);
+
+ if (env->xa_txn->prepare(env->xa_txn, (u_int8_t *)xid->data) != 0)
+ return (XAER_RMERR);
+
+ td->xa_status = TXN_XA_PREPARED;
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ __xa_txn_end(env->xa_txn);
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_commit --
+ * Commit the transaction
+ */
+static int
+__db_xa_commit(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+#undef OK_FLAGS
+#define OK_FLAGS (TMNOFLAGS | TMNOWAIT | TMONEPHASE)
+ if (LF_ISSET(~OK_FLAGS))
+ return (XAER_INVAL);
+
+ /*
+ * We need to know if we've ever called prepare on this.
+ * We can verify this by examining the xa_status field.
+ */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->xa_status == TXN_XA_ABORTED)
+ return (XA_RBOTHER);
+
+ /* One-phase commit requires an ended/suspended transaction. */
+ if (LF_ISSET(TMONEPHASE) &&
+ td->xa_status != TXN_XA_ENDED && td->xa_status != TXN_XA_SUSPENDED)
+ return (XAER_PROTO);
+
+ /* Two-phase commit requires a prior successful prepare. */
+ if (!LF_ISSET(TMONEPHASE) && td->xa_status != TXN_XA_PREPARED)
+ return (XAER_PROTO);
+
+ /* Now, fill in the global transaction structure. */
+ __txn_continue(env, env->xa_txn, td, off);
+
+ if (env->xa_txn->commit(env->xa_txn, 0) != 0)
+ return (XAER_RMERR);
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ __xa_txn_end(env->xa_txn);
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_recover --
+ * Returns a list of prepared and heuristically completed transactions.
+ *
+ * The return value is the number of xids placed into the xid array (less
+ * than or equal to the count parameter). The flags are going to indicate
+ * whether we are starting a scan or continuing one.
+ */
+static int
+__db_xa_recover(xids, count, rmid, flags)
+ XID *xids;
+ long count, flags;
+ int rmid;
+{
+ DB_ENV *env;
+ u_int32_t newflags;
+ long rval;
+
+ /* If the environment is closed, then we're done. */
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ /* Map the XA scan flags onto DB cursor-style get flags. */
+ if (LF_ISSET(TMSTARTRSCAN))
+ newflags = DB_FIRST;
+ else if (LF_ISSET(TMENDRSCAN))
+ newflags = DB_LAST;
+ else
+ newflags = DB_NEXT;
+
+ /* rval receives the number of XIDs filled in. */
+ rval = 0;
+ if (__txn_get_prepared(env, xids, NULL, count, &rval, newflags) != 0)
+ return (XAER_RMERR);
+ else
+ return (rval);
+}
+
+/*
+ * __db_xa_rollback
+ * Abort an XA transaction.
+ */
+static int
+__db_xa_rollback(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ TXN_DETAIL *td;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XAER_NOTA);
+
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ if (td->xa_status == TXN_XA_DEADLOCKED)
+ return (XA_RBDEADLOCK);
+
+ if (td->xa_status == TXN_XA_ABORTED)
+ return (XA_RBOTHER);
+
+ /* Rollback is legal once the transaction is ended or prepared. */
+ if (td->xa_status != TXN_XA_ENDED && td->xa_status != TXN_XA_SUSPENDED
+ && td->xa_status != TXN_XA_PREPARED)
+ return (XAER_PROTO);
+
+ /* Now, fill in the global transaction structure. */
+ __txn_continue(env, env->xa_txn, td, off);
+ if (env->xa_txn->abort(env->xa_txn) != 0)
+ return (XAER_RMERR);
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ __xa_txn_end(env->xa_txn);
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_forget --
+ * Forget about an XID for a transaction that was heuristically
+ * completed. Since we do not heuristically complete anything, I
+ * don't think we have to do anything here, but we should make sure
+ * that we reclaim the slots in the txnid table.
+ */
+static int
+__db_xa_forget(xid, rmid, flags)
+ XID *xid;
+ int rmid;
+ long flags;
+{
+ DB_ENV *env;
+ size_t off;
+
+ if (LF_ISSET(TMASYNC))
+ return (XAER_ASYNC);
+ if (flags != TMNOFLAGS)
+ return (XAER_INVAL);
+
+ if (__db_rmid_to_env(rmid, &env) != 0)
+ return (XAER_PROTO);
+
+ /*
+ * If mapping is gone, then we're done.
+ */
+ if (__db_xid_to_txn(env, xid, &off) != 0)
+ return (XA_OK);
+
+ /* Reclaim the XID slot in the mapping table. */
+ __db_unmap_xid(env, xid, off);
+
+ /* No fatal value that would require an XAER_RMFAIL. */
+ return (XA_OK);
+}
+
+/*
+ * __db_xa_complete --
+ * Used to wait for asynchronous operations to complete. Since we're
+ * not doing asynch, this is an invalid operation.
+ */
+static int
+__db_xa_complete(handle, retval, rmid, flags)
+ int *handle, *retval, rmid;
+ long flags;
+{
+ /* All parameters are unused; silence compiler warnings. */
+ COMPQUIET(handle, NULL);
+ COMPQUIET(retval, NULL);
+ COMPQUIET(rmid, 0);
+ COMPQUIET(flags, 0);
+
+ /* Async operations are unsupported, so complete is always invalid. */
+ return (XAER_INVAL);
+}
+
+/*
+ * __xa_txn_end --
+ * Invalidate a transaction structure that was generated by __txn_continue.
+ */
+static void
+__xa_txn_end(txn)
+ DB_TXN *txn;
+{
+ /* Marking the ID invalid detaches the process-local handle. */
+ if (txn != NULL)
+ txn->txnid = TXN_INVALID;
+}
diff --git a/storage/bdb/xa/xa_db.c b/storage/bdb/xa/xa_db.c
new file mode 100644
index 00000000000..b84bb1c9fa9
--- /dev/null
+++ b/storage/bdb/xa/xa_db.c
@@ -0,0 +1,182 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: xa_db.c,v 11.21 2002/08/29 14:22:25 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/xa.h"
+#include "dbinc/txn.h"
+
+static int __xa_close __P((DB *, u_int32_t));
+static int __xa_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+static int __xa_del __P((DB *, DB_TXN *, DBT *, u_int32_t));
+static int __xa_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+static int __xa_open __P((DB *, DB_TXN *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+static int __xa_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+
+/*
+ * Saved copies of a DB handle's real methods.  The __xa_* wrappers
+ * installed in their place forward to these after substituting the
+ * environment's XA transaction where the caller passed none.
+ */
+typedef struct __xa_methods {
+ int (*close) __P((DB *, u_int32_t));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*open) __P((DB *, DB_TXN *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+} XA_METHODS;
+
+/*
+ * __db_xa_create --
+ * DB XA constructor.
+ *
+ * PUBLIC: int __db_xa_create __P((DB *));
+ */
+int
+__db_xa_create(dbp)
+ DB *dbp;
+{
+ XA_METHODS *xam;
+ int ret;
+
+ /*
+ * Interpose XA routines in front of any method that takes a TXN
+ * ID as an argument.
+ */
+ if ((ret = __os_calloc(dbp->dbenv, 1, sizeof(XA_METHODS), &xam)) != 0)
+ return (ret);
+
+ /*
+ * Save the real open/close and install the XA wrappers; the
+ * remaining methods are interposed later, in __xa_open, once
+ * the database is successfully opened.
+ */
+ dbp->xa_internal = xam;
+ xam->open = dbp->open;
+ dbp->open = __xa_open;
+ xam->close = dbp->close;
+ dbp->close = __xa_close;
+
+ return (0);
+}
+
+/*
+ * __xa_open --
+ * XA open wrapper.
+ */
+
+static int
+__xa_open(dbp, txn, name, subdb, type, flags, mode)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ XA_METHODS *xam;
+ int ret;
+
+ xam = (XA_METHODS *)dbp->xa_internal;
+
+ /* Run the real open first; interpose nothing on failure. */
+ if ((ret = xam->open(dbp, txn, name, subdb, type, flags, mode)) != 0)
+ return (ret);
+
+ /* Save the real TXN-taking methods and install the XA wrappers. */
+ xam->cursor = dbp->cursor;
+ xam->del = dbp->del;
+ xam->get = dbp->get;
+ xam->put = dbp->put;
+ dbp->cursor = __xa_cursor;
+ dbp->del = __xa_del;
+ dbp->get = __xa_get;
+ dbp->put = __xa_put;
+
+ return (0);
+}
+
+/*
+ * __xa_cursor --
+ * Cursor wrapper: substitute the environment's XA transaction when
+ * the caller passed none; an invalid txnid means no XA transaction
+ * is active, so run untransacted.
+ */
+static int
+__xa_cursor(dbp, txn, dbcp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBC **dbcp;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->cursor (dbp, t, dbcp, flags));
+}
+
+/*
+ * __xa_del --
+ * Delete wrapper: forward to the saved method with the environment's
+ * XA transaction substituted when the caller passed none.
+ */
+static int
+__xa_del(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->del(dbp, t, key, flags));
+}
+
+/*
+ * __xa_close --
+ * Close wrapper: free the interposition structure, then invoke the
+ * saved real close method.
+ */
+static int
+__xa_close(dbp, flags)
+ DB *dbp;
+ u_int32_t flags;
+{
+ int (*real_close) __P((DB *, u_int32_t));
+
+ /* Copy the real method out before freeing the structure holding it. */
+ real_close = ((XA_METHODS *)dbp->xa_internal)->close;
+
+ __os_free(dbp->dbenv, dbp->xa_internal);
+ dbp->xa_internal = NULL;
+
+ return (real_close(dbp, flags));
+}
+
+/*
+ * __xa_get --
+ * Get wrapper: forward to the saved method with the environment's
+ * XA transaction substituted when the caller passed none.
+ */
+static int
+__xa_get(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->get
+ (dbp, t, key, data, flags));
+}
+
+/*
+ * __xa_put --
+ * Put wrapper: forward to the saved method with the environment's
+ * XA transaction substituted when the caller passed none.
+ */
+static int
+__xa_put(dbp, txn, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DB_TXN *t;
+
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
+ if (t->txnid == TXN_INVALID)
+ t = NULL;
+
+ return (((XA_METHODS *)dbp->xa_internal)->put
+ (dbp, t, key, data, flags));
+}
diff --git a/storage/bdb/xa/xa_map.c b/storage/bdb/xa/xa_map.c
new file mode 100644
index 00000000000..42fa4b20ed2
--- /dev/null
+++ b/storage/bdb/xa/xa_map.c
@@ -0,0 +1,167 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: xa_map.c,v 11.19 2002/09/03 14:58:27 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+/*
+ * This file contains all the mapping information that we need to support
+ * the DB/XA interface.
+ */
+
+/*
+ * __db_rmid_to_env
+ * Return the environment associated with a given XA rmid.
+ *
+ * PUBLIC: int __db_rmid_to_env __P((int rmid, DB_ENV **envp));
+ */
+int
+__db_rmid_to_env(rmid, envp)
+ int rmid;
+ DB_ENV **envp;
+{
+ DB_ENV *env;
+
+ /* Fast path: the rmid is usually already at the head of the queue. */
+ env = TAILQ_FIRST(&DB_GLOBAL(db_envq));
+ if (env != NULL && env->xa_rmid == rmid) {
+ *envp = env;
+ return (0);
+ }
+
+ /*
+ * When we map an rmid, move that environment to be the first one in
+ * the list of environments, so we acquire the correct environment
+ * in DB->open.
+ */
+ for (; env != NULL; env = TAILQ_NEXT(env, links))
+ if (env->xa_rmid == rmid) {
+ TAILQ_REMOVE(&DB_GLOBAL(db_envq), env, links);
+ TAILQ_INSERT_HEAD(&DB_GLOBAL(db_envq), env, links);
+ *envp = env;
+ return (0);
+ }
+
+ /* Non-zero means no environment is mapped to this rmid. */
+ return (1);
+}
+
+/*
+ * __db_xid_to_txn
+ * Return the txn that corresponds to this XID.
+ *
+ * PUBLIC: int __db_xid_to_txn __P((DB_ENV *, XID *, size_t *));
+ */
+int
+__db_xid_to_txn(dbenv, xid, offp)
+ DB_ENV *dbenv;
+ XID *xid;
+ size_t *offp;
+{
+ struct __txn_detail *td;
+
+ /* Look the global transaction id up in the shared txn region;
+ * on success *offp is the region offset of its TXN_DETAIL. */
+ return (__txn_map_gid(dbenv, (u_int8_t *)xid->data, &td, offp));
+}
+
+/*
+ * __db_map_rmid
+ * Create a mapping between the specified rmid and environment.
+ *
+ * PUBLIC: int __db_map_rmid __P((int, DB_ENV *));
+ */
+int
+__db_map_rmid(rmid, env)
+ int rmid;
+ DB_ENV *env;
+{
+ /* Record the rmid on the environment and queue it globally. */
+ env->xa_rmid = rmid;
+ TAILQ_INSERT_TAIL(&DB_GLOBAL(db_envq), env, links);
+ return (0);
+}
+
+/*
+ * __db_unmap_rmid
+ * Destroy the mapping for the given rmid.
+ *
+ * PUBLIC: int __db_unmap_rmid __P((int));
+ */
+int
+__db_unmap_rmid(rmid)
+ int rmid;
+{
+ DB_ENV *e;
+
+ /*
+ * Walk the global environment queue looking for the environment
+ * bound to this rmid.  Test for end-of-list BEFORE dereferencing:
+ * the previous condition read e->xa_rmid through a NULL pointer
+ * whenever the rmid was not mapped (or the queue was empty), so
+ * the EINVAL check below was unreachable in exactly the case it
+ * was meant to guard.
+ */
+ for (e = TAILQ_FIRST(&DB_GLOBAL(db_envq));
+ e != NULL && e->xa_rmid != rmid;
+ e = TAILQ_NEXT(e, links));
+
+ /* Not found: nothing to unmap. */
+ if (e == NULL)
+ return (EINVAL);
+
+ TAILQ_REMOVE(&DB_GLOBAL(db_envq), e, links);
+ return (0);
+}
+
+/*
+ * __db_map_xid
+ * Create a mapping between this XID and the transaction at
+ * "off" in the shared region.
+ *
+ * PUBLIC: int __db_map_xid __P((DB_ENV *, XID *, size_t));
+ */
+int
+__db_map_xid(env, xid, off)
+ DB_ENV *env;
+ XID *xid;
+ size_t off;
+{
+ REGINFO *infop;
+ TXN_DETAIL *td;
+
+ /* Locate the transaction detail at "off" in the txn region. */
+ infop = &((DB_TXNMGR *)env->tx_handle)->reginfo;
+ td = (TXN_DETAIL *)R_ADDR(infop, off);
+
+ /* Copy the XID's gid bytes, lengths and format under the region lock. */
+ R_LOCK(env, infop);
+ memcpy(td->xid, xid->data, XIDDATASIZE);
+ td->bqual = (u_int32_t)xid->bqual_length;
+ td->gtrid = (u_int32_t)xid->gtrid_length;
+ td->format = (int32_t)xid->formatID;
+ R_UNLOCK(env, infop);
+
+ return (0);
+}
+
+/*
+ * __db_unmap_xid
+ * Destroy the mapping for the specified XID.
+ *
+ * PUBLIC: void __db_unmap_xid __P((DB_ENV *, XID *, size_t));
+ */
+
+void
+__db_unmap_xid(env, xid, off)
+ DB_ENV *env;
+ XID *xid;
+ size_t off;
+{
+ TXN_DETAIL *td;
+
+ /* The XID itself is unused; the mapping is cleared by offset. */
+ COMPQUIET(xid, NULL);
+
+ /*
+ * Zero the stored gid bytes in the transaction detail.
+ * NOTE(review): unlike __db_map_xid, no R_LOCK is taken here --
+ * presumably callers already hold the region lock; confirm.
+ */
+ td = (TXN_DETAIL *)R_ADDR(&((DB_TXNMGR *)env->tx_handle)->reginfo, off);
+ memset(td->xid, 0, sizeof(td->xid));
+}